Diffstat (limited to 'src/VBox/VMM/VMMR3')
-rw-r--r--  src/VBox/VMM/VMMR3/APIC.cpp  1599
-rw-r--r--  src/VBox/VMM/VMMR3/CFGM.cpp  3428
-rw-r--r--  src/VBox/VMM/VMMR3/CPUM.cpp  4632
-rw-r--r--  src/VBox/VMM/VMMR3/CPUMDbg.cpp  1288
-rw-r--r--  src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp  5902
-rw-r--r--  src/VBox/VMM/VMMR3/CPUMR3Db.cpp  1149
-rw-r--r--  src/VBox/VMM/VMMR3/DBGF.cpp  2352
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFAddr.cpp  498
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp  1367
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp  675
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFCpu.cpp  208
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFDisas.cpp  798
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFInfo.cpp  1474
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFLog.cpp  197
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFMem.cpp  652
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFModule.cpp  300
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFOS.cpp  750
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3Bp.cpp  2776
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3BugCheck.cpp  930
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3Flow.cpp  2322
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3FlowTrace.cpp  1975
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3ModInMem.cpp  1111
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3PlugIn.cpp  627
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3SampleReport.cpp  782
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3Trace.cpp  458
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3Tracer.cpp  1184
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFR3Type.cpp  1287
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFReg.cpp  2775
-rw-r--r--  src/VBox/VMM/VMMR3/DBGFStack.cpp  1163
-rw-r--r--  src/VBox/VMM/VMMR3/EM.cpp  2759
-rw-r--r--  src/VBox/VMM/VMMR3/EMHM.cpp  487
-rw-r--r--  src/VBox/VMM/VMMR3/EMR3Dbg.cpp  350
-rw-r--r--  src/VBox/VMM/VMMR3/EMR3Nem.cpp  487
-rw-r--r--  src/VBox/VMM/VMMR3/GCM.cpp  258
-rw-r--r--  src/VBox/VMM/VMMR3/GIM.cpp  705
-rw-r--r--  src/VBox/VMM/VMMR3/GIMHv.cpp  2277
-rw-r--r--  src/VBox/VMM/VMMR3/GIMKvm.cpp  630
-rw-r--r--  src/VBox/VMM/VMMR3/GIMMinimal.cpp  140
-rw-r--r--  src/VBox/VMM/VMMR3/GMM.cpp  453
-rw-r--r--  src/VBox/VMM/VMMR3/GVMMR3.cpp  227
-rw-r--r--  src/VBox/VMM/VMMR3/HM.cpp  3517
-rw-r--r--  src/VBox/VMM/VMMR3/IEMR3.cpp  545
-rw-r--r--  src/VBox/VMM/VMMR3/IOM.cpp  477
-rw-r--r--  src/VBox/VMM/VMMR3/IOMR3IoPort.cpp  781
-rw-r--r--  src/VBox/VMM/VMMR3/IOMR3Mmio.cpp  701
-rw-r--r--  src/VBox/VMM/VMMR3/MM.cpp  728
-rw-r--r--  src/VBox/VMM/VMMR3/MMHeap.cpp  761
-rw-r--r--  src/VBox/VMM/VMMR3/Makefile.kup  0
-rw-r--r--  src/VBox/VMM/VMMR3/NEMR3.cpp  606
-rw-r--r--  src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp  4615
-rw-r--r--  src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp  2838
-rw-r--r--  src/VBox/VMM/VMMR3/NEMR3Native-win.cpp  3019
-rw-r--r--  src/VBox/VMM/VMMR3/PDM.cpp  3096
-rw-r--r--  src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp  1807
-rw-r--r--  src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp  1305
-rw-r--r--  src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp  280
-rw-r--r--  src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp  1744
-rw-r--r--  src/VBox/VMM/VMMR3/PDMBlkCache.cpp  2802
-rw-r--r--  src/VBox/VMM/VMMR3/PDMCritSect.cpp  1342
-rw-r--r--  src/VBox/VMM/VMMR3/PDMDevHlp.cpp  6397
-rw-r--r--  src/VBox/VMM/VMMR3/PDMDevHlpTracing.cpp  587
-rw-r--r--  src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp  509
-rw-r--r--  src/VBox/VMM/VMMR3/PDMDevice.cpp  1291
-rw-r--r--  src/VBox/VMM/VMMR3/PDMDriver.cpp  2347
-rw-r--r--  src/VBox/VMM/VMMR3/PDMLdr.cpp  1782
-rw-r--r--  src/VBox/VMM/VMMR3/PDMNetShaper.cpp  549
-rw-r--r--  src/VBox/VMM/VMMR3/PDMQueue.cpp  892
-rw-r--r--  src/VBox/VMM/VMMR3/PDMR3Task.cpp  638
-rw-r--r--  src/VBox/VMM/VMMR3/PDMThread.cpp  1103
-rw-r--r--  src/VBox/VMM/VMMR3/PDMUsb.cpp  2421
-rw-r--r--  src/VBox/VMM/VMMR3/PGM.cpp  2691
-rw-r--r--  src/VBox/VMM/VMMR3/PGMDbg.cpp  3494
-rw-r--r--  src/VBox/VMM/VMMR3/PGMHandler.cpp  345
-rw-r--r--  src/VBox/VMM/VMMR3/PGMPhys.cpp  6000
-rw-r--r--  src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h  71
-rw-r--r--  src/VBox/VMM/VMMR3/PGMPool.cpp  1345
-rw-r--r--  src/VBox/VMM/VMMR3/PGMR3DbgA.asm  485
-rw-r--r--  src/VBox/VMM/VMMR3/PGMSavedState.cpp  3259
-rw-r--r--  src/VBox/VMM/VMMR3/PGMSharedPage.cpp  452
-rw-r--r--  src/VBox/VMM/VMMR3/SELM.cpp  685
-rw-r--r--  src/VBox/VMM/VMMR3/SSM.cpp  9944
-rw-r--r--  src/VBox/VMM/VMMR3/STAM.cpp  3196
-rw-r--r--  src/VBox/VMM/VMMR3/TM.cpp  4314
-rw-r--r--  src/VBox/VMM/VMMR3/TRPM.cpp  471
-rw-r--r--  src/VBox/VMM/VMMR3/VM.cpp  4458
-rw-r--r--  src/VBox/VMM/VMMR3/VMEmt.cpp  1462
-rw-r--r--  src/VBox/VMM/VMMR3/VMM.cpp  2582
-rw-r--r--  src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp  701
-rw-r--r--  src/VBox/VMM/VMMR3/VMMR3.def  520
-rw-r--r--  src/VBox/VMM/VMMR3/VMMR3VTable.cpp  82
-rw-r--r--  src/VBox/VMM/VMMR3/VMMTests.cpp  197
-rw-r--r--  src/VBox/VMM/VMMR3/VMReq.cpp  1343
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h  234
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h  242
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h  393
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h  282
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/AMD_Ryzen_7_1800X_Eight_Core.h  5234
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Hygon_C86_7185_32_core.h  5234
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_80186.h  85
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_80286.h  85
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_80386.h  85
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_80486.h  83
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_8086.h  85
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Atom_330_1_60GHz.h  220
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core2_T7600_2_33GHz.h  205
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core2_X6800_2_93GHz.h  272
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_Duo_T2600_2_16GHz.h  235
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h  349
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h  342
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h  396
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h  379
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_5600U.h  378
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_6700K.h  520
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h  287
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h  226
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Pentium_N3530_2_16GHz.h  275
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h  258
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Makefile.kup  0
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h  280
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h  414
-rw-r--r--  src/VBox/VMM/VMMR3/cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h  427
121 files changed, 168514 insertions, 0 deletions
diff --git a/src/VBox/VMM/VMMR3/APIC.cpp b/src/VBox/VMM/VMMR3/APIC.cpp
new file mode 100644
index 00000000..6753ac60
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/APIC.cpp
@@ -0,0 +1,1599 @@
+/* $Id: APIC.cpp $ */
+/** @file
+ * APIC - Advanced Programmable Interrupt Controller.
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DEV_APIC
+#include <VBox/log.h>
+#include "APICInternal.h"
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pdmdev.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/vm.h>
+
+
+#ifndef VBOX_DEVICE_STRUCT_TESTCASE
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The current APIC saved state version. */
+#define APIC_SAVED_STATE_VERSION 5
+/** VirtualBox 5.1 beta2 - pre fActiveLintX. */
+#define APIC_SAVED_STATE_VERSION_VBOX_51_BETA2 4
+/** The saved state version used by VirtualBox 5.0 and
+ * earlier. */
+#define APIC_SAVED_STATE_VERSION_VBOX_50 3
+/** The saved state version used by VirtualBox v3 and earlier.
+ * This does not include the config. */
+#define APIC_SAVED_STATE_VERSION_VBOX_30 2
+/** Some ancient version... */
+#define APIC_SAVED_STATE_VERSION_ANCIENT 1
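+/** @note Version 5 adds the fActiveLint0/fActiveLint1 line states (see
+ *        apicR3SaveExec); version 4 uses the same layout without them;
+ *        versions 3, 2 and 1 use the legacy per-VCPU layout handled by
+ *        apicR3LoadLegacyVCpuData(). */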
+
+#ifdef VBOX_WITH_STATISTICS
+# define X2APIC_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Ia32X2ApicN, kCpumMsrWrFn_Ia32X2ApicN, 0, 0, 0, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
+# define X2APIC_MSRRANGE_INVALID(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_WriteOnly, kCpumMsrWrFn_ReadOnly, 0, 0, 0, 0, UINT64_MAX /*fWrGpMask*/, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
+#else
+# define X2APIC_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Ia32X2ApicN, kCpumMsrWrFn_Ia32X2ApicN, 0, 0, 0, 0, 0, a_szName }
+# define X2APIC_MSRRANGE_INVALID(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_WriteOnly, kCpumMsrWrFn_ReadOnly, 0, 0, 0, 0, UINT64_MAX /*fWrGpMask*/, a_szName }
+#endif
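+
+/* The first macro maps the range to the Ia32X2ApicN read/write handlers; the
+   second maps it to the WriteOnly/ReadOnly stubs with a full write-GP mask, so
+   that any access raises \#GP while x2APIC is unavailable (see the
+   registration in apicR3Construct). */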
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/**
+ * MSR range supported by the x2APIC.
+ * See Intel spec. 10.12.2 "x2APIC Register Availability".
+ */
+static CPUMMSRRANGE const g_MsrRange_x2Apic = X2APIC_MSRRANGE(MSR_IA32_X2APIC_START, MSR_IA32_X2APIC_END, "x2APIC range");
+static CPUMMSRRANGE const g_MsrRange_x2Apic_Invalid = X2APIC_MSRRANGE_INVALID(MSR_IA32_X2APIC_START, MSR_IA32_X2APIC_END, "x2APIC range invalid");
+#undef X2APIC_MSRRANGE
+#undef X2APIC_MSRRANGE_INVALID
+
+/** Saved state field descriptors for XAPICPAGE. */
+static const SSMFIELD g_aXApicPageFields[] =
+{
+ SSMFIELD_ENTRY( XAPICPAGE, id.u8ApicId),
+ SSMFIELD_ENTRY( XAPICPAGE, version.all.u32Version),
+ SSMFIELD_ENTRY( XAPICPAGE, tpr.u8Tpr),
+ SSMFIELD_ENTRY( XAPICPAGE, apr.u8Apr),
+ SSMFIELD_ENTRY( XAPICPAGE, ppr.u8Ppr),
+ SSMFIELD_ENTRY( XAPICPAGE, ldr.all.u32Ldr),
+ SSMFIELD_ENTRY( XAPICPAGE, dfr.all.u32Dfr),
+ SSMFIELD_ENTRY( XAPICPAGE, svr.all.u32Svr),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[0].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[1].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[2].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[3].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[4].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[5].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[6].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, isr.u[7].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[0].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[1].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[2].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[3].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[4].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[5].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[6].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, tmr.u[7].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[0].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[1].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[2].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[3].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[4].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[5].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[6].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, irr.u[7].u32Reg),
+ SSMFIELD_ENTRY( XAPICPAGE, esr.all.u32Errors),
+ SSMFIELD_ENTRY( XAPICPAGE, icr_lo.all.u32IcrLo),
+ SSMFIELD_ENTRY( XAPICPAGE, icr_hi.all.u32IcrHi),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_timer.all.u32LvtTimer),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_thermal.all.u32LvtThermal),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_perf.all.u32LvtPerf),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_lint0.all.u32LvtLint0),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_lint1.all.u32LvtLint1),
+ SSMFIELD_ENTRY( XAPICPAGE, lvt_error.all.u32LvtError),
+ SSMFIELD_ENTRY( XAPICPAGE, timer_icr.u32InitialCount),
+ SSMFIELD_ENTRY( XAPICPAGE, timer_ccr.u32CurrentCount),
+ SSMFIELD_ENTRY( XAPICPAGE, timer_dcr.all.u32DivideValue),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X2APICPAGE. */
+static const SSMFIELD g_aX2ApicPageFields[] =
+{
+ SSMFIELD_ENTRY(X2APICPAGE, id.u32ApicId),
+ SSMFIELD_ENTRY(X2APICPAGE, version.all.u32Version),
+ SSMFIELD_ENTRY(X2APICPAGE, tpr.u8Tpr),
+ SSMFIELD_ENTRY(X2APICPAGE, ppr.u8Ppr),
+ SSMFIELD_ENTRY(X2APICPAGE, ldr.u32LogicalApicId),
+ SSMFIELD_ENTRY(X2APICPAGE, svr.all.u32Svr),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[0].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[1].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[2].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[3].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[4].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[5].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[6].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, isr.u[7].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[0].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[1].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[2].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[3].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[4].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[5].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[6].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, tmr.u[7].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[0].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[1].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[2].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[3].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[4].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[5].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[6].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, irr.u[7].u32Reg),
+ SSMFIELD_ENTRY(X2APICPAGE, esr.all.u32Errors),
+ SSMFIELD_ENTRY(X2APICPAGE, icr_lo.all.u32IcrLo),
+ SSMFIELD_ENTRY(X2APICPAGE, icr_hi.u32IcrHi),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_timer.all.u32LvtTimer),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_thermal.all.u32LvtThermal),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_perf.all.u32LvtPerf),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_lint0.all.u32LvtLint0),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_lint1.all.u32LvtLint1),
+ SSMFIELD_ENTRY(X2APICPAGE, lvt_error.all.u32LvtError),
+ SSMFIELD_ENTRY(X2APICPAGE, timer_icr.u32InitialCount),
+ SSMFIELD_ENTRY(X2APICPAGE, timer_ccr.u32CurrentCount),
+ SSMFIELD_ENTRY(X2APICPAGE, timer_dcr.all.u32DivideValue),
+ SSMFIELD_ENTRY_TERM()
+};
+
+
+/**
+ * Sets the CPUID feature bits for the APIC mode.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmMode The APIC mode.
+ */
+static void apicR3SetCpuIdFeatureLevel(PVM pVM, PDMAPICMODE enmMode)
+{
+ switch (enmMode)
+ {
+ case PDMAPICMODE_NONE:
+ CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
+ CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
+ break;
+
+ case PDMAPICMODE_APIC:
+ CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
+ break;
+
+ case PDMAPICMODE_X2APIC:
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
+ break;
+
+ default:
+ AssertMsgFailed(("Unknown/invalid APIC mode: %d\n", (int)enmMode));
+ }
+}
+
+
+/**
+ * Receives an INIT IPI.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(void) APICR3InitIpi(PVMCPU pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ LogFlow(("APIC%u: APICR3InitIpi\n", pVCpu->idCpu));
+ apicInitIpi(pVCpu);
+}
+
+
+/**
+ * Sets whether Hyper-V compatibility mode (MSR interface) is enabled or not.
+ *
+ * This mode is a hybrid of the xAPIC and x2APIC modes, with some caveats:
+ * 1. MSRs are used, even ones that are missing (illegal) in x2APIC, like the DFR.
+ * 2. A single ICR write is used by the guest to send IPIs rather than 2 ICR writes.
+ * 3. It is unclear what the behaviour will be when invalid bits are set;
+ *    currently we follow the x2APIC behaviour of causing a \#GP.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fHyperVCompatMode Whether the compatibility mode is enabled.
+ */
+VMMR3_INT_DECL(void) APICR3HvSetCompatMode(PVM pVM, bool fHyperVCompatMode)
+{
+ Assert(pVM);
+ PAPIC pApic = VM_TO_APIC(pVM);
+ pApic->fHyperVCompatMode = fHyperVCompatMode;
+
+ if (fHyperVCompatMode)
+ LogRel(("APIC: Enabling Hyper-V x2APIC compatibility mode\n"));
+
+ int rc = CPUMR3MsrRangesInsert(pVM, &g_MsrRange_x2Apic);
+ AssertLogRelRC(rc);
+}
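+
+/*
+ * Illustration of caveat 2 above (a guest-side sketch, not part of the device
+ * code; 0x830 is the x2APIC ICR MSR per the Intel SDM, and the destination and
+ * vector names are made up):
+ *
+ *     wrmsr(0x830, ((uint64_t)idDestCpu << 32) | uVector);  // one 64-bit write
+ *
+ * A plain xAPIC guest instead performs two 32-bit MMIO writes, ICR_HI followed
+ * by ICR_LO, to send the same IPI.
+ */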
+
+
+/**
+ * Helper for dumping an APIC 256-bit sparse register.
+ *
+ * @param pApicReg The APIC 256-bit sparse register.
+ * @param pHlp The debug output helper.
+ */
+static void apicR3DbgInfo256BitReg(volatile const XAPIC256BITREG *pApicReg, PCDBGFINFOHLP pHlp)
+{
+ ssize_t const cFragments = RT_ELEMENTS(pApicReg->u);
+ unsigned const cBitsPerFragment = sizeof(pApicReg->u[0].u32Reg) * 8;
+ XAPIC256BITREG ApicReg;
+ RT_ZERO(ApicReg);
+
+ pHlp->pfnPrintf(pHlp, " ");
+ for (ssize_t i = cFragments - 1; i >= 0; i--)
+ {
+ uint32_t const uFragment = pApicReg->u[i].u32Reg;
+ ApicReg.u[i].u32Reg = uFragment;
+ pHlp->pfnPrintf(pHlp, "%08x", uFragment);
+ }
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ uint32_t cPending = 0;
+ pHlp->pfnPrintf(pHlp, " Pending:");
+ for (ssize_t i = cFragments - 1; i >= 0; i--)
+ {
+ uint32_t uFragment = ApicReg.u[i].u32Reg;
+ if (uFragment)
+ {
+ do
+ {
+ unsigned idxSetBit = ASMBitLastSetU32(uFragment);
+ --idxSetBit;
+ ASMBitClear(&uFragment, idxSetBit);
+
+ idxSetBit += (i * cBitsPerFragment);
+ pHlp->pfnPrintf(pHlp, " %#02x", idxSetBit);
+ ++cPending;
+ } while (uFragment);
+ }
+ }
+ if (!cPending)
+ pHlp->pfnPrintf(pHlp, " None");
+ pHlp->pfnPrintf(pHlp, "\n");
+}
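+
+/*
+ * Illustrative output of the helper above for a register with vectors 0x41 and
+ * 0x30 set: the hex dump shows fragment 2 as 00000002 and fragment 1 as
+ * 00010000 (all other fragments zero), followed by "Pending: 0x41 0x30".
+ */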
+
+
+/**
+ * Helper for dumping an APIC pending-interrupt bitmap.
+ *
+ * @param pApicPib The pending-interrupt bitmap.
+ * @param pHlp The debug output helper.
+ */
+static void apicR3DbgInfoPib(PCAPICPIB pApicPib, PCDBGFINFOHLP pHlp)
+{
+ /* Copy the pending-interrupt bitmap as an APIC 256-bit sparse register. */
+ XAPIC256BITREG ApicReg;
+ RT_ZERO(ApicReg);
+ ssize_t const cFragmentsDst = RT_ELEMENTS(ApicReg.u);
+ ssize_t const cFragmentsSrc = RT_ELEMENTS(pApicPib->au64VectorBitmap);
+ AssertCompile(RT_ELEMENTS(ApicReg.u) == 2 * RT_ELEMENTS(pApicPib->au64VectorBitmap));
+ for (ssize_t idxPib = cFragmentsSrc - 1, idxReg = cFragmentsDst - 1; idxPib >= 0; idxPib--, idxReg -= 2)
+ {
+ uint64_t const uFragment = pApicPib->au64VectorBitmap[idxPib];
+ uint32_t const uFragmentLo = RT_LO_U32(uFragment);
+ uint32_t const uFragmentHi = RT_HI_U32(uFragment);
+ ApicReg.u[idxReg].u32Reg = uFragmentHi;
+ ApicReg.u[idxReg - 1].u32Reg = uFragmentLo;
+ }
+
+ /* Dump it. */
+ apicR3DbgInfo256BitReg(&ApicReg, pHlp);
+}
+
+
+/**
+ * Dumps basic APIC state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) apicR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
+
+ uint64_t const uBaseMsr = pApicCpu->uApicBaseMsr;
+ APICMODE const enmMode = apicGetMode(uBaseMsr);
+ bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu);
+
+ pHlp->pfnPrintf(pHlp, "APIC%u:\n", pVCpu->idCpu);
+ pHlp->pfnPrintf(pHlp, " APIC Base MSR = %#RX64 (Addr=%#RX64%s%s%s)\n", uBaseMsr,
+ MSR_IA32_APICBASE_GET_ADDR(uBaseMsr), uBaseMsr & MSR_IA32_APICBASE_EN ? " en" : "",
+ uBaseMsr & MSR_IA32_APICBASE_BSP ? " bsp" : "", uBaseMsr & MSR_IA32_APICBASE_EXTD ? " extd" : "");
+ pHlp->pfnPrintf(pHlp, " Mode = %u (%s)\n", enmMode, apicGetModeName(enmMode));
+ if (fX2ApicMode)
+ pHlp->pfnPrintf(pHlp, " APIC ID = %u (%#x)\n", pX2ApicPage->id.u32ApicId,
+ pX2ApicPage->id.u32ApicId);
+ else
+ pHlp->pfnPrintf(pHlp, " APIC ID = %u (%#x)\n", pXApicPage->id.u8ApicId, pXApicPage->id.u8ApicId);
+ pHlp->pfnPrintf(pHlp, " Version = %#x\n", pXApicPage->version.all.u32Version);
+ pHlp->pfnPrintf(pHlp, " APIC Version = %#x\n", pXApicPage->version.u.u8Version);
+ pHlp->pfnPrintf(pHlp, " Max LVT entry index (0..N) = %u\n", pXApicPage->version.u.u8MaxLvtEntry);
+ pHlp->pfnPrintf(pHlp, " EOI Broadcast supression = %RTbool\n", pXApicPage->version.u.fEoiBroadcastSupression);
+ if (!fX2ApicMode)
+ pHlp->pfnPrintf(pHlp, " APR = %u (%#x)\n", pXApicPage->apr.u8Apr, pXApicPage->apr.u8Apr);
+ pHlp->pfnPrintf(pHlp, " TPR = %u (%#x)\n", pXApicPage->tpr.u8Tpr, pXApicPage->tpr.u8Tpr);
+ pHlp->pfnPrintf(pHlp, " Task-priority class = %#x\n", XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >> 4);
+ pHlp->pfnPrintf(pHlp, " Task-priority subclass = %#x\n", XAPIC_TPR_GET_TP_SUBCLASS(pXApicPage->tpr.u8Tpr));
+ pHlp->pfnPrintf(pHlp, " PPR = %u (%#x)\n", pXApicPage->ppr.u8Ppr, pXApicPage->ppr.u8Ppr);
+ pHlp->pfnPrintf(pHlp, " Processor-priority class = %#x\n", XAPIC_PPR_GET_PP(pXApicPage->ppr.u8Ppr) >> 4);
+ pHlp->pfnPrintf(pHlp, " Processor-priority subclass = %#x\n", XAPIC_PPR_GET_PP_SUBCLASS(pXApicPage->ppr.u8Ppr));
+ if (!fX2ApicMode)
+ pHlp->pfnPrintf(pHlp, " RRD = %u (%#x)\n", pXApicPage->rrd.u32Rrd, pXApicPage->rrd.u32Rrd);
+ pHlp->pfnPrintf(pHlp, " LDR = %#x\n", pXApicPage->ldr.all.u32Ldr);
+ pHlp->pfnPrintf(pHlp, " Logical APIC ID = %#x\n", fX2ApicMode ? pX2ApicPage->ldr.u32LogicalApicId
+ : pXApicPage->ldr.u.u8LogicalApicId);
+ if (!fX2ApicMode)
+ {
+ pHlp->pfnPrintf(pHlp, " DFR = %#x\n", pXApicPage->dfr.all.u32Dfr);
+ pHlp->pfnPrintf(pHlp, " Model = %#x (%s)\n", pXApicPage->dfr.u.u4Model,
+ apicGetDestFormatName((XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model));
+ }
+ pHlp->pfnPrintf(pHlp, " SVR = %#x\n", pXApicPage->svr.all.u32Svr);
+ pHlp->pfnPrintf(pHlp, " Vector = %u (%#x)\n", pXApicPage->svr.u.u8SpuriousVector,
+ pXApicPage->svr.u.u8SpuriousVector);
+ pHlp->pfnPrintf(pHlp, " Software Enabled = %RTbool\n", RT_BOOL(pXApicPage->svr.u.fApicSoftwareEnable));
+ pHlp->pfnPrintf(pHlp, " Supress EOI broadcast = %RTbool\n", RT_BOOL(pXApicPage->svr.u.fSupressEoiBroadcast));
+ pHlp->pfnPrintf(pHlp, " ISR\n");
+ apicR3DbgInfo256BitReg(&pXApicPage->isr, pHlp);
+ pHlp->pfnPrintf(pHlp, " TMR\n");
+ apicR3DbgInfo256BitReg(&pXApicPage->tmr, pHlp);
+ pHlp->pfnPrintf(pHlp, " IRR\n");
+ apicR3DbgInfo256BitReg(&pXApicPage->irr, pHlp);
+ pHlp->pfnPrintf(pHlp, " PIB\n");
+ apicR3DbgInfoPib((PCAPICPIB)pApicCpu->pvApicPibR3, pHlp);
+ pHlp->pfnPrintf(pHlp, " Level PIB\n");
+ apicR3DbgInfoPib(&pApicCpu->ApicPibLevel, pHlp);
+ pHlp->pfnPrintf(pHlp, " ESR Internal = %#x\n", pApicCpu->uEsrInternal);
+ pHlp->pfnPrintf(pHlp, " ESR = %#x\n", pXApicPage->esr.all.u32Errors);
+ pHlp->pfnPrintf(pHlp, " Redirectable IPI = %RTbool\n", pXApicPage->esr.u.fRedirectableIpi);
+ pHlp->pfnPrintf(pHlp, " Send Illegal Vector = %RTbool\n", pXApicPage->esr.u.fSendIllegalVector);
+ pHlp->pfnPrintf(pHlp, " Recv Illegal Vector = %RTbool\n", pXApicPage->esr.u.fRcvdIllegalVector);
+ pHlp->pfnPrintf(pHlp, " Illegal Register Address = %RTbool\n", pXApicPage->esr.u.fIllegalRegAddr);
+ pHlp->pfnPrintf(pHlp, " ICR Low = %#x\n", pXApicPage->icr_lo.all.u32IcrLo);
+ pHlp->pfnPrintf(pHlp, " Vector = %u (%#x)\n", pXApicPage->icr_lo.u.u8Vector,
+ pXApicPage->icr_lo.u.u8Vector);
+ pHlp->pfnPrintf(pHlp, " Delivery Mode = %#x (%s)\n", pXApicPage->icr_lo.u.u3DeliveryMode,
+ apicGetDeliveryModeName((XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode));
+ pHlp->pfnPrintf(pHlp, " Destination Mode = %#x (%s)\n", pXApicPage->icr_lo.u.u1DestMode,
+ apicGetDestModeName((XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode));
+ if (!fX2ApicMode)
+ pHlp->pfnPrintf(pHlp, " Delivery Status = %u\n", pXApicPage->icr_lo.u.u1DeliveryStatus);
+ pHlp->pfnPrintf(pHlp, " Level = %u\n", pXApicPage->icr_lo.u.u1Level);
+ pHlp->pfnPrintf(pHlp, " Trigger Mode = %u (%s)\n", pXApicPage->icr_lo.u.u1TriggerMode,
+ apicGetTriggerModeName((XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode));
+ pHlp->pfnPrintf(pHlp, " Destination shorthand = %#x (%s)\n", pXApicPage->icr_lo.u.u2DestShorthand,
+ apicGetDestShorthandName((XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand));
+ pHlp->pfnPrintf(pHlp, " ICR High = %#x\n", pXApicPage->icr_hi.all.u32IcrHi);
+ pHlp->pfnPrintf(pHlp, " Destination field/mask = %#x\n", fX2ApicMode ? pX2ApicPage->icr_hi.u32IcrHi
+ : pXApicPage->icr_hi.u.u8Dest);
+}
+
+
+/**
+ * Helper for dumping the LVT timer.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pHlp The debug output helper.
+ */
+static void apicR3InfoLvtTimer(PVMCPU pVCpu, PCDBGFINFOHLP pHlp)
+{
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ uint32_t const uLvtTimer = pXApicPage->lvt_timer.all.u32LvtTimer;
+ pHlp->pfnPrintf(pHlp, "LVT Timer = %#RX32\n", uLvtTimer);
+ pHlp->pfnPrintf(pHlp, " Vector = %u (%#x)\n", pXApicPage->lvt_timer.u.u8Vector, pXApicPage->lvt_timer.u.u8Vector);
+ pHlp->pfnPrintf(pHlp, " Delivery status = %u\n", pXApicPage->lvt_timer.u.u1DeliveryStatus);
+ pHlp->pfnPrintf(pHlp, " Masked = %RTbool\n", XAPIC_LVT_IS_MASKED(uLvtTimer));
+ pHlp->pfnPrintf(pHlp, " Timer Mode = %#x (%s)\n", pXApicPage->lvt_timer.u.u2TimerMode,
+ apicGetTimerModeName((XAPICTIMERMODE)pXApicPage->lvt_timer.u.u2TimerMode));
+}
+
+
+/**
+ * Dumps APIC Local Vector Table (LVT) information.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) apicR3InfoLvt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+
+ /*
+ * Delivery modes available in the LVT entries. They're different (more reserved
+ * values) from the ICR delivery modes, so we don't reuse apicGetDeliveryModeName;
+ * mostly, though, we want small, fixed-length strings to fit our formatting needs here.
+ */
+ static const char * const s_apszLvtDeliveryModes[] =
+ {
+ "Fixed ",
+ "Rsvd ",
+ "SMI ",
+ "Rsvd ",
+ "NMI ",
+ "INIT ",
+ "Rsvd ",
+ "ExtINT"
+ };
+ /* Delivery Status. */
+ static const char * const s_apszLvtDeliveryStatus[] =
+ {
+ "Idle",
+ "Pend"
+ };
+ const char *pszNotApplicable = "";
+
+ pHlp->pfnPrintf(pHlp, "VCPU[%u] APIC Local Vector Table (LVT):\n", pVCpu->idCpu);
+ pHlp->pfnPrintf(pHlp, "lvt timermode mask trigger rirr polarity dlvr_st dlvr_mode vector\n");
+ /* Timer. */
+ {
+ /* Timer modes. */
+ static const char * const s_apszLvtTimerModes[] =
+ {
+ "One-shot ",
+ "Periodic ",
+ "TSC-dline"
+ };
+ const uint32_t uLvtTimer = pXApicPage->lvt_timer.all.u32LvtTimer;
+ const XAPICTIMERMODE enmTimerMode = XAPIC_LVT_GET_TIMER_MODE(uLvtTimer);
+ const char *pszTimerMode = s_apszLvtTimerModes[enmTimerMode];
+ const uint8_t uMask = XAPIC_LVT_IS_MASKED(uLvtTimer);
+ const uint8_t uDeliveryStatus = uLvtTimer & XAPIC_LVT_DELIVERY_STATUS;
+ const char *pszDeliveryStatus = s_apszLvtDeliveryStatus[uDeliveryStatus];
+ const uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtTimer);
+
+ pHlp->pfnPrintf(pHlp, "%-7s %9s %u %5s %1s %8s %4s %6s %3u (%#x)\n",
+ "Timer",
+ pszTimerMode,
+ uMask,
+ pszNotApplicable, /* TriggerMode */
+ pszNotApplicable, /* Remote IRR */
+ pszNotApplicable, /* Polarity */
+ pszDeliveryStatus,
+ pszNotApplicable, /* Delivery Mode */
+ uVector,
+ uVector);
+ }
+
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+ /* Thermal sensor. */
+ {
+ uint32_t const uLvtThermal = pXApicPage->lvt_thermal.all.u32LvtThermal;
+ const uint8_t uMask = XAPIC_LVT_IS_MASKED(uLvtThermal);
+ const uint8_t uDeliveryStatus = uLvtThermal & XAPIC_LVT_DELIVERY_STATUS;
+ const char *pszDeliveryStatus = s_apszLvtDeliveryStatus[uDeliveryStatus];
+ const XAPICDELIVERYMODE enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvtThermal);
+ const char *pszDeliveryMode = s_apszLvtDeliveryModes[enmDeliveryMode];
+ const uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtThermal);
+
+ pHlp->pfnPrintf(pHlp, "%-7s %9s %u %5s %1s %8s %4s %6s %3u (%#x)\n",
+ "Thermal",
+ pszNotApplicable, /* Timer mode */
+ uMask,
+ pszNotApplicable, /* TriggerMode */
+ pszNotApplicable, /* Remote IRR */
+ pszNotApplicable, /* Polarity */
+ pszDeliveryStatus,
+ pszDeliveryMode,
+ uVector,
+ uVector);
+ }
+#endif
+
+ /* Performance Monitor Counters. */
+ {
+ uint32_t const uLvtPerf = pXApicPage->lvt_perf.all.u32LvtPerf;
+ const uint8_t uMask = XAPIC_LVT_IS_MASKED(uLvtPerf);
+ const uint8_t uDeliveryStatus = uLvtPerf & XAPIC_LVT_DELIVERY_STATUS;
+ const char *pszDeliveryStatus = s_apszLvtDeliveryStatus[uDeliveryStatus];
+ const XAPICDELIVERYMODE enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvtPerf);
+ const char *pszDeliveryMode = s_apszLvtDeliveryModes[enmDeliveryMode];
+ const uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtPerf);
+
+ pHlp->pfnPrintf(pHlp, "%-7s %9s %u %5s %1s %8s %4s %6s %3u (%#x)\n",
+ "Perf",
+ pszNotApplicable, /* Timer mode */
+ uMask,
+ pszNotApplicable, /* TriggerMode */
+ pszNotApplicable, /* Remote IRR */
+ pszNotApplicable, /* Polarity */
+ pszDeliveryStatus,
+ pszDeliveryMode,
+ uVector,
+ uVector);
+ }
+
+ /* LINT0, LINT1. */
+ {
+ /* LINTx name. */
+ static const char * const s_apszLvtLint[] =
+ {
+ "LINT0",
+ "LINT1"
+ };
+ /* Trigger mode. */
+ static const char * const s_apszLvtTriggerModes[] =
+ {
+ "Edge ",
+ "Level"
+ };
+ /* Polarity. */
+ static const char * const s_apszLvtPolarity[] =
+ {
+ "ActiveHi",
+ "ActiveLo"
+ };
+
+ uint32_t aLvtLint[2];
+ aLvtLint[0] = pXApicPage->lvt_lint0.all.u32LvtLint0;
+ aLvtLint[1] = pXApicPage->lvt_lint1.all.u32LvtLint1;
+ for (size_t i = 0; i < RT_ELEMENTS(aLvtLint); i++)
+ {
+ uint32_t const uLvtLint = aLvtLint[i];
+ const char *pszLint = s_apszLvtLint[i];
+ const uint8_t uMask = XAPIC_LVT_IS_MASKED(uLvtLint);
+ const XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvtLint);
+ const char *pszTriggerMode = s_apszLvtTriggerModes[enmTriggerMode];
+ const uint8_t uRemoteIrr = XAPIC_LVT_GET_REMOTE_IRR(uLvtLint);
+ const uint8_t uPolarity = XAPIC_LVT_GET_POLARITY(uLvtLint);
+ const char *pszPolarity = s_apszLvtPolarity[uPolarity];
+ const uint8_t uDeliveryStatus = uLvtLint & XAPIC_LVT_DELIVERY_STATUS;
+ const char *pszDeliveryStatus = s_apszLvtDeliveryStatus[uDeliveryStatus];
+ const XAPICDELIVERYMODE enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint);
+ const char *pszDeliveryMode = s_apszLvtDeliveryModes[enmDeliveryMode];
+ const uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtLint);
+
+ pHlp->pfnPrintf(pHlp, "%-7s %9s %u %5s %u %8s %4s %6s %3u (%#x)\n",
+ pszLint,
+ pszNotApplicable, /* Timer mode */
+ uMask,
+ pszTriggerMode,
+ uRemoteIrr,
+ pszPolarity,
+ pszDeliveryStatus,
+ pszDeliveryMode,
+ uVector,
+ uVector);
+ }
+ }
+
+ /* Error. */
+ {
+ uint32_t const uLvtError = pXApicPage->lvt_error.all.u32LvtError;
+ const uint8_t uMask = XAPIC_LVT_IS_MASKED(uLvtError);
+ const uint8_t uDeliveryStatus = uLvtError & XAPIC_LVT_DELIVERY_STATUS;
+ const char *pszDeliveryStatus = s_apszLvtDeliveryStatus[uDeliveryStatus];
+ const XAPICDELIVERYMODE enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvtError);
+ const char *pszDeliveryMode = s_apszLvtDeliveryModes[enmDeliveryMode];
+ const uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtError);
+
+ pHlp->pfnPrintf(pHlp, "%-7s %9s %u %5s %1s %8s %4s %6s %3u (%#x)\n",
+ "Error",
+ pszNotApplicable, /* Timer mode */
+ uMask,
+ pszNotApplicable, /* TriggerMode */
+ pszNotApplicable, /* Remote IRR */
+ pszNotApplicable, /* Polarity */
+ pszDeliveryStatus,
+ pszDeliveryMode,
+ uVector,
+ uVector);
+ }
+}
+
+
+/**
+ * Dumps the APIC timer information.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) apicR3InfoTimer(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ pHlp->pfnPrintf(pHlp, "VCPU[%u] Local APIC timer:\n", pVCpu->idCpu);
+ pHlp->pfnPrintf(pHlp, " ICR = %#RX32\n", pXApicPage->timer_icr.u32InitialCount);
+ pHlp->pfnPrintf(pHlp, " CCR = %#RX32\n", pXApicPage->timer_ccr.u32CurrentCount);
+ pHlp->pfnPrintf(pHlp, " DCR = %#RX32\n", pXApicPage->timer_dcr.all.u32DivideValue);
+ pHlp->pfnPrintf(pHlp, " Timer shift = %#x\n", apicGetTimerShift(pXApicPage));
+ pHlp->pfnPrintf(pHlp, " Timer initial TS = %#RU64\n", pApicCpu->u64TimerInitial);
+ apicR3InfoLvtTimer(pVCpu, pHlp);
+}
+
+
+#if defined(APIC_FUZZY_SSM_COMPAT_TEST) || defined(DEBUG_ramshankar)
+
+/**
+ * Reads a 32-bit register at a specified offset.
+ *
+ * @returns The value at the specified offset.
+ * @param pXApicPage The xAPIC page.
+ * @param offReg The offset of the register being read.
+ *
+ * @remarks Duplicate of apicReadRaw32()!
+ */
+static uint32_t apicR3ReadRawR32(PCXAPICPAGE pXApicPage, uint16_t offReg)
+{
+ Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
+ uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
+ uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
+ return uValue;
+}
+
+
+/**
+ * Helper for dumping per-VCPU APIC state to the release logger.
+ *
+ * This is primarily concerned about the APIC state relevant for saved-states.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pszPrefix A caller supplied prefix before dumping the state.
+ * @param uVersion Data layout version.
+ */
+static void apicR3DumpState(PVMCPU pVCpu, const char *pszPrefix, uint32_t uVersion)
+{
+ PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ LogRel(("APIC%u: %s (version %u):\n", pVCpu->idCpu, pszPrefix, uVersion));
+
+ switch (uVersion)
+ {
+ case APIC_SAVED_STATE_VERSION:
+ case APIC_SAVED_STATE_VERSION_VBOX_51_BETA2:
+ {
+ /* The auxiliary state. */
+ LogRel(("APIC%u: uApicBaseMsr = %#RX64\n", pVCpu->idCpu, pApicCpu->uApicBaseMsr));
+ LogRel(("APIC%u: uEsrInternal = %#RX64\n", pVCpu->idCpu, pApicCpu->uEsrInternal));
+
+ /* The timer. */
+ LogRel(("APIC%u: u64TimerInitial = %#RU64\n", pVCpu->idCpu, pApicCpu->u64TimerInitial));
+ LogRel(("APIC%u: uHintedTimerInitialCount = %#RU64\n", pVCpu->idCpu, pApicCpu->uHintedTimerInitialCount));
+ LogRel(("APIC%u: uHintedTimerShift = %#RU64\n", pVCpu->idCpu, pApicCpu->uHintedTimerShift));
+
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ LogRel(("APIC%u: uTimerICR = %#RX32\n", pVCpu->idCpu, pXApicPage->timer_icr.u32InitialCount));
+ LogRel(("APIC%u: uTimerCCR = %#RX32\n", pVCpu->idCpu, pXApicPage->timer_ccr.u32CurrentCount));
+
+ /* The PIBs. */
+ LogRel(("APIC%u: Edge PIB : %.*Rhxs\n", pVCpu->idCpu, sizeof(APICPIB), pApicCpu->pvApicPibR3));
+ LogRel(("APIC%u: Level PIB: %.*Rhxs\n", pVCpu->idCpu, sizeof(APICPIB), &pApicCpu->ApicPibLevel));
+
+ /* The LINT0, LINT1 interrupt line active states. */
+ LogRel(("APIC%u: fActiveLint0 = %RTbool\n", pVCpu->idCpu, pApicCpu->fActiveLint0));
+ LogRel(("APIC%u: fActiveLint1 = %RTbool\n", pVCpu->idCpu, pApicCpu->fActiveLint1));
+
+ /* The APIC page. */
+ LogRel(("APIC%u: APIC page: %.*Rhxs\n", pVCpu->idCpu, sizeof(XAPICPAGE), pApicCpu->pvApicPageR3));
+ break;
+ }
+
+ case APIC_SAVED_STATE_VERSION_VBOX_50:
+ case APIC_SAVED_STATE_VERSION_VBOX_30:
+ case APIC_SAVED_STATE_VERSION_ANCIENT:
+ {
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ LogRel(("APIC%u: uApicBaseMsr = %#RX32\n", pVCpu->idCpu, RT_LO_U32(pApicCpu->uApicBaseMsr)));
+ LogRel(("APIC%u: uId = %#RX32\n", pVCpu->idCpu, pXApicPage->id.u8ApicId));
+ LogRel(("APIC%u: uPhysId = N/A\n", pVCpu->idCpu));
+ LogRel(("APIC%u: uArbId = N/A\n", pVCpu->idCpu));
+ LogRel(("APIC%u: uTpr = %#RX32\n", pVCpu->idCpu, pXApicPage->tpr.u8Tpr));
+ LogRel(("APIC%u: uSvr = %#RX32\n", pVCpu->idCpu, pXApicPage->svr.all.u32Svr));
+ LogRel(("APIC%u: uLdr = %#x\n", pVCpu->idCpu, pXApicPage->ldr.all.u32Ldr));
+ LogRel(("APIC%u: uDfr = %#x\n", pVCpu->idCpu, pXApicPage->dfr.all.u32Dfr));
+
+ for (size_t i = 0; i < 8; i++)
+ {
+ LogRel(("APIC%u: Isr[%u].u32Reg = %#RX32\n", pVCpu->idCpu, i, pXApicPage->isr.u[i].u32Reg));
+ LogRel(("APIC%u: Tmr[%u].u32Reg = %#RX32\n", pVCpu->idCpu, i, pXApicPage->tmr.u[i].u32Reg));
+ LogRel(("APIC%u: Irr[%u].u32Reg = %#RX32\n", pVCpu->idCpu, i, pXApicPage->irr.u[i].u32Reg));
+ }
+
+ for (size_t i = 0; i < XAPIC_MAX_LVT_ENTRIES_P4; i++)
+ {
+ uint16_t const offReg = XAPIC_OFF_LVT_START + (i << 4);
+ LogRel(("APIC%u: Lvt[%u].u32Reg = %#RX32\n", pVCpu->idCpu, i, apicR3ReadRawR32(pXApicPage, offReg)));
+ }
+
+ LogRel(("APIC%u: uEsr = %#RX32\n", pVCpu->idCpu, pXApicPage->esr.all.u32Errors));
+ LogRel(("APIC%u: uIcr_Lo = %#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
+ LogRel(("APIC%u: uIcr_Hi = %#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
+ LogRel(("APIC%u: uTimerDcr = %#RX32\n", pVCpu->idCpu, pXApicPage->timer_dcr.all.u32DivideValue));
+ LogRel(("APIC%u: uCountShift = %#RX32\n", pVCpu->idCpu, apicGetTimerShift(pXApicPage)));
+ LogRel(("APIC%u: uInitialCount = %#RX32\n", pVCpu->idCpu, pXApicPage->timer_icr.u32InitialCount));
+ LogRel(("APIC%u: u64InitialCountLoadTime = %#RX64\n", pVCpu->idCpu, pApicCpu->u64TimerInitial));
+ LogRel(("APIC%u: u64NextTime / TimerCCR = %#RX64\n", pVCpu->idCpu, pXApicPage->timer_ccr.u32CurrentCount));
+ break;
+ }
+
+ default:
+ {
+ LogRel(("APIC: apicR3DumpState: Invalid/unrecognized saved-state version %u (%#x)\n", uVersion, uVersion));
+ break;
+ }
+ }
+}
+
+#endif /* APIC_FUZZY_SSM_COMPAT_TEST || DEBUG_ramshankar */
+
+/**
+ * Worker for saving per-VM APIC data.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ */
+static int apicR3SaveVMData(PPDMDEVINS pDevIns, PVM pVM, PSSMHANDLE pSSM)
+{
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ PAPIC pApic = VM_TO_APIC(pVM);
+ pHlp->pfnSSMPutU32(pSSM, pVM->cCpus);
+ pHlp->pfnSSMPutBool(pSSM, pApic->fIoApicPresent);
+ return pHlp->pfnSSMPutU32(pSSM, pApic->enmMaxMode);
+}
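+
+/* Layout note: the per-VM unit is cCpus (u32), fIoApicPresent (bool) and
+   enmMaxMode (u32); apicR3LoadVMData() consumes and verifies these fields in
+   the same order. */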
+
+
+/**
+ * Worker for loading per-VM APIC data.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ */
+static int apicR3LoadVMData(PPDMDEVINS pDevIns, PVM pVM, PSSMHANDLE pSSM)
+{
+ PAPIC pApic = VM_TO_APIC(pVM);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+
+ /* Load and verify number of CPUs. */
+ uint32_t cCpus;
+ int rc = pHlp->pfnSSMGetU32(pSSM, &cCpus);
+ AssertRCReturn(rc, rc);
+ if (cCpus != pVM->cCpus)
+ return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - cCpus: saved=%u config=%u"), cCpus, pVM->cCpus);
+
+ /* Load and verify I/O APIC presence. */
+ bool fIoApicPresent;
+ rc = pHlp->pfnSSMGetBool(pSSM, &fIoApicPresent);
+ AssertRCReturn(rc, rc);
+ if (fIoApicPresent != pApic->fIoApicPresent)
+ return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fIoApicPresent: saved=%RTbool config=%RTbool"),
+ fIoApicPresent, pApic->fIoApicPresent);
+
+ /* Load and verify configured max APIC mode. */
+ uint32_t uSavedMaxApicMode;
+ rc = pHlp->pfnSSMGetU32(pSSM, &uSavedMaxApicMode);
+ AssertRCReturn(rc, rc);
+ if (uSavedMaxApicMode != (uint32_t)pApic->enmMaxMode)
+ return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - uApicMode: saved=%u config=%u"),
+ uSavedMaxApicMode, pApic->enmMaxMode);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for loading per-VCPU APIC data for legacy (old) saved-states.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion Data layout version.
+ */
+static int apicR3LoadLegacyVCpuData(PPDMDEVINS pDevIns, PVMCPU pVCpu, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ AssertReturn(uVersion <= APIC_SAVED_STATE_VERSION_VBOX_50, VERR_NOT_SUPPORTED);
+
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+ PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
+
+ uint32_t uApicBaseLo;
+ int rc = pHlp->pfnSSMGetU32(pSSM, &uApicBaseLo);
+ AssertRCReturn(rc, rc);
+ pApicCpu->uApicBaseMsr = uApicBaseLo;
+ Log2(("APIC%u: apicR3LoadLegacyVCpuData: uApicBaseMsr=%#RX64\n", pVCpu->idCpu, pApicCpu->uApicBaseMsr));
+
+ switch (uVersion)
+ {
+ case APIC_SAVED_STATE_VERSION_VBOX_50:
+ case APIC_SAVED_STATE_VERSION_VBOX_30:
+ {
+ uint32_t uApicId, uPhysApicId, uArbId;
+ pHlp->pfnSSMGetU32(pSSM, &uApicId); pXApicPage->id.u8ApicId = uApicId;
+ pHlp->pfnSSMGetU32(pSSM, &uPhysApicId); NOREF(uPhysApicId); /* PhysId == pVCpu->idCpu */
+ pHlp->pfnSSMGetU32(pSSM, &uArbId); NOREF(uArbId); /* ArbID is & was unused. */
+ break;
+ }
+
+ case APIC_SAVED_STATE_VERSION_ANCIENT:
+ {
+ uint8_t uPhysApicId;
+ pHlp->pfnSSMGetU8(pSSM, &pXApicPage->id.u8ApicId);
+ pHlp->pfnSSMGetU8(pSSM, &uPhysApicId); NOREF(uPhysApicId); /* PhysId == pVCpu->idCpu */
+ break;
+ }
+
+ default:
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ uint32_t u32Tpr;
+ pHlp->pfnSSMGetU32(pSSM, &u32Tpr);
+ pXApicPage->tpr.u8Tpr = u32Tpr & XAPIC_TPR_VALID;
+
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->svr.all.u32Svr);
+ pHlp->pfnSSMGetU8(pSSM, &pXApicPage->ldr.u.u8LogicalApicId);
+
+ uint8_t uDfr;
+ pHlp->pfnSSMGetU8(pSSM, &uDfr);
+ pXApicPage->dfr.u.u4Model = uDfr >> 4;
+
+ AssertCompile(RT_ELEMENTS(pXApicPage->isr.u) == 8);
+ AssertCompile(RT_ELEMENTS(pXApicPage->tmr.u) == 8);
+ AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 8);
+ for (size_t i = 0; i < 8; i++)
+ {
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->isr.u[i].u32Reg);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->tmr.u[i].u32Reg);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->irr.u[i].u32Reg);
+ }
+
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_timer.all.u32LvtTimer);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_thermal.all.u32LvtThermal);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_perf.all.u32LvtPerf);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_lint0.all.u32LvtLint0);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_lint1.all.u32LvtLint1);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->lvt_error.all.u32LvtError);
+
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->esr.all.u32Errors);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->icr_lo.all.u32IcrLo);
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->icr_hi.all.u32IcrHi);
+
+ uint32_t u32TimerShift;
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->timer_dcr.all.u32DivideValue);
+ pHlp->pfnSSMGetU32(pSSM, &u32TimerShift);
+ /*
+ * Old implementation may have left the timer shift uninitialized until
+ * the timer configuration register was written. Unfortunately zero is
+ * also a valid timer shift value, so we're just going to ignore it
+ * completely. The shift count can always be derived from the DCR.
+ * See @bugref{8245#c98}.
+ */
+ uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
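+ /* Worked example (standard xAPIC DCR encoding): DCR bits {3,1,0} = 010b
+ * select divide-by-8, i.e. a timer shift of 3, so the current count
+ * decrements once every 8 timer clock cycles. */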
+
+ pHlp->pfnSSMGetU32(pSSM, &pXApicPage->timer_icr.u32InitialCount);
+ pHlp->pfnSSMGetU64(pSSM, &pApicCpu->u64TimerInitial);
+ uint64_t uNextTS;
+ rc = pHlp->pfnSSMGetU64(pSSM, &uNextTS); AssertRCReturn(rc, rc);
+ if (uNextTS >= pApicCpu->u64TimerInitial + ((pXApicPage->timer_icr.u32InitialCount + 1) << uTimerShift))
+ pXApicPage->timer_ccr.u32CurrentCount = pXApicPage->timer_icr.u32InitialCount;
+
+ rc = PDMDevHlpTimerLoad(pDevIns, pApicCpu->hTimer, pSSM);
+ AssertRCReturn(rc, rc);
+ Assert(pApicCpu->uHintedTimerInitialCount == 0);
+ Assert(pApicCpu->uHintedTimerShift == 0);
+ if (PDMDevHlpTimerIsActive(pDevIns, pApicCpu->hTimer))
+ {
+ uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
+ apicHintTimerFreq(pDevIns, pApicCpu, uInitialCount, uTimerShift);
+ }
+
+ return rc;
+}
+
+
+/**
+ * @copydoc FNSSMDEVSAVEEXEC
+ */
+static DECLCALLBACK(int) apicR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
+{
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+
+ AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
+
+ LogFlow(("APIC: apicR3SaveExec\n"));
+
+ /* Save per-VM data. */
+ int rc = apicR3SaveVMData(pDevIns, pVM, pSSM);
+ AssertRCReturn(rc, rc);
+
+ /* Save per-VCPU data.*/
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ /* Update interrupts from the pending-interrupts bitmaps to the IRR. */
+ APICUpdatePendingInterrupts(pVCpu);
+
+ /* Save the auxiliary data. */
+ pHlp->pfnSSMPutU64(pSSM, pApicCpu->uApicBaseMsr);
+ pHlp->pfnSSMPutU32(pSSM, pApicCpu->uEsrInternal);
+
+ /* Save the APIC page. */
+ if (XAPIC_IN_X2APIC_MODE(pVCpu))
+ pHlp->pfnSSMPutStruct(pSSM, (const void *)pApicCpu->pvApicPageR3, &g_aX2ApicPageFields[0]);
+ else
+ pHlp->pfnSSMPutStruct(pSSM, (const void *)pApicCpu->pvApicPageR3, &g_aXApicPageFields[0]);
+
+ /* Save the timer. */
+ pHlp->pfnSSMPutU64(pSSM, pApicCpu->u64TimerInitial);
+ PDMDevHlpTimerSave(pDevIns, pApicCpu->hTimer, pSSM);
+
+ /* Save the LINT0, LINT1 interrupt line states. */
+ pHlp->pfnSSMPutBool(pSSM, pApicCpu->fActiveLint0);
+ pHlp->pfnSSMPutBool(pSSM, pApicCpu->fActiveLint1);
+
+#if defined(APIC_FUZZY_SSM_COMPAT_TEST) || defined(DEBUG_ramshankar)
+ apicR3DumpState(pVCpu, "Saved state", APIC_SAVED_STATE_VERSION);
+#endif
+ }
+
+#ifdef APIC_FUZZY_SSM_COMPAT_TEST
+ /* The state is fuzzy, don't even bother trying to load the guest. */
+ return VERR_INVALID_STATE;
+#else
+ return rc;
+#endif
+}
+
+
+/**
+ * @copydoc FNSSMDEVLOADEXEC
+ */
+static DECLCALLBACK(int) apicR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+
+ AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(uPass == SSM_PASS_FINAL, VERR_WRONG_ORDER);
+
+ LogFlow(("APIC: apicR3LoadExec: uVersion=%u uPass=%#x\n", uVersion, uPass));
+
+ /* Weed out invalid versions. */
+ if ( uVersion != APIC_SAVED_STATE_VERSION
+ && uVersion != APIC_SAVED_STATE_VERSION_VBOX_51_BETA2
+ && uVersion != APIC_SAVED_STATE_VERSION_VBOX_50
+ && uVersion != APIC_SAVED_STATE_VERSION_VBOX_30
+ && uVersion != APIC_SAVED_STATE_VERSION_ANCIENT)
+ {
+ LogRel(("APIC: apicR3LoadExec: Invalid/unrecognized saved-state version %u (%#x)\n", uVersion, uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ int rc = VINF_SUCCESS;
+ if (uVersion > APIC_SAVED_STATE_VERSION_VBOX_30)
+ {
+ rc = apicR3LoadVMData(pDevIns, pVM, pSSM);
+ AssertRCReturn(rc, rc);
+
+ if (uVersion == APIC_SAVED_STATE_VERSION)
+ { /* Load any new additional per-VM data. */ }
+ }
+
+ /*
+ * Restore per CPU state.
+ *
+ * Note! PDM will restore the VMCPU_FF_INTERRUPT_APIC flag for us.
+ * This code doesn't touch it. No devices should make us touch
+ * it later during the restore either, only during the 'done' phase.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ if (uVersion > APIC_SAVED_STATE_VERSION_VBOX_50)
+ {
+ /* Load the auxiliary data. */
+ pHlp->pfnSSMGetU64V(pSSM, &pApicCpu->uApicBaseMsr);
+ pHlp->pfnSSMGetU32(pSSM, &pApicCpu->uEsrInternal);
+
+ /* Load the APIC page. */
+ if (XAPIC_IN_X2APIC_MODE(pVCpu))
+ pHlp->pfnSSMGetStruct(pSSM, pApicCpu->pvApicPageR3, &g_aX2ApicPageFields[0]);
+ else
+ pHlp->pfnSSMGetStruct(pSSM, pApicCpu->pvApicPageR3, &g_aXApicPageFields[0]);
+
+ /* Load the timer. */
+ rc = pHlp->pfnSSMGetU64(pSSM, &pApicCpu->u64TimerInitial); AssertRCReturn(rc, rc);
+ rc = PDMDevHlpTimerLoad(pDevIns, pApicCpu->hTimer, pSSM); AssertRCReturn(rc, rc);
+ Assert(pApicCpu->uHintedTimerShift == 0);
+ Assert(pApicCpu->uHintedTimerInitialCount == 0);
+ if (PDMDevHlpTimerIsActive(pDevIns, pApicCpu->hTimer))
+ {
+ PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
+ uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
+ uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
+ apicHintTimerFreq(pDevIns, pApicCpu, uInitialCount, uTimerShift);
+ }
+
+ /* Load the LINT0, LINT1 interrupt line states. */
+ if (uVersion > APIC_SAVED_STATE_VERSION_VBOX_51_BETA2)
+ {
+ pHlp->pfnSSMGetBoolV(pSSM, &pApicCpu->fActiveLint0);
+ pHlp->pfnSSMGetBoolV(pSSM, &pApicCpu->fActiveLint1);
+ }
+ }
+ else
+ {
+ rc = apicR3LoadLegacyVCpuData(pDevIns, pVCpu, pSSM, uVersion);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Check that we're still good wrt restored data, then tell CPUM about the current CPUID[1].EDX[9] visibility.
+ */
+ rc = pHlp->pfnSSMHandleGetStatus(pSSM);
+ AssertRCReturn(rc, rc);
+ CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN));
+
+#if defined(APIC_FUZZY_SSM_COMPAT_TEST) || defined(DEBUG_ramshankar)
+ apicR3DumpState(pVCpu, "Loaded state", uVersion);
+#endif
+ }
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNTMTIMERDEV}
+ *
+ * @note pvUser points to the VMCPU.
+ *
+ * @remarks Currently this function is invoked on the last EMT, see @c
+ * idTimerCpu in tmR3TimerCallback(). However, the code does -not-
+ * rely on this and is designed to work with being invoked on any
+ * thread.
+ */
+static DECLCALLBACK(void) apicR3TimerCallback(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+ Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
+ Assert(pVCpu);
+ LogFlow(("APIC%u: apicR3TimerCallback\n", pVCpu->idCpu));
+ RT_NOREF(pDevIns, hTimer, pApicCpu);
+
+ PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
+ uint32_t const uLvtTimer = pXApicPage->lvt_timer.all.u32LvtTimer;
+#ifdef VBOX_WITH_STATISTICS
+ STAM_COUNTER_INC(&pApicCpu->StatTimerCallback);
+#endif
+ if (!XAPIC_LVT_IS_MASKED(uLvtTimer))
+ {
+ uint8_t uVector = XAPIC_LVT_GET_VECTOR(uLvtTimer);
+ Log2(("APIC%u: apicR3TimerCallback: Raising timer interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
+ apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
+ }
+
+ XAPICTIMERMODE enmTimerMode = XAPIC_LVT_GET_TIMER_MODE(uLvtTimer);
+ switch (enmTimerMode)
+ {
+ case XAPICTIMERMODE_PERIODIC:
+ {
+ /* The initial-count register determines if the periodic timer is re-armed. */
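+ /* (With a non-zero initial count the period is roughly
+ * uInitialCount << timer-shift source clocks; see apicStartTimer.) */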
+ uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
+ pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
+ if (uInitialCount)
+ {
+ Log2(("APIC%u: apicR3TimerCallback: Re-arming timer. uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
+ apicStartTimer(pVCpu, uInitialCount);
+ }
+ break;
+ }
+
+ case XAPICTIMERMODE_ONESHOT:
+ {
+ pXApicPage->timer_ccr.u32CurrentCount = 0;
+ break;
+ }
+
+ case XAPICTIMERMODE_TSC_DEADLINE:
+ {
+ /** @todo implement TSC deadline. */
+ AssertMsgFailed(("APIC: TSC deadline mode unimplemented\n"));
+ break;
+ }
+ }
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnReset}
+ */
+DECLCALLBACK(void) apicR3Reset(PPDMDEVINS pDevIns)
+{
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ VM_ASSERT_EMT0(pVM);
+ VM_ASSERT_IS_NOT_RUNNING(pVM);
+
+ LogFlow(("APIC: apicR3Reset\n"));
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpuDest = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpuDest);
+
+ if (PDMDevHlpTimerIsActive(pDevIns, pApicCpu->hTimer))
+ PDMDevHlpTimerStop(pDevIns, pApicCpu->hTimer);
+
+ apicResetCpu(pVCpuDest, true /* fResetApicBaseMsr */);
+
+ /* Clear the interrupt pending force flag. */
+ apicClearInterruptFF(pVCpuDest, PDMAPICIRQ_HARDWARE);
+ }
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnRelocate}
+ */
+DECLCALLBACK(void) apicR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
+{
+ RT_NOREF(pDevIns, offDelta);
+}
+
+
+/**
+ * Terminates the APIC state.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void apicR3TermState(PVM pVM)
+{
+ PAPIC pApic = VM_TO_APIC(pVM);
+ LogFlow(("APIC: apicR3TermState: pVM=%p\n", pVM));
+
+ /* Unmap and free the PIB. */
+ if (pApic->pvApicPibR3 != NIL_RTR3PTR)
+ {
+ size_t const cPages = pApic->cbApicPib >> HOST_PAGE_SHIFT;
+ if (cPages == 1)
+ SUPR3PageFreeEx(pApic->pvApicPibR3, cPages);
+ else
+ SUPR3ContFree(pApic->pvApicPibR3, cPages);
+ pApic->pvApicPibR3 = NIL_RTR3PTR;
+ pApic->pvApicPibR0 = NIL_RTR0PTR;
+ }
+
+ /* Unmap and free the virtual-APIC pages. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ pApicCpu->pvApicPibR3 = NIL_RTR3PTR;
+ pApicCpu->pvApicPibR0 = NIL_RTR0PTR;
+
+ if (pApicCpu->pvApicPageR3 != NIL_RTR3PTR)
+ {
+ SUPR3PageFreeEx(pApicCpu->pvApicPageR3, 1 /* cPages */);
+ pApicCpu->pvApicPageR3 = NIL_RTR3PTR;
+ pApicCpu->pvApicPageR0 = NIL_RTR0PTR;
+ }
+ }
+}
+
+
+/**
+ * Initializes the APIC state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int apicR3InitState(PVM pVM)
+{
+ PAPIC pApic = VM_TO_APIC(pVM);
+ LogFlow(("APIC: apicR3InitState: pVM=%p\n", pVM));
+
+ /*
+ * Allocate and map the pending-interrupt bitmap (PIB).
+ *
+ * We allocate all the VCPUs' PIBs contiguously in order to save space as
+ * physically contiguous allocations are rounded to a multiple of page size.
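+ *
+ * For example, if sizeof(APICPIB) were 64 bytes (an illustrative figure), the
+ * PIBs of 64 VCPUs would share a single 4K host page rather than costing 64
+ * separate page allocations.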
+ */
+ Assert(pApic->pvApicPibR3 == NIL_RTR3PTR);
+ Assert(pApic->pvApicPibR0 == NIL_RTR0PTR);
+ pApic->cbApicPib = RT_ALIGN_Z(pVM->cCpus * sizeof(APICPIB), HOST_PAGE_SIZE);
+ size_t const cHostPages = pApic->cbApicPib >> HOST_PAGE_SHIFT;
+ if (cHostPages == 1)
+ {
+ SUPPAGE SupApicPib;
+ RT_ZERO(SupApicPib);
+ SupApicPib.Phys = NIL_RTHCPHYS;
+ int rc = SUPR3PageAllocEx(1 /* cHostPages */, 0 /* fFlags */, &pApic->pvApicPibR3, &pApic->pvApicPibR0, &SupApicPib);
+ if (RT_SUCCESS(rc))
+ {
+ pApic->HCPhysApicPib = SupApicPib.Phys;
+ AssertLogRelReturn(pApic->pvApicPibR3, VERR_INTERNAL_ERROR);
+ }
+ else
+ {
+ LogRel(("APIC: Failed to allocate %u bytes for the pending-interrupt bitmap, rc=%Rrc\n", pApic->cbApicPib, rc));
+ return rc;
+ }
+ }
+ else
+ pApic->pvApicPibR3 = SUPR3ContAlloc(cHostPages, &pApic->pvApicPibR0, &pApic->HCPhysApicPib);
+
+ if (pApic->pvApicPibR3)
+ {
+ bool const fDriverless = SUPR3IsDriverless();
+ AssertLogRelReturn(pApic->pvApicPibR0 != NIL_RTR0PTR || fDriverless, VERR_INTERNAL_ERROR);
+ AssertLogRelReturn(pApic->HCPhysApicPib != NIL_RTHCPHYS || fDriverless, VERR_INTERNAL_ERROR);
+
+ /* Initialize the PIB. */
+ RT_BZERO(pApic->pvApicPibR3, pApic->cbApicPib);
+
+ /*
+ * Allocate and map the virtual-APIC pages.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ SUPPAGE SupApicPage;
+ RT_ZERO(SupApicPage);
+ SupApicPage.Phys = NIL_RTHCPHYS;
+
+ Assert(pVCpu->idCpu == idCpu);
+ Assert(pApicCpu->pvApicPageR3 == NIL_RTR3PTR);
+ Assert(pApicCpu->pvApicPageR0 == NIL_RTR0PTR);
+ AssertCompile(sizeof(XAPICPAGE) <= HOST_PAGE_SIZE);
+ pApicCpu->cbApicPage = sizeof(XAPICPAGE);
+ int rc = SUPR3PageAllocEx(1 /* cHostPages */, 0 /* fFlags */, &pApicCpu->pvApicPageR3, &pApicCpu->pvApicPageR0,
+ &SupApicPage);
+ if (RT_SUCCESS(rc))
+ {
+ AssertLogRelReturn(pApicCpu->pvApicPageR3 != NIL_RTR3PTR || fDriverless, VERR_INTERNAL_ERROR);
+ pApicCpu->HCPhysApicPage = SupApicPage.Phys;
+ AssertLogRelReturn(pApicCpu->HCPhysApicPage != NIL_RTHCPHYS || fDriverless, VERR_INTERNAL_ERROR);
+
+ /* Associate the per-VCPU PIB pointers to the per-VM PIB mapping. */
+ uint32_t const offApicPib = idCpu * sizeof(APICPIB);
+ pApicCpu->pvApicPibR0 = !fDriverless ? (RTR0PTR)((RTR0UINTPTR)pApic->pvApicPibR0 + offApicPib) : NIL_RTR0PTR;
+ pApicCpu->pvApicPibR3 = (RTR3PTR)((RTR3UINTPTR)pApic->pvApicPibR3 + offApicPib);
+
+ /* Initialize the virtual-APIC state. */
+ RT_BZERO(pApicCpu->pvApicPageR3, pApicCpu->cbApicPage);
+ apicResetCpu(pVCpu, true /* fResetApicBaseMsr */);
+
+#ifdef DEBUG_ramshankar
+ Assert(pApicCpu->pvApicPibR3 != NIL_RTR3PTR);
+ Assert(pApicCpu->pvApicPibR0 != NIL_RTR0PTR || fDriverless);
+ Assert(pApicCpu->pvApicPageR3 != NIL_RTR3PTR);
+#endif
+ }
+ else
+ {
+ LogRel(("APIC%u: Failed to allocate %u bytes for the virtual-APIC page, rc=%Rrc\n", idCpu, pApicCpu->cbApicPage, rc));
+ apicR3TermState(pVM);
+ return rc;
+ }
+ }
+
+#ifdef DEBUG_ramshankar
+ Assert(pApic->pvApicPibR3 != NIL_RTR3PTR);
+ Assert(pApic->pvApicPibR0 != NIL_RTR0PTR || fDriverless);
+#endif
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("APIC: Failed to allocate %u bytes of physically contiguous memory for the pending-interrupt bitmap\n",
+ pApic->cbApicPib));
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnDestruct}
+ */
+DECLCALLBACK(int) apicR3Destruct(PPDMDEVINS pDevIns)
+{
+ PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ LogFlow(("APIC: apicR3Destruct: pVM=%p\n", pVM));
+
+ apicR3TermState(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnInitComplete}
+ */
+DECLCALLBACK(int) apicR3InitComplete(PPDMDEVINS pDevIns)
+{
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ PAPIC pApic = VM_TO_APIC(pVM);
+
+ /*
+ * Init APIC settings that rely on HM and CPUM configurations.
+ */
+ CPUMCPUIDLEAF CpuLeaf;
+ int rc = CPUMR3CpuIdGetLeaf(pVM, &CpuLeaf, 1, 0);
+ AssertRCReturn(rc, rc);
+
+ pApic->fSupportsTscDeadline = RT_BOOL(CpuLeaf.uEcx & X86_CPUID_FEATURE_ECX_TSCDEADL);
+ pApic->fPostedIntrsEnabled = HMR3IsPostedIntrsEnabled(pVM->pUVM);
+ pApic->fVirtApicRegsEnabled = HMR3AreVirtApicRegsEnabled(pVM->pUVM);
+
+ LogRel(("APIC: fPostedIntrsEnabled=%RTbool fVirtApicRegsEnabled=%RTbool fSupportsTscDeadline=%RTbool\n",
+ pApic->fPostedIntrsEnabled, pApic->fVirtApicRegsEnabled, pApic->fSupportsTscDeadline));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnConstruct}
+ */
+DECLCALLBACK(int) apicR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
+{
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+ PAPICDEV pApicDev = PDMDEVINS_2_DATA(pDevIns, PAPICDEV);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ PAPIC pApic = VM_TO_APIC(pVM);
+ Assert(iInstance == 0); NOREF(iInstance);
+
+ /*
+ * Init the data.
+ */
+ pApic->pDevInsR3 = pDevIns;
+ pApic->fR0Enabled = pDevIns->fR0Enabled;
+ pApic->fRCEnabled = pDevIns->fRCEnabled;
+
+ /*
+ * Validate APIC settings.
+ */
+ PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns, "Mode|IOAPIC|NumCPUs|MacOSWorkaround", "");
+
+ /** @devcfgm{apic, IOAPIC, bool, true}
+ * Indicates whether an I/O APIC is present in the system. */
+ int rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "IOAPIC", &pApic->fIoApicPresent, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @devcfgm{apic, Mode, PDMAPICMODE, APIC(2)}
+ * Max APIC feature level. */
+ uint8_t uMaxMode;
+ rc = pHlp->pfnCFGMQueryU8Def(pCfg, "Mode", &uMaxMode, PDMAPICMODE_APIC);
+ AssertLogRelRCReturn(rc, rc);
+ switch ((PDMAPICMODE)uMaxMode)
+ {
+ case PDMAPICMODE_NONE:
+            LogRel(("APIC: APIC maximum mode configured as 'None', effectively disabled/not-present!\n"));
+            RT_FALL_THRU();
+ case PDMAPICMODE_APIC:
+ case PDMAPICMODE_X2APIC:
+ break;
+ default:
+ return VMR3SetError(pVM->pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "APIC mode %d unknown.", uMaxMode);
+ }
+ pApic->enmMaxMode = (PDMAPICMODE)uMaxMode;
+
+ /** @devcfgm{apic, MacOSWorkaround, bool, false}
+ * Enables a workaround for incorrect MSR_IA32_X2APIC_ID handling in macOS.
+ *
+ * Vital code in osfmk/i386/i386_init.c's vstart() routine incorrectly applies a
+ * 24-bit right shift to the ID register value (correct for legacy APIC, but
+ * entirely wrong for x2APIC), with the consequence that all CPUs use the same
+ * per-cpu data and things panic pretty quickly. There are some shifty ID
+ * reads in lapic_native.c too, but they are either harmless (assuming the boot
+ * CPU has ID 0) or for logging/debugging purposes only. */
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "MacOSWorkaround", &pApic->fMacOSWorkaround, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Disable automatic PDM locking for this device.
+ */
+ rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the APIC with PDM.
+ */
+ rc = PDMDevHlpApicRegister(pDevIns);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Initialize the APIC state.
+ */
+ if (pApic->enmMaxMode == PDMAPICMODE_X2APIC)
+ {
+ rc = CPUMR3MsrRangesInsert(pVM, &g_MsrRange_x2Apic);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ else
+ {
+        /* We currently don't have a function to remove the range, so we register a range which will cause a #GP. */
+ rc = CPUMR3MsrRangesInsert(pVM, &g_MsrRange_x2Apic_Invalid);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /* Tell CPUM about the APIC feature level so it can adjust APICBASE MSR GP mask and CPUID bits. */
+ apicR3SetCpuIdFeatureLevel(pVM, pApic->enmMaxMode);
+
+ /* Finally, initialize the state. */
+ rc = apicR3InitState(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the MMIO range.
+ */
+ PAPICCPU pApicCpu0 = VMCPU_TO_APICCPU(pVM->apCpusR3[0]);
+ RTGCPHYS GCPhysApicBase = MSR_IA32_APICBASE_GET_ADDR(pApicCpu0->uApicBaseMsr);
+
+ rc = PDMDevHlpMmioCreateAndMap(pDevIns, GCPhysApicBase, sizeof(XAPICPAGE), apicWriteMmio, apicReadMmio,
+ IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_DWORD_ZEROED, "APIC", &pApicDev->hMmio);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create the APIC timers.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+ RTStrPrintf(&pApicCpu->szTimerDesc[0], sizeof(pApicCpu->szTimerDesc), "APIC Timer %u", pVCpu->idCpu);
+ rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL_SYNC, apicR3TimerCallback, pVCpu,
+ TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, pApicCpu->szTimerDesc, &pApicCpu->hTimer);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Register saved state callbacks.
+ */
+ rc = PDMDevHlpSSMRegister(pDevIns, APIC_SAVED_STATE_VERSION, sizeof(*pApicDev), apicR3SaveExec, apicR3LoadExec);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register debugger info callbacks.
+ *
+ * We use separate callbacks rather than arguments so they can also be
+ * dumped in an automated fashion while collecting crash diagnostics and
+ * not just used during live debugging via the VM debugger.
+ */
+ DBGFR3InfoRegisterInternalEx(pVM, "apic", "Dumps APIC basic information.", apicR3Info, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "apiclvt", "Dumps APIC LVT information.", apicR3InfoLvt, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "apictimer", "Dumps APIC timer information.", apicR3InfoTimer, DBGFINFO_FLAGS_ALL_EMTS);
+
+ /*
+ * Statistics.
+ */
+#define APIC_REG_COUNTER(a_pvReg, a_pszNameFmt, a_pszDesc) \
+ PDMDevHlpSTAMRegisterF(pDevIns, a_pvReg, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, \
+ STAMUNIT_OCCURENCES, a_pszDesc, a_pszNameFmt, idCpu)
+#define APIC_PROF_COUNTER(a_pvReg, a_pszNameFmt, a_pszDesc) \
+ PDMDevHlpSTAMRegisterF(pDevIns, a_pvReg, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, \
+ STAMUNIT_TICKS_PER_CALL, a_pszDesc, a_pszNameFmt, idCpu)
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
+
+ APIC_REG_COUNTER(&pApicCpu->StatPostIntrCnt, "%u", "APIC/VCPU stats / number of apicPostInterrupt calls.");
+ for (size_t i = 0; i < RT_ELEMENTS(pApicCpu->aStatVectors); i++)
+ PDMDevHlpSTAMRegisterF(pDevIns, &pApicCpu->aStatVectors[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Number of APICPostInterrupt calls for the vector.", "%u/Vectors/%02x", idCpu, i);
+
+#ifdef VBOX_WITH_STATISTICS
+ APIC_REG_COUNTER(&pApicCpu->StatMmioReadRZ, "%u/RZ/MmioRead", "Number of APIC MMIO reads in RZ.");
+ APIC_REG_COUNTER(&pApicCpu->StatMmioWriteRZ, "%u/RZ/MmioWrite", "Number of APIC MMIO writes in RZ.");
+ APIC_REG_COUNTER(&pApicCpu->StatMsrReadRZ, "%u/RZ/MsrRead", "Number of APIC MSR reads in RZ.");
+ APIC_REG_COUNTER(&pApicCpu->StatMsrWriteRZ, "%u/RZ/MsrWrite", "Number of APIC MSR writes in RZ.");
+
+ APIC_REG_COUNTER(&pApicCpu->StatMmioReadR3, "%u/R3/MmioRead", "Number of APIC MMIO reads in R3.");
+ APIC_REG_COUNTER(&pApicCpu->StatMmioWriteR3, "%u/R3/MmioWrite", "Number of APIC MMIO writes in R3.");
+ APIC_REG_COUNTER(&pApicCpu->StatMsrReadR3, "%u/R3/MsrRead", "Number of APIC MSR reads in R3.");
+ APIC_REG_COUNTER(&pApicCpu->StatMsrWriteR3, "%u/R3/MsrWrite", "Number of APIC MSR writes in R3.");
+
+ APIC_REG_COUNTER(&pApicCpu->StatPostIntrAlreadyPending,
+ "%u/PostInterruptAlreadyPending", "Number of times an interrupt is already pending.");
+ APIC_REG_COUNTER(&pApicCpu->StatTimerCallback, "%u/TimerCallback", "Number of times the timer callback is invoked.");
+
+ APIC_REG_COUNTER(&pApicCpu->StatTprWrite, "%u/TprWrite", "Number of TPR writes.");
+ APIC_REG_COUNTER(&pApicCpu->StatTprRead, "%u/TprRead", "Number of TPR reads.");
+ APIC_REG_COUNTER(&pApicCpu->StatEoiWrite, "%u/EoiWrite", "Number of EOI writes.");
+ APIC_REG_COUNTER(&pApicCpu->StatMaskedByTpr, "%u/MaskedByTpr", "Number of times TPR masks an interrupt in apicGetInterrupt.");
+ APIC_REG_COUNTER(&pApicCpu->StatMaskedByPpr, "%u/MaskedByPpr", "Number of times PPR masks an interrupt in apicGetInterrupt.");
+ APIC_REG_COUNTER(&pApicCpu->StatTimerIcrWrite, "%u/TimerIcrWrite", "Number of times the timer ICR is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatIcrLoWrite, "%u/IcrLoWrite", "Number of times the ICR Lo (send IPI) is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatIcrHiWrite, "%u/IcrHiWrite", "Number of times the ICR Hi is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatIcrFullWrite, "%u/IcrFullWrite", "Number of times the ICR full (send IPI, x2APIC) is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatIdMsrRead, "%u/IdMsrRead", "Number of times the APIC-ID MSR is read.");
+ APIC_REG_COUNTER(&pApicCpu->StatDcrWrite, "%u/DcrWrite", "Number of times the DCR is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatDfrWrite, "%u/DfrWrite", "Number of times the DFR is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatLdrWrite, "%u/LdrWrite", "Number of times the LDR is written.");
+ APIC_REG_COUNTER(&pApicCpu->StatLvtTimerWrite, "%u/LvtTimerWrite", "Number of times the LVT timer is written.");
+
+ APIC_PROF_COUNTER(&pApicCpu->StatUpdatePendingIntrs,
+ "/PROF/CPU%u/APIC/UpdatePendingInterrupts", "Profiling of APICUpdatePendingInterrupts");
+ APIC_PROF_COUNTER(&pApicCpu->StatPostIntr, "/PROF/CPU%u/APIC/PostInterrupt", "Profiling of APICPostInterrupt");
+#endif
+ }
+
+# undef APIC_PROF_COUNTER
+# undef APIC_REG_COUNTER
+
+ return VINF_SUCCESS;
+}
+
+#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
+
diff --git a/src/VBox/VMM/VMMR3/CFGM.cpp b/src/VBox/VMM/VMMR3/CFGM.cpp
new file mode 100644
index 00000000..71492b16
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CFGM.cpp
@@ -0,0 +1,3428 @@
+/* $Id: CFGM.cpp $ */
+/** @file
+ * CFGM - Configuration Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_cfgm CFGM - The Configuration Manager
+ *
+ * The configuration manager is a directory containing the VM configuration at
+ * run time.  It works in a manner similar to the Windows registry - it's like a
+ * file system hierarchy, but the files (values) live in a separate name space
+ * and can include the path separators.
+ *
+ * The configuration is normally created via a callback passed to VMR3Create()
+ * via the pfnCFGMConstructor parameter. To make testcase writing a bit simpler,
+ * we allow the callback to be NULL, in which case a simple default
+ * configuration will be created by CFGMR3ConstructDefaultTree(). The
+ * Console::configConstructor() method in Main/ConsoleImpl2.cpp creates the
+ * configuration from the XML.
+ *
+ * Devices, drivers, services and other PDM stuff are given their own subtree
+ * where they are protected from accessing information of any parents.  This
+ * is implemented via the CFGMR3SetRestrictedRoot() API.
+ *
+ * Data validation beyond the basic primitives is left to the caller. The caller
+ * is in a better position to know the proper validation rules of the individual
+ * properties.
+ *
+ * @see grp_cfgm
+ *
+ *
+ * @section sec_cfgm_primitives Data Primitives
+ *
+ * CFGM supports the following data primitives:
+ * - Integers. Representation is unsigned 64-bit. Boolean, unsigned and
+ * small integers, and pointers are all represented using this primitive.
+ * - Zero terminated character strings. These are of course UTF-8.
+ * - Variable length byte strings. This can be used to get/put binary
+ * objects like for instance RTMAC.
+ *
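+ * A minimal query sketch; the value names below are the ones inserted by
+ * CFGMR3ConstructDefaultTree() and serve as illustration only:
+ * @code
+ *      uint64_t cbRam = 0;
+ *      int rc = CFGMR3QueryInteger(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
+ *      char szName[64];
+ *      if (RT_SUCCESS(rc))
+ *          rc = CFGMR3QueryStringDef(CFGMR3GetRoot(pVM), "Name", szName, sizeof(szName), "Default VM");
+ * @endcode
+ *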
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CFGM
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vmm.h>
+#include "CFGMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/memsafer.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+#include <iprt/utf16.h>
+#include <iprt/uuid.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void cfgmR3DumpPath(PCFGMNODE pNode, PCDBGFINFOHLP pHlp);
+static void cfgmR3Dump(PCFGMNODE pRoot, unsigned iLevel, PCDBGFINFOHLP pHlp);
+static DECLCALLBACK(void) cfgmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static int cfgmR3ResolveNode(PCFGMNODE pNode, const char *pszPath, PCFGMNODE *ppChild);
+static int cfgmR3ResolveLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf);
+static int cfgmR3InsertLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf);
+static void cfgmR3RemoveLeaf(PCFGMNODE pNode, PCFGMLEAF pLeaf);
+static void cfgmR3FreeValue(PVM pVM, PCFGMLEAF pLeaf);
+
+
+/** @todo replace pVM with pUVM! */
+
+/**
+ * Allocator wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The cross context VM structure, if the tree
+ * is associated with one.
+ * @param enmTag The allocation tag.
+ * @param cb The size of the allocation.
+ */
+static void *cfgmR3MemAlloc(PVM pVM, MMTAG enmTag, size_t cb)
+{
+ if (pVM)
+ return MMR3HeapAlloc(pVM, enmTag, cb);
+ return RTMemAlloc(cb);
+}
+
+
+/**
+ * Free wrapper.
+ *
+ * @param pVM The cross context VM structure, if the tree
+ * is associated with one.
+ * @param pv The memory block to free.
+ */
+static void cfgmR3MemFree(PVM pVM, void *pv)
+{
+ if (pVM)
+ MMR3HeapFree(pv);
+ else
+ RTMemFree(pv);
+}
+
+
+/**
+ * String allocator wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The cross context VM structure, if the tree
+ * is associated with one.
+ * @param enmTag The allocation tag.
+ * @param cbString The size of the allocation, terminator included.
+ */
+static char *cfgmR3StrAlloc(PVM pVM, MMTAG enmTag, size_t cbString)
+{
+ if (pVM)
+ return (char *)MMR3HeapAlloc(pVM, enmTag, cbString);
+ return (char *)RTStrAlloc(cbString);
+}
+
+
+/**
+ * String free wrapper.
+ *
+ * @param pVM The cross context VM structure, if the tree
+ * is associated with one.
+ * @param pszString The memory block to free.
+ */
+static void cfgmR3StrFree(PVM pVM, char *pszString)
+{
+ if (pVM)
+ MMR3HeapFree(pszString);
+ else
+ RTStrFree(pszString);
+}
+
+
+/**
+ * Frees one node, leaving any children or leaves to the caller.
+ *
+ * @param pNode The node structure to free.
+ */
+static void cfgmR3FreeNodeOnly(PCFGMNODE pNode)
+{
+ pNode->pFirstLeaf = NULL;
+ pNode->pFirstChild = NULL;
+ pNode->pNext = NULL;
+ pNode->pPrev = NULL;
+ if (!pNode->pVM)
+ RTMemFree(pNode);
+ else
+ {
+ pNode->pVM = NULL;
+ MMR3HeapFree(pNode);
+ }
+}
+
+
+
+
+/**
+ * Constructs the configuration for the VM.
+ *
+ * This should only be called once.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pfnCFGMConstructor Pointer to callback function for constructing
+ * the VM configuration tree. This is called on
+ * the EMT.
+ * @param pvUser The user argument passed to pfnCFGMConstructor.
+ * @thread EMT.
+ * @internal
+ */
+VMMR3DECL(int) CFGMR3Init(PVM pVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUser)
+{
+ LogFlow(("CFGMR3Init: pfnCFGMConstructor=%p pvUser=%p\n", pfnCFGMConstructor, pvUser));
+
+ /*
+ * Init data members.
+ */
+ pVM->cfgm.s.pRoot = NULL;
+
+ /*
+     * Register DBGF info item.
+ */
+ int rc = DBGFR3InfoRegisterInternal(pVM, "cfgm", "Dumps a part of the CFGM tree. The argument indicates where to start.",
+ cfgmR3Info);
+    AssertRCReturn(rc, rc);
+
+ /*
+ * Root Node.
+ */
+ PCFGMNODE pRoot = (PCFGMNODE)MMR3HeapAllocZ(pVM, MM_TAG_CFGM, sizeof(*pRoot));
+ if (!pRoot)
+ return VERR_NO_MEMORY;
+ pRoot->pVM = pVM;
+ pRoot->cchName = 0;
+ pVM->cfgm.s.pRoot = pRoot;
+
+ /*
+ * Call the constructor if specified, if not use the default one.
+ */
+ if (pfnCFGMConstructor)
+ rc = pfnCFGMConstructor(pVM->pUVM, pVM, VMMR3GetVTable(), pvUser);
+ else
+ rc = CFGMR3ConstructDefaultTree(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ Log(("CFGMR3Init: Successfully constructed the configuration\n"));
+ CFGMR3Dump(CFGMR3GetRoot(pVM));
+ }
+ else
+ LogRel(("Constructor failed with rc=%Rrc pfnCFGMConstructor=%p\n", rc, pfnCFGMConstructor));
+
+ return rc;
+}
+
+
+/**
+ * Terminates the configuration manager.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @internal
+ */
+VMMR3DECL(int) CFGMR3Term(PVM pVM)
+{
+ CFGMR3RemoveNode(pVM->cfgm.s.pRoot);
+ pVM->cfgm.s.pRoot = NULL;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the root node for the VM.
+ *
+ * @returns Pointer to root node.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetRoot(PVM pVM)
+{
+ return pVM->cfgm.s.pRoot;
+}
+
+
+/**
+ * Gets the root node for the VM.
+ *
+ * @returns Pointer to root node.
+ * @param pUVM The user mode VM structure.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetRootU(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ PVM pVM = pUVM->pVM;
+ AssertReturn(pVM, NULL);
+ return pVM->cfgm.s.pRoot;
+}
+
+
+/**
+ * Gets the parent of a CFGM node.
+ *
+ * @returns Pointer to the parent node.
+ * @returns NULL if pNode is Root or pNode is the start of a
+ * restricted subtree (use CFGMR3GetParentEx() for that).
+ *
+ * @param pNode The node which parent we query.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetParent(PCFGMNODE pNode)
+{
+ if (pNode && !pNode->fRestrictedRoot)
+ return pNode->pParent;
+ return NULL;
+}
+
+
+/**
+ * Gets the parent of a CFGM node.
+ *
+ * @returns Pointer to the parent node.
+ * @returns NULL if pNode is Root or pVM is not correct.
+ *
+ * @param   pVM         The cross context VM structure.  Used as a token that
+ * the caller is trusted.
+ * @param pNode The node which parent we query.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetParentEx(PVM pVM, PCFGMNODE pNode)
+{
+ if (pNode && pNode->pVM == pVM)
+ return pNode->pParent;
+ return NULL;
+}
+
+
+/**
+ * Query a child node.
+ *
+ * @returns Pointer to the specified node.
+ * @returns NULL if node was not found or pNode is NULL.
+ * @param pNode Node pszPath is relative to.
+ * @param pszPath Path to the child node or pNode.
+ * It's good style to end this with '/'.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetChild(PCFGMNODE pNode, const char *pszPath)
+{
+ PCFGMNODE pChild;
+ int rc = cfgmR3ResolveNode(pNode, pszPath, &pChild);
+ if (RT_SUCCESS(rc))
+ return pChild;
+ return NULL;
+}
+
+
+/**
+ * Query a child node by a format string.
+ *
+ * @returns Pointer to the specified node.
+ * @returns NULL if node was not found or pNode is NULL.
+ * @param pNode Node pszPath is relative to.
+ * @param pszPathFormat Path to the child node or pNode.
+ * It's good style to end this with '/'.
+ * @param ... Arguments to pszPathFormat.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetChildF(PCFGMNODE pNode, const char *pszPathFormat, ...)
+{
+ va_list Args;
+ va_start(Args, pszPathFormat);
+ PCFGMNODE pRet = CFGMR3GetChildFV(pNode, pszPathFormat, Args);
+ va_end(Args);
+ return pRet;
+}
+
+
+/**
+ * Query a child node by a format string.
+ *
+ * @returns Pointer to the specified node.
+ * @returns NULL if node was not found or pNode is NULL.
+ * @param pNode Node pszPath is relative to.
+ * @param pszPathFormat Path to the child node or pNode.
+ * It's good style to end this with '/'.
+ * @param Args Arguments to pszPathFormat.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetChildFV(PCFGMNODE pNode, const char *pszPathFormat, va_list Args)
+{
+ char *pszPath;
+ RTStrAPrintfV(&pszPath, pszPathFormat, Args);
+ if (pszPath)
+ {
+ PCFGMNODE pChild;
+ int rc = cfgmR3ResolveNode(pNode, pszPath, &pChild);
+ RTStrFree(pszPath);
+ if (RT_SUCCESS(rc))
+ return pChild;
+ }
+ return NULL;
+}
+
+
+/**
+ * Gets the first child node.
+ * Use this to start an enumeration of child nodes.
+ *
+ * @returns Pointer to the first child.
+ * @returns NULL if no children.
+ * @param pNode Node to enumerate children for.
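+ *
+ * A typical enumeration sketch (the 64-byte name buffer is an arbitrary
+ * illustrative choice; use CFGMR3GetNameLen() for exact sizing):
+ * @code
+ *      for (PCFGMNODE pChild = CFGMR3GetFirstChild(pNode); pChild; pChild = CFGMR3GetNextChild(pChild))
+ *      {
+ *          char szName[64];
+ *          int rc = CFGMR3GetName(pChild, szName, sizeof(szName));
+ *          if (RT_SUCCESS(rc))
+ *              Log(("child node: %s\n", szName));
+ *      }
+ * @endcode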
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetFirstChild(PCFGMNODE pNode)
+{
+ return pNode ? pNode->pFirstChild : NULL;
+}
+
+
+/**
+ * Gets the next sibling node.
+ * Use this to continue an enumeration.
+ *
+ * @returns Pointer to the next sibling.
+ * @returns NULL if there are no more siblings.
+ * @param   pCur        Node returned by a call to CFGMR3GetFirstChild()
+ * or successive calls to this function.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetNextChild(PCFGMNODE pCur)
+{
+ return pCur ? pCur->pNext : NULL;
+}
+
+
+/**
+ * Gets the name of the current node.
+ * (Needed for enumeration.)
+ *
+ * @returns VBox status code.
+ * @param   pCur        Node returned by a call to CFGMR3GetFirstChild()
+ * or successive calls to CFGMR3GetNextChild().
+ * @param pszName Where to store the node name.
+ * @param cchName Size of the buffer pointed to by pszName (with terminator).
+ */
+VMMR3DECL(int) CFGMR3GetName(PCFGMNODE pCur, char *pszName, size_t cchName)
+{
+ int rc;
+ if (pCur)
+ {
+ if (cchName > pCur->cchName)
+ {
+ rc = VINF_SUCCESS;
+ memcpy(pszName, pCur->szName, pCur->cchName + 1);
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NO_NODE;
+ return rc;
+}
+
+
+/**
+ * Gets the length of the current node's name.
+ * (Needed for enumeration.)
+ *
+ * @returns Node name length in bytes including the terminating null char.
+ * @returns 0 if pCur is NULL.
+ * @param   pCur        Node returned by a call to CFGMR3GetFirstChild()
+ * or successive calls to CFGMR3GetNextChild().
+ */
+VMMR3DECL(size_t) CFGMR3GetNameLen(PCFGMNODE pCur)
+{
+ return pCur ? pCur->cchName + 1 : 0;
+}
+
+
+/**
+ * Validates that the child nodes are within a set of valid names.
+ *
+ * @returns true if all names are found in pszzAllowed.
+ * @returns false if not.
+ * @param pNode The node which children should be examined.
+ * @param pszzValid List of valid names separated by '\\0' and ending with
+ * a double '\\0'.
+ *
+ * @deprecated Use CFGMR3ValidateConfig.
+ */
+VMMR3DECL(bool) CFGMR3AreChildrenValid(PCFGMNODE pNode, const char *pszzValid)
+{
+ if (pNode)
+ {
+ for (PCFGMNODE pChild = pNode->pFirstChild; pChild; pChild = pChild->pNext)
+ {
+ /* search pszzValid for the name */
+ const char *psz = pszzValid;
+ while (*psz)
+ {
+ size_t cch = strlen(psz);
+ if ( cch == pChild->cchName
+ && !memcmp(psz, pChild->szName, cch))
+ break;
+
+ /* next */
+ psz += cch + 1;
+ }
+
+ /* if at end of pszzValid we didn't find it => failure */
+ if (!*psz)
+ {
+ AssertMsgFailed(("Couldn't find '%s' in the valid values\n", pChild->szName));
+ return false;
+ }
+ }
+ }
+
+ /* all ok. */
+ return true;
+}
+
+
+/**
+ * Gets the first value of a node.
+ * Use this to start an enumeration of values.
+ *
+ * @returns Pointer to the first value.
+ * @param pCur The node (Key) which values to enumerate.
+ */
+VMMR3DECL(PCFGMLEAF) CFGMR3GetFirstValue(PCFGMNODE pCur)
+{
+ return pCur ? pCur->pFirstLeaf : NULL;
+}
+
+/**
+ * Gets the next value in enumeration.
+ *
+ * @returns Pointer to the next value.
+ * @param pCur The current value as returned by this function or CFGMR3GetFirstValue().
+ */
+VMMR3DECL(PCFGMLEAF) CFGMR3GetNextValue(PCFGMLEAF pCur)
+{
+ return pCur ? pCur->pNext : NULL;
+}
+
+/**
+ * Get the value name.
+ * (Needed for enumeration.)
+ *
+ * @returns VBox status code.
+ * @param pCur Value returned by a call to CFGMR3GetFirstValue()
+ * or successive calls to CFGMR3GetNextValue().
+ * @param pszName Where to store the value name.
+ * @param cchName Size of the buffer pointed to by pszName (with terminator).
+ */
+VMMR3DECL(int) CFGMR3GetValueName(PCFGMLEAF pCur, char *pszName, size_t cchName)
+{
+ int rc;
+ if (pCur)
+ {
+ if (cchName > pCur->cchName)
+ {
+ rc = VINF_SUCCESS;
+ memcpy(pszName, pCur->szName, pCur->cchName + 1);
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NO_NODE;
+ return rc;
+}
+
+
+/**
+ * Gets the length of the current value's name.
+ * (Needed for enumeration.)
+ *
+ * @returns Value name length in bytes including the terminating null char.
+ * @returns 0 if pCur is NULL.
+ * @param pCur Value returned by a call to CFGMR3GetFirstValue()
+ * or successive calls to CFGMR3GetNextValue().
+ */
+VMMR3DECL(size_t) CFGMR3GetValueNameLen(PCFGMLEAF pCur)
+{
+ return pCur ? pCur->cchName + 1 : 0;
+}
+
+
+/**
+ * Gets the value type.
+ * (For enumeration.)
+ *
+ * @returns The value type.
+ * @param pCur Value returned by a call to CFGMR3GetFirstValue()
+ * or successive calls to CFGMR3GetNextValue().
+ */
+VMMR3DECL(CFGMVALUETYPE) CFGMR3GetValueType(PCFGMLEAF pCur)
+{
+ Assert(pCur);
+ return pCur->enmType;
+}
+
+
+/**
+ * Validates that the values are within a set of valid names.
+ *
+ * @returns true if all names are found in pszzValid.
+ * @returns false if not.
+ * @param pNode The node which values should be examined.
+ * @param pszzValid List of valid names separated by '\\0' and ending with
+ * a double '\\0'.
+ * @deprecated Use CFGMR3ValidateConfig.
+ */
+VMMR3DECL(bool) CFGMR3AreValuesValid(PCFGMNODE pNode, const char *pszzValid)
+{
+ if (pNode)
+ {
+ for (PCFGMLEAF pLeaf = pNode->pFirstLeaf; pLeaf; pLeaf = pLeaf->pNext)
+ {
+ /* search pszzValid for the name */
+ const char *psz = pszzValid;
+ while (*psz)
+ {
+ size_t cch = strlen(psz);
+ if ( cch == pLeaf->cchName
+ && !memcmp(psz, pLeaf->szName, cch))
+ break;
+
+ /* next */
+ psz += cch + 1;
+ }
+
+ /* if at end of pszzValid we didn't find it => failure */
+ if (!*psz)
+ {
+ AssertMsgFailed(("Couldn't find '%s' in the valid values\n", pLeaf->szName));
+ return false;
+ }
+ }
+ }
+
+ /* all ok. */
+ return true;
+}
+
+
+/**
+ * Checks if the given value exists.
+ *
+ * @returns true if it exists, false if not.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName The name of the value we seek.
+ */
+VMMR3DECL(bool) CFGMR3Exists(PCFGMNODE pNode, const char *pszName)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ return RT_SUCCESS_NP(rc);
+}
+
+
+/**
+ * Query value type.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param penmType Where to store the type.
+ */
+VMMR3DECL(int) CFGMR3QueryType(PCFGMNODE pNode, const char *pszName, PCFGMVALUETYPE penmType)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (penmType)
+ *penmType = pLeaf->enmType;
+ }
+ return rc;
+}
+
+
+/**
+ * Query value size.
+ * This works on all types of values.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pcb Where to store the value size.
+ */
+VMMR3DECL(int) CFGMR3QuerySize(PCFGMNODE pNode, const char *pszName, size_t *pcb)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ switch (pLeaf->enmType)
+ {
+ case CFGMVALUETYPE_INTEGER:
+ *pcb = sizeof(pLeaf->Value.Integer.u64);
+ break;
+
+ case CFGMVALUETYPE_STRING:
+ case CFGMVALUETYPE_PASSWORD:
+ *pcb = pLeaf->Value.String.cb;
+ break;
+
+ case CFGMVALUETYPE_BYTES:
+ *pcb = pLeaf->Value.Bytes.cb;
+ break;
+
+ default:
+ rc = VERR_CFGM_IPE_1;
+ AssertMsgFailed(("Invalid value type %d\n", pLeaf->enmType));
+ break;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Query integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu64 Where to store the integer value.
+ */
+VMMR3DECL(int) CFGMR3QueryInteger(PCFGMNODE pNode, const char *pszName, uint64_t *pu64)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_INTEGER)
+ *pu64 = pLeaf->Value.Integer.u64;
+ else
+ rc = VERR_CFGM_NOT_INTEGER;
+ }
+ return rc;
+}
+
+
+/**
+ * Query integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu64 Where to store the integer value. This is set to the default on failure.
+ * @param u64Def The default value. This is always set.
+ */
+VMMR3DECL(int) CFGMR3QueryIntegerDef(PCFGMNODE pNode, const char *pszName, uint64_t *pu64, uint64_t u64Def)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_INTEGER)
+ *pu64 = pLeaf->Value.Integer.u64;
+ else
+ rc = VERR_CFGM_NOT_INTEGER;
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ *pu64 = u64Def;
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VINF_SUCCESS;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Query zero terminated character value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param   pszName         Name of a zero terminated character value.
+ * @param pszString Where to store the string.
+ * @param cchString Size of the string buffer. (Includes terminator.)
+ */
+VMMR3DECL(int) CFGMR3QueryString(PCFGMNODE pNode, const char *pszName, char *pszString, size_t cchString)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_STRING)
+ {
+ size_t cbSrc = pLeaf->Value.String.cb;
+ if (cchString >= cbSrc)
+ {
+ memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
+ memset(pszString + cbSrc, 0, cchString - cbSrc);
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NOT_STRING;
+ }
+ return rc;
+}
+
+
+/**
+ * Query zero terminated character value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param   pszName         Name of a zero terminated character value.
+ * @param pszString Where to store the string. This will not be set on overflow error.
+ * @param cchString Size of the string buffer. (Includes terminator.)
+ * @param pszDef The default value.
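+ *
+ * Usage sketch; the value name and default below mirror those used by
+ * CFGMR3ConstructDefaultTree() and are illustrative only:
+ * @code
+ *      char szDevice[32];
+ *      int rc = CFGMR3QueryStringDef(pCfg, "HardDiskDevice", szDevice, sizeof(szDevice), "piix3ide");
+ * @endcode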
+ */
+VMMR3DECL(int) CFGMR3QueryStringDef(PCFGMNODE pNode, const char *pszName, char *pszString, size_t cchString, const char *pszDef)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_STRING)
+ {
+ size_t cbSrc = pLeaf->Value.String.cb;
+ if (cchString >= cbSrc)
+ {
+ memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
+ memset(pszString + cbSrc, 0, cchString - cbSrc);
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NOT_STRING;
+ }
+
+ if (RT_FAILURE(rc) && rc != VERR_CFGM_NOT_ENOUGH_SPACE)
+ {
+ size_t cchDef = strlen(pszDef);
+ if (cchString > cchDef)
+ {
+ memcpy(pszString, pszDef, cchDef);
+ memset(pszString + cchDef, 0, cchString - cchDef);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VINF_SUCCESS;
+ }
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Query byte string value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of a byte string value.
+ * @param pvData Where to store the binary data.
+ * @param   cbData          Size of buffer pvData points to.
+ */
+VMMR3DECL(int) CFGMR3QueryBytes(PCFGMNODE pNode, const char *pszName, void *pvData, size_t cbData)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_BYTES)
+ {
+ if (cbData >= pLeaf->Value.Bytes.cb)
+ {
+ memcpy(pvData, pLeaf->Value.Bytes.pau8, pLeaf->Value.Bytes.cb);
+ memset((char *)pvData + pLeaf->Value.Bytes.cb, 0, cbData - pLeaf->Value.Bytes.cb);
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NOT_BYTES;
+ }
+ return rc;
+}
+
+
+/**
+ * Query zero terminated character value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param   pszName         Name of a zero terminated character value.
+ * @param pszString Where to store the string.
+ * @param cchString Size of the string buffer. (Includes terminator.)
+ *
+ * @note Concurrent calls to this function and CFGMR3QueryPasswordDef are not
+ * supported.
+ */
+VMMR3DECL(int) CFGMR3QueryPassword(PCFGMNODE pNode, const char *pszName, char *pszString, size_t cchString)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_PASSWORD)
+ {
+ size_t cbSrc = pLeaf->Value.String.cb;
+ if (cchString >= cbSrc)
+ {
+ RTMemSaferUnscramble(pLeaf->Value.String.psz, cbSrc);
+ memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
+ memset(pszString + cbSrc, 0, cchString - cbSrc);
+ RTMemSaferScramble(pLeaf->Value.String.psz, cbSrc);
+
+ Assert(pszString[cbSrc - 1] == '\0');
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NOT_PASSWORD;
+ }
+ return rc;
+}
+
+
+/**
+ * Query zero terminated character value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param   pszName         Name of a zero terminated character value.
+ * @param pszString Where to store the string. This will not be set on overflow error.
+ * @param cchString Size of the string buffer. (Includes terminator.)
+ * @param pszDef The default value.
+ *
+ * @note Concurrent calls to this function and CFGMR3QueryPassword are not
+ * supported.
+ */
+VMMR3DECL(int) CFGMR3QueryPasswordDef(PCFGMNODE pNode, const char *pszName, char *pszString, size_t cchString, const char *pszDef)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_PASSWORD)
+ {
+ size_t cbSrc = pLeaf->Value.String.cb;
+ if (cchString >= cbSrc)
+ {
+ RTMemSaferUnscramble(pLeaf->Value.String.psz, cbSrc);
+ memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
+ memset(pszString + cbSrc, 0, cchString - cbSrc);
+ RTMemSaferScramble(pLeaf->Value.String.psz, cbSrc);
+
+ Assert(pszString[cbSrc - 1] == '\0');
+ }
+ else
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+ else
+ rc = VERR_CFGM_NOT_PASSWORD;
+ }
+
+ if (RT_FAILURE(rc) && rc != VERR_CFGM_NOT_ENOUGH_SPACE)
+ {
+ size_t cchDef = strlen(pszDef);
+ if (cchString > cchDef)
+ {
+ memcpy(pszString, pszDef, cchDef);
+ memset(pszString + cchDef, 0, cchString - cchDef);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VINF_SUCCESS;
+ }
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VERR_CFGM_NOT_ENOUGH_SPACE;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Validate one level of a configuration node.
+ *
+ * This replaces the CFGMR3AreChildrenValid and CFGMR3AreValuesValid APIs.
+ *
+ * @returns VBox status code.
+ *
+ * When an error is returned, both VMSetError and AssertLogRelMsgFailed
+ * have been called. So, all the caller needs to do is to propagate
+ * the error status up to PDM.
+ *
+ * @param pNode The node to validate.
+ * @param pszNode The node path, always ends with a slash. Use
+ * "/" for the root config node.
+ * @param pszValidValues Patterns describing the valid value names. See
+ * RTStrSimplePatternMultiMatch for details on the
+ * pattern syntax.
+ * @param pszValidNodes Patterns describing the valid node (key) names.
+ * See RTStrSimplePatternMultiMatch for details on
+ * the pattern syntax.
+ * @param pszWho Who is calling.
+ * @param uInstance The instance number of the caller.
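+ *
+ * A typical call from a device constructor; the patterns below are borrowed
+ * from the APIC device configuration above for illustration:
+ * @code
+ *      rc = CFGMR3ValidateConfig(pCfg, "/", "Mode|IOAPIC|NumCPUs|MacOSWorkaround", "", "apic", 0);
+ *      if (RT_FAILURE(rc))
+ *          return rc;
+ * @endcode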
+ */
+VMMR3DECL(int) CFGMR3ValidateConfig(PCFGMNODE pNode, const char *pszNode,
+ const char *pszValidValues, const char *pszValidNodes,
+ const char *pszWho, uint32_t uInstance)
+{
+ /* Input validation. */
+ AssertPtrNullReturn(pNode, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszNode, VERR_INVALID_POINTER);
+ Assert(*pszNode && pszNode[strlen(pszNode) - 1] == '/');
+ AssertPtrReturn(pszValidValues, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszValidNodes, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszWho, VERR_INVALID_POINTER);
+
+ if (pNode)
+ {
+ /*
+ * Enumerate the leaves and check them against pszValidValues.
+ */
+ for (PCFGMLEAF pLeaf = pNode->pFirstLeaf; pLeaf; pLeaf = pLeaf->pNext)
+ {
+ if (!RTStrSimplePatternMultiMatch(pszValidValues, RTSTR_MAX,
+ pLeaf->szName, pLeaf->cchName,
+ NULL))
+ {
+ AssertLogRelMsgFailed(("%s/%u: Value '%s%s' didn't match '%s'\n",
+ pszWho, uInstance, pszNode, pLeaf->szName, pszValidValues));
+ return VMSetError(pNode->pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS,
+ N_("Unknown configuration value '%s%s' found in the configuration of %s instance #%u"),
+ pszNode, pLeaf->szName, pszWho, uInstance);
+ }
+
+ }
+
+ /*
+ * Enumerate the child nodes and check them against pszValidNodes.
+ */
+ for (PCFGMNODE pChild = pNode->pFirstChild; pChild; pChild = pChild->pNext)
+ {
+ if (!RTStrSimplePatternMultiMatch(pszValidNodes, RTSTR_MAX,
+ pChild->szName, pChild->cchName,
+ NULL))
+ {
+ AssertLogRelMsgFailed(("%s/%u: Node '%s%s' didn't match '%s'\n",
+ pszWho, uInstance, pszNode, pChild->szName, pszValidNodes));
+ return VMSetError(pNode->pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
+ N_("Unknown configuration node '%s%s' found in the configuration of %s instance #%u"),
+ pszNode, pChild->szName, pszWho, uInstance);
+ }
+ }
+ }
+
+ /* All is well. */
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Populates the CFGM tree with the default configuration.
+ *
+ * This assumes an empty tree and is intended for testcases and such that only
+ * need to do very small adjustments to the config.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @internal
+ */
+VMMR3DECL(int) CFGMR3ConstructDefaultTree(PVM pVM)
+{
+ int rc;
+ int rcAll = VINF_SUCCESS;
+#define UPDATERC() do { if (RT_FAILURE(rc) && RT_SUCCESS(rcAll)) rcAll = rc; } while (0)
+
+ PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
+ AssertReturn(pRoot, VERR_WRONG_ORDER);
+
+ /*
+ * Create VM default values.
+ */
+ rc = CFGMR3InsertString(pRoot, "Name", "Default VM");
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "RamSize", 128U * _1M);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "RamHoleSize", 512U * _1M);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "TimerMillies", 10);
+ UPDATERC();
+
+ /*
+ * HM.
+ */
+ PCFGMNODE pHm;
+ rc = CFGMR3InsertNode(pRoot, "HM", &pHm);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pHm, "FallbackToIEM", 1); /* boolean */
+ UPDATERC();
+
+
+ /*
+ * PDM.
+ */
+ PCFGMNODE pPdm;
+ rc = CFGMR3InsertNode(pRoot, "PDM", &pPdm);
+ UPDATERC();
+ PCFGMNODE pDevices = NULL;
+ rc = CFGMR3InsertNode(pPdm, "Devices", &pDevices);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pDevices, "LoadBuiltin", 1); /* boolean */
+ UPDATERC();
+ PCFGMNODE pDrivers = NULL;
+ rc = CFGMR3InsertNode(pPdm, "Drivers", &pDrivers);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pDrivers, "LoadBuiltin", 1); /* boolean */
+ UPDATERC();
+
+
+ /*
+ * Devices
+ */
+ pDevices = NULL;
+ rc = CFGMR3InsertNode(pRoot, "Devices", &pDevices);
+ UPDATERC();
+ /* device */
+ PCFGMNODE pDev = NULL;
+ PCFGMNODE pInst = NULL;
+ PCFGMNODE pCfg = NULL;
+#if 0
+ PCFGMNODE pLunL0 = NULL;
+ PCFGMNODE pLunL1 = NULL;
+#endif
+
+ /*
+ * PC Arch.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pcarch", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * PC Bios.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pcbios", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice0", "IDE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice1", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice2", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice3", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "HardDiskDevice", "piix3ide");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "FloppyDevice", "");
+ UPDATERC();
+ RTUUID Uuid;
+ RTUuidClear(&Uuid);
+ rc = CFGMR3InsertBytes(pCfg, "UUID", &Uuid, sizeof(Uuid));
+ UPDATERC();
+
+ /*
+ * PCI bus.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pci", &pDev); /* piix3 */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * PS/2 keyboard & mouse
+ */
+ rc = CFGMR3InsertNode(pDevices, "pckbd", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+#if 0
+ rc = CFGMR3InsertNode(pInst, "LUN#0", &pLunL0);
+ UPDATERC();
+ rc = CFGMR3InsertString(pLunL0, "Driver", "KeyboardQueue");
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL0, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "QueueSize", 64);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL0, "AttachedDriver", &pLunL1);
+ UPDATERC();
+ rc = CFGMR3InsertString(pLunL1, "Driver", "MainKeyboard");
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL1, "Config", &pCfg);
+ UPDATERC();
+#endif
+#if 0
+ rc = CFGMR3InsertNode(pInst, "LUN#1", &pLunL0);
+ UPDATERC();
+ rc = CFGMR3InsertString(pLunL0, "Driver", "MouseQueue");
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL0, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "QueueSize", 128);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL0, "AttachedDriver", &pLunL1);
+ UPDATERC();
+ rc = CFGMR3InsertString(pLunL1, "Driver", "MainMouse");
+ UPDATERC();
+ rc = CFGMR3InsertNode(pLunL1, "Config", &pCfg);
+ UPDATERC();
+#endif
+
+ /*
+ * i8254 Programmable Interval Timer And Dummy Speaker
+ */
+ rc = CFGMR3InsertNode(pDevices, "i8254", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+#ifdef DEBUG
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+#endif
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * i8259 Programmable Interrupt Controller.
+ */
+ rc = CFGMR3InsertNode(pDevices, "i8259", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * RTC MC146818.
+ */
+ rc = CFGMR3InsertNode(pDevices, "mc146818", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * VGA.
+ */
+ rc = CFGMR3InsertNode(pDevices, "vga", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "VRamSize", 4 * _1M);
+ UPDATERC();
+
+ /* Bios logo. */
+ rc = CFGMR3InsertInteger(pCfg, "FadeIn", 1);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "FadeOut", 1);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "LogoTime", 0);
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "LogoFile", "");
+ UPDATERC();
+
+#if 0
+ rc = CFGMR3InsertNode(pInst, "LUN#0", &pLunL0);
+ UPDATERC();
+ rc = CFGMR3InsertString(pLunL0, "Driver", "MainDisplay");
+ UPDATERC();
+#endif
+
+ /*
+ * IDE controller.
+ */
+ rc = CFGMR3InsertNode(pDevices, "piix3ide", &pDev); /* piix3 */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * VMMDev.
+ */
+ rc = CFGMR3InsertNode(pDevices, "VMMDev", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+
+
+ /*
+ * ...
+ */
+
+#undef UPDATERC
+ return rcAll;
+}
+
+
+
+
+/**
+ * Resolves a path reference to a child node.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszPath Path to the child node.
+ * @param ppChild Where to store the pointer to the child node.
+ */
+static int cfgmR3ResolveNode(PCFGMNODE pNode, const char *pszPath, PCFGMNODE *ppChild)
+{
+ *ppChild = NULL;
+ if (!pNode)
+ return VERR_CFGM_NO_PARENT;
+ PCFGMNODE pChild = NULL;
+ for (;;)
+ {
+ /* skip leading slashes. */
+ while (*pszPath == '/')
+ pszPath++;
+
+ /* End of path? */
+ if (!*pszPath)
+ {
+ if (!pChild)
+ return VERR_CFGM_INVALID_CHILD_PATH;
+ *ppChild = pChild;
+ return VINF_SUCCESS;
+ }
+
+ /* find end of component. */
+ const char *pszNext = strchr(pszPath, '/');
+ if (!pszNext)
+ pszNext = strchr(pszPath, '\0');
+ RTUINT cchName = pszNext - pszPath;
+
+ /* search child list. */
+ pChild = pNode->pFirstChild;
+ for ( ; pChild; pChild = pChild->pNext)
+ if (pChild->cchName == cchName)
+ {
+ int iDiff = memcmp(pszPath, pChild->szName, cchName);
+ if (iDiff <= 0)
+ {
+ if (iDiff != 0)
+ pChild = NULL;
+ break;
+ }
+ }
+ if (!pChild)
+ return VERR_CFGM_CHILD_NOT_FOUND;
+
+ /* next iteration */
+ pNode = pChild;
+ pszPath = pszNext;
+ }
+
+ /* won't get here */
+}
+
+
+/**
+ * Resolves a path reference to a child node.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of a byte string value.
+ * @param ppLeaf Where to store the pointer to the leaf node.
+ */
+static int cfgmR3ResolveLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf)
+{
+ *ppLeaf = NULL;
+ if (!pNode)
+ return VERR_CFGM_NO_PARENT;
+
+ size_t cchName = strlen(pszName);
+ PCFGMLEAF pLeaf = pNode->pFirstLeaf;
+ while (pLeaf)
+ {
+ if (cchName == pLeaf->cchName)
+ {
+ int iDiff = memcmp(pszName, pLeaf->szName, cchName);
+ if (iDiff <= 0)
+ {
+ if (iDiff != 0)
+ break;
+ *ppLeaf = pLeaf;
+ return VINF_SUCCESS;
+ }
+ }
+
+ /* next */
+ pLeaf = pLeaf->pNext;
+ }
+ return VERR_CFGM_VALUE_NOT_FOUND;
+}
+
+
+
+/**
+ * Creates a CFGM tree.
+ *
+ * This is intended for creating device/driver configs that can be
+ * passed around and later attached to the main tree in the
+ * correct location.
+ *
+ * @returns Pointer to the root node, NULL on error (out of memory or invalid
+ * VM handle).
+ * @param   pUVM            The user mode VM handle.  For testcases (and other
+ *                          purposes), NULL can be used.  However, the resulting
+ *                          tree cannot be inserted into a tree that has a
+ *                          non-NULL value.  Using NULL can be useful for
+ *                          testcases and similar, non-VMM uses.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3CreateTree(PUVM pUVM)
+{
+ if (pUVM)
+ {
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ }
+
+ PCFGMNODE pNew;
+ if (pUVM)
+ pNew = (PCFGMNODE)MMR3HeapAllocU(pUVM, MM_TAG_CFGM, sizeof(*pNew));
+ else
+ pNew = (PCFGMNODE)RTMemAlloc(sizeof(*pNew));
+ if (pNew)
+ {
+ pNew->pPrev = NULL;
+ pNew->pNext = NULL;
+ pNew->pParent = NULL;
+ pNew->pFirstChild = NULL;
+ pNew->pFirstLeaf = NULL;
+ pNew->pVM = pUVM ? pUVM->pVM : NULL;
+ pNew->fRestrictedRoot = false;
+ pNew->cchName = 0;
+ pNew->szName[0] = 0;
+ }
+ return pNew;
+}
+
+
+/**
+ * Duplicates a CFGM sub-tree or a full tree.
+ *
+ * @returns VBox status code.  VERR_NO_MEMORY if we run out of memory, and
+ *          VERR_INVALID_POINTER if the input parameter is NULL.
+ * @param pRoot The root of the tree to duplicate.
+ * @param ppCopy Where to return the root of the duplicate.
+ */
+VMMR3DECL(int) CFGMR3DuplicateSubTree(PCFGMNODE pRoot, PCFGMNODE *ppCopy)
+{
+ AssertPtrReturn(pRoot, VERR_INVALID_POINTER);
+
+ /*
+ * Create a new tree.
+ */
+ PCFGMNODE pNewRoot = CFGMR3CreateTree(pRoot->pVM ? pRoot->pVM->pUVM : NULL);
+ if (!pNewRoot)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Duplicate the content.
+ */
+ int rc = VINF_SUCCESS;
+ PCFGMNODE pSrcCur = pRoot;
+ PCFGMNODE pDstCur = pNewRoot;
+ for (;;)
+ {
+ if ( !pDstCur->pFirstChild
+ && !pDstCur->pFirstLeaf)
+ {
+ /*
+ * Values first.
+ */
+ /** @todo this isn't the most efficient way to do it. */
+ for (PCFGMLEAF pLeaf = pSrcCur->pFirstLeaf; pLeaf && RT_SUCCESS(rc); pLeaf = pLeaf->pNext)
+ rc = CFGMR3InsertValue(pDstCur, pLeaf);
+
+ /*
+ * Insert immediate child nodes.
+ */
+ /** @todo this isn't the most efficient way to do it. */
+ for (PCFGMNODE pChild = pSrcCur->pFirstChild; pChild && RT_SUCCESS(rc); pChild = pChild->pNext)
+ rc = CFGMR3InsertNode(pDstCur, pChild->szName, NULL);
+
+ AssertLogRelRCBreak(rc);
+ }
+
+ /*
+ * Deep copy of the children.
+ */
+ if (pSrcCur->pFirstChild)
+ {
+ Assert(pDstCur->pFirstChild && !strcmp(pDstCur->pFirstChild->szName, pSrcCur->pFirstChild->szName));
+ pSrcCur = pSrcCur->pFirstChild;
+ pDstCur = pDstCur->pFirstChild;
+ }
+ /*
+ * If it's the root node, we're done.
+ */
+ else if (pSrcCur == pRoot)
+ break;
+ else
+ {
+ /*
+ * Upon reaching the end of a sibling list, we must ascend and
+             * resume the sibling walk on a previous level.
+ */
+ if (!pSrcCur->pNext)
+ {
+ do
+ {
+ pSrcCur = pSrcCur->pParent;
+ pDstCur = pDstCur->pParent;
+ } while (!pSrcCur->pNext && pSrcCur != pRoot);
+ if (pSrcCur == pRoot)
+ break;
+ }
+
+ /*
+ * Next sibling.
+ */
+ Assert(pDstCur->pNext && !strcmp(pDstCur->pNext->szName, pSrcCur->pNext->szName));
+ pSrcCur = pSrcCur->pNext;
+ pDstCur = pDstCur->pNext;
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ CFGMR3RemoveNode(pNewRoot);
+ return rc;
+ }
+
+ *ppCopy = pNewRoot;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Insert subtree.
+ *
+ * This function inserts (no duplication) a tree created by CFGMR3CreateTree()
+ * into the main tree.
+ *
+ * The root node of the inserted subtree will need to be reallocated, which
+ * effectively means that the passed-in pSubTree handle becomes invalid
+ * upon successful return. Use the value returned in ppChild instead
+ * of pSubTree.
+ *
+ * @returns VBox status code.
+ * @returns VERR_CFGM_NODE_EXISTS if the final child node name component exists.
+ * @param pNode Parent node.
+ * @param pszName Name or path of the new child node.
+ * @param pSubTree The subtree to insert. Must be returned by CFGMR3CreateTree().
+ * @param ppChild Where to store the address of the new child node. (optional)
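+ *
+ * Usage sketch; the node and value names are illustrative only:
+ * @code
+ *      PCFGMNODE pTree = CFGMR3CreateTree(pUVM);
+ *      int rc = pTree ? CFGMR3InsertInteger(pTree, "SomeValue", 42) : VERR_NO_MEMORY;
+ *      if (RT_SUCCESS(rc))
+ *          rc = CFGMR3InsertSubTree(pParent, "Child", pTree, &pTree); /* old pTree handle is dead on success */
+ * @endcode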
+ */
+VMMR3DECL(int) CFGMR3InsertSubTree(PCFGMNODE pNode, const char *pszName, PCFGMNODE pSubTree, PCFGMNODE *ppChild)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pNode, VERR_INVALID_POINTER);
+ AssertPtrReturn(pSubTree, VERR_INVALID_POINTER);
+ AssertReturn(pNode != pSubTree, VERR_INVALID_PARAMETER);
+ AssertReturn(!pSubTree->pParent, VERR_INVALID_PARAMETER);
+ AssertReturn(pNode->pVM == pSubTree->pVM, VERR_INVALID_PARAMETER);
+ Assert(!pSubTree->pNext);
+ Assert(!pSubTree->pPrev);
+
+ /*
+ * Use CFGMR3InsertNode to create a new node and then
+ * re-attach the children and leaves of the subtree to it.
+ */
+ PCFGMNODE pNewChild;
+ int rc = CFGMR3InsertNode(pNode, pszName, &pNewChild);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(!pNewChild->pFirstChild);
+ Assert(!pNewChild->pFirstLeaf);
+
+ pNewChild->pFirstChild = pSubTree->pFirstChild;
+ pNewChild->pFirstLeaf = pSubTree->pFirstLeaf;
+ for (PCFGMNODE pChild = pNewChild->pFirstChild; pChild; pChild = pChild->pNext)
+ pChild->pParent = pNewChild;
+
+ if (ppChild)
+ *ppChild = pNewChild;
+
+ /* free the old subtree root */
+ cfgmR3FreeNodeOnly(pSubTree);
+ }
+ return rc;
+}
+
+
+/**
+ * Replaces a (sub-)tree with new one.
+ *
+ * This function removes the existing (sub-)tree, completely freeing it in the
+ * process, and inserts (no duplication) the specified tree. The tree can
+ * either be created by CFGMR3CreateTree or CFGMR3DuplicateSubTree.
+ *
+ * @returns VBox status code.
+ * @param pRoot The sub-tree to replace. This node will remain valid
+ * after the call.
+ * @param   pNewRoot    The tree to replace @a pRoot with.  This node will
+ * become invalid after a successful call.
+ */
+VMMR3DECL(int) CFGMR3ReplaceSubTree(PCFGMNODE pRoot, PCFGMNODE pNewRoot)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pRoot, VERR_INVALID_POINTER);
+ AssertPtrReturn(pNewRoot, VERR_INVALID_POINTER);
+ AssertReturn(pRoot != pNewRoot, VERR_INVALID_PARAMETER);
+ AssertReturn(!pNewRoot->pParent, VERR_INVALID_PARAMETER);
+ AssertReturn(pNewRoot->pVM == pRoot->pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(!pNewRoot->pNext, VERR_INVALID_PARAMETER);
+ AssertReturn(!pNewRoot->pPrev, VERR_INVALID_PARAMETER);
+
+ /*
+     * Free the current properties of pRoot.
+ */
+ while (pRoot->pFirstChild)
+ CFGMR3RemoveNode(pRoot->pFirstChild);
+
+ while (pRoot->pFirstLeaf)
+ cfgmR3RemoveLeaf(pRoot, pRoot->pFirstLeaf);
+
+ /*
+ * Copy all the properties from the new root to the current one.
+ */
+ pRoot->pFirstLeaf = pNewRoot->pFirstLeaf;
+ pRoot->pFirstChild = pNewRoot->pFirstChild;
+ for (PCFGMNODE pChild = pRoot->pFirstChild; pChild; pChild = pChild->pNext)
+ pChild->pParent = pRoot;
+
+ cfgmR3FreeNodeOnly(pNewRoot);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Copies all values and keys from one tree onto another.
+ *
+ * The flags control what happens to keys and values with the same name
+ * existing in both source and destination.
+ *
+ * @returns VBox status code.
+ * @param pDstTree The destination tree.
+ * @param pSrcTree The source tree.
+ * @param fFlags Copy flags, see CFGM_COPY_FLAGS_XXX.
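+ *
+ * For instance, to overwrite clashing values and recursively merge clashing
+ * keys (flag names as validated below):
+ * @code
+ *      rc = CFGMR3CopyTree(pDstTree, pSrcTree, CFGM_COPY_FLAGS_REPLACE_VALUES | CFGM_COPY_FLAGS_MERGE_KEYS);
+ * @endcode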
+ */
+VMMR3DECL(int) CFGMR3CopyTree(PCFGMNODE pDstTree, PCFGMNODE pSrcTree, uint32_t fFlags)
+{
+ /*
+ * Input validation.
+ */
+ AssertPtrReturn(pSrcTree, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDstTree, VERR_INVALID_POINTER);
+ AssertReturn(pDstTree != pSrcTree, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & ~(CFGM_COPY_FLAGS_VALUE_DISP_MASK | CFGM_COPY_FLAGS_KEY_DISP_MASK)), VERR_INVALID_PARAMETER);
+ AssertReturn( (fFlags & CFGM_COPY_FLAGS_VALUE_DISP_MASK) != CFGM_COPY_FLAGS_RESERVED_VALUE_DISP_0
+ && (fFlags & CFGM_COPY_FLAGS_VALUE_DISP_MASK) != CFGM_COPY_FLAGS_RESERVED_VALUE_DISP_1,
+ VERR_INVALID_PARAMETER);
+ AssertReturn((fFlags & CFGM_COPY_FLAGS_KEY_DISP_MASK) != CFGM_COPY_FLAGS_RESERVED_KEY_DISP,
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Copy the values.
+ */
+ int rc;
+ for (PCFGMLEAF pValue = CFGMR3GetFirstValue(pSrcTree); pValue; pValue = CFGMR3GetNextValue(pValue))
+ {
+ rc = CFGMR3InsertValue(pDstTree, pValue);
+ if (rc == VERR_CFGM_LEAF_EXISTS)
+ {
+ if ((fFlags & CFGM_COPY_FLAGS_VALUE_DISP_MASK) == CFGM_COPY_FLAGS_REPLACE_VALUES)
+ {
+ rc = CFGMR3RemoveValue(pDstTree, pValue->szName);
+ if (RT_FAILURE(rc))
+ break;
+ rc = CFGMR3InsertValue(pDstTree, pValue);
+ }
+ else
+ rc = VINF_SUCCESS;
+ }
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Copy/merge the keys - merging results in recursion.
+ */
+ for (PCFGMNODE pSrcChild = CFGMR3GetFirstChild(pSrcTree); pSrcChild; pSrcChild = CFGMR3GetNextChild(pSrcChild))
+ {
+ PCFGMNODE pDstChild = CFGMR3GetChild(pDstTree, pSrcChild->szName);
+ if ( pDstChild
+ && (fFlags & CFGM_COPY_FLAGS_KEY_DISP_MASK) == CFGM_COPY_FLAGS_REPLACE_KEYS)
+ {
+ CFGMR3RemoveNode(pDstChild);
+ pDstChild = NULL;
+ }
+ if (!pDstChild)
+ {
+ PCFGMNODE pChildCopy;
+ rc = CFGMR3DuplicateSubTree(pSrcChild, &pChildCopy);
+ AssertRCReturn(rc, rc);
+ rc = CFGMR3InsertSubTree(pDstTree, pSrcChild->szName, pChildCopy, NULL);
+ AssertRCReturnStmt(rc, CFGMR3RemoveNode(pChildCopy), rc);
+ }
+ else if ((fFlags & CFGM_COPY_FLAGS_KEY_DISP_MASK) == CFGM_COPY_FLAGS_MERGE_KEYS)
+ {
+ rc = CFGMR3CopyTree(pDstChild, pSrcChild, fFlags);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Compares two names.
+ *
+ * @returns Similar to memcmp.
+ * @param pszName1 The first name.
+ * @param cchName1 The length of the first name.
+ * @param pszName2 The second name.
+ * @param cchName2 The length of the second name.
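+ *
+ * For example, "Dev" sorts before "Device": the common prefix compares equal
+ * and the shorter name wins, which is what keeps the child and leaf lists in
+ * the stable sorted order relied upon by the insertion code below.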
+ */
+DECLINLINE(int) cfgmR3CompareNames(const char *pszName1, size_t cchName1, const char *pszName2, size_t cchName2)
+{
+ int iDiff;
+ if (cchName1 <= cchName2)
+ {
+ iDiff = memcmp(pszName1, pszName2, cchName1);
+ if (!iDiff && cchName1 < cchName2)
+ iDiff = -1;
+ }
+ else
+ {
+ iDiff = memcmp(pszName1, pszName2, cchName2);
+ if (!iDiff)
+ iDiff = 1;
+ }
+ return iDiff;
+}
+
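+
+/*
+ * Behaviour sketch (illustrative calls; the names are made up). A shorter
+ * name that is a prefix of a longer one sorts first, otherwise the byte
+ * comparison over the common length decides:
+ *
+ * cfgmR3CompareNames("Config", 6, "Config0", 7); // < 0, prefix sorts first
+ * cfgmR3CompareNames("LUN#1", 5, "LUN#0", 5); // > 0, plain byte compare
+ * cfgmR3CompareNames("Config", 6, "Config", 6); // == 0, identical names
+ */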
+
+/**
+ * Insert a node.
+ *
+ * @returns VBox status code.
+ * @returns VERR_CFGM_NODE_EXISTS if the final child node name component exists.
+ * @param pNode Parent node.
+ * @param pszName Name or path of the new child node.
+ * @param ppChild Where to store the address of the new child node. (optional)
+ */
+VMMR3DECL(int) CFGMR3InsertNode(PCFGMNODE pNode, const char *pszName, PCFGMNODE *ppChild)
+{
+ int rc;
+ if (pNode)
+ {
+ /*
+ * If given a path we have to deal with it component by component.
+ */
+ while (*pszName == '/')
+ pszName++;
+ if (strchr(pszName, '/'))
+ {
+ char *pszDup = RTStrDup(pszName);
+ if (pszDup)
+ {
+ char *psz = pszDup;
+ for (;;)
+ {
+ /* Terminate at '/' and find the next component. */
+ char *pszNext = strchr(psz, '/');
+ if (pszNext)
+ {
+ *pszNext++ = '\0';
+ while (*pszNext == '/')
+ pszNext++;
+ if (*pszNext == '\0')
+ pszNext = NULL;
+ }
+
+ /* does it exist? */
+ PCFGMNODE pChild = CFGMR3GetChild(pNode, psz);
+ if (!pChild)
+ {
+ /* no, insert it */
+ rc = CFGMR3InsertNode(pNode, psz, &pChild);
+ if (RT_FAILURE(rc))
+ break;
+ if (!pszNext)
+ {
+ if (ppChild)
+ *ppChild = pChild;
+ break;
+ }
+
+ }
+ /* it exists; if it is the last component, fail */
+ else if (!pszNext)
+ {
+ rc = VERR_CFGM_NODE_EXISTS;
+ break;
+ }
+
+ /* next */
+ pNode = pChild;
+ psz = pszNext;
+ }
+ RTStrFree(pszDup);
+ }
+ else
+ rc = VERR_NO_TMP_MEMORY;
+ }
+ /*
+ * Not multicomponent, just make sure it's a non-empty name.
+ */
+ else if (*pszName)
+ {
+ /*
+ * Check if already exists and find last node in chain.
+ */
+ size_t cchName = strlen(pszName);
+ PCFGMNODE pPrev = NULL;
+ PCFGMNODE pNext = pNode->pFirstChild;
+ if (pNext)
+ {
+ for ( ; pNext; pPrev = pNext, pNext = pNext->pNext)
+ {
+ int iDiff = cfgmR3CompareNames(pszName, cchName, pNext->szName, pNext->cchName);
+ if (iDiff <= 0)
+ {
+ if (!iDiff)
+ return VERR_CFGM_NODE_EXISTS;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Allocate and init node.
+ */
+ PCFGMNODE pNew = (PCFGMNODE)cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
+ if (pNew)
+ {
+ pNew->pParent = pNode;
+ pNew->pFirstChild = NULL;
+ pNew->pFirstLeaf = NULL;
+ pNew->pVM = pNode->pVM;
+ pNew->fRestrictedRoot = false;
+ pNew->cchName = cchName;
+ memcpy(pNew->szName, pszName, cchName + 1);
+
+ /*
+ * Insert into child list.
+ */
+ pNew->pPrev = pPrev;
+ if (pPrev)
+ pPrev->pNext = pNew;
+ else
+ pNode->pFirstChild = pNew;
+ pNew->pNext = pNext;
+ if (pNext)
+ pNext->pPrev = pNew;
+
+ if (ppChild)
+ *ppChild = pNew;
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ {
+ rc = VERR_CFGM_INVALID_NODE_PATH;
+ AssertMsgFailed(("Invalid path %s\n", pszName));
+ }
+ }
+ else
+ {
+ rc = VERR_CFGM_NO_PARENT;
+ AssertMsgFailed(("No parent! path %s\n", pszName));
+ }
+
+ return rc;
+}
+
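+
+/*
+ * Example (an illustrative sketch; the path below is made up):
+ *
+ * PCFGMNODE pCfg;
+ * int rc = CFGMR3InsertNode(pDevices, "MyDevice/0/Config", &pCfg);
+ *
+ * Missing intermediate nodes are created on the fly; only an already
+ * existing final component ("Config" here) fails with VERR_CFGM_NODE_EXISTS.
+ */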
+
+/**
+ * Insert a node, format string name.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param ppChild Where to store the address of the new child node. (optional)
+ * @param pszNameFormat Name or path of the new child node.
+ * @param ... Name format arguments.
+ */
+VMMR3DECL(int) CFGMR3InsertNodeF(PCFGMNODE pNode, PCFGMNODE *ppChild, const char *pszNameFormat, ...)
+{
+ va_list Args;
+ va_start(Args, pszNameFormat);
+ int rc = CFGMR3InsertNodeFV(pNode, ppChild, pszNameFormat, Args);
+ va_end(Args);
+ return rc;
+}
+
+
+/**
+ * Insert a node, format string name.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param ppChild Where to store the address of the new child node. (optional)
+ * @param pszNameFormat Name or path of the new child node.
+ * @param Args Name format arguments.
+ */
+VMMR3DECL(int) CFGMR3InsertNodeFV(PCFGMNODE pNode, PCFGMNODE *ppChild, const char *pszNameFormat, va_list Args)
+{
+ int rc;
+ char *pszName;
+ RTStrAPrintfV(&pszName, pszNameFormat, Args);
+ if (pszName)
+ {
+ rc = CFGMR3InsertNode(pNode, pszName, ppChild);
+ RTStrFree(pszName);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Marks the node as the root of a restricted subtree, i.e. the end of
+ * a CFGMR3GetParent() journey.
+ *
+ * @param pNode The node to mark.
+ */
+VMMR3DECL(void) CFGMR3SetRestrictedRoot(PCFGMNODE pNode)
+{
+ if (pNode)
+ pNode->fRestrictedRoot = true;
+}
+
+
+/**
+ * Inserts a leaf.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Name of the new leaf.
+ * @param ppLeaf Where to store the new leaf.
+ * The caller must fill in the enmType and Value fields!
+ */
+static int cfgmR3InsertLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf)
+{
+ int rc;
+ if (*pszName)
+ {
+ if (pNode)
+ {
+ /*
+ * Check if already exists and find last node in chain.
+ */
+ size_t cchName = strlen(pszName);
+ PCFGMLEAF pPrev = NULL;
+ PCFGMLEAF pNext = pNode->pFirstLeaf;
+ if (pNext)
+ {
+ for ( ; pNext; pPrev = pNext, pNext = pNext->pNext)
+ {
+ int iDiff = cfgmR3CompareNames(pszName, cchName, pNext->szName, pNext->cchName);
+ if (iDiff <= 0)
+ {
+ if (!iDiff)
+ return VERR_CFGM_LEAF_EXISTS;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Allocate and init node.
+ */
+ PCFGMLEAF pNew = (PCFGMLEAF)cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
+ if (pNew)
+ {
+ pNew->cchName = cchName;
+ memcpy(pNew->szName, pszName, cchName + 1);
+
+ /*
+ * Insert into child list.
+ */
+ pNew->pPrev = pPrev;
+ if (pPrev)
+ pPrev->pNext = pNew;
+ else
+ pNode->pFirstLeaf = pNew;
+ pNew->pNext = pNext;
+ if (pNext)
+ pNext->pPrev = pNew;
+
+ *ppLeaf = pNew;
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_CFGM_NO_PARENT;
+ }
+ else
+ rc = VERR_CFGM_INVALID_CHILD_PATH;
+ return rc;
+}
+
+
+/**
+ * Removes a node.
+ *
+ * @param pNode The node to remove.
+ */
+VMMR3DECL(void) CFGMR3RemoveNode(PCFGMNODE pNode)
+{
+ if (pNode)
+ {
+ /*
+ * Free children.
+ */
+ while (pNode->pFirstChild)
+ CFGMR3RemoveNode(pNode->pFirstChild);
+
+ /*
+ * Free leaves.
+ */
+ while (pNode->pFirstLeaf)
+ cfgmR3RemoveLeaf(pNode, pNode->pFirstLeaf);
+
+ /*
+ * Unlink ourselves.
+ */
+ if (pNode->pPrev)
+ pNode->pPrev->pNext = pNode->pNext;
+ else
+ {
+ if (pNode->pParent)
+ pNode->pParent->pFirstChild = pNode->pNext;
+ else if ( pNode->pVM /* might be a different tree */
+ && pNode == pNode->pVM->cfgm.s.pRoot)
+ pNode->pVM->cfgm.s.pRoot = NULL;
+ }
+ if (pNode->pNext)
+ pNode->pNext->pPrev = pNode->pPrev;
+
+ /*
+ * Free ourselves.
+ */
+ cfgmR3FreeNodeOnly(pNode);
+ }
+}
+
+
+/**
+ * Removes a leaf.
+ *
+ * @param pNode Parent node.
+ * @param pLeaf Leaf to remove.
+ */
+static void cfgmR3RemoveLeaf(PCFGMNODE pNode, PCFGMLEAF pLeaf)
+{
+ if (pNode && pLeaf)
+ {
+ /*
+ * Unlink.
+ */
+ if (pLeaf->pPrev)
+ pLeaf->pPrev->pNext = pLeaf->pNext;
+ else
+ pNode->pFirstLeaf = pLeaf->pNext;
+ if (pLeaf->pNext)
+ pLeaf->pNext->pPrev = pLeaf->pPrev;
+
+ /*
+ * Free value and node.
+ */
+ cfgmR3FreeValue(pNode->pVM, pLeaf);
+ pLeaf->pNext = NULL;
+ pLeaf->pPrev = NULL;
+ cfgmR3MemFree(pNode->pVM, pLeaf);
+ }
+}
+
+
+/**
+ * Frees whatever resources the leaf value is owning.
+ *
+ * Use this before assigning a new value to a leaf.
+ * The caller must either free the leaf or assign a new value to it.
+ *
+ * @param pVM The cross context VM structure, if the tree
+ * is associated with one.
+ * @param pLeaf Pointer to the leaf whose value should be freed.
+ */
+static void cfgmR3FreeValue(PVM pVM, PCFGMLEAF pLeaf)
+{
+ if (pLeaf)
+ {
+ switch (pLeaf->enmType)
+ {
+ case CFGMVALUETYPE_BYTES:
+ cfgmR3MemFree(pVM, pLeaf->Value.Bytes.pau8);
+ pLeaf->Value.Bytes.pau8 = NULL;
+ pLeaf->Value.Bytes.cb = 0;
+ break;
+
+ case CFGMVALUETYPE_STRING:
+ cfgmR3StrFree(pVM, pLeaf->Value.String.psz);
+ pLeaf->Value.String.psz = NULL;
+ pLeaf->Value.String.cb = 0;
+ break;
+
+ case CFGMVALUETYPE_PASSWORD:
+ RTMemSaferFree(pLeaf->Value.String.psz, pLeaf->Value.String.cb);
+ pLeaf->Value.String.psz = NULL;
+ pLeaf->Value.String.cb = 0;
+ break;
+
+ case CFGMVALUETYPE_INTEGER:
+ break;
+ }
+ pLeaf->enmType = (CFGMVALUETYPE)0;
+ }
+}
+
+
+/**
+ * Destroys a tree created with CFGMR3CreateTree or CFGMR3DuplicateSubTree.
+ *
+ * @returns VBox status code.
+ * @param pRoot The root node of the tree.
+ */
+VMMR3DECL(int) CFGMR3DestroyTree(PCFGMNODE pRoot)
+{
+ if (!pRoot)
+ return VINF_SUCCESS;
+ AssertReturn(!pRoot->pParent, VERR_INVALID_PARAMETER);
+ AssertReturn(!pRoot->pVM || pRoot != pRoot->pVM->cfgm.s.pRoot, VERR_ACCESS_DENIED);
+
+ CFGMR3RemoveNode(pRoot);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Inserts a new integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param u64Integer The value.
+ */
+VMMR3DECL(int) CFGMR3InsertInteger(PCFGMNODE pNode, const char *pszName, uint64_t u64Integer)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3InsertLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ pLeaf->enmType = CFGMVALUETYPE_INTEGER;
+ pLeaf->Value.Integer.u64 = u64Integer;
+ }
+ return rc;
+}
+
+
+/**
+ * Inserts a new string value.
+ *
+ * This variant expects that the caller knows the length of the string already,
+ * so we can avoid calling strlen() here.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszString The value. Must not be NULL.
+ * @param cchString The length of the string excluding the
+ * terminator.
+ */
+VMMR3DECL(int) CFGMR3InsertStringN(PCFGMNODE pNode, const char *pszName, const char *pszString, size_t cchString)
+{
+ Assert(RTStrNLen(pszString, cchString) == cchString);
+
+ int rc;
+ if (pNode)
+ {
+ /*
+ * Allocate string object first.
+ */
+ char *pszStringCopy = (char *)cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cchString + 1);
+ if (pszStringCopy)
+ {
+ memcpy(pszStringCopy, pszString, cchString);
+ pszStringCopy[cchString] = '\0';
+
+ /*
+ * Create value leaf and set it to string type.
+ */
+ PCFGMLEAF pLeaf;
+ rc = cfgmR3InsertLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ pLeaf->enmType = CFGMVALUETYPE_STRING;
+ pLeaf->Value.String.psz = pszStringCopy;
+ pLeaf->Value.String.cb = cchString + 1;
+ }
+ else
+ cfgmR3StrFree(pNode->pVM, pszStringCopy);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_CFGM_NO_PARENT;
+
+ return rc;
+}
+
+
+/**
+ * Inserts a new string value.
+ *
+ * Calls strlen(pszString) internally; if you know the length of the string,
+ * CFGMR3InsertStringN() is faster.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszString The value.
+ */
+VMMR3DECL(int) CFGMR3InsertString(PCFGMNODE pNode, const char *pszName, const char *pszString)
+{
+ return CFGMR3InsertStringN(pNode, pszName, pszString, strlen(pszString));
+}
+
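+
+/*
+ * Example (an illustrative sketch; pCfg and the value names are made up):
+ *
+ * rc = CFGMR3InsertInteger(pCfg, "RamSize", 512 * _1M);
+ * AssertRCReturn(rc, rc);
+ * rc = CFGMR3InsertString(pCfg, "Name", "example");
+ * AssertRCReturn(rc, rc);
+ */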
+
+/**
+ * Same as CFGMR3InsertString except the string value is given in RTStrPrintfV
+ * fashion.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszFormat The value given as a format string.
+ * @param va Arguments for pszFormat.
+ */
+VMMR3DECL(int) CFGMR3InsertStringFV(PCFGMNODE pNode, const char *pszName, const char *pszFormat, va_list va)
+{
+ int rc;
+ if (pNode)
+ {
+ /*
+ * Allocate string object first.
+ */
+ char *pszString;
+ if (!pNode->pVM)
+ pszString = RTStrAPrintf2(pszFormat, va);
+ else
+ pszString = MMR3HeapAPrintfVU(pNode->pVM->pUVM, MM_TAG_CFGM_STRING, pszFormat, va);
+ if (pszString)
+ {
+ /*
+ * Create value leaf and set it to string type.
+ */
+ PCFGMLEAF pLeaf;
+ rc = cfgmR3InsertLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ pLeaf->enmType = CFGMVALUETYPE_STRING;
+ pLeaf->Value.String.psz = pszString;
+ pLeaf->Value.String.cb = strlen(pszString) + 1;
+ }
+ else
+ cfgmR3StrFree(pNode->pVM, pszString);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_CFGM_NO_PARENT;
+
+ return rc;
+}
+
+
+/**
+ * Same as CFGMR3InsertString except the string value is given in RTStrPrintf
+ * fashion.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszFormat The value given as a format string.
+ * @param ... Arguments for pszFormat.
+ */
+VMMR3DECL(int) CFGMR3InsertStringF(PCFGMNODE pNode, const char *pszName, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = CFGMR3InsertStringFV(pNode, pszName, pszFormat, va);
+ va_end(va);
+ return rc;
+}
+
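+
+/*
+ * Example (an illustrative sketch; the value name and format are made up):
+ *
+ * rc = CFGMR3InsertStringF(pCfg, "Location", "slot-%u/port-%u", iSlot, iPort);
+ */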
+
+/**
+ * Same as CFGMR3InsertString except the string value is given as a UTF-16 string.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pwszValue The string value (UTF-16).
+ */
+VMMR3DECL(int) CFGMR3InsertStringW(PCFGMNODE pNode, const char *pszName, PCRTUTF16 pwszValue)
+{
+ char *pszValue;
+ int rc = RTUtf16ToUtf8(pwszValue, &pszValue);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertString(pNode, pszName, pszValue);
+ RTStrFree(pszValue);
+ }
+ return rc;
+}
+
+
+/**
+ * Inserts a new bytes value.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pvBytes The value.
+ * @param cbBytes The value size.
+ */
+VMMR3DECL(int) CFGMR3InsertBytes(PCFGMNODE pNode, const char *pszName, const void *pvBytes, size_t cbBytes)
+{
+ int rc;
+ if (pNode)
+ {
+ if (cbBytes == (RTUINT)cbBytes)
+ {
+ /*
+ * Allocate string object first.
+ */
+ void *pvCopy = cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cbBytes);
+ if (pvCopy || !cbBytes)
+ {
+ memcpy(pvCopy, pvBytes, cbBytes);
+
+ /*
+ * Create value leaf and set it to string type.
+ */
+ PCFGMLEAF pLeaf;
+ rc = cfgmR3InsertLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ pLeaf->enmType = CFGMVALUETYPE_BYTES;
+ pLeaf->Value.Bytes.cb = cbBytes;
+ pLeaf->Value.Bytes.pau8 = (uint8_t *)pvCopy;
+ }
+ else
+ cfgmR3MemFree(pNode->pVM, pvCopy);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_OUT_OF_RANGE;
+ }
+ else
+ rc = VERR_CFGM_NO_PARENT;
+
+ return rc;
+}
+
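+
+/*
+ * Example (an illustrative sketch; a made-up 6-byte MAC address value):
+ *
+ * static const uint8_t s_abMac[6] = { 0x08, 0x00, 0x27, 0x00, 0x00, 0x01 };
+ * rc = CFGMR3InsertBytes(pCfg, "MAC", s_abMac, sizeof(s_abMac));
+ */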
+
+/**
+ * Inserts a new password value.
+ *
+ * This variant expects that the caller knows the length of the password string
+ * already, so we can avoid calling strlen() here.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszString The value. Must not be NULL.
+ * @param cchString The length of the string excluding the terminator.
+ */
+VMMR3DECL(int) CFGMR3InsertPasswordN(PCFGMNODE pNode, const char *pszName, const char *pszString, size_t cchString)
+{
+ Assert(RTStrNLen(pszString, cchString) == cchString);
+
+ int rc;
+ if (pNode)
+ {
+ /*
+ * Allocate string object first using the safer memory API since this
+ * is considered sensitive information.
+ */
+ char *pszStringCopy = (char *)RTMemSaferAllocZ(cchString + 1);
+ if (pszStringCopy)
+ {
+ memcpy(pszStringCopy, pszString, cchString);
+ pszStringCopy[cchString] = '\0';
+ RTMemSaferScramble(pszStringCopy, cchString + 1);
+
+ /*
+ * Create value leaf and set it to string type.
+ */
+ PCFGMLEAF pLeaf;
+ rc = cfgmR3InsertLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ pLeaf->enmType = CFGMVALUETYPE_PASSWORD;
+ pLeaf->Value.String.psz = pszStringCopy;
+ pLeaf->Value.String.cb = cchString + 1;
+ }
+ else
+ RTMemSaferFree(pszStringCopy, cchString + 1);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_CFGM_NO_PARENT;
+
+ return rc;
+}
+
+
+/**
+ * Inserts a new password value.
+ *
+ * Calls strlen(pszString) internally; if you know the length of the string,
+ * CFGMR3InsertPasswordN() is faster.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Value name.
+ * @param pszString The value.
+ */
+VMMR3DECL(int) CFGMR3InsertPassword(PCFGMNODE pNode, const char *pszName, const char *pszString)
+{
+ return CFGMR3InsertPasswordN(pNode, pszName, pszString, strlen(pszString));
+}
+
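+
+/*
+ * Example (an illustrative sketch; the value name is made up). The copy lives
+ * in scrambled "safer" memory and CFGMR3Dump renders it as ***REDACTED***,
+ * unlike a plain string value:
+ *
+ * rc = CFGMR3InsertPassword(pCfg, "KeyStorePassword", pszSecret);
+ */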
+
+/**
+ * Make a copy of the specified value under the given node.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pValue The value to copy and insert.
+ */
+VMMR3DECL(int) CFGMR3InsertValue(PCFGMNODE pNode, PCFGMLEAF pValue)
+{
+ int rc;
+ switch (pValue->enmType)
+ {
+ case CFGMVALUETYPE_INTEGER:
+ rc = CFGMR3InsertInteger(pNode, pValue->szName, pValue->Value.Integer.u64);
+ break;
+
+ case CFGMVALUETYPE_BYTES:
+ rc = CFGMR3InsertBytes(pNode, pValue->szName, pValue->Value.Bytes.pau8, pValue->Value.Bytes.cb);
+ break;
+
+ case CFGMVALUETYPE_STRING:
+ rc = CFGMR3InsertStringN(pNode, pValue->szName, pValue->Value.String.psz, pValue->Value.String.cb - 1);
+ break;
+
+ case CFGMVALUETYPE_PASSWORD:
+ rc = CFGMR3InsertPasswordN(pNode, pValue->szName, pValue->Value.String.psz, pValue->Value.String.cb - 1);
+ break;
+
+ default:
+ rc = VERR_CFGM_IPE_1;
+ AssertMsgFailed(("Invalid value type %d\n", pValue->enmType));
+ break;
+ }
+ return rc;
+}
+
+
+/**
+ * Remove a value.
+ *
+ * @returns VBox status code.
+ * @param pNode Parent node.
+ * @param pszName Name of the value to remove.
+ */
+VMMR3DECL(int) CFGMR3RemoveValue(PCFGMNODE pNode, const char *pszName)
+{
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ cfgmR3RemoveLeaf(pNode, pLeaf);
+ return rc;
+}
+
+
+
+/*
+ * -+- helper apis -+-
+ */
+
+
+/**
+ * Query unsigned 64-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu64 Where to store the integer value.
+ */
+VMMR3DECL(int) CFGMR3QueryU64(PCFGMNODE pNode, const char *pszName, uint64_t *pu64)
+{
+ return CFGMR3QueryInteger(pNode, pszName, pu64);
+}
+
+
+/**
+ * Query unsigned 64-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu64 Where to store the integer value. Set to default on failure.
+ * @param u64Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryU64Def(PCFGMNODE pNode, const char *pszName, uint64_t *pu64, uint64_t u64Def)
+{
+ return CFGMR3QueryIntegerDef(pNode, pszName, pu64, u64Def);
+}
+
+
+/**
+ * Query signed 64-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi64 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryS64(PCFGMNODE pNode, const char *pszName, int64_t *pi64)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ *pi64 = (int64_t)u64;
+ return rc;
+}
+
+
+/**
+ * Query signed 64-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi64 Where to store the value. Set to default on failure.
+ * @param i64Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryS64Def(PCFGMNODE pNode, const char *pszName, int64_t *pi64, int64_t i64Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, i64Def);
+ *pi64 = (int64_t)u64;
+ return rc;
+}
+
+
+/**
+ * Query unsigned 32-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu32 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryU32(PCFGMNODE pNode, const char *pszName, uint32_t *pu32)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffff00000000)))
+ *pu32 = (uint32_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query unsigned 32-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu32 Where to store the value. Set to default on failure.
+ * @param u32Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryU32Def(PCFGMNODE pNode, const char *pszName, uint32_t *pu32, uint32_t u32Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, u32Def);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffff00000000)))
+ *pu32 = (uint32_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pu32 = u32Def;
+ return rc;
+}
+
+
+/**
+ * Query signed 32-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi32 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryS32(PCFGMNODE pNode, const char *pszName, int32_t *pi32)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffff80000000))
+ || (u64 & UINT64_C(0xffffffff80000000)) == UINT64_C(0xffffffff80000000))
+ *pi32 = (int32_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
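+
+/*
+ * Acceptance sketch for the range check above (illustrative input values):
+ *
+ * 0x000000007fffffff -> 2147483647 (upper bits all zero)
+ * 0xfffffffffffffffe -> -2 (upper bits all one, i.e. sign-extended)
+ * 0x00000000fffffffe -> VERR_CFGM_INTEGER_TOO_BIG (mixed upper bits)
+ */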
+
+/**
+ * Query signed 32-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi32 Where to store the value. Set to default on failure.
+ * @param i32Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryS32Def(PCFGMNODE pNode, const char *pszName, int32_t *pi32, int32_t i32Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, i32Def);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffff80000000))
+ || (u64 & UINT64_C(0xffffffff80000000)) == UINT64_C(0xffffffff80000000))
+ *pi32 = (int32_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pi32 = i32Def;
+ return rc;
+}
+
+
+/**
+ * Query unsigned 16-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu16 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryU16(PCFGMNODE pNode, const char *pszName, uint16_t *pu16)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffffffff0000)))
+ *pu16 = (uint16_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query unsigned 16-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu16 Where to store the value. Set to default on failure.
+ * @param u16Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryU16Def(PCFGMNODE pNode, const char *pszName, uint16_t *pu16, uint16_t u16Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, u16Def);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffffffff0000)))
+ *pu16 = (uint16_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pu16 = u16Def;
+ return rc;
+}
+
+
+/**
+ * Query signed 16-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi16 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryS16(PCFGMNODE pNode, const char *pszName, int16_t *pi16)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffffffff8000))
+ || (u64 & UINT64_C(0xffffffffffff8000)) == UINT64_C(0xffffffffffff8000))
+ *pi16 = (int16_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query signed 16-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi16 Where to store the value. Set to default on failure.
+ * @param i16Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryS16Def(PCFGMNODE pNode, const char *pszName, int16_t *pi16, int16_t i16Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, i16Def);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffffffff8000))
+ || (u64 & UINT64_C(0xffffffffffff8000)) == UINT64_C(0xffffffffffff8000))
+ *pi16 = (int16_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pi16 = i16Def;
+ return rc;
+}
+
+
+/**
+ * Query unsigned 8-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu8 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryU8(PCFGMNODE pNode, const char *pszName, uint8_t *pu8)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffffffffff00)))
+ *pu8 = (uint8_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query unsigned 8-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu8 Where to store the value. Set to default on failure.
+ * @param u8Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryU8Def(PCFGMNODE pNode, const char *pszName, uint8_t *pu8, uint8_t u8Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, u8Def);
+ if (RT_SUCCESS(rc))
+ {
+ if (!(u64 & UINT64_C(0xffffffffffffff00)))
+ *pu8 = (uint8_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pu8 = u8Def;
+ return rc;
+}
+
+
+/**
+ * Query signed 8-bit integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi8 Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryS8(PCFGMNODE pNode, const char *pszName, int8_t *pi8)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffffffffff80))
+ || (u64 & UINT64_C(0xffffffffffffff80)) == UINT64_C(0xffffffffffffff80))
+ *pi8 = (int8_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query signed 8-bit integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi8 Where to store the value. Set to default on failure.
+ * @param i8Def The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryS8Def(PCFGMNODE pNode, const char *pszName, int8_t *pi8, int8_t i8Def)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, i8Def);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !(u64 & UINT64_C(0xffffffffffffff80))
+ || (u64 & UINT64_C(0xffffffffffffff80)) == UINT64_C(0xffffffffffffff80))
+ *pi8 = (int8_t)u64;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pi8 = i8Def;
+ return rc;
+}
+
+
+/**
+ * Query boolean integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pf Where to store the value.
+ * @remark This function will interpret any non-zero value as true.
+ */
+VMMR3DECL(int) CFGMR3QueryBool(PCFGMNODE pNode, const char *pszName, bool *pf)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ *pf = u64 ? true : false;
+ return rc;
+}
+
+
+/**
+ * Query boolean integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pf Where to store the value. Set to default on failure.
+ * @param fDef The default value.
+ * @remark This function will interpret any non-zero value as true.
+ */
+VMMR3DECL(int) CFGMR3QueryBoolDef(PCFGMNODE pNode, const char *pszName, bool *pf, bool fDef)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, fDef);
+ *pf = u64 ? true : false;
+ return rc;
+}
+
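+
+/*
+ * Example (an illustrative sketch; the value names are made up). The *Def
+ * variants always leave the output initialised, so callers frequently need
+ * not check the status code:
+ *
+ * bool fEnabled;
+ * CFGMR3QueryBoolDef(pCfg, "Enabled", &fEnabled, true);
+ * uint32_t cMsInterval;
+ * CFGMR3QueryU32Def(pCfg, "Interval", &cMsInterval, 1000);
+ */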
+
+/**
+ * Query I/O port address value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pPort Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryPort(PCFGMNODE pNode, const char *pszName, PRTIOPORT pPort)
+{
+ AssertCompileSize(RTIOPORT, 2);
+ return CFGMR3QueryU16(pNode, pszName, pPort);
+}
+
+
+/**
+ * Query I/O port address value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pPort Where to store the value. Set to default on failure.
+ * @param PortDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryPortDef(PCFGMNODE pNode, const char *pszName, PRTIOPORT pPort, RTIOPORT PortDef)
+{
+ AssertCompileSize(RTIOPORT, 2);
+ return CFGMR3QueryU16Def(pNode, pszName, pPort, PortDef);
+}
+
+
+/**
+ * Query unsigned int value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryUInt(PCFGMNODE pNode, const char *pszName, unsigned int *pu)
+{
+ AssertCompileSize(unsigned int, 4);
+ return CFGMR3QueryU32(pNode, pszName, (uint32_t *)pu);
+}
+
+
+/**
+ * Query unsigned int value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pu Where to store the value. Set to default on failure.
+ * @param uDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryUIntDef(PCFGMNODE pNode, const char *pszName, unsigned int *pu, unsigned int uDef)
+{
+ AssertCompileSize(unsigned int, 4);
+ return CFGMR3QueryU32Def(pNode, pszName, (uint32_t *)pu, uDef);
+}
+
+
+/**
+ * Query signed int value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QuerySInt(PCFGMNODE pNode, const char *pszName, signed int *pi)
+{
+ AssertCompileSize(signed int, 4);
+ return CFGMR3QueryS32(pNode, pszName, (int32_t *)pi);
+}
+
+
+/**
+ * Query signed int value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pi Where to store the value. Set to default on failure.
+ * @param iDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QuerySIntDef(PCFGMNODE pNode, const char *pszName, signed int *pi, signed int iDef)
+{
+ AssertCompileSize(signed int, 4);
+ return CFGMR3QueryS32Def(pNode, pszName, (int32_t *)pi, iDef);
+}
+
+
+/**
+ * Query Guest Context pointer integer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtr(PCFGMNODE pNode, const char *pszName, PRTGCPTR pGCPtr)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCPTR u = (RTGCPTR)u64;
+ if (u64 == u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query Guest Context pointer integer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value. Set to default on failure.
+ * @param GCPtrDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtrDef(PCFGMNODE pNode, const char *pszName, PRTGCPTR pGCPtr, RTGCPTR GCPtrDef)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, GCPtrDef);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCPTR u = (RTGCPTR)u64;
+ if (u64 == u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pGCPtr = GCPtrDef;
+ return rc;
+}
+
+
+/**
+ * Query Guest Context unsigned pointer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtrU(PCFGMNODE pNode, const char *pszName, PRTGCUINTPTR pGCPtr)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCUINTPTR u = (RTGCUINTPTR)u64;
+ if (u64 == u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query Guest Context unsigned pointer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value. Set to default on failure.
+ * @param GCPtrDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtrUDef(PCFGMNODE pNode, const char *pszName, PRTGCUINTPTR pGCPtr, RTGCUINTPTR GCPtrDef)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, GCPtrDef);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCUINTPTR u = (RTGCUINTPTR)u64;
+ if (u64 == u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pGCPtr = GCPtrDef;
+ return rc;
+}
+
+
+/**
+ * Query Guest Context signed pointer value.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtrS(PCFGMNODE pNode, const char *pszName, PRTGCINTPTR pGCPtr)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryInteger(pNode, pszName, &u64);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCINTPTR u = (RTGCINTPTR)u64;
+ if (u64 == (uint64_t)u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ return rc;
+}
+
+
+/**
+ * Query Guest Context signed pointer value with default.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Name of an integer value.
+ * @param pGCPtr Where to store the value. Set to default on failure.
+ * @param GCPtrDef The default value.
+ */
+VMMR3DECL(int) CFGMR3QueryGCPtrSDef(PCFGMNODE pNode, const char *pszName, PRTGCINTPTR pGCPtr, RTGCINTPTR GCPtrDef)
+{
+ uint64_t u64;
+ int rc = CFGMR3QueryIntegerDef(pNode, pszName, &u64, GCPtrDef);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCINTPTR u = (RTGCINTPTR)u64;
+ if (u64 == (uint64_t)u)
+ *pGCPtr = u;
+ else
+ rc = VERR_CFGM_INTEGER_TOO_BIG;
+ }
+ if (RT_FAILURE(rc))
+ *pGCPtr = GCPtrDef;
+ return rc;
+}
+
+
+/**
+ * Queries a zero-terminated string value, storing it in a buffer allocated
+ * from the MM heap.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in.
+ * @param pszName Value name. This value must be of zero terminated character string type.
+ * @param ppszString Where to store the string pointer.
+ * Free this using MMR3HeapFree() (or RTStrFree if not
+ * associated with a pUVM - see CFGMR3CreateTree).
+ */
+VMMR3DECL(int) CFGMR3QueryStringAlloc(PCFGMNODE pNode, const char *pszName, char **ppszString)
+{
+ size_t cbString;
+ int rc = CFGMR3QuerySize(pNode, pszName, &cbString);
+ if (RT_SUCCESS(rc))
+ {
+ char *pszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbString);
+ if (pszString)
+ {
+ rc = CFGMR3QueryString(pNode, pszName, pszString, cbString);
+ if (RT_SUCCESS(rc))
+ *ppszString = pszString;
+ else
+ cfgmR3StrFree(pNode->pVM, pszString);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ return rc;
+}
+
+
+/**
+ * Queries a zero-terminated string value, storing it in a buffer allocated
+ * from the MM heap.
+ *
+ * @returns VBox status code.
+ * @param pNode Which node to search for pszName in. This cannot be
+ * NULL if @a pszDef is not NULL, because we need
+ * some way to get to the VM in order to call
+ * MMR3HeapStrDup.
+ * @param pszName Value name. This value must be of zero terminated character string type.
+ * @param ppszString Where to store the string pointer. Not set on failure.
+ * Free this using MMR3HeapFree() (or RTStrFree if not
+ * associated with a pUVM - see CFGMR3CreateTree).
+ * @param pszDef The default return value. This can be NULL.
+ */
+VMMR3DECL(int) CFGMR3QueryStringAllocDef(PCFGMNODE pNode, const char *pszName, char **ppszString, const char *pszDef)
+{
+ Assert(pNode || !pszDef); /* We need pVM if we need to duplicate the string later. */
+
+ /*
+ * (Don't call CFGMR3QuerySize and CFGMR3QueryStringDef here as the latter
+ * cannot handle pszDef being NULL.)
+ */
+ PCFGMLEAF pLeaf;
+ int rc = cfgmR3ResolveLeaf(pNode, pszName, &pLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLeaf->enmType == CFGMVALUETYPE_STRING)
+ {
+ size_t const cbSrc = pLeaf->Value.String.cb;
+ char *pszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbSrc);
+ if (pszString)
+ {
+ memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
+ *ppszString = pszString;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_CFGM_NOT_STRING;
+ }
+ if (RT_FAILURE(rc))
+ {
+ if (!pszDef)
+ *ppszString = NULL;
+ else
+ {
+ size_t const cbDef = strlen(pszDef) + 1;
+ *ppszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbDef);
+ if (!*ppszString)
+ return VERR_NO_MEMORY;
+ memcpy(*ppszString, pszDef, cbDef);
+ }
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ rc = VINF_SUCCESS;
+ }
+
+ return rc;
+}
+
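+
+/*
+ * Example (an illustrative sketch; value name and default are made up). For a
+ * VM-associated tree the result is freed with MMR3HeapFree(), otherwise with
+ * RTStrFree() as noted above:
+ *
+ * char *pszPath;
+ * int rc = CFGMR3QueryStringAllocDef(pCfg, "Path", &pszPath, "/tmp");
+ * if (RT_SUCCESS(rc))
+ * {
+ * // use pszPath ...
+ * MMR3HeapFree(pszPath);
+ * }
+ */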
+
+/**
+ * Dumps the configuration (sub)tree to the release log.
+ *
+ * @param pRoot The root node of the dump.
+ */
+VMMR3DECL(void) CFGMR3Dump(PCFGMNODE pRoot)
+{
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ LogRel(("************************* CFGM dump *************************\n"));
+ cfgmR3Dump(pRoot, 0, DBGFR3InfoLogRelHlp());
+#ifdef LOG_ENABLED
+ if (LogIsEnabled())
+ cfgmR3Dump(pRoot, 0, DBGFR3InfoLogHlp());
+#endif
+ LogRel(("********************* End of CFGM dump **********************\n"));
+ RTLogRelSetBuffering(fOldBuffered);
+}
+
+
+/**
+ * Info handler, internal version.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp Callback functions for doing output.
+ * @param pszArgs Argument string. Optional and specific to the handler.
+ */
+static DECLCALLBACK(void) cfgmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /*
+ * Figure where to start.
+ */
+ PCFGMNODE pRoot = pVM->cfgm.s.pRoot;
+ if (pszArgs && *pszArgs)
+ {
+ int rc = cfgmR3ResolveNode(pRoot, pszArgs, &pRoot);
+ if (RT_FAILURE(rc))
+ {
+ pHlp->pfnPrintf(pHlp, "Failed to resolve CFGM path '%s', %Rrc", pszArgs, rc);
+ return;
+ }
+ }
+
+ /*
+ * Dump the specified tree.
+ */
+ pHlp->pfnPrintf(pHlp, "pRoot=%p:{", pRoot);
+ cfgmR3DumpPath(pRoot, pHlp);
+ pHlp->pfnPrintf(pHlp, "}\n");
+ cfgmR3Dump(pRoot, 0, pHlp);
+}
+
+
+/**
+ * Recursively prints a path name.
+ */
+static void cfgmR3DumpPath(PCFGMNODE pNode, PCDBGFINFOHLP pHlp)
+{
+ if (pNode->pParent)
+ cfgmR3DumpPath(pNode->pParent, pHlp);
+ pHlp->pfnPrintf(pHlp, "%s/", pNode->szName);
+}
+
+
+/**
+ * Dumps a branch of a tree.
+ */
+static void cfgmR3Dump(PCFGMNODE pRoot, unsigned iLevel, PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Path.
+ */
+ pHlp->pfnPrintf(pHlp, "[");
+ cfgmR3DumpPath(pRoot, pHlp);
+ pHlp->pfnPrintf(pHlp, "] (level %d)%s\n", iLevel, pRoot->fRestrictedRoot ? " (restricted root)" : "");
+
+ /*
+ * Values.
+ */
+ PCFGMLEAF pLeaf;
+ size_t cchMax = 0;
+ for (pLeaf = CFGMR3GetFirstValue(pRoot); pLeaf; pLeaf = CFGMR3GetNextValue(pLeaf))
+ cchMax = RT_MAX(cchMax, pLeaf->cchName);
+ for (pLeaf = CFGMR3GetFirstValue(pRoot); pLeaf; pLeaf = CFGMR3GetNextValue(pLeaf))
+ {
+ switch (CFGMR3GetValueType(pLeaf))
+ {
+ case CFGMVALUETYPE_INTEGER:
+ {
+ pHlp->pfnPrintf(pHlp, " %-*s <integer> = %#018llx (%'lld",
+ (int)cchMax, pLeaf->szName, pLeaf->Value.Integer.u64, pLeaf->Value.Integer.u64);
+ if ( ( pLeaf->cchName >= 4
+ && !RTStrCmp(&pLeaf->szName[pLeaf->cchName - 4], "Size"))
+ || ( pLeaf->cchName >= 2
+ && !RTStrNCmp(pLeaf->szName, "cb", 2)) )
+ pHlp->pfnPrintf(pHlp, ", %' Rhcb)\n", pLeaf->Value.Integer.u64);
+ else
+ pHlp->pfnPrintf(pHlp, ")\n");
+ break;
+ }
+
+ case CFGMVALUETYPE_STRING:
+ pHlp->pfnPrintf(pHlp, " %-*s <string> = \"%s\" (cb=%zu)\n",
+ (int)cchMax, pLeaf->szName, pLeaf->Value.String.psz, pLeaf->Value.String.cb);
+ break;
+
+ case CFGMVALUETYPE_BYTES:
+ pHlp->pfnPrintf(pHlp, " %-*s <bytes> = \"%.*Rhxs\" (cb=%zu)\n",
+ (int)cchMax, pLeaf->szName, pLeaf->Value.Bytes.cb, pLeaf->Value.Bytes.pau8, pLeaf->Value.Bytes.cb);
+ break;
+
+ case CFGMVALUETYPE_PASSWORD:
+ pHlp->pfnPrintf(pHlp, " %-*s <password>= \"***REDACTED***\" (cb=%zu)\n",
+ (int)cchMax, pLeaf->szName, pLeaf->Value.String.cb);
+ break;
+
+ default:
+ AssertMsgFailed(("bad leaf!\n"));
+ break;
+ }
+ }
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /*
+ * Children.
+ */
+ for (PCFGMNODE pChild = CFGMR3GetFirstChild(pRoot); pChild; pChild = CFGMR3GetNextChild(pChild))
+ {
+ Assert(pChild->pNext != pChild);
+ Assert(pChild->pPrev != pChild);
+ Assert(pChild->pPrev != pChild->pNext || !pChild->pPrev);
+ Assert(pChild->pFirstChild != pChild);
+ Assert(pChild->pParent == pRoot);
+ cfgmR3Dump(pChild, iLevel + 1, pHlp);
+ }
+}
+
diff --git a/src/VBox/VMM/VMMR3/CPUM.cpp b/src/VBox/VMM/VMMR3/CPUM.cpp
new file mode 100644
index 00000000..7e0fbd32
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUM.cpp
@@ -0,0 +1,4632 @@
+/* $Id: CPUM.cpp $ */
+/** @file
+ * CPUM - CPU Monitor / Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_cpum CPUM - CPU Monitor / Manager
+ *
+ * The CPU Monitor / Manager keeps track of all the CPU registers. It is
+ * also responsible for lazy FPU handling and some of the context loading
+ * in raw mode.
+ *
+ * There are three CPU contexts, the most important one is the guest one (GC).
+ * When running in raw-mode (RC) there is a special hyper context for the VMM
+ * part that floats around inside the guest address space. When running in
+ * raw-mode, CPUM also maintains a host context for saving and restoring
+ * registers across world switches. This latter is done in cooperation with the
+ * world switcher (@see pg_vmm).
+ *
+ * @see grp_cpum
+ *
+ * @section sec_cpum_fpu FPU / SSE / AVX / ++ state.
+ *
+ * TODO: proper write up, currently just some notes.
+ *
+ * The ring-0 FPU handling per OS:
+ *
+ * - 64-bit Windows uses XMM registers in the kernel as part of the calling
+ * convention (Visual C++ doesn't seem to have a way to disable
+ * generating such code either), so CR0.TS/EM are always zero from what I
+ * can tell. We are also forced to always load/save the guest XMM0-XMM15
+ * registers when entering/leaving guest context. Interrupt handlers
+ * using FPU/SSE will offically have call save and restore functions
+ * exported by the kernel, if the really really have to use the state.
+ *
+ * - 32-bit Windows does lazy FPU handling, I think, probably including
+ * lazy saving. The Windows Internals book states that it's a bad
+ * idea to use the FPU in kernel space. However, it looks like it will
+ * restore the FPU state of the current thread in case of a kernel \#NM.
+ * Interrupt handlers should be same as for 64-bit.
+ *
+ * - Darwin allows taking \#NM in kernel space, restoring current thread's
+ * state if I read the code correctly. It saves the FPU state of the
+ * outgoing thread, and uses CR0.TS to lazily load the state of the
+ * incoming one. No idea yet how the FPU is treated by interrupt
+ * handlers, i.e. whether they are allowed to disable the state or
+ * something.
+ *
+ * - Linux also allows \#NM in kernel space (don't know since when), and
+ * uses CR0.TS for lazy loading. Saves outgoing thread's state, lazy
+ * loads the incoming unless configured to aggressively load it. Interrupt
+ * handlers can ask whether they're allowed to use the FPU, and may
+ * freely trash the state if Linux thinks it has saved the thread's state
+ * already. This is a problem.
+ *
+ * - Solaris will, from what I can tell, panic if it gets an \#NM in kernel
+ * context. When switching threads, the kernel will save the state of
+ * the outgoing thread and lazy load the incoming one using CR0.TS.
+ * There are a few routines in sseblk.s which use the SSE unit in ring-0
+ * to do stuff; HAT is among the users. The routines there will
+ * manually clear CR0.TS and save the XMM registers they use only if
+ * CR0.TS was zero upon entry. They will skip it when not, because as
+ * mentioned above, the FPU state is saved when switching away from a
+ * thread and CR0.TS set to 1, so when CR0.TS is 1 there is nothing to
+ * preserve. This is a problem if we restore CR0.TS to 1 after loading
+ * the guest state.
+ *
+ * - FreeBSD - no idea yet.
+ *
+ * - OS/2 does not allow \#NMs in kernel space IIRC. Does lazy loading,
+ * possibly also lazy saving. Interrupts must preserve the CR0.TS+EM &
+ * FPU states.
+ *
+ * Up to r107425 (2016-05-24) we would only temporarily modify CR0.TS/EM while
+ * saving and restoring the host and guest states. The motivation for this
+ * change is that we want to be able to emulate SSE instructions in ring-0 (IEM).
+ *
+ * Starting with that change, we will leave CR0.TS=EM=0 after saving the host
+ * state and only restore it once we've restored the host FPU state. This has the
+ * accidental side effect of triggering Solaris to preserve XMM registers in
+ * sseblk.s. When CR0 was changed by saving the FPU state, CPUM must now inform
+ * the VT-x (HMVMX) code about it as it caches the CR0 value in the VMCS.
+ *
+ *
+ * @section sec_cpum_logging Logging Level Assignments.
+ *
+ * Following log level assignments:
+ * - Log6 is used for FPU state management.
+ * - Log7 is used for FPU state actualization.
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#define CPUM_WITH_NONCONST_HOST_FEATURES
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/cpumdis.h>
+#include <VBox/vmm/cpumctx-v1_6.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/hmvmxinline.h>
+#include <VBox/vmm/ssm.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/param.h>
+#include <VBox/dis.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/cpuset.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/rand.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/**
+ * This was used in the saved state up to the early life of version 14.
+ *
+ * It indicates that we may have some out-of-sync hidden segment registers.
+ * It is only relevant for raw-mode.
+ */
+#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID RT_BIT(12)
+
+
+/** For saved state only: Block injection of non-maskable interrupts to the guest.
+ * @note This flag was moved to CPUMCTX::eflags.uBoth in v7.0.4. */
+#define CPUM_OLD_VMCPU_FF_BLOCK_NMIS RT_BIT_64(25)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+/**
+ * What kind of cpu info dump to perform.
+ */
+typedef enum CPUMDUMPTYPE
+{
+ CPUMDUMPTYPE_TERSE,
+ CPUMDUMPTYPE_DEFAULT,
+ CPUMDUMPTYPE_VERBOSE
+} CPUMDUMPTYPE;
+/** Pointer to a cpu info dump type. */
+typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
+static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) cpumR3InfoGuestHwvirt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+/** Host CPU features. */
+DECL_HIDDEN_DATA(CPUHOSTFEATURES) g_CpumHostFeatures;
+#endif
+
+/** Saved state field descriptors for CPUMCTX. */
+static const SSMFIELD g_aCpumCtxFields[] =
+{
+ SSMFIELD_ENTRY( CPUMCTX, rdi),
+ SSMFIELD_ENTRY( CPUMCTX, rsi),
+ SSMFIELD_ENTRY( CPUMCTX, rbp),
+ SSMFIELD_ENTRY( CPUMCTX, rax),
+ SSMFIELD_ENTRY( CPUMCTX, rbx),
+ SSMFIELD_ENTRY( CPUMCTX, rdx),
+ SSMFIELD_ENTRY( CPUMCTX, rcx),
+ SSMFIELD_ENTRY( CPUMCTX, rsp),
+ SSMFIELD_ENTRY( CPUMCTX, rflags),
+ SSMFIELD_ENTRY( CPUMCTX, rip),
+ SSMFIELD_ENTRY( CPUMCTX, r8),
+ SSMFIELD_ENTRY( CPUMCTX, r9),
+ SSMFIELD_ENTRY( CPUMCTX, r10),
+ SSMFIELD_ENTRY( CPUMCTX, r11),
+ SSMFIELD_ENTRY( CPUMCTX, r12),
+ SSMFIELD_ENTRY( CPUMCTX, r13),
+ SSMFIELD_ENTRY( CPUMCTX, r14),
+ SSMFIELD_ENTRY( CPUMCTX, r15),
+ SSMFIELD_ENTRY( CPUMCTX, es.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, es.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, es.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, es.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, cs.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, cs.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, ss.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, ss.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, ds.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, ds.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, fs.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, fs.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, gs.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, gs.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, cr0),
+ SSMFIELD_ENTRY( CPUMCTX, cr2),
+ SSMFIELD_ENTRY( CPUMCTX, cr3),
+ SSMFIELD_ENTRY( CPUMCTX, cr4),
+ SSMFIELD_ENTRY( CPUMCTX, dr[0]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[1]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[2]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[3]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[6]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[7]),
+ SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
+ SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
+ SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
+ SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
+ SSMFIELD_ENTRY( CPUMCTX, msrEFER),
+ SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrPAT),
+ SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
+ SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
+ SSMFIELD_ENTRY( CPUMCTX, tr.ValidSel),
+ SSMFIELD_ENTRY( CPUMCTX, tr.fFlags),
+ SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
+ SSMFIELD_ENTRY_VER( CPUMCTX, aXcr[0], CPUM_SAVED_STATE_VERSION_XSAVE),
+ SSMFIELD_ENTRY_VER( CPUMCTX, aXcr[1], CPUM_SAVED_STATE_VERSION_XSAVE),
+ SSMFIELD_ENTRY_VER( CPUMCTX, fXStateMask, CPUM_SAVED_STATE_VERSION_XSAVE),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for SVM nested hardware-virtualization
+ * Host State. */
+static const SSMFIELD g_aSvmHwvirtHostState[] =
+{
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uEferMsr),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uCr0),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uCr4),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uCr3),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uRip),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uRsp),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, uRax),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, rflags),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.Sel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.ValidSel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.fFlags),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.u64Base),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.u32Limit),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, es.Attr),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.Sel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.ValidSel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.fFlags),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.u64Base),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.u32Limit),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, cs.Attr),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.Sel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.ValidSel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.fFlags),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.u64Base),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.u32Limit),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ss.Attr),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.Sel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.ValidSel),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.fFlags),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.u64Base),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.u32Limit),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, ds.Attr),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, gdtr.cbGdt),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, gdtr.pGdt),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, idtr.cbIdt),
+ SSMFIELD_ENTRY( SVMHOSTSTATE, idtr.pIdt),
+ SSMFIELD_ENTRY_IGNORE(SVMHOSTSTATE, abPadding),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for VMX nested hardware-virtualization
+ * VMCS. */
+static const SSMFIELD g_aVmxHwvirtVmcs[] =
+{
+ SSMFIELD_ENTRY( VMXVVMCS, u32VmcsRevId),
+ SSMFIELD_ENTRY( VMXVVMCS, enmVmxAbort),
+ SSMFIELD_ENTRY( VMXVVMCS, fVmcsState),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au8Padding0),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved0),
+
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, u16Reserved0),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoVmInstrError),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoExitReason),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoExitIntInfo),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoExitIntErrCode),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoIdtVectoringInfo),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoIdtVectoringErrCode),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoExitInstrLen),
+ SSMFIELD_ENTRY( VMXVVMCS, u32RoExitInstrInfo),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32RoReserved2),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoGuestPhysAddr),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved1),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoExitQual),
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRcx),
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRsi),
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRdi),
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRip),
+ SSMFIELD_ENTRY( VMXVVMCS, u64RoGuestLinearAddr),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved5),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u16Vpid),
+ SSMFIELD_ENTRY( VMXVVMCS, u16PostIntNotifyVector),
+ SSMFIELD_ENTRY( VMXVVMCS, u16EptpIndex),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u16HlatPrefixSize, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved0),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u32PinCtls),
+ SSMFIELD_ENTRY( VMXVVMCS, u32ProcCtls),
+ SSMFIELD_ENTRY( VMXVVMCS, u32XcptBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u32XcptPFMask),
+ SSMFIELD_ENTRY( VMXVVMCS, u32XcptPFMatch),
+ SSMFIELD_ENTRY( VMXVVMCS, u32Cr3TargetCount),
+ SSMFIELD_ENTRY( VMXVVMCS, u32ExitCtls),
+ SSMFIELD_ENTRY( VMXVVMCS, u32ExitMsrStoreCount),
+ SSMFIELD_ENTRY( VMXVVMCS, u32ExitMsrLoadCount),
+ SSMFIELD_ENTRY( VMXVVMCS, u32EntryCtls),
+ SSMFIELD_ENTRY( VMXVVMCS, u32EntryMsrLoadCount),
+ SSMFIELD_ENTRY( VMXVVMCS, u32EntryIntInfo),
+ SSMFIELD_ENTRY( VMXVVMCS, u32EntryXcptErrCode),
+ SSMFIELD_ENTRY( VMXVVMCS, u32EntryInstrLen),
+ SSMFIELD_ENTRY( VMXVVMCS, u32TprThreshold),
+ SSMFIELD_ENTRY( VMXVVMCS, u32ProcCtls2),
+ SSMFIELD_ENTRY( VMXVVMCS, u32PleGap),
+ SSMFIELD_ENTRY( VMXVVMCS, u32PleWindow),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved1),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrIoBitmapA),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrIoBitmapB),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrMsrBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrExitMsrStore),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrExitMsrLoad),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrEntryMsrLoad),
+ SSMFIELD_ENTRY( VMXVVMCS, u64ExecVmcsPtr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrPml),
+ SSMFIELD_ENTRY( VMXVVMCS, u64TscOffset),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrVirtApic),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrApicAccess),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrPostedIntDesc),
+ SSMFIELD_ENTRY( VMXVVMCS, u64VmFuncCtls),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EptPtr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap0),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap1),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap2),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap3),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrEptpList),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrVmreadBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrVmwriteBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u64AddrXcptVeInfo),
+ SSMFIELD_ENTRY( VMXVVMCS, u64XssExitBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u64EnclsExitBitmap),
+ SSMFIELD_ENTRY( VMXVVMCS, u64SppTablePtr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64TscMultiplier),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64ProcCtls3, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64EnclvExitBitmap, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64PconfigExitBitmap, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64HlatPtr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64ExitCtls2, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved0),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr0Mask),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr4Mask),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr0ReadShadow),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr4ReadShadow),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target0),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target1),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target2),
+ SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target3),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved4),
+
+ SSMFIELD_ENTRY( VMXVVMCS, HostEs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostCs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostSs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostDs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostFs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostGs),
+ SSMFIELD_ENTRY( VMXVVMCS, HostTr),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved2),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u32HostSysenterCs),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved4),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostPatMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostEferMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostPerfGlobalCtlMsr),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64HostPkrsMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved3),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostCr0),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostCr3),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostCr4),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostFsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostGsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostTrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostGdtrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostIdtrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostSysenterEsp),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostSysenterEip),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostRsp),
+ SSMFIELD_ENTRY( VMXVVMCS, u64HostRip),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64HostSCetMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64HostSsp, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64HostIntrSspTableAddrMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved7),
+
+ SSMFIELD_ENTRY( VMXVVMCS, GuestEs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestCs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestSs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestDs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestFs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestGs),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestLdtr),
+ SSMFIELD_ENTRY( VMXVVMCS, GuestTr),
+ SSMFIELD_ENTRY( VMXVVMCS, u16GuestIntStatus),
+ SSMFIELD_ENTRY( VMXVVMCS, u16PmlIndex),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved1),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestEsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestCsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestSsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestDsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestFsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestGsLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestLdtrLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestTrLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestGdtrLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestIdtrLimit),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestEsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestCsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestSsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestDsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestFsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestGsAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestLdtrAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestTrAttr),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestIntrState),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestActivityState),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestSmBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u32GuestSysenterCS),
+ SSMFIELD_ENTRY( VMXVVMCS, u32PreemptTimer),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved3),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64VmcsLinkPtr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestDebugCtlMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPatMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestEferMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPerfGlobalCtlMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte0),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte1),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte2),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte3),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestBndcfgsMsr),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestRtitCtlMsr),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64GuestPkrsMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved2),
+
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr0),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr3),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr4),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestEsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestCsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestSsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestDsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestFsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestGsBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestLdtrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestTrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestGdtrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestIdtrBase),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestDr7),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestRsp),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestRip),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestRFlags),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestPendingDbgXcpts),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestSysenterEsp),
+ SSMFIELD_ENTRY( VMXVVMCS, u64GuestSysenterEip),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64GuestSCetMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64GuestSsp, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_VER( VMXVVMCS, u64GuestIntrSspTableAddrMsr, CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2),
+ SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved6),
+
+ SSMFIELD_ENTRY_TERM()
+};
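+
+/* Illustrative sketch only, not part of the original file: tables such as
+ * g_aVmxHwvirtVmcs above are consumed by the SSM struct (de)serializers,
+ * which walk the entries up to SSMFIELD_ENTRY_TERM() and put/get each listed
+ * member, honouring the _VER and _IGNORE markers.  Assuming the
+ * SSMR3PutStructEx API declared in VBox/vmm/ssm.h, usage looks roughly like: */
+#if 0 /* example */
+static int cpumExampleSaveVmcs(PSSMHANDLE pSSM, VMXVVMCS const *pVmcs)
+{
+    /* Serializes every field named in the descriptor table; returns a VBox
+       status code (VINF_SUCCESS on success). */
+    return SSMR3PutStructEx(pSSM, pVmcs, sizeof(*pVmcs), 0 /*fFlags*/,
+                            &g_aVmxHwvirtVmcs[0], NULL /*pvUser*/);
+}
+#endif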
+
+/** Saved state field descriptors for the X87 state (X86FXSTATE). */
+static const SSMFIELD g_aCpumX87Fields[] =
+{
+ SSMFIELD_ENTRY( X86FXSTATE, FCW),
+ SSMFIELD_ENTRY( X86FXSTATE, FSW),
+ SSMFIELD_ENTRY( X86FXSTATE, FTW),
+ SSMFIELD_ENTRY( X86FXSTATE, FOP),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUIP),
+ SSMFIELD_ENTRY( X86FXSTATE, CS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUDP),
+ SSMFIELD_ENTRY( X86FXSTATE, DS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]),
+ SSMFIELD_ENTRY_VER( X86FXSTATE, au32RsrvdForSoftware[0], CPUM_SAVED_STATE_VERSION_XSAVE), /* 32-bit/64-bit hack */
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X86XSAVEHDR. */
+static const SSMFIELD g_aCpumXSaveHdrFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEHDR, bmXState),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X86XSAVEYMMHI. */
+static const SSMFIELD g_aCpumYmmHiFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[0]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[1]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[2]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[3]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[4]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[5]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[6]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[7]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[8]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[9]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[10]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[11]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[12]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[13]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[14]),
+ SSMFIELD_ENTRY( X86XSAVEYMMHI, aYmmHi[15]),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X86XSAVEBNDREGS. */
+static const SSMFIELD g_aCpumBndRegsFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEBNDREGS, aRegs[0]),
+ SSMFIELD_ENTRY( X86XSAVEBNDREGS, aRegs[1]),
+ SSMFIELD_ENTRY( X86XSAVEBNDREGS, aRegs[2]),
+ SSMFIELD_ENTRY( X86XSAVEBNDREGS, aRegs[3]),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X86XSAVEBNDCFG. */
+static const SSMFIELD g_aCpumBndCfgFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEBNDCFG, fConfig),
+ SSMFIELD_ENTRY( X86XSAVEBNDCFG, fStatus),
+ SSMFIELD_ENTRY_TERM()
+};
+
+#if 0 /** @todo */
+/** Saved state field descriptors for X86XSAVEOPMASK. */
+static const SSMFIELD g_aCpumOpmaskFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[0]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[1]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[2]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[3]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[4]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[5]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[6]),
+ SSMFIELD_ENTRY( X86XSAVEOPMASK, aKRegs[7]),
+ SSMFIELD_ENTRY_TERM()
+};
+#endif
+
+/** Saved state field descriptors for X86XSAVEZMMHI256. */
+static const SSMFIELD g_aCpumZmmHi256Fields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[0]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[1]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[2]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[3]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[4]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[5]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[6]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[7]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[8]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[9]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[10]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[11]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[12]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[13]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[14]),
+ SSMFIELD_ENTRY( X86XSAVEZMMHI256, aHi256Regs[15]),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for X86XSAVEZMM16HI. */
+static const SSMFIELD g_aCpumZmm16HiFields[] =
+{
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[0]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[1]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[2]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[3]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[4]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[5]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[6]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[7]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[8]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[9]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[10]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[11]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[12]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[13]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[14]),
+ SSMFIELD_ENTRY( X86XSAVEZMM16HI, aRegs[15]),
+ SSMFIELD_ENTRY_TERM()
+};
+
+
+
+/** Saved state field descriptors for the X87 state (X86FXSTATE) in V4.1 before
+ * the hidden selector registers changed. */
+static const SSMFIELD g_aCpumX87FieldsMem[] =
+{
+ SSMFIELD_ENTRY( X86FXSTATE, FCW),
+ SSMFIELD_ENTRY( X86FXSTATE, FSW),
+ SSMFIELD_ENTRY( X86FXSTATE, FTW),
+ SSMFIELD_ENTRY( X86FXSTATE, FOP),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUIP),
+ SSMFIELD_ENTRY( X86FXSTATE, CS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUDP),
+ SSMFIELD_ENTRY( X86FXSTATE, DS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]),
+ SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdRest),
+ SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdForSoftware),
+    SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector
+ * registers changed. */
+static const SSMFIELD g_aCpumCtxFieldsMem[] =
+{
+ SSMFIELD_ENTRY( CPUMCTX, rdi),
+ SSMFIELD_ENTRY( CPUMCTX, rsi),
+ SSMFIELD_ENTRY( CPUMCTX, rbp),
+ SSMFIELD_ENTRY( CPUMCTX, rax),
+ SSMFIELD_ENTRY( CPUMCTX, rbx),
+ SSMFIELD_ENTRY( CPUMCTX, rdx),
+ SSMFIELD_ENTRY( CPUMCTX, rcx),
+ SSMFIELD_ENTRY( CPUMCTX, rsp),
+ SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
+ SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
+ SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
+ SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, es.Sel),
+ SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
+ SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
+ SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
+ SSMFIELD_ENTRY( CPUMCTX, rflags),
+ SSMFIELD_ENTRY( CPUMCTX, rip),
+ SSMFIELD_ENTRY( CPUMCTX, r8),
+ SSMFIELD_ENTRY( CPUMCTX, r9),
+ SSMFIELD_ENTRY( CPUMCTX, r10),
+ SSMFIELD_ENTRY( CPUMCTX, r11),
+ SSMFIELD_ENTRY( CPUMCTX, r12),
+ SSMFIELD_ENTRY( CPUMCTX, r13),
+ SSMFIELD_ENTRY( CPUMCTX, r14),
+ SSMFIELD_ENTRY( CPUMCTX, r15),
+ SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, es.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, cr0),
+ SSMFIELD_ENTRY( CPUMCTX, cr2),
+ SSMFIELD_ENTRY( CPUMCTX, cr3),
+ SSMFIELD_ENTRY( CPUMCTX, cr4),
+ SSMFIELD_ENTRY( CPUMCTX, dr[0]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[1]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[2]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[3]),
+ SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
+ SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, dr[6]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[7]),
+ SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
+ SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
+ SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
+ SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
+ SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
+ SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
+ SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
+ SSMFIELD_ENTRY( CPUMCTX, msrEFER),
+ SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrPAT),
+ SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
+ SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for the X87 state (X86FXSTATE) of CPUMCTX_VER1_6. */
+static const SSMFIELD g_aCpumX87FieldsV16[] =
+{
+ SSMFIELD_ENTRY( X86FXSTATE, FCW),
+ SSMFIELD_ENTRY( X86FXSTATE, FSW),
+ SSMFIELD_ENTRY( X86FXSTATE, FTW),
+ SSMFIELD_ENTRY( X86FXSTATE, FOP),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUIP),
+ SSMFIELD_ENTRY( X86FXSTATE, CS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1),
+ SSMFIELD_ENTRY( X86FXSTATE, FPUDP),
+ SSMFIELD_ENTRY( X86FXSTATE, DS),
+ SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR),
+ SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]),
+ SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]),
+ SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdRest),
+ SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdForSoftware),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/** Saved state field descriptors for CPUMCTX_VER1_6. */
+static const SSMFIELD g_aCpumCtxFieldsV16[] =
+{
+ SSMFIELD_ENTRY( CPUMCTX, rdi),
+ SSMFIELD_ENTRY( CPUMCTX, rsi),
+ SSMFIELD_ENTRY( CPUMCTX, rbp),
+ SSMFIELD_ENTRY( CPUMCTX, rax),
+ SSMFIELD_ENTRY( CPUMCTX, rbx),
+ SSMFIELD_ENTRY( CPUMCTX, rdx),
+ SSMFIELD_ENTRY( CPUMCTX, rcx),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
+ SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
+ SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
+ SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, es.Sel),
+ SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
+ SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
+ SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
+ SSMFIELD_ENTRY( CPUMCTX, rflags),
+ SSMFIELD_ENTRY( CPUMCTX, rip),
+ SSMFIELD_ENTRY( CPUMCTX, r8),
+ SSMFIELD_ENTRY( CPUMCTX, r9),
+ SSMFIELD_ENTRY( CPUMCTX, r10),
+ SSMFIELD_ENTRY( CPUMCTX, r11),
+ SSMFIELD_ENTRY( CPUMCTX, r12),
+ SSMFIELD_ENTRY( CPUMCTX, r13),
+ SSMFIELD_ENTRY( CPUMCTX, r14),
+ SSMFIELD_ENTRY( CPUMCTX, r15),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, es.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
+ SSMFIELD_ENTRY( CPUMCTX, cr0),
+ SSMFIELD_ENTRY( CPUMCTX, cr2),
+ SSMFIELD_ENTRY( CPUMCTX, cr3),
+ SSMFIELD_ENTRY( CPUMCTX, cr4),
+ SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, dr[0]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[1]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[2]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[3]),
+ SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
+ SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, dr[6]),
+ SSMFIELD_ENTRY( CPUMCTX, dr[7]),
+ SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
+ SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
+ SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
+ SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
+ SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
+ SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
+ SSMFIELD_ENTRY( CPUMCTX, msrEFER),
+ SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrPAT),
+ SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
+ SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
+ SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
+ SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
+ SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
+ SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
+ SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
+ SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
+ SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
+ SSMFIELD_ENTRY_TERM()
+};
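+
+/* Note on the legacy tables above: SSMFIELD_ENTRY_U32_ZX_U64 marks a member
+ * that was saved as 32 bits and is zero-extended into the 64-bit field on
+ * load, while SSMFIELD_ENTRY_OLD names data present only in the old saved
+ * state layout, which is skipped (the given number of bytes) when loading. */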
+
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+/**
+ * Checks for partial/leaky FXSAVE/FXRSTOR handling on AMD CPUs.
+ *
+ * AMD K7, K8 and newer AMD CPUs do not save/restore the x87 error pointers
+ * (last instruction pointer, last data pointer, last opcode) except when the ES
+ * bit (Exception Summary) in the x87 FSW (FPU Status Word) is set. Thus, if we
+ * don't clear these registers, there is a potential local FPU state leak from
+ * one process using the FPU to another.
+ *
+ * See AMD Instruction Reference for FXSAVE, FXRSTOR.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void cpumR3CheckLeakyFpu(PVM pVM)
+{
+ uint32_t u32CpuVersion = ASMCpuId_EAX(1);
+ uint32_t const u32Family = u32CpuVersion >> 8;
+ if ( u32Family >= 6 /* K7 and higher */
+ && (ASMIsAmdCpu() || ASMIsHygonCpu()) )
+ {
+ uint32_t cExt = ASMCpuId_EAX(0x80000000);
+ if (RTX86IsValidExtRange(cExt))
+ {
+ uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
+ if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
+ }
+ Log(("CPUM: Host CPU has leaky fxsave/fxrstor behaviour\n"));
+ }
+ }
+ }
+}
+#endif
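+
+/* Illustrative sketch only (hypothetical helper, not part of the original
+ * file): when CPUM_USE_FFXSR_LEAKY is set above, the x87 error pointers in a
+ * saved X86FXSTATE image cannot be assumed to have been refreshed by FXSAVE
+ * unless FSW.ES was set, so a consumer wanting to avoid leaking them across
+ * contexts could scrub the fields explicitly: */
+#if 0 /* example */
+static void cpumExampleScrubX87ErrorPtrs(PX86FXSTATE pFpu)
+{
+    pFpu->FOP   = 0;    /* last x87 opcode */
+    pFpu->FPUIP = 0;    /* last instruction pointer */
+    pFpu->CS    = 0;    /* ... and its selector */
+    pFpu->FPUDP = 0;    /* last data pointer */
+    pFpu->DS    = 0;    /* ... and its selector */
+}
+#endif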
+
+
+/**
+ * Initializes the SVM hardware virtualization state.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void cpumR3InitSvmHwVirtState(PVM pVM)
+{
+ LogRel(("CPUM: AMD-V nested-guest init\n"));
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+
+        /* Mark SVM hardware virtualization as available. */
+ pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_SVM;
+
+ AssertCompile(sizeof(pCtx->hwvirt.svm.Vmcb) == SVM_VMCB_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.svm.abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.svm.abIoBitmap) == SVM_IOPM_PAGES * X86_PAGE_SIZE);
+
+ /* Initialize non-zero values. */
+ pCtx->hwvirt.svm.GCPhysVmcb = NIL_RTGCPHYS;
+ }
+}
+
+
+/**
+ * Resets per-VCPU SVM hardware virtualization state.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(void) cpumR3ResetSvmHwVirtState(PVMCPU pVCpu)
+{
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+ Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
+
+ RT_ZERO(pCtx->hwvirt.svm.Vmcb);
+ RT_ZERO(pCtx->hwvirt.svm.HostState);
+ RT_ZERO(pCtx->hwvirt.svm.abMsrBitmap);
+ RT_ZERO(pCtx->hwvirt.svm.abIoBitmap);
+
+ pCtx->hwvirt.svm.uMsrHSavePa = 0;
+ pCtx->hwvirt.svm.uPrevPauseTick = 0;
+ pCtx->hwvirt.svm.GCPhysVmcb = NIL_RTGCPHYS;
+ pCtx->hwvirt.svm.cPauseFilter = 0;
+ pCtx->hwvirt.svm.cPauseFilterThreshold = 0;
+ pCtx->hwvirt.svm.fInterceptEvents = false;
+}
+
+
+/**
+ * Initializes the VMX hardware virtualization state.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void cpumR3InitVmxHwVirtState(PVM pVM)
+{
+ LogRel(("CPUM: VT-x nested-guest init\n"));
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+
+        /* Mark VMX hardware virtualization as available. */
+ pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX;
+
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.Vmcs) == VMX_V_VMCS_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.Vmcs) == VMX_V_VMCS_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.ShadowVmcs) == VMX_V_SHADOW_VMCS_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.ShadowVmcs) == VMX_V_SHADOW_VMCS_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abVmreadBitmap) == VMX_V_VMREAD_VMWRITE_BITMAP_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abVmreadBitmap) == VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abVmwriteBitmap) == VMX_V_VMREAD_VMWRITE_BITMAP_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abVmwriteBitmap) == VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aEntryMsrLoadArea) == VMX_V_AUTOMSR_AREA_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aEntryMsrLoadArea) == VMX_V_AUTOMSR_AREA_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aExitMsrStoreArea) == VMX_V_AUTOMSR_AREA_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aExitMsrStoreArea) == VMX_V_AUTOMSR_AREA_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aExitMsrLoadArea) == VMX_V_AUTOMSR_AREA_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.aExitMsrLoadArea) == VMX_V_AUTOMSR_AREA_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abMsrBitmap) == VMX_V_MSR_BITMAP_PAGES * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abMsrBitmap) == VMX_V_MSR_BITMAP_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abIoBitmap) == (VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES) * X86_PAGE_SIZE);
+ AssertCompile(sizeof(pCtx->hwvirt.vmx.abIoBitmap) == VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
+
+ /* Initialize non-zero values. */
+ pCtx->hwvirt.vmx.GCPhysVmxon = NIL_RTGCPHYS;
+ pCtx->hwvirt.vmx.GCPhysShadowVmcs = NIL_RTGCPHYS;
+ pCtx->hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS;
+ }
+}
+
+
+/**
+ * Resets per-VCPU VMX hardware virtualization state.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(void) cpumR3ResetVmxHwVirtState(PVMCPU pVCpu)
+{
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+ Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
+
+ RT_ZERO(pCtx->hwvirt.vmx.Vmcs);
+ RT_ZERO(pCtx->hwvirt.vmx.ShadowVmcs);
+ RT_ZERO(pCtx->hwvirt.vmx.abVmreadBitmap);
+ RT_ZERO(pCtx->hwvirt.vmx.abVmwriteBitmap);
+ RT_ZERO(pCtx->hwvirt.vmx.aEntryMsrLoadArea);
+ RT_ZERO(pCtx->hwvirt.vmx.aExitMsrStoreArea);
+ RT_ZERO(pCtx->hwvirt.vmx.aExitMsrLoadArea);
+ RT_ZERO(pCtx->hwvirt.vmx.abMsrBitmap);
+ RT_ZERO(pCtx->hwvirt.vmx.abIoBitmap);
+
+ pCtx->hwvirt.vmx.GCPhysVmxon = NIL_RTGCPHYS;
+ pCtx->hwvirt.vmx.GCPhysShadowVmcs = NIL_RTGCPHYS;
+ pCtx->hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS;
+ pCtx->hwvirt.vmx.fInVmxRootMode = false;
+ pCtx->hwvirt.vmx.fInVmxNonRootMode = false;
+ /* Don't reset diagnostics here. */
+
+ pCtx->hwvirt.vmx.fInterceptEvents = false;
+ pCtx->hwvirt.vmx.fNmiUnblockingIret = false;
+ pCtx->hwvirt.vmx.uFirstPauseLoopTick = 0;
+ pCtx->hwvirt.vmx.uPrevPauseTick = 0;
+ pCtx->hwvirt.vmx.uEntryTick = 0;
+ pCtx->hwvirt.vmx.offVirtApicWrite = 0;
+ pCtx->hwvirt.vmx.fVirtNmiBlocking = false;
+
+ /* Stop any VMX-preemption timer. */
+ CPUMStopGuestVmxPremptTimer(pVCpu);
+
+ /* Clear all nested-guest FFs. */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_ALL_MASK);
+}
+
+
+/**
+ * Displays the host and guest VMX features.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs "terse", "default" or "verbose".
+ */
+DECLCALLBACK(void) cpumR3InfoVmxFeatures(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+ PCCPUMFEATURES pHostFeatures = &pVM->cpum.s.HostFeatures;
+ PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
+ if ( pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL
+ || pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_VIA
+ || pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_SHANGHAI)
+ {
+#define VMXFEATDUMP(a_szDesc, a_Var) \
+ pHlp->pfnPrintf(pHlp, " %s = %u (%u)\n", a_szDesc, pGuestFeatures->a_Var, pHostFeatures->a_Var)
+
+ pHlp->pfnPrintf(pHlp, "Nested hardware virtualization - VMX features\n");
+ pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
+ VMXFEATDUMP("VMX - Virtual-Machine Extensions ", fVmx);
+ /* Basic. */
+ VMXFEATDUMP("InsOutInfo - INS/OUTS instruction info. ", fVmxInsOutInfo);
+
+ /* Pin-based controls. */
+ VMXFEATDUMP("ExtIntExit - External interrupt exiting ", fVmxExtIntExit);
+ VMXFEATDUMP("NmiExit - NMI exiting ", fVmxNmiExit);
+ VMXFEATDUMP("VirtNmi - Virtual NMIs ", fVmxVirtNmi);
+ VMXFEATDUMP("PreemptTimer - VMX preemption timer ", fVmxPreemptTimer);
+ VMXFEATDUMP("PostedInt - Posted interrupts ", fVmxPostedInt);
+
+ /* Processor-based controls. */
+ VMXFEATDUMP("IntWindowExit - Interrupt-window exiting ", fVmxIntWindowExit);
+ VMXFEATDUMP("TscOffsetting - TSC offsetting ", fVmxTscOffsetting);
+ VMXFEATDUMP("HltExit - HLT exiting ", fVmxHltExit);
+ VMXFEATDUMP("InvlpgExit - INVLPG exiting ", fVmxInvlpgExit);
+ VMXFEATDUMP("MwaitExit - MWAIT exiting ", fVmxMwaitExit);
+ VMXFEATDUMP("RdpmcExit - RDPMC exiting ", fVmxRdpmcExit);
+ VMXFEATDUMP("RdtscExit - RDTSC exiting ", fVmxRdtscExit);
+ VMXFEATDUMP("Cr3LoadExit - CR3-load exiting ", fVmxCr3LoadExit);
+ VMXFEATDUMP("Cr3StoreExit - CR3-store exiting ", fVmxCr3StoreExit);
+ VMXFEATDUMP("TertiaryExecCtls - Activate tertiary controls ", fVmxTertiaryExecCtls);
+ VMXFEATDUMP("Cr8LoadExit - CR8-load exiting ", fVmxCr8LoadExit);
+ VMXFEATDUMP("Cr8StoreExit - CR8-store exiting ", fVmxCr8StoreExit);
+ VMXFEATDUMP("UseTprShadow - Use TPR shadow ", fVmxUseTprShadow);
+ VMXFEATDUMP("NmiWindowExit - NMI-window exiting ", fVmxNmiWindowExit);
+ VMXFEATDUMP("MovDRxExit - Mov-DR exiting ", fVmxMovDRxExit);
+ VMXFEATDUMP("UncondIoExit - Unconditional I/O exiting ", fVmxUncondIoExit);
+ VMXFEATDUMP("UseIoBitmaps - Use I/O bitmaps ", fVmxUseIoBitmaps);
+ VMXFEATDUMP("MonitorTrapFlag - Monitor Trap Flag ", fVmxMonitorTrapFlag);
+ VMXFEATDUMP("UseMsrBitmaps - MSR bitmaps ", fVmxUseMsrBitmaps);
+ VMXFEATDUMP("MonitorExit - MONITOR exiting ", fVmxMonitorExit);
+ VMXFEATDUMP("PauseExit - PAUSE exiting ", fVmxPauseExit);
+        VMXFEATDUMP("SecondaryExecCtls - Activate secondary controls ", fVmxSecondaryExecCtls);
+
+ /* Secondary processor-based controls. */
+ VMXFEATDUMP("VirtApic - Virtualize-APIC accesses ", fVmxVirtApicAccess);
+ VMXFEATDUMP("Ept - Extended Page Tables ", fVmxEpt);
+ VMXFEATDUMP("DescTableExit - Descriptor-table exiting ", fVmxDescTableExit);
+ VMXFEATDUMP("Rdtscp - Enable RDTSCP ", fVmxRdtscp);
+ VMXFEATDUMP("VirtX2ApicMode - Virtualize-x2APIC mode ", fVmxVirtX2ApicMode);
+ VMXFEATDUMP("Vpid - Enable VPID ", fVmxVpid);
+ VMXFEATDUMP("WbinvdExit - WBINVD exiting ", fVmxWbinvdExit);
+ VMXFEATDUMP("UnrestrictedGuest - Unrestricted guest ", fVmxUnrestrictedGuest);
+ VMXFEATDUMP("ApicRegVirt - APIC-register virtualization ", fVmxApicRegVirt);
+ VMXFEATDUMP("VirtIntDelivery - Virtual-interrupt delivery ", fVmxVirtIntDelivery);
+ VMXFEATDUMP("PauseLoopExit - PAUSE-loop exiting ", fVmxPauseLoopExit);
+ VMXFEATDUMP("RdrandExit - RDRAND exiting ", fVmxRdrandExit);
+ VMXFEATDUMP("Invpcid - Enable INVPCID ", fVmxInvpcid);
+ VMXFEATDUMP("VmFuncs - Enable VM Functions ", fVmxVmFunc);
+ VMXFEATDUMP("VmcsShadowing - VMCS shadowing ", fVmxVmcsShadowing);
+ VMXFEATDUMP("RdseedExiting - RDSEED exiting ", fVmxRdseedExit);
+ VMXFEATDUMP("PML - Page-Modification Log (PML) ", fVmxPml);
+ VMXFEATDUMP("EptVe - EPT violations can cause #VE ", fVmxEptXcptVe);
+ VMXFEATDUMP("ConcealVmxFromPt - Conceal VMX from Processor Trace ", fVmxConcealVmxFromPt);
+ VMXFEATDUMP("XsavesXRstors - Enable XSAVES/XRSTORS ", fVmxXsavesXrstors);
+ VMXFEATDUMP("ModeBasedExecuteEpt - Mode-based execute permissions ", fVmxModeBasedExecuteEpt);
+ VMXFEATDUMP("SppEpt - Sub-page page write permissions for EPT ", fVmxSppEpt);
+        VMXFEATDUMP("PtEpt - Processor Trace addresses translatable by EPT ", fVmxPtEpt);
+ VMXFEATDUMP("UseTscScaling - Use TSC scaling ", fVmxUseTscScaling);
+ VMXFEATDUMP("UserWaitPause - Enable TPAUSE, UMONITOR and UMWAIT ", fVmxUserWaitPause);
+ VMXFEATDUMP("EnclvExit - ENCLV exiting ", fVmxEnclvExit);
+
+ /* Tertiary processor-based controls. */
+ VMXFEATDUMP("LoadIwKeyExit - LOADIWKEY exiting ", fVmxLoadIwKeyExit);
+
+ /* VM-entry controls. */
+ VMXFEATDUMP("EntryLoadDebugCtls - Load debug controls on VM-entry ", fVmxEntryLoadDebugCtls);
+ VMXFEATDUMP("Ia32eModeGuest - IA-32e mode guest ", fVmxIa32eModeGuest);
+ VMXFEATDUMP("EntryLoadEferMsr - Load IA32_EFER MSR on VM-entry ", fVmxEntryLoadEferMsr);
+ VMXFEATDUMP("EntryLoadPatMsr - Load IA32_PAT MSR on VM-entry ", fVmxEntryLoadPatMsr);
+
+ /* VM-exit controls. */
+ VMXFEATDUMP("ExitSaveDebugCtls - Save debug controls on VM-exit ", fVmxExitSaveDebugCtls);
+ VMXFEATDUMP("HostAddrSpaceSize - Host address-space size ", fVmxHostAddrSpaceSize);
+ VMXFEATDUMP("ExitAckExtInt - Acknowledge interrupt on VM-exit ", fVmxExitAckExtInt);
+ VMXFEATDUMP("ExitSavePatMsr - Save IA32_PAT MSR on VM-exit ", fVmxExitSavePatMsr);
+ VMXFEATDUMP("ExitLoadPatMsr - Load IA32_PAT MSR on VM-exit ", fVmxExitLoadPatMsr);
+ VMXFEATDUMP("ExitSaveEferMsr - Save IA32_EFER MSR on VM-exit ", fVmxExitSaveEferMsr);
+ VMXFEATDUMP("ExitLoadEferMsr - Load IA32_EFER MSR on VM-exit ", fVmxExitLoadEferMsr);
+ VMXFEATDUMP("SavePreemptTimer - Save VMX-preemption timer ", fVmxSavePreemptTimer);
+ VMXFEATDUMP("SecondaryExitCtls - Secondary VM-exit controls ", fVmxSecondaryExitCtls);
+
+ /* Miscellaneous data. */
+ VMXFEATDUMP("ExitSaveEferLma - Save IA32_EFER.LMA on VM-exit ", fVmxExitSaveEferLma);
+ VMXFEATDUMP("IntelPt - Intel PT (Processor Trace) in VMX operation ", fVmxPt);
+ VMXFEATDUMP("VmwriteAll - VMWRITE to any supported VMCS field ", fVmxVmwriteAll);
+ VMXFEATDUMP("EntryInjectSoftInt - Inject softint. with 0-len instr. ", fVmxEntryInjectSoftInt);
+#undef VMXFEATDUMP
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "No VMX features present - requires an Intel or compatible CPU.\n");
+}
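+
+/* Illustrative sketch only (assuming the DBGF info API declared in
+ * VBox/vmm/dbgf.h): a handler such as cpumR3InfoVmxFeatures is made reachable
+ * from the debugger by registering it under an info name during init, along
+ * these lines: */
+#if 0 /* example, inside an init routine */
+    int rc = DBGFR3InfoRegisterInternal(pVM, "cpumvmxfeat",
+                                        "Displays the host and guest VMX features.",
+                                        cpumR3InfoVmxFeatures);
+    AssertLogRelRCReturn(rc, rc);
+#endif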
+
+
+/**
+ * Checks whether nested-guest execution using hardware-assisted VMX (e.g. using HM
+ * or NEM) is allowed.
+ *
+ * @returns @c true if hardware-assisted nested-guest execution is allowed, @c false
+ * otherwise.
+ * @param pVM The cross context VM structure.
+ */
+static bool cpumR3IsHwAssistNstGstExecAllowed(PVM pVM)
+{
+ AssertMsg(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET, ("Calling this function too early!\n"));
+#ifndef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
+ if ( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
+ || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ return true;
+#else
+ NOREF(pVM);
+#endif
+ return false;
+}
+
+
+/**
+ * Initializes the VMX guest MSRs from guest CPU features based on the host MSRs.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHostVmxMsrs The host VMX MSRs. Pass NULL when fully emulating VMX
+ * and no hardware-assisted nested-guest execution is
+ * possible for this VM.
+ * @param pGuestFeatures The guest features to use (only VMX features are
+ * accessed).
+ * @param pGuestVmxMsrs Where to store the initialized guest VMX MSRs.
+ *
+ * @remarks This function ASSUMES the VMX guest-features are already exploded!
+ */
+static void cpumR3InitVmxGuestMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PCCPUMFEATURES pGuestFeatures, PVMXMSRS pGuestVmxMsrs)
+{
+ bool const fIsNstGstHwExecAllowed = cpumR3IsHwAssistNstGstExecAllowed(pVM);
+
+ Assert(!fIsNstGstHwExecAllowed || pHostVmxMsrs);
+ Assert(pGuestFeatures->fVmx);
+
+ /* Basic information. */
+ uint8_t const fTrueVmxMsrs = 1;
+ {
+ uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID, VMX_V_VMCS_REVISION_ID )
+ | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE, VMX_V_VMCS_SIZE )
+ | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pGuestFeatures->fLongMode )
+ | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON, 0 )
+ | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE, VMX_BASIC_MEM_TYPE_WB )
+ | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS, pGuestFeatures->fVmxInsOutInfo)
+ | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS, fTrueVmxMsrs );
+ pGuestVmxMsrs->u64Basic = u64Basic;
+ }
+
+ /* Pin-based VM-execution controls. */
+ {
+ uint32_t const fFeatures = (pGuestFeatures->fVmxExtIntExit << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxNmiExit << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxVirtNmi << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT )
+ | (pGuestFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
+ | (pGuestFeatures->fVmxPostedInt << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT );
+ uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
+ uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
+ AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
+ fAllowed0, fAllowed1, fFeatures));
+ pGuestVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+
+ /* True pin-based VM-execution controls. */
+ if (fTrueVmxMsrs)
+ {
+ /* VMX_PIN_CTLS_DEFAULT1 contains MB1 reserved bits and must be reserved MB1 in true pin-based controls as well. */
+ pGuestVmxMsrs->TruePinCtls.u = pGuestVmxMsrs->PinCtls.u;
+ }
+ }
+
+ /* Processor-based VM-execution controls. */
+ {
+ uint32_t const fFeatures = (pGuestFeatures->fVmxIntWindowExit << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxTscOffsetting << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
+ | (pGuestFeatures->fVmxHltExit << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxInvlpgExit << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxMwaitExit << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxRdpmcExit << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxRdtscExit << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxCr3LoadExit << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxCr3StoreExit << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxTertiaryExecCtls << VMX_BF_PROC_CTLS_USE_TERTIARY_CTLS_SHIFT )
+ | (pGuestFeatures->fVmxCr8LoadExit << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxCr8StoreExit << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxUseTprShadow << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT )
+ | (pGuestFeatures->fVmxNmiWindowExit << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxMovDRxExit << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxUncondIoExit << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxUseIoBitmaps << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT )
+ | (pGuestFeatures->fVmxMonitorTrapFlag << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT )
+ | (pGuestFeatures->fVmxUseMsrBitmaps << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT )
+ | (pGuestFeatures->fVmxMonitorExit << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxPauseExit << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
+ uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
+ uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
+ AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+ fAllowed1, fFeatures));
+ pGuestVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+
+ /* True processor-based VM-execution controls. */
+ if (fTrueVmxMsrs)
+ {
+ /* VMX_PROC_CTLS_DEFAULT1 contains MB1 reserved bits but the following are not really reserved. */
+ uint32_t const fTrueAllowed0 = VMX_PROC_CTLS_DEFAULT1 & ~( VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_MASK
+ | VMX_BF_PROC_CTLS_CR3_STORE_EXIT_MASK);
+ uint32_t const fTrueAllowed1 = fFeatures | fTrueAllowed0;
+ pGuestVmxMsrs->TrueProcCtls.u = RT_MAKE_U64(fTrueAllowed0, fTrueAllowed1);
+ }
+ }
+
+ /* Secondary processor-based VM-execution controls. */
+ if (pGuestFeatures->fVmxSecondaryExecCtls)
+ {
+ uint32_t const fFeatures = (pGuestFeatures->fVmxVirtApicAccess << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT )
+ | (pGuestFeatures->fVmxEpt << VMX_BF_PROC_CTLS2_EPT_SHIFT )
+ | (pGuestFeatures->fVmxDescTableExit << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxRdtscp << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT )
+ | (pGuestFeatures->fVmxVirtX2ApicMode << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT )
+ | (pGuestFeatures->fVmxVpid << VMX_BF_PROC_CTLS2_VPID_SHIFT )
+ | (pGuestFeatures->fVmxWbinvdExit << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT )
+ | (pGuestFeatures->fVmxApicRegVirt << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT )
+ | (pGuestFeatures->fVmxVirtIntDelivery << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT )
+ | (pGuestFeatures->fVmxPauseLoopExit << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxRdrandExit << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxInvpcid << VMX_BF_PROC_CTLS2_INVPCID_SHIFT )
+ | (pGuestFeatures->fVmxVmFunc << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT )
+ | (pGuestFeatures->fVmxVmcsShadowing << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT )
+ | (pGuestFeatures->fVmxRdseedExit << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT )
+ | (pGuestFeatures->fVmxPml << VMX_BF_PROC_CTLS2_PML_SHIFT )
+ | (pGuestFeatures->fVmxEptXcptVe << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT )
+ | (pGuestFeatures->fVmxConcealVmxFromPt << VMX_BF_PROC_CTLS2_CONCEAL_VMX_FROM_PT_SHIFT)
+ | (pGuestFeatures->fVmxXsavesXrstors << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT )
+ | (pGuestFeatures->fVmxModeBasedExecuteEpt << VMX_BF_PROC_CTLS2_MODE_BASED_EPT_PERM_SHIFT)
+ | (pGuestFeatures->fVmxSppEpt << VMX_BF_PROC_CTLS2_SPP_EPT_SHIFT )
+ | (pGuestFeatures->fVmxPtEpt << VMX_BF_PROC_CTLS2_PT_EPT_SHIFT )
+ | (pGuestFeatures->fVmxUseTscScaling << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT )
+ | (pGuestFeatures->fVmxUserWaitPause << VMX_BF_PROC_CTLS2_USER_WAIT_PAUSE_SHIFT )
+ | (pGuestFeatures->fVmxEnclvExit << VMX_BF_PROC_CTLS2_ENCLV_EXIT_SHIFT );
+ uint32_t const fAllowed0 = 0;
+ uint32_t const fAllowed1 = fFeatures;
+ pGuestVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+ }
+
+ /* Tertiary processor-based VM-execution controls. */
+ if (pGuestFeatures->fVmxTertiaryExecCtls)
+ {
+ pGuestVmxMsrs->u64ProcCtls3 = (pGuestFeatures->fVmxLoadIwKeyExit << VMX_BF_PROC_CTLS3_LOADIWKEY_EXIT_SHIFT);
+ }
+
+ /* VM-exit controls. */
+ {
+ uint32_t const fFeatures = (pGuestFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT )
+ | (pGuestFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
+ | (pGuestFeatures->fVmxExitAckExtInt << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT )
+ | (pGuestFeatures->fVmxExitSavePatMsr << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT )
+ | (pGuestFeatures->fVmxExitLoadPatMsr << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT )
+ | (pGuestFeatures->fVmxExitSaveEferMsr << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT )
+ | (pGuestFeatures->fVmxExitLoadEferMsr << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT )
+ | (pGuestFeatures->fVmxSavePreemptTimer << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT )
+ | (pGuestFeatures->fVmxSecondaryExitCtls << VMX_BF_EXIT_CTLS_USE_SECONDARY_CTLS_SHIFT );
+ /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
+ uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
+ uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
+ AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+ fAllowed1, fFeatures));
+ pGuestVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+
+ /* True VM-exit controls. */
+ if (fTrueVmxMsrs)
+ {
+ /* VMX_EXIT_CTLS_DEFAULT1 contains MB1 reserved bits but the following are not really reserved */
+ uint32_t const fTrueAllowed0 = VMX_EXIT_CTLS_DEFAULT1 & ~VMX_BF_EXIT_CTLS_SAVE_DEBUG_MASK;
+ uint32_t const fTrueAllowed1 = fFeatures | fTrueAllowed0;
+ pGuestVmxMsrs->TrueExitCtls.u = RT_MAKE_U64(fTrueAllowed0, fTrueAllowed1);
+ }
+ }
+
+ /* VM-entry controls. */
+ {
+ uint32_t const fFeatures = (pGuestFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT )
+ | (pGuestFeatures->fVmxIa32eModeGuest << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
+ | (pGuestFeatures->fVmxEntryLoadEferMsr << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT )
+ | (pGuestFeatures->fVmxEntryLoadPatMsr << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT );
+ uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
+ uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
+        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+                                                          fAllowed1, fFeatures));
+ pGuestVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+
+ /* True VM-entry controls. */
+ if (fTrueVmxMsrs)
+ {
+ /* VMX_ENTRY_CTLS_DEFAULT1 contains MB1 reserved bits but the following are not really reserved */
+ uint32_t const fTrueAllowed0 = VMX_ENTRY_CTLS_DEFAULT1 & ~( VMX_BF_ENTRY_CTLS_LOAD_DEBUG_MASK
+ | VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_MASK
+ | VMX_BF_ENTRY_CTLS_ENTRY_SMM_MASK
+ | VMX_BF_ENTRY_CTLS_DEACTIVATE_DUAL_MON_MASK);
+ uint32_t const fTrueAllowed1 = fFeatures | fTrueAllowed0;
+ pGuestVmxMsrs->TrueEntryCtls.u = RT_MAKE_U64(fTrueAllowed0, fTrueAllowed1);
+ }
+ }
+
+ /* Miscellaneous data. */
+ {
+ uint64_t const uHostMsr = fIsNstGstHwExecAllowed ? pHostVmxMsrs->u64Misc : 0;
+
+ uint8_t const cMaxMsrs = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
+ uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
+ pGuestVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC, VMX_V_PREEMPT_TIMER_SHIFT )
+ | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA, pGuestFeatures->fVmxExitSaveEferLma )
+ | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES, fActivityState )
+ | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT, pGuestFeatures->fVmxPt )
+ | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR, 0 )
+ | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET, VMX_V_CR3_TARGET_COUNT )
+ | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS, cMaxMsrs )
+ | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI, 0 )
+ | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL, pGuestFeatures->fVmxVmwriteAll )
+ | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT, pGuestFeatures->fVmxEntryInjectSoftInt)
+ | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID, VMX_V_MSEG_REV_ID );
+ }
+
+    /* CR0 Fixed-0 (we report this fixed value regardless of whether UX is supported, as real hardware does). */
+ pGuestVmxMsrs->u64Cr0Fixed0 = VMX_V_CR0_FIXED0;
+
+ /* CR0 Fixed-1. */
+ {
+ /*
+ * All CPUs I've looked at so far report CR0 fixed-1 bits as 0xffffffff.
+ * This is different from CR4 fixed-1 bits which are reported as per the
+ * CPU features and/or micro-architecture/generation. Why? Ask Intel.
+ */
+ pGuestVmxMsrs->u64Cr0Fixed1 = fIsNstGstHwExecAllowed ? pHostVmxMsrs->u64Cr0Fixed1 : VMX_V_CR0_FIXED1;
+
+ /* Make sure the CR0 MB1 bits are not clear. */
+ Assert((pGuestVmxMsrs->u64Cr0Fixed1 & pGuestVmxMsrs->u64Cr0Fixed0) == pGuestVmxMsrs->u64Cr0Fixed0);
+ }
+
+ /* CR4 Fixed-0. */
+ pGuestVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0;
+
+ /* CR4 Fixed-1. */
+ {
+ pGuestVmxMsrs->u64Cr4Fixed1 = CPUMGetGuestCR4ValidMask(pVM) & pHostVmxMsrs->u64Cr4Fixed1;
+
+ /* Make sure the CR4 MB1 bits are not clear. */
+ Assert((pGuestVmxMsrs->u64Cr4Fixed1 & pGuestVmxMsrs->u64Cr4Fixed0) == pGuestVmxMsrs->u64Cr4Fixed0);
+
+ /* Make sure bits that must always be set are set. */
+ Assert(pGuestVmxMsrs->u64Cr4Fixed1 & X86_CR4_PAE);
+ Assert(pGuestVmxMsrs->u64Cr4Fixed1 & X86_CR4_VMXE);
+ }
+
+ /* VMCS Enumeration. */
+ pGuestVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
+
+ /* VPID and EPT Capabilities. */
+ if (pGuestFeatures->fVmxEpt)
+ {
+ /*
+         * The INVVPID instruction always causes a VM-exit, so we are free to fake and
+         * emulate any INVVPID flush type. However, it only makes sense to expose the
+         * flush types when the INVVPID instruction itself is supported, to be more
+         * compatible with guest hypervisors that may make assumptions by looking only
+         * at this MSR, even though they are technically supposed to check
+         * VMX_PROC_CTLS2_VPID first.
+ *
+ * See Intel spec. 25.1.2 "Instructions That Cause VM Exits Unconditionally".
+ * See Intel spec. 30.3 "VMX Instructions".
+ */
+ uint64_t const uHostMsr = fIsNstGstHwExecAllowed ? pHostVmxMsrs->u64EptVpidCaps : UINT64_MAX;
+ uint8_t const fVpid = pGuestFeatures->fVmxVpid;
+
+ uint8_t const fExecOnly = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY);
+ uint8_t const fPml4 = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
+ uint8_t const fMemTypeUc = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_MEMTYPE_UC);
+ uint8_t const fMemTypeWb = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_MEMTYPE_WB);
+ uint8_t const f2MPage = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_PDE_2M);
+ uint8_t const fInvept = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVEPT);
+ /** @todo Nested VMX: Support accessed/dirty bits, see @bugref{10092#c25}. */
+ /* uint8_t const fAccessDirty = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY); */
+ uint8_t const fEptSingle = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX);
+ uint8_t const fEptAll = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX);
+ uint8_t const fVpidIndiv = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+ uint8_t const fVpidSingle = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
+ uint8_t const fVpidAll = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
+ uint8_t const fVpidSingleGlobal = RT_BF_GET(uHostMsr, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
+ pGuestVmxMsrs->u64EptVpidCaps = RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_EXEC_ONLY, fExecOnly)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4, fPml4)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_MEMTYPE_UC, fMemTypeUc)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_MEMTYPE_WB, fMemTypeWb)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_PDE_2M, f2MPage)
+ //| RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_PDPTE_1G, 0)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVEPT, fInvept)
+ //| RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY, 0)
+ //| RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION, 0)
+ //| RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_SUPER_SHW_STACK, 0)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX, fEptSingle)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX, fEptAll)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVVPID, fVpid)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR, fVpid & fVpidIndiv)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX, fVpid & fVpidSingle)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX, fVpid & fVpidAll)
+ | RT_BF_MAKE(VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS, fVpid & fVpidSingleGlobal);
+ }
+
+ /* VM Functions. */
+ if (pGuestFeatures->fVmxVmFunc)
+ pGuestVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
+}
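+
+/* Illustrative sketch only (hypothetical helper, not part of the original
+ * file): each allowed-0/allowed-1 capability MSR built above packs the
+ * must-be-one bits into the low dword and the may-be-one bits into the high
+ * dword (RT_MAKE_U64(fAllowed0, fAllowed1)).  A 32-bit control value is
+ * consistent with such an MSR when: */
+#if 0 /* example */
+DECLINLINE(bool) cpumExampleIsVmxCtlValid(uint64_t uCapMsr, uint32_t uCtls)
+{
+    uint32_t const fAllowed0 = RT_LO_U32(uCapMsr);  /* bits that must be 1 */
+    uint32_t const fAllowed1 = RT_HI_U32(uCapMsr);  /* bits that may be 1 */
+    return (uCtls & fAllowed0) == fAllowed0         /* all must-be-one bits set */
+        && (uCtls & ~fAllowed1) == 0;               /* nothing beyond allowed-one */
+}
+#endif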
+
+
+/**
+ * Checks whether the given guest CPU VMX features are compatible with the provided
+ * base features.
+ *
+ * @returns @c true if compatible, @c false otherwise.
+ * @param pVM The cross context VM structure.
+ * @param pBase The base VMX CPU features.
+ * @param pGst The guest VMX CPU features.
+ *
+ * @remarks Only VMX feature bits are examined.
+ */
+static bool cpumR3AreVmxCpuFeaturesCompatible(PVM pVM, PCCPUMFEATURES pBase, PCCPUMFEATURES pGst)
+{
+ if (!cpumR3IsHwAssistNstGstExecAllowed(pVM))
+ return false;
+
+#define CPUM_VMX_FEAT_SHIFT(a_pFeat, a_FeatName, a_cShift) ((uint64_t)(a_pFeat->a_FeatName) << (a_cShift))
+#define CPUM_VMX_MAKE_FEATURES_1(a_pFeat) ( CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxInsOutInfo , 0) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExtIntExit , 1) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxNmiExit , 2) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVirtNmi , 3) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPreemptTimer , 4) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPostedInt , 5) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxIntWindowExit , 6) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxTscOffsetting , 7) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxHltExit , 8) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxInvlpgExit , 9) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxMwaitExit , 10) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxRdpmcExit , 12) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxRdtscExit , 13) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxCr3LoadExit , 14) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxCr3StoreExit , 15) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxTertiaryExecCtls , 16) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxCr8LoadExit , 17) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxCr8StoreExit , 18) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUseTprShadow , 19) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxNmiWindowExit , 20) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxMovDRxExit , 21) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUncondIoExit , 22) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUseIoBitmaps , 23) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxMonitorTrapFlag , 24) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUseMsrBitmaps , 25) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxMonitorExit , 26) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPauseExit , 27) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxSecondaryExecCtls , 28) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVirtApicAccess , 29) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEpt , 30) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxDescTableExit , 31) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxRdtscp , 32) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVirtX2ApicMode , 33) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVpid , 34) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxWbinvdExit , 35) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUnrestrictedGuest , 36) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxApicRegVirt , 37) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVirtIntDelivery , 38) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPauseLoopExit , 39) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxRdrandExit , 40) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxInvpcid , 41) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVmFunc , 42) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVmcsShadowing , 43) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxRdseedExit , 44) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPml , 45) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEptXcptVe , 46) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxConcealVmxFromPt , 47) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxXsavesXrstors , 48) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxModeBasedExecuteEpt, 49) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxSppEpt , 50) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPtEpt , 51) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUseTscScaling , 52) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxUserWaitPause , 53) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEnclvExit , 54) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxLoadIwKeyExit , 55) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEntryLoadDebugCtls , 56) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxIa32eModeGuest , 57) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEntryLoadEferMsr , 58) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEntryLoadPatMsr , 59) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitSaveDebugCtls , 60) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxHostAddrSpaceSize , 61) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitAckExtInt , 62) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitSavePatMsr , 63))
+
+#define CPUM_VMX_MAKE_FEATURES_2(a_pFeat) ( CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitLoadPatMsr , 0) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitSaveEferMsr , 1) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitLoadEferMsr , 2) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxSavePreemptTimer , 3) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxSecondaryExitCtls , 4) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxExitSaveEferLma , 5) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxPt , 6) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxVmwriteAll , 7) \
+ | CPUM_VMX_FEAT_SHIFT(a_pFeat, fVmxEntryInjectSoftInt , 8))
+
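+    /* Worked example (illustrative values): the checks below are subset tests.
+       With fBase=0x7 and fGst=0x5 we get (fBase | fGst) == fBase and the saved
+       state is accepted; with fGst=0x9 the OR yields 0xf != 0x7 and the state
+       is rejected since it relies on a feature we no longer expose. */
+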
+ /* Check first set of feature bits. */
+ {
+ uint64_t const fBase = CPUM_VMX_MAKE_FEATURES_1(pBase);
+ uint64_t const fGst = CPUM_VMX_MAKE_FEATURES_1(pGst);
+ if ((fBase | fGst) != fBase)
+ {
+ uint64_t const fDiff = fBase ^ fGst;
+ LogRel(("CPUM: VMX features (1) now exposed to the guest are incompatible with those from the saved state. fBase=%#RX64 fGst=%#RX64 fDiff=%#RX64\n",
+ fBase, fGst, fDiff));
+ return false;
+ }
+ }
+
+ /* Check second set of feature bits. */
+ {
+ uint64_t const fBase = CPUM_VMX_MAKE_FEATURES_2(pBase);
+ uint64_t const fGst = CPUM_VMX_MAKE_FEATURES_2(pGst);
+ if ((fBase | fGst) != fBase)
+ {
+ uint64_t const fDiff = fBase ^ fGst;
+ LogRel(("CPUM: VMX features (2) now exposed to the guest are incompatible with those from the saved state. fBase=%#RX64 fGst=%#RX64 fDiff=%#RX64\n",
+ fBase, fGst, fDiff));
+ return false;
+ }
+ }
+#undef CPUM_VMX_FEAT_SHIFT
+#undef CPUM_VMX_MAKE_FEATURES_1
+#undef CPUM_VMX_MAKE_FEATURES_2
+
+ return true;
+}
+
+
+/**
+ * Initializes VMX guest features and MSRs.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCpumCfg The CPUM CFGM configuration node.
+ * @param pHostVmxMsrs The host VMX MSRs. Pass NULL when fully emulating VMX
+ * and no hardware-assisted nested-guest execution is
+ * possible for this VM.
+ * @param pGuestVmxMsrs Where to store the initialized guest VMX MSRs.
+ */
+void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs)
+{
+ Assert(pVM);
+ Assert(pCpumCfg);
+ Assert(pGuestVmxMsrs);
+
+ /*
+ * Query VMX features from CFGM.
+ */
+ bool fVmxPreemptTimer;
+ bool fVmxEpt;
+ bool fVmxUnrestrictedGuest;
+ {
+ /** @cfgm{/CPUM/NestedVmxPreemptTimer, bool, true}
+ * Whether to expose the VMX-preemption timer feature to the guest (if also
+     * supported by the host hardware). When disabled, the VMX-preemption timer
+     * feature will not be exposed to the guest even if the host supports it.
+ *
+ * @todo Currently disabled, see @bugref{9180#c108}.
+ */
+ int rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxPreemptTimer", &fVmxPreemptTimer, false);
+ AssertLogRelRCReturnVoid(rc);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ /** @cfgm{/CPUM/NestedVmxEpt, bool, true}
+ * Whether to expose the EPT feature to the guest. The default is true.
+     * When disabled, features that rely on it are automatically not exposed
+     * either. This requires nested paging to be enabled for the VM.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &fVmxEpt, true);
+ AssertLogRelRCReturnVoid(rc);
+
+ /** @cfgm{/CPUM/NestedVmxUnrestrictedGuest, bool, true}
+ * Whether to expose the Unrestricted Guest feature to the guest. The
+     * default is the same as /CPUM/NestedVmxEpt. When disabled, features
+     * that rely on it are automatically not exposed either.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &fVmxUnrestrictedGuest, fVmxEpt);
+ AssertLogRelRCReturnVoid(rc);
+#else
+ fVmxEpt = fVmxUnrestrictedGuest = false;
+#endif
+ }
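+
+    /* Usage sketch (assumed names, not part of this change): CFGM keys under
+       /CPUM/ can typically be overridden through the VBoxInternal extra-data
+       mapping, e.g.:
+           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/NestedVmxEpt" 0
+       where "MyVM" is a placeholder VM name. */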
+
+ if (fVmxEpt)
+ {
+ const char *pszWhy = NULL;
+ if (!VM_IS_HM_ENABLED(pVM) && !VM_IS_EXEC_ENGINE_IEM(pVM))
+ pszWhy = "execution engine is neither HM nor IEM";
+ else if (VM_IS_HM_ENABLED(pVM) && !HMIsNestedPagingActive(pVM))
+ pszWhy = "nested paging is not enabled for the VM or it is not supported by the host";
+ else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.fNoExecute)
+ pszWhy = "NX is not available on the host";
+ if (pszWhy)
+ {
+ LogRel(("CPUM: Warning! EPT not exposed to the guest because %s\n", pszWhy));
+ fVmxEpt = false;
+ }
+ }
+ else if (fVmxUnrestrictedGuest)
+ {
+ LogRel(("CPUM: Warning! Can't expose \"Unrestricted Guest\" to the guest when EPT is not exposed!\n"));
+ fVmxUnrestrictedGuest = false;
+ }
+
+ /*
+ * Initialize the set of VMX features we emulate.
+ *
+     * Note! Some bits might always be reported as 1 if they fall under the
+ * default1 class bits (e.g. fVmxEntryLoadDebugCtls), see @bugref{9180#c5}.
+ */
+ CPUMFEATURES EmuFeat;
+ RT_ZERO(EmuFeat);
+ EmuFeat.fVmx = 1;
+ EmuFeat.fVmxInsOutInfo = 1;
+ EmuFeat.fVmxExtIntExit = 1;
+ EmuFeat.fVmxNmiExit = 1;
+ EmuFeat.fVmxVirtNmi = 1;
+ EmuFeat.fVmxPreemptTimer = fVmxPreemptTimer;
+ EmuFeat.fVmxPostedInt = 0;
+ EmuFeat.fVmxIntWindowExit = 1;
+ EmuFeat.fVmxTscOffsetting = 1;
+ EmuFeat.fVmxHltExit = 1;
+ EmuFeat.fVmxInvlpgExit = 1;
+ EmuFeat.fVmxMwaitExit = 1;
+ EmuFeat.fVmxRdpmcExit = 1;
+ EmuFeat.fVmxRdtscExit = 1;
+ EmuFeat.fVmxCr3LoadExit = 1;
+ EmuFeat.fVmxCr3StoreExit = 1;
+ EmuFeat.fVmxTertiaryExecCtls = 0;
+ EmuFeat.fVmxCr8LoadExit = 1;
+ EmuFeat.fVmxCr8StoreExit = 1;
+ EmuFeat.fVmxUseTprShadow = 1;
+ EmuFeat.fVmxNmiWindowExit = 1;
+ EmuFeat.fVmxMovDRxExit = 1;
+ EmuFeat.fVmxUncondIoExit = 1;
+ EmuFeat.fVmxUseIoBitmaps = 1;
+ EmuFeat.fVmxMonitorTrapFlag = 0;
+ EmuFeat.fVmxUseMsrBitmaps = 1;
+ EmuFeat.fVmxMonitorExit = 1;
+ EmuFeat.fVmxPauseExit = 1;
+ EmuFeat.fVmxSecondaryExecCtls = 1;
+ EmuFeat.fVmxVirtApicAccess = 1;
+ EmuFeat.fVmxEpt = fVmxEpt;
+ EmuFeat.fVmxDescTableExit = 1;
+ EmuFeat.fVmxRdtscp = 1;
+ EmuFeat.fVmxVirtX2ApicMode = 0;
+ EmuFeat.fVmxVpid = 1;
+ EmuFeat.fVmxWbinvdExit = 1;
+ EmuFeat.fVmxUnrestrictedGuest = fVmxUnrestrictedGuest;
+ EmuFeat.fVmxApicRegVirt = 0;
+ EmuFeat.fVmxVirtIntDelivery = 0;
+ EmuFeat.fVmxPauseLoopExit = 1;
+ EmuFeat.fVmxRdrandExit = 0;
+ EmuFeat.fVmxInvpcid = 1;
+ EmuFeat.fVmxVmFunc = 0;
+ EmuFeat.fVmxVmcsShadowing = 0;
+ EmuFeat.fVmxRdseedExit = 0;
+ EmuFeat.fVmxPml = 0;
+ EmuFeat.fVmxEptXcptVe = 0;
+ EmuFeat.fVmxConcealVmxFromPt = 0;
+ EmuFeat.fVmxXsavesXrstors = 0;
+ EmuFeat.fVmxModeBasedExecuteEpt = 0;
+ EmuFeat.fVmxSppEpt = 0;
+ EmuFeat.fVmxPtEpt = 0;
+ EmuFeat.fVmxUseTscScaling = 0;
+ EmuFeat.fVmxUserWaitPause = 0;
+ EmuFeat.fVmxEnclvExit = 0;
+ EmuFeat.fVmxLoadIwKeyExit = 0;
+ EmuFeat.fVmxEntryLoadDebugCtls = 1;
+ EmuFeat.fVmxIa32eModeGuest = 1;
+ EmuFeat.fVmxEntryLoadEferMsr = 1;
+ EmuFeat.fVmxEntryLoadPatMsr = 1;
+ EmuFeat.fVmxExitSaveDebugCtls = 1;
+ EmuFeat.fVmxHostAddrSpaceSize = 1;
+ EmuFeat.fVmxExitAckExtInt = 1;
+ EmuFeat.fVmxExitSavePatMsr = 0;
+ EmuFeat.fVmxExitLoadPatMsr = 1;
+ EmuFeat.fVmxExitSaveEferMsr = 1;
+ EmuFeat.fVmxExitLoadEferMsr = 1;
+ EmuFeat.fVmxSavePreemptTimer = 0; /* Cannot be enabled if VMX-preemption timer is disabled. */
+ EmuFeat.fVmxSecondaryExitCtls = 0;
+ EmuFeat.fVmxExitSaveEferLma = 1; /* Cannot be disabled if unrestricted guest is enabled. */
+ EmuFeat.fVmxPt = 0;
+ EmuFeat.fVmxVmwriteAll = 0; /** @todo NSTVMX: enable this when nested VMCS shadowing is enabled. */
+ EmuFeat.fVmxEntryInjectSoftInt = 1;
+
+ /*
+ * Merge guest features.
+ *
+ * When hardware-assisted VMX may be used, any feature we emulate must also be supported
+ * by the hardware, hence we merge our emulated features with the host features below.
+ */
+ PCCPUMFEATURES pBaseFeat = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
+ PCPUMFEATURES pGuestFeat = &pVM->cpum.s.GuestFeatures;
+ Assert(pBaseFeat->fVmx);
+ pGuestFeat->fVmxInsOutInfo = (pBaseFeat->fVmxInsOutInfo & EmuFeat.fVmxInsOutInfo );
+ pGuestFeat->fVmxExtIntExit = (pBaseFeat->fVmxExtIntExit & EmuFeat.fVmxExtIntExit );
+ pGuestFeat->fVmxNmiExit = (pBaseFeat->fVmxNmiExit & EmuFeat.fVmxNmiExit );
+ pGuestFeat->fVmxVirtNmi = (pBaseFeat->fVmxVirtNmi & EmuFeat.fVmxVirtNmi );
+ pGuestFeat->fVmxPreemptTimer = (pBaseFeat->fVmxPreemptTimer & EmuFeat.fVmxPreemptTimer );
+ pGuestFeat->fVmxPostedInt = (pBaseFeat->fVmxPostedInt & EmuFeat.fVmxPostedInt );
+ pGuestFeat->fVmxIntWindowExit = (pBaseFeat->fVmxIntWindowExit & EmuFeat.fVmxIntWindowExit );
+ pGuestFeat->fVmxTscOffsetting = (pBaseFeat->fVmxTscOffsetting & EmuFeat.fVmxTscOffsetting );
+ pGuestFeat->fVmxHltExit = (pBaseFeat->fVmxHltExit & EmuFeat.fVmxHltExit );
+ pGuestFeat->fVmxInvlpgExit = (pBaseFeat->fVmxInvlpgExit & EmuFeat.fVmxInvlpgExit );
+ pGuestFeat->fVmxMwaitExit = (pBaseFeat->fVmxMwaitExit & EmuFeat.fVmxMwaitExit );
+ pGuestFeat->fVmxRdpmcExit = (pBaseFeat->fVmxRdpmcExit & EmuFeat.fVmxRdpmcExit );
+ pGuestFeat->fVmxRdtscExit = (pBaseFeat->fVmxRdtscExit & EmuFeat.fVmxRdtscExit );
+ pGuestFeat->fVmxCr3LoadExit = (pBaseFeat->fVmxCr3LoadExit & EmuFeat.fVmxCr3LoadExit );
+ pGuestFeat->fVmxCr3StoreExit = (pBaseFeat->fVmxCr3StoreExit & EmuFeat.fVmxCr3StoreExit );
+ pGuestFeat->fVmxTertiaryExecCtls = (pBaseFeat->fVmxTertiaryExecCtls & EmuFeat.fVmxTertiaryExecCtls );
+ pGuestFeat->fVmxCr8LoadExit = (pBaseFeat->fVmxCr8LoadExit & EmuFeat.fVmxCr8LoadExit );
+ pGuestFeat->fVmxCr8StoreExit = (pBaseFeat->fVmxCr8StoreExit & EmuFeat.fVmxCr8StoreExit );
+ pGuestFeat->fVmxUseTprShadow = (pBaseFeat->fVmxUseTprShadow & EmuFeat.fVmxUseTprShadow );
+ pGuestFeat->fVmxNmiWindowExit = (pBaseFeat->fVmxNmiWindowExit & EmuFeat.fVmxNmiWindowExit );
+ pGuestFeat->fVmxMovDRxExit = (pBaseFeat->fVmxMovDRxExit & EmuFeat.fVmxMovDRxExit );
+ pGuestFeat->fVmxUncondIoExit = (pBaseFeat->fVmxUncondIoExit & EmuFeat.fVmxUncondIoExit );
+ pGuestFeat->fVmxUseIoBitmaps = (pBaseFeat->fVmxUseIoBitmaps & EmuFeat.fVmxUseIoBitmaps );
+ pGuestFeat->fVmxMonitorTrapFlag = (pBaseFeat->fVmxMonitorTrapFlag & EmuFeat.fVmxMonitorTrapFlag );
+ pGuestFeat->fVmxUseMsrBitmaps = (pBaseFeat->fVmxUseMsrBitmaps & EmuFeat.fVmxUseMsrBitmaps );
+ pGuestFeat->fVmxMonitorExit = (pBaseFeat->fVmxMonitorExit & EmuFeat.fVmxMonitorExit );
+ pGuestFeat->fVmxPauseExit = (pBaseFeat->fVmxPauseExit & EmuFeat.fVmxPauseExit );
+ pGuestFeat->fVmxSecondaryExecCtls = (pBaseFeat->fVmxSecondaryExecCtls & EmuFeat.fVmxSecondaryExecCtls );
+ pGuestFeat->fVmxVirtApicAccess = (pBaseFeat->fVmxVirtApicAccess & EmuFeat.fVmxVirtApicAccess );
+ pGuestFeat->fVmxEpt = (pBaseFeat->fVmxEpt & EmuFeat.fVmxEpt );
+ pGuestFeat->fVmxDescTableExit = (pBaseFeat->fVmxDescTableExit & EmuFeat.fVmxDescTableExit );
+ pGuestFeat->fVmxRdtscp = (pBaseFeat->fVmxRdtscp & EmuFeat.fVmxRdtscp );
+ pGuestFeat->fVmxVirtX2ApicMode = (pBaseFeat->fVmxVirtX2ApicMode & EmuFeat.fVmxVirtX2ApicMode );
+ pGuestFeat->fVmxVpid = (pBaseFeat->fVmxVpid & EmuFeat.fVmxVpid );
+ pGuestFeat->fVmxWbinvdExit = (pBaseFeat->fVmxWbinvdExit & EmuFeat.fVmxWbinvdExit );
+ pGuestFeat->fVmxUnrestrictedGuest = (pBaseFeat->fVmxUnrestrictedGuest & EmuFeat.fVmxUnrestrictedGuest );
+ pGuestFeat->fVmxApicRegVirt = (pBaseFeat->fVmxApicRegVirt & EmuFeat.fVmxApicRegVirt );
+ pGuestFeat->fVmxVirtIntDelivery = (pBaseFeat->fVmxVirtIntDelivery & EmuFeat.fVmxVirtIntDelivery );
+ pGuestFeat->fVmxPauseLoopExit = (pBaseFeat->fVmxPauseLoopExit & EmuFeat.fVmxPauseLoopExit );
+ pGuestFeat->fVmxRdrandExit = (pBaseFeat->fVmxRdrandExit & EmuFeat.fVmxRdrandExit );
+ pGuestFeat->fVmxInvpcid = (pBaseFeat->fVmxInvpcid & EmuFeat.fVmxInvpcid );
+ pGuestFeat->fVmxVmFunc = (pBaseFeat->fVmxVmFunc & EmuFeat.fVmxVmFunc );
+ pGuestFeat->fVmxVmcsShadowing = (pBaseFeat->fVmxVmcsShadowing & EmuFeat.fVmxVmcsShadowing );
+ pGuestFeat->fVmxRdseedExit = (pBaseFeat->fVmxRdseedExit & EmuFeat.fVmxRdseedExit );
+ pGuestFeat->fVmxPml = (pBaseFeat->fVmxPml & EmuFeat.fVmxPml );
+ pGuestFeat->fVmxEptXcptVe = (pBaseFeat->fVmxEptXcptVe & EmuFeat.fVmxEptXcptVe );
+ pGuestFeat->fVmxConcealVmxFromPt = (pBaseFeat->fVmxConcealVmxFromPt & EmuFeat.fVmxConcealVmxFromPt );
+ pGuestFeat->fVmxXsavesXrstors = (pBaseFeat->fVmxXsavesXrstors & EmuFeat.fVmxXsavesXrstors );
+ pGuestFeat->fVmxModeBasedExecuteEpt = (pBaseFeat->fVmxModeBasedExecuteEpt & EmuFeat.fVmxModeBasedExecuteEpt );
+ pGuestFeat->fVmxSppEpt = (pBaseFeat->fVmxSppEpt & EmuFeat.fVmxSppEpt );
+ pGuestFeat->fVmxPtEpt = (pBaseFeat->fVmxPtEpt & EmuFeat.fVmxPtEpt );
+ pGuestFeat->fVmxUseTscScaling = (pBaseFeat->fVmxUseTscScaling & EmuFeat.fVmxUseTscScaling );
+ pGuestFeat->fVmxUserWaitPause = (pBaseFeat->fVmxUserWaitPause & EmuFeat.fVmxUserWaitPause );
+ pGuestFeat->fVmxEnclvExit = (pBaseFeat->fVmxEnclvExit & EmuFeat.fVmxEnclvExit );
+ pGuestFeat->fVmxLoadIwKeyExit = (pBaseFeat->fVmxLoadIwKeyExit & EmuFeat.fVmxLoadIwKeyExit );
+ pGuestFeat->fVmxEntryLoadDebugCtls = (pBaseFeat->fVmxEntryLoadDebugCtls & EmuFeat.fVmxEntryLoadDebugCtls );
+ pGuestFeat->fVmxIa32eModeGuest = (pBaseFeat->fVmxIa32eModeGuest & EmuFeat.fVmxIa32eModeGuest );
+ pGuestFeat->fVmxEntryLoadEferMsr = (pBaseFeat->fVmxEntryLoadEferMsr & EmuFeat.fVmxEntryLoadEferMsr );
+ pGuestFeat->fVmxEntryLoadPatMsr = (pBaseFeat->fVmxEntryLoadPatMsr & EmuFeat.fVmxEntryLoadPatMsr );
+ pGuestFeat->fVmxExitSaveDebugCtls = (pBaseFeat->fVmxExitSaveDebugCtls & EmuFeat.fVmxExitSaveDebugCtls );
+ pGuestFeat->fVmxHostAddrSpaceSize = (pBaseFeat->fVmxHostAddrSpaceSize & EmuFeat.fVmxHostAddrSpaceSize );
+ pGuestFeat->fVmxExitAckExtInt = (pBaseFeat->fVmxExitAckExtInt & EmuFeat.fVmxExitAckExtInt );
+ pGuestFeat->fVmxExitSavePatMsr = (pBaseFeat->fVmxExitSavePatMsr & EmuFeat.fVmxExitSavePatMsr );
+ pGuestFeat->fVmxExitLoadPatMsr = (pBaseFeat->fVmxExitLoadPatMsr & EmuFeat.fVmxExitLoadPatMsr );
+ pGuestFeat->fVmxExitSaveEferMsr = (pBaseFeat->fVmxExitSaveEferMsr & EmuFeat.fVmxExitSaveEferMsr );
+ pGuestFeat->fVmxExitLoadEferMsr = (pBaseFeat->fVmxExitLoadEferMsr & EmuFeat.fVmxExitLoadEferMsr );
+ pGuestFeat->fVmxSavePreemptTimer = (pBaseFeat->fVmxSavePreemptTimer & EmuFeat.fVmxSavePreemptTimer );
+ pGuestFeat->fVmxSecondaryExitCtls = (pBaseFeat->fVmxSecondaryExitCtls & EmuFeat.fVmxSecondaryExitCtls );
+ pGuestFeat->fVmxExitSaveEferLma = (pBaseFeat->fVmxExitSaveEferLma & EmuFeat.fVmxExitSaveEferLma );
+ pGuestFeat->fVmxPt = (pBaseFeat->fVmxPt & EmuFeat.fVmxPt );
+ pGuestFeat->fVmxVmwriteAll = (pBaseFeat->fVmxVmwriteAll & EmuFeat.fVmxVmwriteAll );
+ pGuestFeat->fVmxEntryInjectSoftInt = (pBaseFeat->fVmxEntryInjectSoftInt & EmuFeat.fVmxEntryInjectSoftInt );
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+    /* Don't expose the VMX-preemption timer if the host is subject to the VMX-preemption timer erratum. */
+ if ( pGuestFeat->fVmxPreemptTimer
+ && HMIsSubjectToVmxPreemptTimerErratum())
+ {
+ LogRel(("CPUM: Warning! VMX-preemption timer not exposed to guest due to host CPU erratum\n"));
+ pGuestFeat->fVmxPreemptTimer = 0;
+ pGuestFeat->fVmxSavePreemptTimer = 0;
+ }
+#endif
+
+ /* Sanity checking. */
+ if (!pGuestFeat->fVmxSecondaryExecCtls)
+ {
+ Assert(!pGuestFeat->fVmxVirtApicAccess);
+ Assert(!pGuestFeat->fVmxEpt);
+ Assert(!pGuestFeat->fVmxDescTableExit);
+ Assert(!pGuestFeat->fVmxRdtscp);
+ Assert(!pGuestFeat->fVmxVirtX2ApicMode);
+ Assert(!pGuestFeat->fVmxVpid);
+ Assert(!pGuestFeat->fVmxWbinvdExit);
+ Assert(!pGuestFeat->fVmxUnrestrictedGuest);
+ Assert(!pGuestFeat->fVmxApicRegVirt);
+ Assert(!pGuestFeat->fVmxVirtIntDelivery);
+ Assert(!pGuestFeat->fVmxPauseLoopExit);
+ Assert(!pGuestFeat->fVmxRdrandExit);
+ Assert(!pGuestFeat->fVmxInvpcid);
+ Assert(!pGuestFeat->fVmxVmFunc);
+ Assert(!pGuestFeat->fVmxVmcsShadowing);
+ Assert(!pGuestFeat->fVmxRdseedExit);
+ Assert(!pGuestFeat->fVmxPml);
+ Assert(!pGuestFeat->fVmxEptXcptVe);
+ Assert(!pGuestFeat->fVmxConcealVmxFromPt);
+ Assert(!pGuestFeat->fVmxXsavesXrstors);
+ Assert(!pGuestFeat->fVmxModeBasedExecuteEpt);
+ Assert(!pGuestFeat->fVmxSppEpt);
+ Assert(!pGuestFeat->fVmxPtEpt);
+ Assert(!pGuestFeat->fVmxUseTscScaling);
+ Assert(!pGuestFeat->fVmxUserWaitPause);
+ Assert(!pGuestFeat->fVmxEnclvExit);
+ }
+ else if (pGuestFeat->fVmxUnrestrictedGuest)
+ {
+ /* See footnote in Intel spec. 27.2 "Recording VM-Exit Information And Updating VM-entry Control Fields". */
+ Assert(pGuestFeat->fVmxExitSaveEferLma);
+ /* Unrestricted guest execution requires EPT. See Intel spec. 25.2.1.1 "VM-Execution Control Fields". */
+ Assert(pGuestFeat->fVmxEpt);
+ }
+
+ if (!pGuestFeat->fVmxTertiaryExecCtls)
+ Assert(!pGuestFeat->fVmxLoadIwKeyExit);
+
+ /*
+ * Finally initialize the VMX guest MSRs.
+ */
+ cpumR3InitVmxGuestMsrs(pVM, pHostVmxMsrs, pGuestFeat, pGuestVmxMsrs);
+}
+
+
+/**
+ * Gets the host hardware-virtualization MSRs.
+ *
+ * @returns VBox status code.
+ * @param pMsrs Where to store the MSRs.
+ */
+static int cpumR3GetHostHwvirtMsrs(PCPUMMSRS pMsrs)
+{
+ Assert(pMsrs);
+
+ uint32_t fCaps = 0;
+ int rc = SUPR3QueryVTCaps(&fCaps);
+ if (RT_SUCCESS(rc))
+ {
+ if (fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V))
+ {
+ SUPHWVIRTMSRS HwvirtMsrs;
+ rc = SUPR3GetHwvirtMsrs(&HwvirtMsrs, false /* fForceRequery */);
+ if (RT_SUCCESS(rc))
+ {
+ if (fCaps & SUPVTCAPS_VT_X)
+ HMGetVmxMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.vmx);
+ else
+ HMGetSvmMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.svm);
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("CPUM: Querying hardware-virtualization MSRs failed. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ LogRel(("CPUM: Querying hardware-virtualization capability succeeded but did not find VT-x or AMD-V\n"));
+ return VERR_INTERNAL_ERROR_5;
+ }
+
+ LogRel(("CPUM: No hardware-virtualization capability detected\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNTMTIMERINT,
+ *      Callback that fires when the nested VMX-preemption timer expires.}
+ */
+static DECLCALLBACK(void) cpumR3VmxPreemptTimerCallback(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ RT_NOREF(pVM, hTimer);
+ PVMCPU pVCpu = (PVMCPUR3)pvUser;
+ AssertPtr(pVCpu);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
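+    /* The force-flag is picked up by the execution loop, which is expected to
+       emulate the nested VMX-preemption timer VM-exit; the consumer side is
+       outside this function. */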
+}
+
+
+/**
+ * Initializes the CPUM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) CPUMR3Init(PVM pVM)
+{
+ LogFlow(("CPUMR3Init\n"));
+
+ /*
+ * Assert alignment, sizes and tables.
+ */
+ AssertCompileMemberAlignment(VM, cpum.s, 32);
+ AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
+ AssertCompileSizeAlignment(CPUMCTX, 64);
+ AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
+ AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
+ AssertCompileMemberAlignment(VM, cpum, 64);
+ AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
+#ifdef VBOX_STRICT
+ int rc2 = cpumR3MsrStrictInitChecks();
+ AssertRCReturn(rc2, rc2);
+#endif
+
+ /*
+ * Gather info about the host CPU.
+ */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ if (!ASMHasCpuId())
+ {
+ LogRel(("The CPU doesn't support CPUID!\n"));
+ return VERR_UNSUPPORTED_CPU;
+ }
+
+ pVM->cpum.s.fHostMxCsrMask = CPUMR3DeterminHostMxCsrMask();
+#endif
+
+ CPUMMSRS HostMsrs;
+ RT_ZERO(HostMsrs);
+ int rc = cpumR3GetHostHwvirtMsrs(&HostMsrs);
+ AssertLogRelRCReturn(rc, rc);
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ /* Use the host features detected by CPUMR0ModuleInit if available. */
+ if (pVM->cpum.s.HostFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID)
+ g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures;
+ else
+ {
+ PCPUMCPUIDLEAF paLeaves;
+ uint32_t cLeaves;
+ rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = cpumCpuIdExplodeFeaturesX86(paLeaves, cLeaves, &HostMsrs, &g_CpumHostFeatures.s);
+ RTMemFree(paLeaves);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ pVM->cpum.s.HostFeatures = g_CpumHostFeatures.s;
+ pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
+#endif
+
+ /*
+ * Check that the CPU supports the minimum features we require.
+ */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ if (!pVM->cpum.s.HostFeatures.fFxSaveRstor)
+ return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support the FXSAVE/FXRSTOR instruction.");
+ if (!pVM->cpum.s.HostFeatures.fMmx)
+ return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support MMX.");
+ if (!pVM->cpum.s.HostFeatures.fTsc)
+ return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support RDTSC.");
+#endif
+
+ /*
+ * Setup the CR4 AND and OR masks used in the raw-mode switcher.
+ */
+ pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
+ pVM->cpum.s.CR4.OrMask = X86_CR4_OSFXSR;
+
+ /*
+ * Figure out which XSAVE/XRSTOR features are available on the host.
+ */
+ uint64_t fXcr0Host = 0;
+ uint64_t fXStateHostMask = 0;
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ if ( pVM->cpum.s.HostFeatures.fXSaveRstor
+ && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor)
+ {
+ fXStateHostMask = fXcr0Host = ASMGetXcr0();
+ fXStateHostMask &= XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI;
+ AssertLogRelMsgStmt((fXStateHostMask & (XSAVE_C_X87 | XSAVE_C_SSE)) == (XSAVE_C_X87 | XSAVE_C_SSE),
+ ("%#llx\n", fXStateHostMask), fXStateHostMask = 0);
+ }
+#endif
+ pVM->cpum.s.fXStateHostMask = fXStateHostMask;
+ LogRel(("CPUM: fXStateHostMask=%#llx; initial: %#llx; host XCR0=%#llx\n",
+ pVM->cpum.s.fXStateHostMask, fXStateHostMask, fXcr0Host));
+
+    /*
+     * Validate the host's maximum extended state size against our preallocated
+     * buffers and initialize the per-VCPU host XSAVE/XRSTOR mask.
+     */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.cbMaxExtendedState;
+ cbMaxXState = RT_ALIGN(cbMaxXState, 128);
+ AssertLogRelReturn( pVM->cpum.s.HostFeatures.cbMaxExtendedState >= sizeof(X86FXSTATE)
+ && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
+ && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
+ , VERR_CPUM_IPE_2);
+#endif
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ pVCpu->cpum.s.Host.fXStateMask = fXStateHostMask;
+ pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
+ }
+
+ /*
+ * Register saved state data item.
+ */
+ rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
+ NULL, cpumR3LiveExec, NULL,
+ NULL, cpumR3SaveExec, NULL,
+ cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Register info handlers and registers with the debugger facility.
+ */
+ DBGFR3InfoRegisterInternalEx(pVM, "cpum", "Displays the all the cpu states.",
+ &cpumR3InfoAll, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "cpumguest", "Displays the guest cpu state.",
+ &cpumR3InfoGuest, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "cpumguesthwvirt", "Displays the guest hwvirt. cpu state.",
+ &cpumR3InfoGuestHwvirt, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "cpumhyper", "Displays the hypervisor cpu state.",
+ &cpumR3InfoHyper, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "cpumhost", "Displays the host cpu state.",
+ &cpumR3InfoHost, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternalEx(pVM, "cpumguestinstr", "Displays the current guest instruction.",
+ &cpumR3InfoGuestInstr, DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternal( pVM, "cpuid", "Displays the guest cpuid leaves.",
+ &cpumR3CpuIdInfo);
+ DBGFR3InfoRegisterInternal( pVM, "cpumvmxfeat", "Displays the host and guest VMX hwvirt. features.",
+ &cpumR3InfoVmxFeatures);
+
+ rc = cpumR3DbgInit(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ /*
+ * Check if we need to workaround partial/leaky FPU handling.
+ */
+ cpumR3CheckLeakyFpu(pVM);
+#endif
+
+ /*
+ * Initialize the Guest CPUID and MSR states.
+ */
+ rc = cpumR3InitCpuIdAndMsrs(pVM, &HostMsrs);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Generate the RFLAGS cookie.
+ */
+ pVM->cpum.s.fReservedRFlagsCookie = RTRandU64() & ~(CPUMX86EFLAGS_HW_MASK_64 | CPUMX86EFLAGS_INT_MASK_64);
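+    /* The cookie randomizes the RFLAGS bits that are neither hardware-defined
+       nor used internally; the saved-state load path ORs it back in (see
+       cpumR3LoadExec) so restored contexts carry the same reserved-bit pattern. */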
+
+ /*
+ * Init the VMX/SVM state.
+ *
+     * This must be done after initializing CPUID/MSR features as we access
+     * the VMX/SVM guest features below.
+ *
+ * In the case of nested VT-x, we also need to create the per-VCPU
+ * VMX preemption timers.
+ */
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ cpumR3InitVmxHwVirtState(pVM);
+ else if (pVM->cpum.s.GuestFeatures.fSvm)
+ cpumR3InitSvmHwVirtState(pVM);
+ else
+ Assert(pVM->apCpusR3[0]->cpum.s.Guest.hwvirt.enmHwvirt == CPUMHWVIRT_NONE);
+
+ /*
+ * Initialize the general guest CPU state.
+ */
+ CPUMR3Reset(pVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * The CPUM will update the addresses used by the switcher.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Terminates the CPUM.
+ *
+ * Termination means cleaning up and freeing all resources;
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) CPUMR3Term(PVM pVM)
+{
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
+ pVCpu->cpum.s.uMagic = 0;
+        pVCpu->cpum.s.Guest.dr[5] = 0;
+ }
+#endif
+
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ if (pVCpu->cpum.s.hNestedVmxPreemptTimer != NIL_TMTIMERHANDLE)
+ {
+ int rc = TMR3TimerDestroy(pVM, pVCpu->cpum.s.hNestedVmxPreemptTimer); AssertRC(rc);
+ pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
+ }
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used by CPUMR3Reset and CPU hot plugging.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the CPU that is
+ * being reset. This may differ from the current EMT.
+ */
+VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
+{
+ /** @todo anything different for VCPU > 0? */
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+
+ /*
+ * Initialize everything to ZERO first.
+ */
+ uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
+
+ RT_BZERO(pCtx, RT_UOFFSETOF(CPUMCTX, aoffXState));
+
+ pVCpu->cpum.s.fUseFlags = fUseFlags;
+
+ pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
+ pCtx->eip = 0x0000fff0;
+ pCtx->edx = 0x00000600; /* P6 processor */
+
+ Assert((pVM->cpum.s.fReservedRFlagsCookie & (X86_EFL_LIVE_MASK | X86_EFL_RAZ_LO_MASK | X86_EFL_RA1_MASK)) == 0);
+ pCtx->rflags.uBoth = pVM->cpum.s.fReservedRFlagsCookie | X86_EFL_RA1_MASK;
+
+ pCtx->cs.Sel = 0xf000;
+ pCtx->cs.ValidSel = 0xf000;
+ pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->cs.u64Base = UINT64_C(0xffff0000);
+ pCtx->cs.u32Limit = 0x0000ffff;
+ pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->cs.Attr.n.u1Present = 1;
+ pCtx->cs.Attr.n.u4Type = X86_SEL_TYPE_ER_ACC;
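+    /* Type 0xb: execute/read code segment, accessed. */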
+
+ pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->ds.u32Limit = 0x0000ffff;
+ pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->ds.Attr.n.u1Present = 1;
+ pCtx->ds.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
+
+ pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->es.u32Limit = 0x0000ffff;
+ pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->es.Attr.n.u1Present = 1;
+ pCtx->es.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
+
+ pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->fs.u32Limit = 0x0000ffff;
+ pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->fs.Attr.n.u1Present = 1;
+ pCtx->fs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
+
+ pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->gs.u32Limit = 0x0000ffff;
+ pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->gs.Attr.n.u1Present = 1;
+ pCtx->gs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
+
+ pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->ss.u32Limit = 0x0000ffff;
+ pCtx->ss.Attr.n.u1Present = 1;
+ pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
+ pCtx->ss.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
+
+ pCtx->idtr.cbIdt = 0xffff;
+ pCtx->gdtr.cbGdt = 0xffff;
+
+ pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->ldtr.u32Limit = 0xffff;
+ pCtx->ldtr.Attr.n.u1Present = 1;
+ pCtx->ldtr.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
+
+ pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->tr.u32Limit = 0xffff;
+ pCtx->tr.Attr.n.u1Present = 1;
+ pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */
+
+ pCtx->dr[6] = X86_DR6_INIT_VAL;
+ pCtx->dr[7] = X86_DR7_INIT_VAL;
+
+ PX86FXSTATE pFpuCtx = &pCtx->XState.x87;
+    pFpuCtx->FTW = 0x00; /* All empty (abridged tag reg edition). */
+ pFpuCtx->FCW = 0x37f;
+
+ /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
+ IA-32 Processor States Following Power-up, Reset, or INIT */
+ pFpuCtx->MXCSR = 0x1F80;
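+    /* 0x1F80 = all six exception mask bits (IM..PM) set, status flags clear,
+       round-to-nearest, FZ and DAZ off. */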
+ pFpuCtx->MXCSR_MASK = pVM->cpum.s.GuestInfo.fMxCsrMask; /** @todo check if REM messes this up... */
+
+ pCtx->aXcr[0] = XSAVE_C_X87;
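+    /* Architecturally XCR0 is 1 (x87 state only) following reset. */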
+ if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
+ {
+ /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
+       as we don't know what happened before. (Bother to optimize later?) */
+ pCtx->XState.Hdr.bmXState = XSAVE_C_X87 | XSAVE_C_SSE;
+ }
+
+ /*
+ * MSRs.
+ */
+ /* Init PAT MSR */
+ pCtx->msrPAT = MSR_IA32_CR_PAT_INIT_VAL;
+
+ /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
+ * The Intel docs don't mention it. */
+ Assert(!pCtx->msrEFER);
+
+ /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
+       is supposed to be here, just trying to provide useful/sensible values. */
+ PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
+ if (pRange)
+ {
+ pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+ | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
+ | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
+ | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
+ pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+ | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+ pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
+ }
+
+ /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
+
+ /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
+ * called from each EMT while we're getting called by CPUMR3Reset()
+ * iteratively on the same thread. Fix later. */
+#if 0 /** @todo r=bird: This we will do in TM, not here. */
+ /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
+ CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
+#endif
+
+
+ /* C-state control. Guesses. */
+ pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
+ /* For Nehalem+ and Atoms, the 0xE2 MSR (MSR_PKG_CST_CONFIG_CONTROL) is documented. For Core 2,
+ * it's undocumented but exists as MSR_PMG_CST_CONFIG_CONTROL and has similar but not identical
+ * functionality. The default value must be different due to incompatible write mask.
+ */
+ if (CPUMMICROARCH_IS_INTEL_CORE2(pVM->cpum.s.GuestFeatures.enmMicroarch))
+ pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 0x202a01; /* From Mac Pro Harpertown, unlocked. */
+ else if (pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Intel_Core_Yonah)
+ pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 0x26740c; /* From MacBookPro1,1. */
+
+ /*
+ * Hardware virtualization state.
+ */
+ CPUMSetGuestGif(pCtx, true);
+ Assert(!pVM->cpum.s.GuestFeatures.fVmx || !pVM->cpum.s.GuestFeatures.fSvm); /* Paranoia. */
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ cpumR3ResetVmxHwVirtState(pVCpu);
+ else if (pVM->cpum.s.GuestFeatures.fSvm)
+ cpumR3ResetSvmHwVirtState(pVCpu);
+}
+
+
+/**
+ * Resets the CPU.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) CPUMR3Reset(PVM pVM)
+{
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ CPUMR3ResetCpu(pVM, pVCpu);
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+
+ /* Magic marker for searching in crash dumps. */
+ strcpy((char *)pVCpu->.cpum.s.aMagic, "CPUMCPU Magic");
+ pVCpu->cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
+ pVCpu->cpum.s.Guest->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
+#endif
+ }
+}
+
+
+/**
+ * Pass 0 live exec callback.
+ *
+ * @returns VINF_SSM_DONT_CALL_AGAIN.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uPass The pass (0).
+ */
+static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
+ cpumR3SaveCpuId(pVM, pSSM);
+ return VINF_SSM_DONT_CALL_AGAIN;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Save.
+ */
+ SSMR3PutU32(pSSM, pVM->cCpus);
+ SSMR3PutU32(pSSM, sizeof(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr));
+ CPUMCTX DummyHyperCtx;
+ RT_ZERO(DummyHyperCtx);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
+ PCPUMCTX const pGstCtx = &pVCpu->cpum.s.Guest;
+
+ /** @todo ditch this the next time we change the saved state. */
+ SSMR3PutStructEx(pSSM, &DummyHyperCtx, sizeof(DummyHyperCtx), 0, g_aCpumCtxFields, NULL);
+
+ uint64_t const fSavedRFlags = pGstCtx->rflags.uBoth;
+ pGstCtx->rflags.uBoth &= CPUMX86EFLAGS_HW_MASK_64; /* Temporarily clear the non-hardware bits in RFLAGS while saving. */
+ SSMR3PutStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFields, NULL);
+ pGstCtx->rflags.uBoth = fSavedRFlags;
+
+ SSMR3PutStructEx(pSSM, &pGstCtx->XState.x87, sizeof(pGstCtx->XState.x87), 0, g_aCpumX87Fields, NULL);
+ if (pGstCtx->fXStateMask != 0)
+ SSMR3PutStructEx(pSSM, &pGstCtx->XState.Hdr, sizeof(pGstCtx->XState.Hdr), 0, g_aCpumXSaveHdrFields, NULL);
+ if (pGstCtx->fXStateMask & XSAVE_C_YMM)
+ {
+ PCX86XSAVEYMMHI pYmmHiCtx = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
+ SSMR3PutStructEx(pSSM, pYmmHiCtx, sizeof(*pYmmHiCtx), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumYmmHiFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_BNDREGS)
+ {
+ PCX86XSAVEBNDREGS pBndRegs = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_BNDREGS_BIT, PCX86XSAVEBNDREGS);
+ SSMR3PutStructEx(pSSM, pBndRegs, sizeof(*pBndRegs), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumBndRegsFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_BNDCSR)
+ {
+ PCX86XSAVEBNDCFG pBndCfg = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_BNDCSR_BIT, PCX86XSAVEBNDCFG);
+ SSMR3PutStructEx(pSSM, pBndCfg, sizeof(*pBndCfg), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumBndCfgFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_ZMM_HI256)
+ {
+ PCX86XSAVEZMMHI256 pZmmHi256 = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_HI256_BIT, PCX86XSAVEZMMHI256);
+ SSMR3PutStructEx(pSSM, pZmmHi256, sizeof(*pZmmHi256), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmmHi256Fields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_ZMM_16HI)
+ {
+ PCX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_16HI_BIT, PCX86XSAVEZMM16HI);
+ SSMR3PutStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
+ }
+ SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[0].u);
+ SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[1].u);
+ SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[2].u);
+ SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[3].u);
+ if (pVM->cpum.s.GuestFeatures.fSvm)
+ {
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.svm.uMsrHSavePa);
+ SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.svm.GCPhysVmcb);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.svm.uPrevPauseTick);
+ SSMR3PutU16(pSSM, pGstCtx->hwvirt.svm.cPauseFilter);
+ SSMR3PutU16(pSSM, pGstCtx->hwvirt.svm.cPauseFilterThreshold);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.svm.fInterceptEvents);
+ SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 0 /* fFlags */,
+ g_aSvmHwvirtHostState, NULL /* pvUser */);
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.Vmcb, sizeof(pGstCtx->hwvirt.svm.Vmcb));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.abIoBitmap[0], sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
+            /* This is saved in the old VMCPU_FF format. Change if more flags are added. */
+ SSMR3PutU32(pSSM, pGstCtx->hwvirt.fSavedInhibit & CPUMCTX_INHIBIT_NMI ? CPUM_OLD_VMCPU_FF_BLOCK_NMIS : 0);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.fGif);
+ }
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ {
+ SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmxon);
+ SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmcs);
+ SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxRootMode);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInterceptEvents);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
+ SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs), 0, g_aVmxHwvirtVmcs, NULL);
+ SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.vmx.ShadowVmcs, sizeof(pGstCtx->hwvirt.vmx.ShadowVmcs),
+ 0, g_aVmxHwvirtVmcs, NULL);
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.abVmreadBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abVmreadBitmap));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.abVmwriteBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abVmwriteBitmap));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.aEntryMsrLoadArea[0], sizeof(pGstCtx->hwvirt.vmx.aEntryMsrLoadArea));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.aExitMsrStoreArea[0], sizeof(pGstCtx->hwvirt.vmx.aExitMsrStoreArea));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.aExitMsrLoadArea[0], sizeof(pGstCtx->hwvirt.vmx.aExitMsrLoadArea));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abMsrBitmap));
+ SSMR3PutMem(pSSM, &pGstCtx->hwvirt.vmx.abIoBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abIoBitmap));
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uPrevPauseTick);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uEntryTick);
+ SSMR3PutU16(pSSM, pGstCtx->hwvirt.vmx.offVirtApicWrite);
+ SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
+            SSMR3PutU64(pSSM, MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON); /* Deprecated since 2021/09/22. Value kept backwards compatible with 6.1.26. */
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Basic);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Misc);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64ProcCtls3);
+ SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64ExitCtls2);
+ }
+ SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
+ SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
+ AssertCompileSizeAlignment(pVCpu->cpum.s.GuestMsrs.msr, sizeof(uint64_t));
+ SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVCpu->cpum.s.GuestMsrs.msr));
+ }
+
+ cpumR3SaveCpuId(pVM, pSSM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADPREP}
+ */
+static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
+{
+ NOREF(pSSM);
+ pVM->cpum.s.fPendingRestore = true;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADEXEC}
+ */
+static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ int rc; /* Only for AssertRCReturn use. */
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3
+ && uVersion != CPUM_SAVED_STATE_VERSION_PAE_PDPES
+ && uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2
+ && uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX
+ && uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
+ && uVersion != CPUM_SAVED_STATE_VERSION_XSAVE
+ && uVersion != CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT
+ && uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
+ && uVersion != CPUM_SAVED_STATE_VERSION_PUT_STRUCT
+ && uVersion != CPUM_SAVED_STATE_VERSION_MEM
+ && uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
+ && uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
+ && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
+ && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
+ && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
+ && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
+ {
+ AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ if (uPass == SSM_PASS_FINAL)
+ {
+ /*
+ * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
+ * really old SSM file versions.)
+ */
+ if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
+ SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
+ else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
+ SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR));
+
+ /*
+ * Figure x86 and ctx field definitions to use for older states.
+ */
+ uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
+ PCSSMFIELD paCpumCtx1Fields = g_aCpumX87Fields;
+ PCSSMFIELD paCpumCtx2Fields = g_aCpumCtxFields;
+ if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
+ {
+ paCpumCtx1Fields = g_aCpumX87FieldsV16;
+ paCpumCtx2Fields = g_aCpumCtxFieldsV16;
+ }
+ else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
+ {
+ paCpumCtx1Fields = g_aCpumX87FieldsMem;
+ paCpumCtx2Fields = g_aCpumCtxFieldsMem;
+ }
+
+ /*
+             * The hyper state used to precede the CPU count. Starting with
+             * XSAVE it was moved down to after we've got the count.
+ */
+ CPUMCTX HyperCtxIgnored;
+ if (uVersion < CPUM_SAVED_STATE_VERSION_XSAVE)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ X86FXSTATE Ign;
+ SSMR3GetStructEx(pSSM, &Ign, sizeof(Ign), fLoad | SSMSTRUCT_FLAGS_NO_TAIL_MARKER, paCpumCtx1Fields, NULL);
+ SSMR3GetStructEx(pSSM, &HyperCtxIgnored, sizeof(HyperCtxIgnored),
+ fLoad | SSMSTRUCT_FLAGS_NO_LEAD_MARKER, paCpumCtx2Fields, NULL);
+ }
+ }
+
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
+ {
+ uint32_t cCpus;
+ rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
+ AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
+ VERR_SSM_UNEXPECTED_DATA);
+ }
+ AssertLogRelMsgReturn( uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
+ || pVM->cCpus == 1,
+ ("cCpus=%u\n", pVM->cCpus),
+ VERR_SSM_UNEXPECTED_DATA);
+
+ uint32_t cbMsrs = 0;
+ if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
+ {
+ rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
+ AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
+ VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
+ VERR_SSM_UNEXPECTED_DATA);
+ }
+
+ /*
+ * Do the per-CPU restoring.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PCPUMCTX pGstCtx = &pVCpu->cpum.s.Guest;
+
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_XSAVE)
+ {
+ /*
+ * The XSAVE saved state layout moved the hyper state down here.
+ */
+ rc = SSMR3GetStructEx(pSSM, &HyperCtxIgnored, sizeof(HyperCtxIgnored), 0, g_aCpumCtxFields, NULL);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Start by restoring the CPUMCTX structure and the X86FXSAVE bits of the extended state.
+ */
+ rc = SSMR3GetStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFields, NULL);
+ rc = SSMR3GetStructEx(pSSM, &pGstCtx->XState.x87, sizeof(pGstCtx->XState.x87), 0, g_aCpumX87Fields, NULL);
+ AssertRCReturn(rc, rc);
+
+ /* Check that the xsave/xrstor mask is valid (invalid results in #GP). */
+ if (pGstCtx->fXStateMask != 0)
+ {
+ AssertLogRelMsgReturn(!(pGstCtx->fXStateMask & ~pVM->cpum.s.fXStateGuestMask),
+ ("fXStateMask=%#RX64 fXStateGuestMask=%#RX64\n",
+ pGstCtx->fXStateMask, pVM->cpum.s.fXStateGuestMask),
+ VERR_CPUM_INCOMPATIBLE_XSAVE_COMP_MASK);
+ AssertLogRelMsgReturn(pGstCtx->fXStateMask & XSAVE_C_X87,
+ ("fXStateMask=%#RX64\n", pGstCtx->fXStateMask), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ AssertLogRelMsgReturn((pGstCtx->fXStateMask & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM,
+ ("fXStateMask=%#RX64\n", pGstCtx->fXStateMask), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ AssertLogRelMsgReturn( (pGstCtx->fXStateMask & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
+ || (pGstCtx->fXStateMask & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
+ == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI),
+ ("fXStateMask=%#RX64\n", pGstCtx->fXStateMask), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ }
+
+ /* Check that the XCR0 mask is valid (invalid results in #GP). */
+ AssertLogRelMsgReturn(pGstCtx->aXcr[0] & XSAVE_C_X87, ("xcr0=%#RX64\n", pGstCtx->aXcr[0]), VERR_CPUM_INVALID_XCR0);
+ if (pGstCtx->aXcr[0] != XSAVE_C_X87)
+ {
+ AssertLogRelMsgReturn(!(pGstCtx->aXcr[0] & ~(pGstCtx->fXStateMask | XSAVE_C_X87)),
+ ("xcr0=%#RX64 fXStateMask=%#RX64\n", pGstCtx->aXcr[0], pGstCtx->fXStateMask),
+ VERR_CPUM_INVALID_XCR0);
+ AssertLogRelMsgReturn(pGstCtx->aXcr[0] & XSAVE_C_X87,
+ ("xcr0=%#RX64\n", pGstCtx->aXcr[0]), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ AssertLogRelMsgReturn((pGstCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM,
+ ("xcr0=%#RX64\n", pGstCtx->aXcr[0]), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ AssertLogRelMsgReturn( (pGstCtx->aXcr[0] & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
+ || (pGstCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
+ == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI),
+ ("xcr0=%#RX64\n", pGstCtx->aXcr[0]), VERR_CPUM_INVALID_XSAVE_COMP_MASK);
+ }
+
+ /* Check that the XCR1 is zero, as we don't implement it yet. */
+ AssertLogRelMsgReturn(!pGstCtx->aXcr[1], ("xcr1=%#RX64\n", pGstCtx->aXcr[1]), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /*
+ * Restore the individual extended state components we support.
+ */
+ if (pGstCtx->fXStateMask != 0)
+ {
+ rc = SSMR3GetStructEx(pSSM, &pGstCtx->XState.Hdr, sizeof(pGstCtx->XState.Hdr),
+ 0, g_aCpumXSaveHdrFields, NULL);
+ AssertRCReturn(rc, rc);
+ AssertLogRelMsgReturn(!(pGstCtx->XState.Hdr.bmXState & ~pGstCtx->fXStateMask),
+ ("bmXState=%#RX64 fXStateMask=%#RX64\n",
+ pGstCtx->XState.Hdr.bmXState, pGstCtx->fXStateMask),
+ VERR_CPUM_INVALID_XSAVE_HDR);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_YMM)
+ {
+ PX86XSAVEYMMHI pYmmHiCtx = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
+ SSMR3GetStructEx(pSSM, pYmmHiCtx, sizeof(*pYmmHiCtx), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumYmmHiFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_BNDREGS)
+ {
+ PX86XSAVEBNDREGS pBndRegs = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_BNDREGS_BIT, PX86XSAVEBNDREGS);
+ SSMR3GetStructEx(pSSM, pBndRegs, sizeof(*pBndRegs), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumBndRegsFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_BNDCSR)
+ {
+ PX86XSAVEBNDCFG pBndCfg = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_BNDCSR_BIT, PX86XSAVEBNDCFG);
+ SSMR3GetStructEx(pSSM, pBndCfg, sizeof(*pBndCfg), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumBndCfgFields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_ZMM_HI256)
+ {
+ PX86XSAVEZMMHI256 pZmmHi256 = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_HI256_BIT, PX86XSAVEZMMHI256);
+ SSMR3GetStructEx(pSSM, pZmmHi256, sizeof(*pZmmHi256), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmmHi256Fields, NULL);
+ }
+ if (pGstCtx->fXStateMask & XSAVE_C_ZMM_16HI)
+ {
+ PX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_16HI_BIT, PX86XSAVEZMM16HI);
+ SSMR3GetStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
+ }
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_PAE_PDPES)
+ {
+ SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[0].u);
+ SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[1].u);
+ SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[2].u);
+ SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[3].u);
+ }
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_SVM)
+ {
+ if (pVM->cpum.s.GuestFeatures.fSvm)
+ {
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.svm.uMsrHSavePa);
+ SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.svm.GCPhysVmcb);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.svm.uPrevPauseTick);
+ SSMR3GetU16(pSSM, &pGstCtx->hwvirt.svm.cPauseFilter);
+ SSMR3GetU16(pSSM, &pGstCtx->hwvirt.svm.cPauseFilterThreshold);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.svm.fInterceptEvents);
+ SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState),
+ 0 /* fFlags */, g_aSvmHwvirtHostState, NULL /* pvUser */);
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.Vmcb, sizeof(pGstCtx->hwvirt.svm.Vmcb));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.abIoBitmap[0], sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
+
+ uint32_t fSavedLocalFFs = 0;
+ rc = SSMR3GetU32(pSSM, &fSavedLocalFFs);
+ AssertRCReturn(rc, rc);
+ Assert(fSavedLocalFFs == 0 || fSavedLocalFFs == CPUM_OLD_VMCPU_FF_BLOCK_NMIS);
+ pGstCtx->hwvirt.fSavedInhibit = fSavedLocalFFs & CPUM_OLD_VMCPU_FF_BLOCK_NMIS ? CPUMCTX_INHIBIT_NMI : 0;
+
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.fGif);
+ }
+ }
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX)
+ {
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ {
+ SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmxon);
+ SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmcs);
+ SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxRootMode);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInterceptEvents);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
+ SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs),
+ 0, g_aVmxHwvirtVmcs, NULL);
+ SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.vmx.ShadowVmcs, sizeof(pGstCtx->hwvirt.vmx.ShadowVmcs),
+ 0, g_aVmxHwvirtVmcs, NULL);
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.abVmreadBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abVmreadBitmap));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.abVmwriteBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abVmwriteBitmap));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.aEntryMsrLoadArea[0], sizeof(pGstCtx->hwvirt.vmx.aEntryMsrLoadArea));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.aExitMsrStoreArea[0], sizeof(pGstCtx->hwvirt.vmx.aExitMsrStoreArea));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.aExitMsrLoadArea[0], sizeof(pGstCtx->hwvirt.vmx.aExitMsrLoadArea));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abMsrBitmap));
+ SSMR3GetMem(pSSM, &pGstCtx->hwvirt.vmx.abIoBitmap[0], sizeof(pGstCtx->hwvirt.vmx.abIoBitmap));
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uPrevPauseTick);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uEntryTick);
+ SSMR3GetU16(pSSM, &pGstCtx->hwvirt.vmx.offVirtApicWrite);
+ SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
+ SSMR3Skip(pSSM, sizeof(uint64_t)); /* Unused - used to be IA32_FEATURE_CONTROL, see @bugref{10106}. */
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Basic);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Misc);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2)
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64ProcCtls3);
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3)
+ SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64ExitCtls2);
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Pre XSAVE saved state.
+ */
+ SSMR3GetStructEx(pSSM, &pGstCtx->XState.x87, sizeof(pGstCtx->XState.x87),
+ fLoad | SSMSTRUCT_FLAGS_NO_TAIL_MARKER, paCpumCtx1Fields, NULL);
+ SSMR3GetStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), fLoad | SSMSTRUCT_FLAGS_NO_LEAD_MARKER, paCpumCtx2Fields, NULL);
+ }
+
+ /*
+ * Restore a couple of flags and the MSRs.
+ */
+ uint32_t fIgnoredUsedFlags = 0;
+        rc = SSMR3GetU32(pSSM, &fIgnoredUsedFlags); /* we recalculate the two relevant flags after loading the state. */
+ AssertRCReturn(rc, rc);
+ SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
+
+ rc = VINF_SUCCESS;
+ if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
+ rc = SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], cbMsrs);
+ else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
+ {
+ SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
+ rc = SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
+ }
+ AssertRCReturn(rc, rc);
+
+ /* Deal with the reusing of reserved RFLAGS bits. */
+ pGstCtx->rflags.uBoth |= pVM->cpum.s.fReservedRFlagsCookie;
+
+ /* REM and others may have cleared must-be-one bits in DR6 and
+ DR7; fix these. */
+ pGstCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
+ pGstCtx->dr[6] |= X86_DR6_RA1_MASK;
+ pGstCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
+ pGstCtx->dr[7] |= X86_DR7_RA1_MASK;
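+ /* For example (illustrative only): a DR6 of 0 from an old state becomes
+ X86_DR6_RA1_MASK (0xffff0ff0) after the fixup above, and DR7 gets its
+ must-be-one bit 10 (X86_DR7_RA1_MASK) set. */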
+ }
+
+ /* Older states do not have the internal selector register flags
+ and valid selector values. Supply those. */
+ if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ bool const fValid = true /*!VM_IS_RAW_MODE_ENABLED(pVM)*/
+ || ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
+ && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
+ PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
+ if (fValid)
+ {
+ for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
+ {
+ paSelReg[iSelReg].fFlags = CPUMSELREG_FLAGS_VALID;
+ paSelReg[iSelReg].ValidSel = paSelReg[iSelReg].Sel;
+ }
+
+ pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
+ pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
+ }
+ else
+ {
+ for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
+ {
+ paSelReg[iSelReg].fFlags = 0;
+ paSelReg[iSelReg].ValidSel = 0;
+ }
+
+ /* This might not be 104% correct, but I think it's close
+ enough for all practical purposes... (REM always loaded
+ LDTR registers.) */
+ pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
+ pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
+ }
+ pVCpu->cpum.s.Guest.tr.fFlags = CPUMSELREG_FLAGS_VALID;
+ pVCpu->cpum.s.Guest.tr.ValidSel = pVCpu->cpum.s.Guest.tr.Sel;
+ }
+ }
+
+ /* Clear CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID. */
+ if ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
+ && uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.fChanged &= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
+ }
+
+ /*
+ * A quick sanity check.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.es.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.cs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ss.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ds.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.fs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ AssertLogRelReturn(!(pVCpu->cpum.s.Guest.gs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
+ }
+ }
+
+ pVM->cpum.s.fPendingRestore = false;
+
+ /*
+ * Guest CPUIDs (and VMX MSR features).
+ */
+ if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2)
+ {
+ CPUMMSRS GuestMsrs;
+ RT_ZERO(GuestMsrs);
+
+ CPUMFEATURES BaseFeatures;
+ bool const fVmxGstFeat = pVM->cpum.s.GuestFeatures.fVmx;
+ if (fVmxGstFeat)
+ {
+ /*
+ * At this point the MSRs in the guest CPU-context are loaded with the guest VMX MSRs from the saved state.
+ * However the VMX sub-features have not been exploded yet. So cache the base (host derived) VMX features
+ * here so we can compare them for compatibility after exploding guest features.
+ */
+ BaseFeatures = pVM->cpum.s.GuestFeatures;
+
+ /* Use the VMX MSR features from the saved state while exploding guest features. */
+ GuestMsrs.hwvirt.vmx = pVM->apCpusR3[0]->cpum.s.Guest.hwvirt.vmx.Msrs;
+ }
+
+ /* Load CPUID and explode guest features. */
+ rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
+ if (fVmxGstFeat)
+ {
+ /*
+ * Check if the exploded VMX features from the saved state are compatible with the host-derived features
+ * we cached earlier (above). This is required if we use hardware-assisted nested-guest execution with
+ * VMX features presented to the guest.
+ */
+ bool const fIsCompat = cpumR3AreVmxCpuFeaturesCompatible(pVM, &BaseFeatures, &pVM->cpum.s.GuestFeatures);
+ if (!fIsCompat)
+ return VERR_CPUM_INVALID_HWVIRT_FEAT_COMBO;
+ }
+ return rc;
+ }
+ return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion);
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADDONE}
+ */
+static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
+{
+ if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
+ return VINF_SUCCESS;
+
+ /* Just check this since we can. */ /** @todo Add an SSM unit flag for indicating that it's mandatory during a restore. */
+ if (pVM->cpum.s.fPendingRestore)
+ {
+ LogRel(("CPUM: Missing state!\n"));
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /* Notify PGM of the NXE states in case they've changed. */
+ PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
+
+ /* During init this is done in CPUMR3InitCompleted(). */
+ if (fSupportsLongMode)
+ pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
+
+ /* Recalc the CPUM_USE_DEBUG_REGS_HYPER value. */
+ CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the CPUM state restore is still pending.
+ *
+ * @returns true / false.
+ * @param pVM The cross context VM structure.
+ */
+VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
+{
+ return pVM->cpum.s.fPendingRestore;
+}
+
+
+/**
+ * Formats the EFLAGS value into mnemonics.
+ *
+ * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
+ * @param efl The EFLAGS value with both guest hardware and VBox
+ * internal bits included.
+ */
+static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
+{
+ /*
+ * Format the flags.
+ */
+ static const struct
+ {
+ const char *pszSet; const char *pszClear; uint32_t fFlag;
+ } s_aFlags[] =
+ {
+ { "vip",NULL, X86_EFL_VIP },
+ { "vif",NULL, X86_EFL_VIF },
+ { "ac", NULL, X86_EFL_AC },
+ { "vm", NULL, X86_EFL_VM },
+ { "rf", NULL, X86_EFL_RF },
+ { "nt", NULL, X86_EFL_NT },
+ { "ov", "nv", X86_EFL_OF },
+ { "dn", "up", X86_EFL_DF },
+ { "ei", "di", X86_EFL_IF },
+ { "tf", NULL, X86_EFL_TF },
+ { "nt", "pl", X86_EFL_SF },
+ { "nz", "zr", X86_EFL_ZF },
+ { "ac", "na", X86_EFL_AF },
+ { "po", "pe", X86_EFL_PF },
+ { "cy", "nc", X86_EFL_CF },
+ { "inh-ss", NULL, CPUMCTX_INHIBIT_SHADOW_SS },
+ { "inh-sti", NULL, CPUMCTX_INHIBIT_SHADOW_STI },
+ { "inh-nmi", NULL, CPUMCTX_INHIBIT_NMI },
+ };
+ char *psz = pszEFlags;
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
+ {
+ const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
+ if (pszAdd)
+ {
+ strcpy(psz, pszAdd);
+ psz += strlen(pszAdd);
+ *psz++ = ' ';
+ }
+ }
+ psz[-1] = '\0';
+}
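+
+/* Usage sketch (illustrative only): formatting EFLAGS 0x246 (IF, ZF and PF
+ set) with the table above yields "nv up ei pl nz na po nc":
+
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(szEFlags, 0x246);
+*/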
+
+
+/**
+ * Formats a full register dump.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCtx The context to format.
+ * @param pHlp Output functions.
+ * @param enmType The dump type.
+ * @param pszPrefix Register name prefix.
+ */
+static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
+{
+ NOREF(pVM);
+
+ /*
+ * Format the EFLAGS.
+ */
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(&szEFlags[0], pCtx->eflags.uBoth);
+
+ /*
+ * Format the registers.
+ */
+ uint32_t const efl = pCtx->eflags.u;
+ switch (enmType)
+ {
+ case CPUMDUMPTYPE_TERSE:
+ if (CPUMIsGuestIn64BitCodeEx(pCtx))
+ pHlp->pfnPrintf(pHlp,
+ "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
+ "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
+ "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
+ "%sr14=%016RX64 %sr15=%016RX64\n"
+ "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
+ "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
+ pszPrefix, pCtx->rax, pszPrefix, pCtx->rbx, pszPrefix, pCtx->rcx, pszPrefix, pCtx->rdx, pszPrefix, pCtx->rsi, pszPrefix, pCtx->rdi,
+ pszPrefix, pCtx->r8, pszPrefix, pCtx->r9, pszPrefix, pCtx->r10, pszPrefix, pCtx->r11, pszPrefix, pCtx->r12, pszPrefix, pCtx->r13,
+ pszPrefix, pCtx->r14, pszPrefix, pCtx->r15,
+ pszPrefix, pCtx->rip, pszPrefix, pCtx->rsp, pszPrefix, pCtx->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pszPrefix, pCtx->ss.Sel, pszPrefix, pCtx->ds.Sel, pszPrefix, pCtx->es.Sel,
+ pszPrefix, pCtx->fs.Sel, pszPrefix, pCtx->gs.Sel, pszPrefix, efl);
+ else
+ pHlp->pfnPrintf(pHlp,
+ "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
+ "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
+ "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
+ pszPrefix, pCtx->eax, pszPrefix, pCtx->ebx, pszPrefix, pCtx->ecx, pszPrefix, pCtx->edx, pszPrefix, pCtx->esi, pszPrefix, pCtx->edi,
+ pszPrefix, pCtx->eip, pszPrefix, pCtx->esp, pszPrefix, pCtx->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pszPrefix, pCtx->ss.Sel, pszPrefix, pCtx->ds.Sel, pszPrefix, pCtx->es.Sel,
+ pszPrefix, pCtx->fs.Sel, pszPrefix, pCtx->gs.Sel, pszPrefix, efl);
+ break;
+
+ case CPUMDUMPTYPE_DEFAULT:
+ if (CPUMIsGuestIn64BitCodeEx(pCtx))
+ pHlp->pfnPrintf(pHlp,
+ "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
+ "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
+ "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
+ "%sr14=%016RX64 %sr15=%016RX64\n"
+ "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
+ "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
+ "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
+ ,
+ pszPrefix, pCtx->rax, pszPrefix, pCtx->rbx, pszPrefix, pCtx->rcx, pszPrefix, pCtx->rdx, pszPrefix, pCtx->rsi, pszPrefix, pCtx->rdi,
+ pszPrefix, pCtx->r8, pszPrefix, pCtx->r9, pszPrefix, pCtx->r10, pszPrefix, pCtx->r11, pszPrefix, pCtx->r12, pszPrefix, pCtx->r13,
+ pszPrefix, pCtx->r14, pszPrefix, pCtx->r15,
+ pszPrefix, pCtx->rip, pszPrefix, pCtx->rsp, pszPrefix, pCtx->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pszPrefix, pCtx->ss.Sel, pszPrefix, pCtx->ds.Sel, pszPrefix, pCtx->es.Sel,
+ pszPrefix, pCtx->fs.Sel, pszPrefix, pCtx->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
+ pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
+ pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
+ else
+ pHlp->pfnPrintf(pHlp,
+ "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
+ "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
+ "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
+ "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
+ ,
+ pszPrefix, pCtx->eax, pszPrefix, pCtx->ebx, pszPrefix, pCtx->ecx, pszPrefix, pCtx->edx, pszPrefix, pCtx->esi, pszPrefix, pCtx->edi,
+ pszPrefix, pCtx->eip, pszPrefix, pCtx->esp, pszPrefix, pCtx->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pszPrefix, pCtx->ss.Sel, pszPrefix, pCtx->ds.Sel, pszPrefix, pCtx->es.Sel,
+ pszPrefix, pCtx->fs.Sel, pszPrefix, pCtx->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
+ pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
+ pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
+ break;
+
+ case CPUMDUMPTYPE_VERBOSE:
+ if (CPUMIsGuestIn64BitCodeEx(pCtx))
+ pHlp->pfnPrintf(pHlp,
+ "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
+ "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
+ "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
+ "%sr14=%016RX64 %sr15=%016RX64\n"
+ "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
+ "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
+ "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
+ "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
+ "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
+ "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
+ "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
+ "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
+ "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
+ ,
+ pszPrefix, pCtx->rax, pszPrefix, pCtx->rbx, pszPrefix, pCtx->rcx, pszPrefix, pCtx->rdx, pszPrefix, pCtx->rsi, pszPrefix, pCtx->rdi,
+ pszPrefix, pCtx->r8, pszPrefix, pCtx->r9, pszPrefix, pCtx->r10, pszPrefix, pCtx->r11, pszPrefix, pCtx->r12, pszPrefix, pCtx->r13,
+ pszPrefix, pCtx->r14, pszPrefix, pCtx->r15,
+ pszPrefix, pCtx->rip, pszPrefix, pCtx->rsp, pszPrefix, pCtx->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
+ pszPrefix, pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
+ pszPrefix, pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
+ pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
+ pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
+ pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
+ pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
+ pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
+ pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
+ pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
+ pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
+ pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
+ pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
+ else
+ pHlp->pfnPrintf(pHlp,
+ "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
+ "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
+ "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
+ "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
+ "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
+ "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
+ "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
+ "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
+ "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
+ "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
+ "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
+ "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
+ ,
+ pszPrefix, pCtx->eax, pszPrefix, pCtx->ebx, pszPrefix, pCtx->ecx, pszPrefix, pCtx->edx, pszPrefix, pCtx->esi, pszPrefix, pCtx->edi,
+ pszPrefix, pCtx->eip, pszPrefix, pCtx->esp, pszPrefix, pCtx->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
+ pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
+ pszPrefix, pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
+ pszPrefix, pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
+ pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
+ pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
+ pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
+ pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
+ pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
+ pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
+ pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
+
+ pHlp->pfnPrintf(pHlp, "%sxcr=%016RX64 %sxcr1=%016RX64 %sxss=%016RX64 (fXStateMask=%016RX64)\n",
+ pszPrefix, pCtx->aXcr[0], pszPrefix, pCtx->aXcr[1],
+ pszPrefix, UINT64_C(0) /** @todo XSS */, pCtx->fXStateMask);
+ {
+ PX86FXSTATE pFpuCtx = &pCtx->XState.x87;
+ pHlp->pfnPrintf(pHlp,
+ "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
+ "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n"
+ ,
+ pszPrefix, pFpuCtx->FCW, pszPrefix, pFpuCtx->FSW, pszPrefix, pFpuCtx->FTW, pszPrefix, pFpuCtx->FOP,
+ pszPrefix, pFpuCtx->MXCSR, pszPrefix, pFpuCtx->MXCSR_MASK,
+ pszPrefix, pFpuCtx->FPUIP, pszPrefix, pFpuCtx->CS, pszPrefix, pFpuCtx->Rsrvd1,
+ pszPrefix, pFpuCtx->FPUDP, pszPrefix, pFpuCtx->DS, pszPrefix, pFpuCtx->Rsrvd2
+ );
+ /*
+ * The FSAVE style memory image contains ST(0)-ST(7) at increasing addresses,
+ * not (FP)R0-7 as Intel SDM suggests.
+ */
+ unsigned iShift = (pFpuCtx->FSW >> 11) & 7;
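+ /* Example: with FSW.TOP = 6 the loop below labels ST(0) as FPR6, ST(1) as
+ FPR7, and ST(2) wraps to FPR0, i.e. iFPR = (iST + TOP) % 8. */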
+ for (unsigned iST = 0; iST < RT_ELEMENTS(pFpuCtx->aRegs); iST++)
+ {
+ unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pFpuCtx->aRegs);
+ unsigned uTag = (pFpuCtx->FTW >> (2 * iFPR)) & 3;
+ char chSign = pFpuCtx->aRegs[iST].au16[4] & 0x8000 ? '-' : '+';
+ unsigned iInteger = (unsigned)(pFpuCtx->aRegs[iST].au64[0] >> 63);
+ uint64_t u64Fraction = pFpuCtx->aRegs[iST].au64[0] & UINT64_C(0x7fffffffffffffff);
+ int iExponent = pFpuCtx->aRegs[iST].au16[4] & 0x7fff;
+ iExponent -= 16383; /* subtract bias */
+ /** @todo This isn't entirely correct and needs more work! */
+ pHlp->pfnPrintf(pHlp,
+ "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu * 2 ^ %d (*)",
+ pszPrefix, iST, pszPrefix, iFPR,
+ pFpuCtx->aRegs[iST].au16[4], pFpuCtx->aRegs[iST].au32[1], pFpuCtx->aRegs[iST].au32[0],
+ uTag, chSign, iInteger, u64Fraction, iExponent);
+ if (pFpuCtx->aRegs[iST].au16[5] || pFpuCtx->aRegs[iST].au16[6] || pFpuCtx->aRegs[iST].au16[7])
+ pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
+ pFpuCtx->aRegs[iST].au16[5], pFpuCtx->aRegs[iST].au16[6], pFpuCtx->aRegs[iST].au16[7]);
+ else
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+
+ /* XMM/YMM/ZMM registers. */
+ if (pCtx->fXStateMask & XSAVE_C_YMM)
+ {
+ PCX86XSAVEYMMHI pYmmHiCtx = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
+ if (!(pCtx->fXStateMask & XSAVE_C_ZMM_HI256))
+ for (unsigned i = 0; i < RT_ELEMENTS(pFpuCtx->aXMM); i++)
+ pHlp->pfnPrintf(pHlp, "%sYMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
+ pszPrefix, i, i < 10 ? " " : "",
+ pYmmHiCtx->aYmmHi[i].au32[3],
+ pYmmHiCtx->aYmmHi[i].au32[2],
+ pYmmHiCtx->aYmmHi[i].au32[1],
+ pYmmHiCtx->aYmmHi[i].au32[0],
+ pFpuCtx->aXMM[i].au32[3],
+ pFpuCtx->aXMM[i].au32[2],
+ pFpuCtx->aXMM[i].au32[1],
+ pFpuCtx->aXMM[i].au32[0]);
+ else
+ {
+ PCX86XSAVEZMMHI256 pZmmHi256 = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_ZMM_HI256_BIT, PCX86XSAVEZMMHI256);
+ for (unsigned i = 0; i < RT_ELEMENTS(pFpuCtx->aXMM); i++)
+ pHlp->pfnPrintf(pHlp,
+ "%sZMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32''%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
+ pszPrefix, i, i < 10 ? " " : "",
+ pZmmHi256->aHi256Regs[i].au32[7],
+ pZmmHi256->aHi256Regs[i].au32[6],
+ pZmmHi256->aHi256Regs[i].au32[5],
+ pZmmHi256->aHi256Regs[i].au32[4],
+ pZmmHi256->aHi256Regs[i].au32[3],
+ pZmmHi256->aHi256Regs[i].au32[2],
+ pZmmHi256->aHi256Regs[i].au32[1],
+ pZmmHi256->aHi256Regs[i].au32[0],
+ pYmmHiCtx->aYmmHi[i].au32[3],
+ pYmmHiCtx->aYmmHi[i].au32[2],
+ pYmmHiCtx->aYmmHi[i].au32[1],
+ pYmmHiCtx->aYmmHi[i].au32[0],
+ pFpuCtx->aXMM[i].au32[3],
+ pFpuCtx->aXMM[i].au32[2],
+ pFpuCtx->aXMM[i].au32[1],
+ pFpuCtx->aXMM[i].au32[0]);
+
+ PCX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_ZMM_16HI_BIT, PCX86XSAVEZMM16HI);
+ for (unsigned i = 0; i < RT_ELEMENTS(pZmm16Hi->aRegs); i++)
+ pHlp->pfnPrintf(pHlp,
+ "%sZMM%u=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32''%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
+ pszPrefix, i + 16,
+ pZmm16Hi->aRegs[i].au32[15],
+ pZmm16Hi->aRegs[i].au32[14],
+ pZmm16Hi->aRegs[i].au32[13],
+ pZmm16Hi->aRegs[i].au32[12],
+ pZmm16Hi->aRegs[i].au32[11],
+ pZmm16Hi->aRegs[i].au32[10],
+ pZmm16Hi->aRegs[i].au32[9],
+ pZmm16Hi->aRegs[i].au32[8],
+ pZmm16Hi->aRegs[i].au32[7],
+ pZmm16Hi->aRegs[i].au32[6],
+ pZmm16Hi->aRegs[i].au32[5],
+ pZmm16Hi->aRegs[i].au32[4],
+ pZmm16Hi->aRegs[i].au32[3],
+ pZmm16Hi->aRegs[i].au32[2],
+ pZmm16Hi->aRegs[i].au32[1],
+ pZmm16Hi->aRegs[i].au32[0]);
+ }
+ }
+ else
+ for (unsigned i = 0; i < RT_ELEMENTS(pFpuCtx->aXMM); i++)
+ pHlp->pfnPrintf(pHlp,
+ i & 1
+ ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
+ : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
+ pszPrefix, i, i < 10 ? " " : "",
+ pFpuCtx->aXMM[i].au32[3],
+ pFpuCtx->aXMM[i].au32[2],
+ pFpuCtx->aXMM[i].au32[1],
+ pFpuCtx->aXMM[i].au32[0]);
+
+ if (pCtx->fXStateMask & XSAVE_C_OPMASK)
+ {
+ PCX86XSAVEOPMASK pOpMask = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_OPMASK_BIT, PCX86XSAVEOPMASK);
+ for (unsigned i = 0; i < RT_ELEMENTS(pOpMask->aKRegs); i += 4)
+ pHlp->pfnPrintf(pHlp, "%sK%u=%016RX64 %sK%u=%016RX64 %sK%u=%016RX64 %sK%u=%016RX64\n",
+ pszPrefix, i + 0, pOpMask->aKRegs[i + 0],
+ pszPrefix, i + 1, pOpMask->aKRegs[i + 1],
+ pszPrefix, i + 2, pOpMask->aKRegs[i + 2],
+ pszPrefix, i + 3, pOpMask->aKRegs[i + 3]);
+ }
+
+ if (pCtx->fXStateMask & XSAVE_C_BNDREGS)
+ {
+ PCX86XSAVEBNDREGS pBndRegs = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_BNDREGS_BIT, PCX86XSAVEBNDREGS);
+ for (unsigned i = 0; i < RT_ELEMENTS(pBndRegs->aRegs); i += 2)
+ pHlp->pfnPrintf(pHlp, "%sBNDREG%u=%016RX64/%016RX64 %sBNDREG%u=%016RX64/%016RX64\n",
+ pszPrefix, i, pBndRegs->aRegs[i].uLowerBound, pBndRegs->aRegs[i].uUpperBound,
+ pszPrefix, i + 1, pBndRegs->aRegs[i + 1].uLowerBound, pBndRegs->aRegs[i + 1].uUpperBound);
+ }
+
+ if (pCtx->fXStateMask & XSAVE_C_BNDCSR)
+ {
+ PCX86XSAVEBNDCFG pBndCfg = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_BNDCSR_BIT, PCX86XSAVEBNDCFG);
+ pHlp->pfnPrintf(pHlp, "%sBNDCFG.CONFIG=%016RX64 %sBNDCFG.STATUS=%016RX64\n",
+ pszPrefix, pBndCfg->fConfig, pszPrefix, pBndCfg->fStatus);
+ }
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pFpuCtx->au32RsrvdRest); i++)
+ if (pFpuCtx->au32RsrvdRest[i])
+ pHlp->pfnPrintf(pHlp, "%sRsrvdRest[%u]=%RX32 (offset=%#x)\n",
+ pszPrefix, i, pFpuCtx->au32RsrvdRest[i], RT_UOFFSETOF_DYN(X86FXSTATE, au32RsrvdRest[i]) );
+ }
+
+ pHlp->pfnPrintf(pHlp,
+ "%sEFER =%016RX64\n"
+ "%sPAT =%016RX64\n"
+ "%sSTAR =%016RX64\n"
+ "%sCSTAR =%016RX64\n"
+ "%sLSTAR =%016RX64\n"
+ "%sSFMASK =%016RX64\n"
+ "%sKERNELGSBASE =%016RX64\n",
+ pszPrefix, pCtx->msrEFER,
+ pszPrefix, pCtx->msrPAT,
+ pszPrefix, pCtx->msrSTAR,
+ pszPrefix, pCtx->msrCSTAR,
+ pszPrefix, pCtx->msrLSTAR,
+ pszPrefix, pCtx->msrSFMASK,
+ pszPrefix, pCtx->msrKERNELGSBASE);
+
+ if (CPUMIsGuestInPAEModeEx(pCtx))
+ for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aPaePdpes); i++)
+ pHlp->pfnPrintf(pHlp, "%sPAE PDPTE %u =%016RX64\n", pszPrefix, i, pCtx->aPaePdpes[i]);
+ break;
+ }
+}
+
+
+/**
+ * Display all cpu states and any other cpum info.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ cpumR3InfoGuest(pVM, pHlp, pszArgs);
+ cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
+ cpumR3InfoGuestHwvirt(pVM, pHlp, pszArgs);
+ cpumR3InfoHyper(pVM, pHlp, pszArgs);
+ cpumR3InfoHost(pVM, pHlp, pszArgs);
+}
+
+
+/**
+ * Parses the info argument.
+ *
+ * The argument starts with 'verbose', 'terse' or 'default' and then
+ * continues with the comment string.
+ *
+ * @param pszArgs The pointer to the argument string.
+ * @param penmType Where to store the dump type request.
+ * @param ppszComment Where to store the pointer to the comment string.
+ */
+static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
+{
+ if (!pszArgs)
+ {
+ *penmType = CPUMDUMPTYPE_DEFAULT;
+ *ppszComment = "";
+ }
+ else
+ {
+ if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
+ {
+ pszArgs += 7;
+ *penmType = CPUMDUMPTYPE_VERBOSE;
+ }
+ else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
+ {
+ pszArgs += 5;
+ *penmType = CPUMDUMPTYPE_TERSE;
+ }
+ else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
+ {
+ pszArgs += 7;
+ *penmType = CPUMDUMPTYPE_DEFAULT;
+ }
+ else
+ *penmType = CPUMDUMPTYPE_DEFAULT;
+ *ppszComment = RTStrStripL(pszArgs);
+ }
+}
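+
+/* Usage sketch (illustrative only): a pszArgs of "terse vcpu0" yields
+ *penmType == CPUMDUMPTYPE_TERSE and *ppszComment == "vcpu0", while a NULL
+ pszArgs defaults to CPUMDUMPTYPE_DEFAULT with an empty comment. */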
+
+
+/**
+ * Display the guest cpu state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments.
+ */
+static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ CPUMDUMPTYPE enmType;
+ const char *pszComment;
+ cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
+
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+ cpumR3InfoOne(pVM, pCtx, pHlp, enmType, "");
+}
+
+
+/**
+ * Displays an SVM VMCB control area.
+ *
+ * @param pHlp The info helper functions.
+ * @param pVmcbCtrl Pointer to an SVM VMCB controls area.
+ * @param pszPrefix Caller specified string prefix.
+ */
+static void cpumR3InfoSvmVmcbCtrl(PCDBGFINFOHLP pHlp, PCSVMVMCBCTRL pVmcbCtrl, const char *pszPrefix)
+{
+ AssertReturnVoid(pHlp);
+ AssertReturnVoid(pVmcbCtrl);
+
+ pHlp->pfnPrintf(pHlp, "%sCRX-read intercepts = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptRdCRx);
+ pHlp->pfnPrintf(pHlp, "%sCRX-write intercepts = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptWrCRx);
+ pHlp->pfnPrintf(pHlp, "%sDRX-read intercepts = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptRdDRx);
+ pHlp->pfnPrintf(pHlp, "%sDRX-write intercepts = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptWrDRx);
+ pHlp->pfnPrintf(pHlp, "%sException intercepts = %#RX32\n", pszPrefix, pVmcbCtrl->u32InterceptXcpt);
+ pHlp->pfnPrintf(pHlp, "%sControl intercepts = %#RX64\n", pszPrefix, pVmcbCtrl->u64InterceptCtrl);
+ pHlp->pfnPrintf(pHlp, "%sPause-filter threshold = %#RX16\n", pszPrefix, pVmcbCtrl->u16PauseFilterThreshold);
+ pHlp->pfnPrintf(pHlp, "%sPause-filter count = %#RX16\n", pszPrefix, pVmcbCtrl->u16PauseFilterCount);
+ pHlp->pfnPrintf(pHlp, "%sIOPM bitmap physaddr = %#RX64\n", pszPrefix, pVmcbCtrl->u64IOPMPhysAddr);
+ pHlp->pfnPrintf(pHlp, "%sMSRPM bitmap physaddr = %#RX64\n", pszPrefix, pVmcbCtrl->u64MSRPMPhysAddr);
+ pHlp->pfnPrintf(pHlp, "%sTSC offset = %#RX64\n", pszPrefix, pVmcbCtrl->u64TSCOffset);
+ pHlp->pfnPrintf(pHlp, "%sTLB Control\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sASID = %#RX32\n", pszPrefix, pVmcbCtrl->TLBCtrl.n.u32ASID);
+ pHlp->pfnPrintf(pHlp, " %sTLB-flush type = %u\n", pszPrefix, pVmcbCtrl->TLBCtrl.n.u8TLBFlush);
+ pHlp->pfnPrintf(pHlp, "%sInterrupt Control\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sVTPR = %#RX8 (%u)\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u8VTPR, pVmcbCtrl->IntCtrl.n.u8VTPR);
+ pHlp->pfnPrintf(pHlp, " %sVIRQ (Pending) = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VIrqPending);
+ pHlp->pfnPrintf(pHlp, " %sVINTR vector = %#RX8\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u8VIntrVector);
+ pHlp->pfnPrintf(pHlp, " %sVGIF = %u\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VGif);
+ pHlp->pfnPrintf(pHlp, " %sVINTR priority = %#RX8\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u4VIntrPrio);
+ pHlp->pfnPrintf(pHlp, " %sIgnore TPR = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1IgnoreTPR);
+ pHlp->pfnPrintf(pHlp, " %sVINTR masking = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VIntrMasking);
+ pHlp->pfnPrintf(pHlp, " %sVGIF enable = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VGifEnable);
+ pHlp->pfnPrintf(pHlp, " %sAVIC enable = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1AvicEnable);
+ pHlp->pfnPrintf(pHlp, "%sInterrupt Shadow\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sInterrupt shadow = %RTbool\n", pszPrefix, pVmcbCtrl->IntShadow.n.u1IntShadow);
+ pHlp->pfnPrintf(pHlp, " %sGuest-interrupt Mask = %RTbool\n", pszPrefix, pVmcbCtrl->IntShadow.n.u1GuestIntMask);
+ pHlp->pfnPrintf(pHlp, "%sExit Code = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitCode);
+ pHlp->pfnPrintf(pHlp, "%sEXITINFO1 = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitInfo1);
+ pHlp->pfnPrintf(pHlp, "%sEXITINFO2 = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitInfo2);
+ pHlp->pfnPrintf(pHlp, "%sExit Interrupt Info\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sValid = %RTbool\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u1Valid);
+ pHlp->pfnPrintf(pHlp, " %sVector = %#RX8 (%u)\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u8Vector, pVmcbCtrl->ExitIntInfo.n.u8Vector);
+ pHlp->pfnPrintf(pHlp, " %sType = %u\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u3Type);
+ pHlp->pfnPrintf(pHlp, " %sError-code valid = %RTbool\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid);
+ pHlp->pfnPrintf(pHlp, " %sError-code = %#RX32\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u32ErrorCode);
+ pHlp->pfnPrintf(pHlp, "%sNested paging and SEV\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sNested paging = %RTbool\n", pszPrefix, pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging);
+ pHlp->pfnPrintf(pHlp, " %sSEV (Secure Encrypted VM) = %RTbool\n", pszPrefix, pVmcbCtrl->NestedPagingCtrl.n.u1Sev);
+ pHlp->pfnPrintf(pHlp, " %sSEV-ES (Encrypted State) = %RTbool\n", pszPrefix, pVmcbCtrl->NestedPagingCtrl.n.u1SevEs);
+ pHlp->pfnPrintf(pHlp, "%sEvent Inject\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sValid = %RTbool\n", pszPrefix, pVmcbCtrl->EventInject.n.u1Valid);
+ pHlp->pfnPrintf(pHlp, " %sVector = %#RX32 (%u)\n", pszPrefix, pVmcbCtrl->EventInject.n.u8Vector, pVmcbCtrl->EventInject.n.u8Vector);
+ pHlp->pfnPrintf(pHlp, " %sType = %u\n", pszPrefix, pVmcbCtrl->EventInject.n.u3Type);
+ pHlp->pfnPrintf(pHlp, " %sError-code valid = %RTbool\n", pszPrefix, pVmcbCtrl->EventInject.n.u1ErrorCodeValid);
+ pHlp->pfnPrintf(pHlp, " %sError-code = %#RX32\n", pszPrefix, pVmcbCtrl->EventInject.n.u32ErrorCode);
+ pHlp->pfnPrintf(pHlp, "%sNested-paging CR3 = %#RX64\n", pszPrefix, pVmcbCtrl->u64NestedPagingCR3);
+ pHlp->pfnPrintf(pHlp, "%sLBR Virtualization\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sLBR virt = %RTbool\n", pszPrefix, pVmcbCtrl->LbrVirt.n.u1LbrVirt);
+ pHlp->pfnPrintf(pHlp, " %sVirt. VMSAVE/VMLOAD = %RTbool\n", pszPrefix, pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload);
+ pHlp->pfnPrintf(pHlp, "%sVMCB Clean Bits = %#RX32\n", pszPrefix, pVmcbCtrl->u32VmcbCleanBits);
+ pHlp->pfnPrintf(pHlp, "%sNext-RIP = %#RX64\n", pszPrefix, pVmcbCtrl->u64NextRIP);
+ pHlp->pfnPrintf(pHlp, "%sInstruction bytes fetched = %u\n", pszPrefix, pVmcbCtrl->cbInstrFetched);
+ pHlp->pfnPrintf(pHlp, "%sInstruction bytes = %.*Rhxs\n", pszPrefix, sizeof(pVmcbCtrl->abInstr), pVmcbCtrl->abInstr);
+ pHlp->pfnPrintf(pHlp, "%sAVIC\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sBar addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicBar.n.u40Addr);
+ pHlp->pfnPrintf(pHlp, " %sBacking page addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicBackingPagePtr.n.u40Addr);
+ pHlp->pfnPrintf(pHlp, " %sLogical table addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicLogicalTablePtr.n.u40Addr);
+ pHlp->pfnPrintf(pHlp, " %sPhysical table addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicPhysicalTablePtr.n.u40Addr);
+ pHlp->pfnPrintf(pHlp, " %sLast guest core Id = %u\n", pszPrefix, pVmcbCtrl->AvicPhysicalTablePtr.n.u8LastGuestCoreId);
+}
+
+
+/**
+ * Helper for dumping the SVM VMCB selector registers.
+ *
+ * @param pHlp The info helper functions.
+ * @param pSel Pointer to the SVM selector register.
+ * @param pszName Name of the selector.
+ * @param pszPrefix Caller specified string prefix.
+ */
+DECLINLINE(void) cpumR3InfoSvmVmcbSelReg(PCDBGFINFOHLP pHlp, PCSVMSELREG pSel, const char *pszName, const char *pszPrefix)
+{
+ /* The string width of 4 used below is to handle 'LDTR'. Change later if longer register names are used. */
+ pHlp->pfnPrintf(pHlp, "%s%-4s = {%04x base=%016RX64 limit=%08x flags=%04x}\n", pszPrefix,
+ pszName, pSel->u16Sel, pSel->u64Base, pSel->u32Limit, pSel->u16Attr);
+}
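+
+/* Sample output (illustrative values only): a flat 64-bit code selector might
+ print as "CS = {0010 base=0000000000000000 limit=ffffffff flags=0a9b}",
+ u16Attr being in the compressed SVM attribute format. */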
+
+
+/**
+ * Helper for dumping the SVM VMCB GDTR/IDTR registers.
+ *
+ * @param pHlp The info helper functions.
+ * @param pXdtr Pointer to the descriptor table register.
+ * @param pszName Name of the descriptor table register.
+ * @param pszPrefix Caller specified string prefix.
+ */
+DECLINLINE(void) cpumR3InfoSvmVmcbXdtr(PCDBGFINFOHLP pHlp, PCSVMXDTR pXdtr, const char *pszName, const char *pszPrefix)
+{
+ /* The string width of 4 used below is to cover 'GDTR', 'IDTR'. Change later if longer register names are used. */
+ pHlp->pfnPrintf(pHlp, "%s%-4s = %016RX64:%04x\n", pszPrefix, pszName, pXdtr->u64Base, pXdtr->u32Limit);
+}
+
+
+/**
+ * Displays an SVM VMCB state-save area.
+ *
+ * @param pHlp The info helper functions.
+ * @param pVmcbStateSave Pointer to an SVM VMCB state-save area.
+ * @param pszPrefix Caller specified string prefix.
+ */
+static void cpumR3InfoSvmVmcbStateSave(PCDBGFINFOHLP pHlp, PCSVMVMCBSTATESAVE pVmcbStateSave, const char *pszPrefix)
+{
+ AssertReturnVoid(pHlp);
+ AssertReturnVoid(pVmcbStateSave);
+
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(&szEFlags[0], pVmcbStateSave->u64RFlags);
+
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->CS, "CS", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->SS, "SS", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->ES, "ES", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->DS, "DS", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->FS, "FS", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->GS, "GS", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->LDTR, "LDTR", pszPrefix);
+ cpumR3InfoSvmVmcbSelReg(pHlp, &pVmcbStateSave->TR, "TR", pszPrefix);
+ cpumR3InfoSvmVmcbXdtr(pHlp, &pVmcbStateSave->GDTR, "GDTR", pszPrefix);
+ cpumR3InfoSvmVmcbXdtr(pHlp, &pVmcbStateSave->IDTR, "IDTR", pszPrefix);
+ pHlp->pfnPrintf(pHlp, "%sCPL = %u\n", pszPrefix, pVmcbStateSave->u8CPL);
+ pHlp->pfnPrintf(pHlp, "%sEFER = %#RX64\n", pszPrefix, pVmcbStateSave->u64EFER);
+ pHlp->pfnPrintf(pHlp, "%sCR4 = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR4);
+ pHlp->pfnPrintf(pHlp, "%sCR3 = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR3);
+ pHlp->pfnPrintf(pHlp, "%sCR0 = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR0);
+ pHlp->pfnPrintf(pHlp, "%sDR7 = %#RX64\n", pszPrefix, pVmcbStateSave->u64DR7);
+ pHlp->pfnPrintf(pHlp, "%sDR6 = %#RX64\n", pszPrefix, pVmcbStateSave->u64DR6);
+ pHlp->pfnPrintf(pHlp, "%sRFLAGS = %#RX64 %31s\n", pszPrefix, pVmcbStateSave->u64RFlags, szEFlags);
+ pHlp->pfnPrintf(pHlp, "%sRIP = %#RX64\n", pszPrefix, pVmcbStateSave->u64RIP);
+ pHlp->pfnPrintf(pHlp, "%sRSP = %#RX64\n", pszPrefix, pVmcbStateSave->u64RSP);
+ pHlp->pfnPrintf(pHlp, "%sRAX = %#RX64\n", pszPrefix, pVmcbStateSave->u64RAX);
+ pHlp->pfnPrintf(pHlp, "%sSTAR = %#RX64\n", pszPrefix, pVmcbStateSave->u64STAR);
+ pHlp->pfnPrintf(pHlp, "%sLSTAR = %#RX64\n", pszPrefix, pVmcbStateSave->u64LSTAR);
+ pHlp->pfnPrintf(pHlp, "%sCSTAR = %#RX64\n", pszPrefix, pVmcbStateSave->u64CSTAR);
+ pHlp->pfnPrintf(pHlp, "%sSFMASK = %#RX64\n", pszPrefix, pVmcbStateSave->u64SFMASK);
+ pHlp->pfnPrintf(pHlp, "%sKERNELGSBASE = %#RX64\n", pszPrefix, pVmcbStateSave->u64KernelGSBase);
+ pHlp->pfnPrintf(pHlp, "%sSysEnter CS = %#RX64\n", pszPrefix, pVmcbStateSave->u64SysEnterCS);
+ pHlp->pfnPrintf(pHlp, "%sSysEnter EIP = %#RX64\n", pszPrefix, pVmcbStateSave->u64SysEnterEIP);
+ pHlp->pfnPrintf(pHlp, "%sSysEnter ESP = %#RX64\n", pszPrefix, pVmcbStateSave->u64SysEnterESP);
+ pHlp->pfnPrintf(pHlp, "%sCR2 = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR2);
+ pHlp->pfnPrintf(pHlp, "%sPAT = %#RX64\n", pszPrefix, pVmcbStateSave->u64PAT);
+ pHlp->pfnPrintf(pHlp, "%sDBGCTL = %#RX64\n", pszPrefix, pVmcbStateSave->u64DBGCTL);
+ pHlp->pfnPrintf(pHlp, "%sBR_FROM = %#RX64\n", pszPrefix, pVmcbStateSave->u64BR_FROM);
+ pHlp->pfnPrintf(pHlp, "%sBR_TO = %#RX64\n", pszPrefix, pVmcbStateSave->u64BR_TO);
+ pHlp->pfnPrintf(pHlp, "%sLASTXCPT_FROM = %#RX64\n", pszPrefix, pVmcbStateSave->u64LASTEXCPFROM);
+ pHlp->pfnPrintf(pHlp, "%sLASTXCPT_TO = %#RX64\n", pszPrefix, pVmcbStateSave->u64LASTEXCPTO);
+}
+
+
+/**
+ * Displays a virtual-VMCS.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pHlp The info helper functions.
+ * @param pVmcs Pointer to a virtual VMCS.
+ * @param pszPrefix Caller specified string prefix.
+ */
+static void cpumR3InfoVmxVmcs(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, PCVMXVVMCS pVmcs, const char *pszPrefix)
+{
+ AssertReturnVoid(pHlp);
+ AssertReturnVoid(pVmcs);
+
+ /* The string width of -4 used in the macros below is to cover 'LDTR', 'GDTR' and 'IDTR'. */
+#define CPUMVMX_DUMP_HOST_XDTR(a_pHlp, a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ do { \
+ (a_pHlp)->pfnPrintf((a_pHlp), " %s%-4s = {base=%016RX64}\n", \
+ (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u); \
+ } while (0)
+
+#define CPUMVMX_DUMP_HOST_FS_GS_TR(a_pHlp, a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ do { \
+ (a_pHlp)->pfnPrintf((a_pHlp), " %s%-4s = {%04x base=%016RX64}\n", \
+ (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u); \
+ } while (0)
+
+#define CPUMVMX_DUMP_GUEST_SEGREG(a_pHlp, a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ do { \
+ (a_pHlp)->pfnPrintf((a_pHlp), " %s%-4s = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
+ (a_pszPrefix), (a_SegName), (a_pVmcs)->Guest##a_Seg, (a_pVmcs)->u64Guest##a_Seg##Base.u, \
+ (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr); \
+ } while (0)
+
+#define CPUMVMX_DUMP_GUEST_XDTR(a_pHlp, a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ do { \
+ (a_pHlp)->pfnPrintf((a_pHlp), " %s%-4s = {base=%016RX64 limit=%08x}\n", \
+ (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Guest##a_Seg##Base.u, (a_pVmcs)->u32Guest##a_Seg##Limit); \
+ } while (0)
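+
+ /* For reference, CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Cs, "CS", pszPrefix)
+ token-pastes into GuestCs, u64GuestCsBase.u, u32GuestCsLimit and
+ u32GuestCsAttr, printing e.g. " CS = {0008 base=... limit=... flags=...}". */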
+
+ /* Header. */
+ {
+ pHlp->pfnPrintf(pHlp, "%sHeader:\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sVMCS revision id = %#RX32\n", pszPrefix, pVmcs->u32VmcsRevId);
+ pHlp->pfnPrintf(pHlp, " %sVMX-abort id = %#RX32 (%s)\n", pszPrefix, pVmcs->enmVmxAbort, VMXGetAbortDesc(pVmcs->enmVmxAbort));
+ pHlp->pfnPrintf(pHlp, " %sVMCS state = %#x (%s)\n", pszPrefix, pVmcs->fVmcsState, VMXGetVmcsStateDesc(pVmcs->fVmcsState));
+ }
+
+ /* Control fields. */
+ {
+ /* 16-bit. */
+ pHlp->pfnPrintf(pHlp, "%sControl:\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sVPID = %#RX16\n", pszPrefix, pVmcs->u16Vpid);
+ pHlp->pfnPrintf(pHlp, " %sPosted intr notify vector = %#RX16\n", pszPrefix, pVmcs->u16PostIntNotifyVector);
+ pHlp->pfnPrintf(pHlp, " %sEPTP index = %#RX16\n", pszPrefix, pVmcs->u16EptpIndex);
+ pHlp->pfnPrintf(pHlp, " %sHLAT prefix size = %#RX16\n", pszPrefix, pVmcs->u16HlatPrefixSize);
+
+ /* 32-bit. */
+ pHlp->pfnPrintf(pHlp, " %sPin ctls = %#RX32\n", pszPrefix, pVmcs->u32PinCtls);
+ pHlp->pfnPrintf(pHlp, " %sProcessor ctls = %#RX32\n", pszPrefix, pVmcs->u32ProcCtls);
+ pHlp->pfnPrintf(pHlp, " %sSecondary processor ctls = %#RX32\n", pszPrefix, pVmcs->u32ProcCtls2);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit ctls = %#RX32\n", pszPrefix, pVmcs->u32ExitCtls);
+ pHlp->pfnPrintf(pHlp, " %sVM-entry ctls = %#RX32\n", pszPrefix, pVmcs->u32EntryCtls);
+ pHlp->pfnPrintf(pHlp, " %sException bitmap = %#RX32\n", pszPrefix, pVmcs->u32XcptBitmap);
+ pHlp->pfnPrintf(pHlp, " %sPage-fault mask = %#RX32\n", pszPrefix, pVmcs->u32XcptPFMask);
+ pHlp->pfnPrintf(pHlp, " %sPage-fault match = %#RX32\n", pszPrefix, pVmcs->u32XcptPFMatch);
+ pHlp->pfnPrintf(pHlp, " %sCR3-target count = %RU32\n", pszPrefix, pVmcs->u32Cr3TargetCount);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit MSR store count = %RU32\n", pszPrefix, pVmcs->u32ExitMsrStoreCount);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit MSR load count = %RU32\n", pszPrefix, pVmcs->u32ExitMsrLoadCount);
+ pHlp->pfnPrintf(pHlp, " %sVM-entry MSR load count = %RU32\n", pszPrefix, pVmcs->u32EntryMsrLoadCount);
+ pHlp->pfnPrintf(pHlp, " %sVM-entry interruption info = %#RX32\n", pszPrefix, pVmcs->u32EntryIntInfo);
+ {
+ uint32_t const fInfo = pVmcs->u32EntryIntInfo;
+ uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(fInfo);
+ pHlp->pfnPrintf(pHlp, " %sValid = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_VALID(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sType = %#x (%s)\n", pszPrefix, uType, VMXGetEntryIntInfoTypeDesc(uType));
+ pHlp->pfnPrintf(pHlp, " %sVector = %#x\n", pszPrefix, VMX_ENTRY_INT_INFO_VECTOR(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sNMI-unblocking-IRET = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sError-code valid = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(fInfo));
+ }
+ pHlp->pfnPrintf(pHlp, " %sVM-entry xcpt error-code = %#RX32\n", pszPrefix, pVmcs->u32EntryXcptErrCode);
+ pHlp->pfnPrintf(pHlp, " %sVM-entry instr length = %u byte(s)\n", pszPrefix, pVmcs->u32EntryInstrLen);
+ pHlp->pfnPrintf(pHlp, " %sTPR threshold = %#RX32\n", pszPrefix, pVmcs->u32TprThreshold);
+ pHlp->pfnPrintf(pHlp, " %sPLE gap = %#RX32\n", pszPrefix, pVmcs->u32PleGap);
+ pHlp->pfnPrintf(pHlp, " %sPLE window = %#RX32\n", pszPrefix, pVmcs->u32PleWindow);
+
+ /* 64-bit. */
+ pHlp->pfnPrintf(pHlp, " %sIO-bitmap A addr = %#RX64\n", pszPrefix, pVmcs->u64AddrIoBitmapA.u);
+ pHlp->pfnPrintf(pHlp, " %sIO-bitmap B addr = %#RX64\n", pszPrefix, pVmcs->u64AddrIoBitmapB.u);
+ pHlp->pfnPrintf(pHlp, " %sMSR-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrMsrBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit MSR store addr = %#RX64\n", pszPrefix, pVmcs->u64AddrExitMsrStore.u);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit MSR load addr = %#RX64\n", pszPrefix, pVmcs->u64AddrExitMsrLoad.u);
+ pHlp->pfnPrintf(pHlp, " %sVM-entry MSR load addr = %#RX64\n", pszPrefix, pVmcs->u64AddrEntryMsrLoad.u);
+ pHlp->pfnPrintf(pHlp, " %sExecutive VMCS ptr = %#RX64\n", pszPrefix, pVmcs->u64ExecVmcsPtr.u);
+ pHlp->pfnPrintf(pHlp, " %sPML addr = %#RX64\n", pszPrefix, pVmcs->u64AddrPml.u);
+ pHlp->pfnPrintf(pHlp, " %sTSC offset = %#RX64\n", pszPrefix, pVmcs->u64TscOffset.u);
+ pHlp->pfnPrintf(pHlp, " %sVirtual-APIC addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVirtApic.u);
+ pHlp->pfnPrintf(pHlp, " %sAPIC-access addr = %#RX64\n", pszPrefix, pVmcs->u64AddrApicAccess.u);
+ pHlp->pfnPrintf(pHlp, " %sPosted-intr desc addr = %#RX64\n", pszPrefix, pVmcs->u64AddrPostedIntDesc.u);
+ pHlp->pfnPrintf(pHlp, " %sVM-functions control = %#RX64\n", pszPrefix, pVmcs->u64VmFuncCtls.u);
+ pHlp->pfnPrintf(pHlp, " %sEPTP ptr = %#RX64\n", pszPrefix, pVmcs->u64EptPtr.u);
+ pHlp->pfnPrintf(pHlp, " %sEOI-exit bitmap 0 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap0.u);
+ pHlp->pfnPrintf(pHlp, " %sEOI-exit bitmap 1 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap1.u);
+ pHlp->pfnPrintf(pHlp, " %sEOI-exit bitmap 2 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap2.u);
+ pHlp->pfnPrintf(pHlp, " %sEOI-exit bitmap 3 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap3.u);
+ pHlp->pfnPrintf(pHlp, " %sEPTP-list addr = %#RX64\n", pszPrefix, pVmcs->u64AddrEptpList.u);
+ pHlp->pfnPrintf(pHlp, " %sVMREAD-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVmreadBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sVMWRITE-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVmwriteBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sVirt-Xcpt info addr = %#RX64\n", pszPrefix, pVmcs->u64AddrXcptVeInfo.u);
+ pHlp->pfnPrintf(pHlp, " %sXSS-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64XssExitBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sENCLS-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64EnclsExitBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sSPP-table ptr = %#RX64\n", pszPrefix, pVmcs->u64SppTablePtr.u);
+ pHlp->pfnPrintf(pHlp, " %sTSC multiplier = %#RX64\n", pszPrefix, pVmcs->u64TscMultiplier.u);
+ pHlp->pfnPrintf(pHlp, " %sTertiary processor ctls = %#RX64\n", pszPrefix, pVmcs->u64ProcCtls3.u);
+ pHlp->pfnPrintf(pHlp, " %sENCLV-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64EnclvExitBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sPCONFIG-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64PconfigExitBitmap.u);
+ pHlp->pfnPrintf(pHlp, " %sHLAT ptr = %#RX64\n", pszPrefix, pVmcs->u64HlatPtr.u);
+ pHlp->pfnPrintf(pHlp, " %sSecondary VM-exit controls = %#RX64\n", pszPrefix, pVmcs->u64ExitCtls2.u);
+
+ /* Natural width. */
+ pHlp->pfnPrintf(pHlp, " %sCR0 guest/host mask = %#RX64\n", pszPrefix, pVmcs->u64Cr0Mask.u);
+ pHlp->pfnPrintf(pHlp, " %sCR4 guest/host mask = %#RX64\n", pszPrefix, pVmcs->u64Cr4Mask.u);
+ pHlp->pfnPrintf(pHlp, " %sCR0 read shadow = %#RX64\n", pszPrefix, pVmcs->u64Cr0ReadShadow.u);
+ pHlp->pfnPrintf(pHlp, " %sCR4 read shadow = %#RX64\n", pszPrefix, pVmcs->u64Cr4ReadShadow.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3-target 0 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target0.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3-target 1 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target1.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3-target 2 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target2.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3-target 3 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target3.u);
+ }
+
+ /* Guest state. */
+ {
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(&szEFlags[0], pVmcs->u64GuestRFlags.u);
+ pHlp->pfnPrintf(pHlp, "%sGuest state:\n", pszPrefix);
+
+ /* 16-bit. */
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Cs, "CS", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Ss, "SS", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Es, "ES", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Ds, "DS", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Fs, "FS", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Gs, "GS", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Ldtr, "LDTR", pszPrefix);
+ CPUMVMX_DUMP_GUEST_SEGREG(pHlp, pVmcs, Tr, "TR", pszPrefix);
+ CPUMVMX_DUMP_GUEST_XDTR(pHlp, pVmcs, Gdtr, "GDTR", pszPrefix);
+ CPUMVMX_DUMP_GUEST_XDTR(pHlp, pVmcs, Idtr, "IDTR", pszPrefix);
+ pHlp->pfnPrintf(pHlp, " %sInterrupt status = %#RX16\n", pszPrefix, pVmcs->u16GuestIntStatus);
+ pHlp->pfnPrintf(pHlp, " %sPML index = %#RX16\n", pszPrefix, pVmcs->u16PmlIndex);
+
+ /* 32-bit. */
+ pHlp->pfnPrintf(pHlp, " %sInterruptibility state = %#RX32\n", pszPrefix, pVmcs->u32GuestIntrState);
+ pHlp->pfnPrintf(pHlp, " %sActivity state = %#RX32\n", pszPrefix, pVmcs->u32GuestActivityState);
+ pHlp->pfnPrintf(pHlp, " %sSMBASE = %#RX32\n", pszPrefix, pVmcs->u32GuestSmBase);
+ pHlp->pfnPrintf(pHlp, " %sSysEnter CS = %#RX32\n", pszPrefix, pVmcs->u32GuestSysenterCS);
+ pHlp->pfnPrintf(pHlp, " %sVMX-preemption timer value = %#RX32\n", pszPrefix, pVmcs->u32PreemptTimer);
+
+ /* 64-bit. */
+ pHlp->pfnPrintf(pHlp, " %sVMCS link ptr = %#RX64\n", pszPrefix, pVmcs->u64VmcsLinkPtr.u);
+ pHlp->pfnPrintf(pHlp, " %sDBGCTL = %#RX64\n", pszPrefix, pVmcs->u64GuestDebugCtlMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPAT = %#RX64\n", pszPrefix, pVmcs->u64GuestPatMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sEFER = %#RX64\n", pszPrefix, pVmcs->u64GuestEferMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPERFGLOBALCTRL = %#RX64\n", pszPrefix, pVmcs->u64GuestPerfGlobalCtlMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPDPTE 0 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte0.u);
+ pHlp->pfnPrintf(pHlp, " %sPDPTE 1 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte1.u);
+ pHlp->pfnPrintf(pHlp, " %sPDPTE 2 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte2.u);
+ pHlp->pfnPrintf(pHlp, " %sPDPTE 3 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte3.u);
+ pHlp->pfnPrintf(pHlp, " %sBNDCFGS = %#RX64\n", pszPrefix, pVmcs->u64GuestBndcfgsMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sRTIT_CTL = %#RX64\n", pszPrefix, pVmcs->u64GuestRtitCtlMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPKRS = %#RX64\n", pszPrefix, pVmcs->u64GuestPkrsMsr.u);
+
+ /* Natural width. */
+ pHlp->pfnPrintf(pHlp, " %sCR0 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr0.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr3.u);
+ pHlp->pfnPrintf(pHlp, " %sCR4 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr4.u);
+ pHlp->pfnPrintf(pHlp, " %sDR7 = %#RX64\n", pszPrefix, pVmcs->u64GuestDr7.u);
+ pHlp->pfnPrintf(pHlp, " %sRSP = %#RX64\n", pszPrefix, pVmcs->u64GuestRsp.u);
+ pHlp->pfnPrintf(pHlp, " %sRIP = %#RX64\n", pszPrefix, pVmcs->u64GuestRip.u);
+ pHlp->pfnPrintf(pHlp, " %sRFLAGS = %#RX64 %31s\n",pszPrefix, pVmcs->u64GuestRFlags.u, szEFlags);
+ pHlp->pfnPrintf(pHlp, " %sPending debug xcpts = %#RX64\n", pszPrefix, pVmcs->u64GuestPendingDbgXcpts.u);
+ pHlp->pfnPrintf(pHlp, " %sSysEnter ESP = %#RX64\n", pszPrefix, pVmcs->u64GuestSysenterEsp.u);
+ pHlp->pfnPrintf(pHlp, " %sSysEnter EIP = %#RX64\n", pszPrefix, pVmcs->u64GuestSysenterEip.u);
+ pHlp->pfnPrintf(pHlp, " %sS_CET = %#RX64\n", pszPrefix, pVmcs->u64GuestSCetMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sSSP = %#RX64\n", pszPrefix, pVmcs->u64GuestSsp.u);
+ pHlp->pfnPrintf(pHlp, " %sINTERRUPT_SSP_TABLE_ADDR = %#RX64\n", pszPrefix, pVmcs->u64GuestIntrSspTableAddrMsr.u);
+ }
+
+ /* Host state. */
+ {
+ pHlp->pfnPrintf(pHlp, "%sHost state:\n", pszPrefix);
+
+ /* 16-bit. */
+ pHlp->pfnPrintf(pHlp, " %sCS = %#RX16\n", pszPrefix, pVmcs->HostCs);
+ pHlp->pfnPrintf(pHlp, " %sSS = %#RX16\n", pszPrefix, pVmcs->HostSs);
+ pHlp->pfnPrintf(pHlp, " %sDS = %#RX16\n", pszPrefix, pVmcs->HostDs);
+ pHlp->pfnPrintf(pHlp, " %sES = %#RX16\n", pszPrefix, pVmcs->HostEs);
+ CPUMVMX_DUMP_HOST_FS_GS_TR(pHlp, pVmcs, Fs, "FS", pszPrefix);
+ CPUMVMX_DUMP_HOST_FS_GS_TR(pHlp, pVmcs, Gs, "GS", pszPrefix);
+ CPUMVMX_DUMP_HOST_FS_GS_TR(pHlp, pVmcs, Tr, "TR", pszPrefix);
+ CPUMVMX_DUMP_HOST_XDTR(pHlp, pVmcs, Gdtr, "GDTR", pszPrefix);
+ CPUMVMX_DUMP_HOST_XDTR(pHlp, pVmcs, Idtr, "IDTR", pszPrefix);
+
+ /* 32-bit. */
+ pHlp->pfnPrintf(pHlp, " %sSysEnter CS = %#RX32\n", pszPrefix, pVmcs->u32HostSysenterCs);
+
+ /* 64-bit. */
+ pHlp->pfnPrintf(pHlp, " %sEFER = %#RX64\n", pszPrefix, pVmcs->u64HostEferMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPAT = %#RX64\n", pszPrefix, pVmcs->u64HostPatMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPERFGLOBALCTRL = %#RX64\n", pszPrefix, pVmcs->u64HostPerfGlobalCtlMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sPKRS = %#RX64\n", pszPrefix, pVmcs->u64HostPkrsMsr.u);
+
+ /* Natural width. */
+ pHlp->pfnPrintf(pHlp, " %sCR0 = %#RX64\n", pszPrefix, pVmcs->u64HostCr0.u);
+ pHlp->pfnPrintf(pHlp, " %sCR3 = %#RX64\n", pszPrefix, pVmcs->u64HostCr3.u);
+ pHlp->pfnPrintf(pHlp, " %sCR4 = %#RX64\n", pszPrefix, pVmcs->u64HostCr4.u);
+ pHlp->pfnPrintf(pHlp, " %sSysEnter ESP = %#RX64\n", pszPrefix, pVmcs->u64HostSysenterEsp.u);
+ pHlp->pfnPrintf(pHlp, " %sSysEnter EIP = %#RX64\n", pszPrefix, pVmcs->u64HostSysenterEip.u);
+ pHlp->pfnPrintf(pHlp, " %sRSP = %#RX64\n", pszPrefix, pVmcs->u64HostRsp.u);
+ pHlp->pfnPrintf(pHlp, " %sRIP = %#RX64\n", pszPrefix, pVmcs->u64HostRip.u);
+ pHlp->pfnPrintf(pHlp, " %sS_CET = %#RX64\n", pszPrefix, pVmcs->u64HostSCetMsr.u);
+ pHlp->pfnPrintf(pHlp, " %sSSP = %#RX64\n", pszPrefix, pVmcs->u64HostSsp.u);
+ pHlp->pfnPrintf(pHlp, " %sINTERRUPT_SSP_TABLE_ADDR = %#RX64\n", pszPrefix, pVmcs->u64HostIntrSspTableAddrMsr.u);
+ }
+
+ /* Read-only fields. */
+ {
+ pHlp->pfnPrintf(pHlp, "%sRead-only data fields:\n", pszPrefix);
+
+ /* 16-bit (none currently). */
+
+ /* 32-bit. */
+ pHlp->pfnPrintf(pHlp, " %sExit reason = %u (%s)\n", pszPrefix, pVmcs->u32RoExitReason, HMGetVmxExitName(pVmcs->u32RoExitReason));
+ pHlp->pfnPrintf(pHlp, " %sExit qualification = %#RX64\n", pszPrefix, pVmcs->u64RoExitQual.u);
+ pHlp->pfnPrintf(pHlp, " %sVM-instruction error = %#RX32\n", pszPrefix, pVmcs->u32RoVmInstrError);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit intr info = %#RX32\n", pszPrefix, pVmcs->u32RoExitIntInfo);
+ {
+ uint32_t const fInfo = pVmcs->u32RoExitIntInfo;
+ uint8_t const uType = VMX_EXIT_INT_INFO_TYPE(fInfo);
+ pHlp->pfnPrintf(pHlp, " %sValid = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_VALID(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sType = %#x (%s)\n", pszPrefix, uType, VMXGetExitIntInfoTypeDesc(uType));
+ pHlp->pfnPrintf(pHlp, " %sVector = %#x\n", pszPrefix, VMX_EXIT_INT_INFO_VECTOR(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sNMI-unblocking-IRET = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sError-code valid = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(fInfo));
+ }
+ pHlp->pfnPrintf(pHlp, " %sVM-exit intr error-code = %#RX32\n", pszPrefix, pVmcs->u32RoExitIntErrCode);
+ pHlp->pfnPrintf(pHlp, " %sIDT-vectoring info = %#RX32\n", pszPrefix, pVmcs->u32RoIdtVectoringInfo);
+ {
+ uint32_t const fInfo = pVmcs->u32RoIdtVectoringInfo;
+ uint8_t const uType = VMX_IDT_VECTORING_INFO_TYPE(fInfo);
+ pHlp->pfnPrintf(pHlp, " %sValid = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_VALID(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sType = %#x (%s)\n", pszPrefix, uType, VMXGetIdtVectoringInfoTypeDesc(uType));
+ pHlp->pfnPrintf(pHlp, " %sVector = %#x\n", pszPrefix, VMX_IDT_VECTORING_INFO_VECTOR(fInfo));
+ pHlp->pfnPrintf(pHlp, " %sError-code valid = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(fInfo));
+ }
+ pHlp->pfnPrintf(pHlp, " %sIDT-vectoring error-code = %#RX32\n", pszPrefix, pVmcs->u32RoIdtVectoringErrCode);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit instruction length = %u byte(s)\n", pszPrefix, pVmcs->u32RoExitInstrLen);
+ pHlp->pfnPrintf(pHlp, " %sVM-exit instruction info = %#RX64\n", pszPrefix, pVmcs->u32RoExitInstrInfo);
+
+ /* 64-bit. */
+ pHlp->pfnPrintf(pHlp, " %sGuest-physical addr = %#RX64\n", pszPrefix, pVmcs->u64RoGuestPhysAddr.u);
+
+ /* Natural width. */
+ pHlp->pfnPrintf(pHlp, " %sI/O RCX = %#RX64\n", pszPrefix, pVmcs->u64RoIoRcx.u);
+ pHlp->pfnPrintf(pHlp, " %sI/O RSI = %#RX64\n", pszPrefix, pVmcs->u64RoIoRsi.u);
+ pHlp->pfnPrintf(pHlp, " %sI/O RDI = %#RX64\n", pszPrefix, pVmcs->u64RoIoRdi.u);
+ pHlp->pfnPrintf(pHlp, " %sI/O RIP = %#RX64\n", pszPrefix, pVmcs->u64RoIoRip.u);
+ pHlp->pfnPrintf(pHlp, " %sGuest-linear addr = %#RX64\n", pszPrefix, pVmcs->u64RoGuestLinearAddr.u);
+ }
+
+#ifdef DEBUG_ramshankar
+ if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+ {
+ void *pvPage = RTMemTmpAllocZ(VMX_V_VIRT_APIC_SIZE);
+ Assert(pvPage);
+ RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+ int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pvPage, GCPhysVirtApic, VMX_V_VIRT_APIC_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ pHlp->pfnPrintf(pHlp, " %sVirtual-APIC page\n", pszPrefix);
+ pHlp->pfnPrintf(pHlp, "%.*Rhxs\n", VMX_V_VIRT_APIC_SIZE, pvPage);
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ RTMemTmpFree(pvPage);
+ }
+#else
+ NOREF(pVCpu);
+#endif
+
+#undef CPUMVMX_DUMP_HOST_XDTR
+#undef CPUMVMX_DUMP_HOST_FS_GS_TR
+#undef CPUMVMX_DUMP_GUEST_SEGREG
+#undef CPUMVMX_DUMP_GUEST_XDTR
+}
+
+
+/**
+ * Display the guest's hardware-virtualization cpu state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) cpumR3InfoGuestHwvirt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+ bool const fSvm = pVM->cpum.s.GuestFeatures.fSvm;
+ bool const fVmx = pVM->cpum.s.GuestFeatures.fVmx;
+
+ pHlp->pfnPrintf(pHlp, "VCPU[%u] hardware virtualization state:\n", pVCpu->idCpu);
+ pHlp->pfnPrintf(pHlp, "fSavedInhibit = %#RX32\n", pCtx->hwvirt.fSavedInhibit);
+ pHlp->pfnPrintf(pHlp, "In nested-guest hwvirt mode = %RTbool\n", CPUMIsGuestInNestedHwvirtMode(pCtx));
+
+ if (fSvm)
+ {
+ pHlp->pfnPrintf(pHlp, "SVM hwvirt state:\n");
+ pHlp->pfnPrintf(pHlp, " fGif = %RTbool\n", pCtx->hwvirt.fGif);
+
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u);
+ pHlp->pfnPrintf(pHlp, " uMsrHSavePa = %#RX64\n", pCtx->hwvirt.svm.uMsrHSavePa);
+ pHlp->pfnPrintf(pHlp, " GCPhysVmcb = %#RGp\n", pCtx->hwvirt.svm.GCPhysVmcb);
+ pHlp->pfnPrintf(pHlp, " VmcbCtrl:\n");
+ cpumR3InfoSvmVmcbCtrl(pHlp, &pCtx->hwvirt.svm.Vmcb.ctrl, " " /* pszPrefix */);
+ pHlp->pfnPrintf(pHlp, " VmcbStateSave:\n");
+ cpumR3InfoSvmVmcbStateSave(pHlp, &pCtx->hwvirt.svm.Vmcb.guest, " " /* pszPrefix */);
+ pHlp->pfnPrintf(pHlp, " HostState:\n");
+ pHlp->pfnPrintf(pHlp, " uEferMsr = %#RX64\n", pCtx->hwvirt.svm.HostState.uEferMsr);
+ pHlp->pfnPrintf(pHlp, " uCr0 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr0);
+ pHlp->pfnPrintf(pHlp, " uCr4 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr4);
+ pHlp->pfnPrintf(pHlp, " uCr3 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr3);
+ pHlp->pfnPrintf(pHlp, " uRip = %#RX64\n", pCtx->hwvirt.svm.HostState.uRip);
+ pHlp->pfnPrintf(pHlp, " uRsp = %#RX64\n", pCtx->hwvirt.svm.HostState.uRsp);
+ pHlp->pfnPrintf(pHlp, " uRax = %#RX64\n", pCtx->hwvirt.svm.HostState.uRax);
+ pHlp->pfnPrintf(pHlp, " rflags = %#RX64 %31s\n", pCtx->hwvirt.svm.HostState.rflags.u64, szEFlags);
+ PCCPUMSELREG pSelEs = &pCtx->hwvirt.svm.HostState.es;
+ pHlp->pfnPrintf(pHlp, " es = {%04x base=%016RX64 limit=%08x flags=%08x}\n",
+ pSelEs->Sel, pSelEs->u64Base, pSelEs->u32Limit, pSelEs->Attr.u);
+ PCCPUMSELREG pSelCs = &pCtx->hwvirt.svm.HostState.cs;
+ pHlp->pfnPrintf(pHlp, " cs = {%04x base=%016RX64 limit=%08x flags=%08x}\n",
+ pSelCs->Sel, pSelCs->u64Base, pSelCs->u32Limit, pSelCs->Attr.u);
+ PCCPUMSELREG pSelSs = &pCtx->hwvirt.svm.HostState.ss;
+ pHlp->pfnPrintf(pHlp, " ss = {%04x base=%016RX64 limit=%08x flags=%08x}\n",
+ pSelSs->Sel, pSelSs->u64Base, pSelSs->u32Limit, pSelSs->Attr.u);
+ PCCPUMSELREG pSelDs = &pCtx->hwvirt.svm.HostState.ds;
+ pHlp->pfnPrintf(pHlp, " ds = {%04x base=%016RX64 limit=%08x flags=%08x}\n",
+ pSelDs->Sel, pSelDs->u64Base, pSelDs->u32Limit, pSelDs->Attr.u);
+ pHlp->pfnPrintf(pHlp, " gdtr = %016RX64:%04x\n", pCtx->hwvirt.svm.HostState.gdtr.pGdt,
+ pCtx->hwvirt.svm.HostState.gdtr.cbGdt);
+ pHlp->pfnPrintf(pHlp, " idtr = %016RX64:%04x\n", pCtx->hwvirt.svm.HostState.idtr.pIdt,
+ pCtx->hwvirt.svm.HostState.idtr.cbIdt);
+ pHlp->pfnPrintf(pHlp, " cPauseFilter = %RU16\n", pCtx->hwvirt.svm.cPauseFilter);
+ pHlp->pfnPrintf(pHlp, " cPauseFilterThreshold = %RU32\n", pCtx->hwvirt.svm.cPauseFilterThreshold);
+ pHlp->pfnPrintf(pHlp, " fInterceptEvents = %u\n", pCtx->hwvirt.svm.fInterceptEvents);
+ }
+ else if (fVmx)
+ {
+ pHlp->pfnPrintf(pHlp, "VMX hwvirt state:\n");
+ pHlp->pfnPrintf(pHlp, " GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon);
+ pHlp->pfnPrintf(pHlp, " GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs);
+ pHlp->pfnPrintf(pHlp, " GCPhysShadowVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysShadowVmcs);
+ pHlp->pfnPrintf(pHlp, " enmDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmDiag, HMGetVmxDiagDesc(pCtx->hwvirt.vmx.enmDiag));
+ pHlp->pfnPrintf(pHlp, " uDiagAux = %#RX64\n", pCtx->hwvirt.vmx.uDiagAux);
+ pHlp->pfnPrintf(pHlp, " enmAbort = %u (%s)\n", pCtx->hwvirt.vmx.enmAbort, VMXGetAbortDesc(pCtx->hwvirt.vmx.enmAbort));
+ pHlp->pfnPrintf(pHlp, " uAbortAux = %u (%#x)\n", pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux);
+ pHlp->pfnPrintf(pHlp, " fInVmxRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxRootMode);
+ pHlp->pfnPrintf(pHlp, " fInVmxNonRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxNonRootMode);
+ pHlp->pfnPrintf(pHlp, " fInterceptEvents = %RTbool\n", pCtx->hwvirt.vmx.fInterceptEvents);
+ pHlp->pfnPrintf(pHlp, " fNmiUnblockingIret = %RTbool\n", pCtx->hwvirt.vmx.fNmiUnblockingIret);
+ pHlp->pfnPrintf(pHlp, " uFirstPauseLoopTick = %RX64\n", pCtx->hwvirt.vmx.uFirstPauseLoopTick);
+ pHlp->pfnPrintf(pHlp, " uPrevPauseTick = %RX64\n", pCtx->hwvirt.vmx.uPrevPauseTick);
+ pHlp->pfnPrintf(pHlp, " uEntryTick = %RX64\n", pCtx->hwvirt.vmx.uEntryTick);
+ pHlp->pfnPrintf(pHlp, " offVirtApicWrite = %#RX16\n", pCtx->hwvirt.vmx.offVirtApicWrite);
+ pHlp->pfnPrintf(pHlp, " fVirtNmiBlocking = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking);
+ pHlp->pfnPrintf(pHlp, " VMCS cache:\n");
+ cpumR3InfoVmxVmcs(pVCpu, pHlp, &pCtx->hwvirt.vmx.Vmcs, " " /* pszPrefix */);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "Hwvirt state disabled.\n");
+
+#undef CPUMHWVIRTDUMP_NONE
+#undef CPUMHWVIRTDUMP_COMMON
+#undef CPUMHWVIRTDUMP_SVM
+#undef CPUMHWVIRTDUMP_VMX
+#undef CPUMHWVIRTDUMP_LAST
+#undef CPUMHWVIRTDUMP_ALL
+}
+
+/**
+ * Display the current guest instruction
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ char szInstruction[256];
+ szInstruction[0] = '\0';
+ DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
+ pHlp->pfnPrintf(pHlp, "\nCPUM%u: %s\n\n", pVCpu->idCpu, szInstruction);
+}
+
+
+/**
+ * Display the hypervisor cpu state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ CPUMDUMPTYPE enmType;
+ const char *pszComment;
+ cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
+ pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
+
+ pHlp->pfnPrintf(pHlp,
+ ".dr0=%016RX64 .dr1=%016RX64 .dr2=%016RX64 .dr3=%016RX64\n"
+ ".dr4=%016RX64 .dr5=%016RX64 .dr6=%016RX64 .dr7=%016RX64\n",
+ pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1], pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3],
+ pVCpu->cpum.s.Hyper.dr[4], pVCpu->cpum.s.Hyper.dr[5], pVCpu->cpum.s.Hyper.dr[6], pVCpu->cpum.s.Hyper.dr[7]);
+ pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
+}
+
+
+/**
+ * Display the host cpu state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ CPUMDUMPTYPE enmType;
+ const char *pszComment;
+ cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
+ pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+ PCPUMHOSTCTX pCtx = &pVCpu->cpum.s.Host;
+
+ /*
+ * Format the EFLAGS.
+ */
+ uint64_t efl = pCtx->rflags;
+ char szEFlags[80];
+ cpumR3InfoFormatFlags(&szEFlags[0], efl);
+
+ /*
+ * Format the registers.
+ */
+ pHlp->pfnPrintf(pHlp,
+ "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
+ "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
+ "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
+ " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
+ "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
+ "r14=%016RX64 r15=%016RX64\n"
+ "iopl=%d %31s\n"
+ "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
+ "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
+ "cr4=%016RX64 ldtr=%04x tr=%04x\n"
+ "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
+ "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
+ "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
+ "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
+ "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
+ ,
+ /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
+ pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
+ /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
+ /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
+ pCtx->r11, pCtx->r12, pCtx->r13,
+ pCtx->r14, pCtx->r15,
+ X86_EFL_GET_IOPL(efl), szEFlags,
+ pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
+ pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
+ pCtx->cr4, pCtx->ldtr, pCtx->tr,
+ pCtx->dr0, pCtx->dr1, pCtx->dr2,
+ pCtx->dr3, pCtx->dr6, pCtx->dr7,
+ pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
+ pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
+ pCtx->FSbase, pCtx->GSbase, pCtx->efer);
+}
+
+/**
+ * Structure used when disassembling instructions in DBGF.
+ * This is passed to the reader callback so it can access the state it needs.
+ */
+typedef struct CPUMDISASSTATE
+{
+ /** Pointer to the CPU structure. */
+ PDISCPUSTATE pCpu;
+ /** Pointer to the VM. */
+ PVM pVM;
+ /** Pointer to the VMCPU. */
+ PVMCPU pVCpu;
+ /** Pointer to the first byte in the segment. */
+ RTGCUINTPTR GCPtrSegBase;
+ /** Pointer to the byte after the end of the segment. (might have wrapped!) */
+ RTGCUINTPTR GCPtrSegEnd;
+ /** The size of the segment minus 1. */
+ RTGCUINTPTR cbSegLimit;
+ /** Pointer to the current page - R3 Ptr. */
+ void const *pvPageR3;
+ /** Pointer to the current page - GC Ptr. */
+ RTGCPTR pvPageGC;
+ /** The lock information that PGMPhysReleasePageMappingLock needs. */
+ PGMPAGEMAPLOCK PageMapLock;
+ /** Whether the PageMapLock is valid or not. */
+ bool fLocked;
+    /** Whether we're in 64-bit mode. */
+ bool f64Bits;
+} CPUMDISASSTATE, *PCPUMDISASSTATE;
+
+
+/**
+ * @callback_method_impl{FNDISREADBYTES}
+ */
+static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
+{
+ PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
+ for (;;)
+ {
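+        /* Loop until at least cbMinRead bytes have been copied, re-translating
+           the guest page mapping whenever the read crosses into a new page. */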
+ RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
+
+ /*
+ * Need to update the page translation?
+ */
+ if ( !pState->pvPageR3
+ || (GCPtr >> GUEST_PAGE_SHIFT) != (pState->pvPageGC >> GUEST_PAGE_SHIFT))
+ {
+ /* translate the address */
+ pState->pvPageGC = GCPtr & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
+
+ /* Release mapping lock previously acquired. */
+ if (pState->fLocked)
+ PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
+ int rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
+ if (RT_SUCCESS(rc))
+ pState->fLocked = true;
+ else
+ {
+ pState->fLocked = false;
+ pState->pvPageR3 = NULL;
+ return rc;
+ }
+ }
+
+ /*
+ * Check the segment limit.
+ */
+ if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
+ return VERR_OUT_OF_SELECTOR_BOUNDS;
+
+ /*
+ * Calc how much we can read.
+ */
+ uint32_t cb = GUEST_PAGE_SIZE - (GCPtr & GUEST_PAGE_OFFSET_MASK);
+ if (!pState->f64Bits)
+ {
+ RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
+ if (cb > cbSeg && cbSeg)
+ cb = cbSeg;
+ }
+ if (cb > cbMaxRead)
+ cb = cbMaxRead;
+
+ /*
+ * Read and advance or exit.
+ */
+ memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & GUEST_PAGE_OFFSET_MASK), cb);
+ offInstr += (uint8_t)cb;
+ if (cb >= cbMinRead)
+ {
+ pDis->cbCachedInstr = offInstr;
+ return VINF_SUCCESS;
+ }
+ cbMinRead -= (uint8_t)cb;
+ cbMaxRead -= (uint8_t)cb;
+ }
+}
+
+
+/**
+ * Disassemble an instruction and return the information in the provided structure.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pCtx Pointer to the guest CPU context.
+ * @param GCPtrPC Program counter (relative to CS) to disassemble from.
+ * @param pCpu Disassembly state.
+ * @param pszPrefix String prefix for logging (debug only).
+ *
+ */
+VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu,
+ const char *pszPrefix)
+{
+ CPUMDISASSTATE State;
+ int rc;
+
+ const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ State.pCpu = pCpu;
+ State.pvPageGC = 0;
+ State.pvPageR3 = NULL;
+ State.pVM = pVM;
+ State.pVCpu = pVCpu;
+ State.fLocked = false;
+ State.f64Bits = false;
+
+ /*
+ * Get selector information.
+ */
+ DISCPUMODE enmDisCpuMode;
+ if ( (pCtx->cr0 & X86_CR0_PE)
+ && pCtx->eflags.Bits.u1VM == 0)
+ {
+ if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
+ return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
+ State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
+ State.GCPtrSegBase = pCtx->cs.u64Base;
+ State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
+ State.cbSegLimit = pCtx->cs.u32Limit;
+ enmDisCpuMode = (State.f64Bits)
+ ? DISCPUMODE_64BIT
+ : pCtx->cs.Attr.n.u1DefBig
+ ? DISCPUMODE_32BIT
+ : DISCPUMODE_16BIT;
+ }
+ else
+ {
+ /* real or V86 mode */
+ enmDisCpuMode = DISCPUMODE_16BIT;
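+        /* In real and V86 mode the segment base is simply the selector times 16. */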
+ State.GCPtrSegBase = pCtx->cs.Sel * 16;
+ State.GCPtrSegEnd = 0xFFFFFFFF;
+ State.cbSegLimit = 0xFFFFFFFF;
+ }
+
+ /*
+ * Disassemble the instruction.
+ */
+ uint32_t cbInstr;
+#ifndef LOG_ENABLED
+ RT_NOREF_PV(pszPrefix);
+ rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
+ if (RT_SUCCESS(rc))
+ {
+#else
+ char szOutput[160];
+ rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
+ pCpu, &cbInstr, szOutput, sizeof(szOutput));
+ if (RT_SUCCESS(rc))
+ {
+ /* log it */
+ if (pszPrefix)
+ Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
+ else
+ Log(("%s", szOutput));
+#endif
+ rc = VINF_SUCCESS;
+ }
+ else
+ Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
+
+ /* Release mapping lock acquired in cpumR3DisasInstrRead. */
+ if (State.fLocked)
+ PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
+
+ return rc;
+}
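+/* Minimal usage sketch (hypothetical caller, not part of this change):
+   disassemble the instruction at the current guest RIP:
+       DISCPUSTATE Cpu;
+       int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "EMU");
+   The "EMU" prefix is arbitrary; it is only used for logging in LOG_ENABLED
+   builds. */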
+
+
+
+/**
+ * API for controlling a few of the CPU features found in CR4.
+ *
+ * Currently only X86_CR4_TSD is accepted as input.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fOr The CR4 OR mask.
+ * @param fAnd The CR4 AND mask.
+ */
+VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
+{
+ AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
+ AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
+
+ pVM->cpum.s.CR4.OrMask &= fAnd;
+ pVM->cpum.s.CR4.OrMask |= fOr;
+
+ return VINF_SUCCESS;
+}
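+/* Usage sketch (hypothetical caller): force CR4.TSD on for the guest with
+       CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);
+   and clear it again with
+       CPUMR3SetCR4Feature(pVM, 0, ~(RTHCUINTREG)X86_CR4_TSD);
+   both calls satisfy the input assertions above. */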
+
+
+/**
+ * Called when the ring-3 init phase completes.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat Which init phase.
+ */
+VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ switch (enmWhat)
+ {
+ case VMINITCOMPLETED_RING3:
+ {
+ /*
+ * Figure out if the guest uses 32-bit or 64-bit FPU state at runtime for 64-bit capable VMs.
+             * Only applicable/used on 64-bit hosts; see CPUMR0A.asm and @bugref{7138}.
+ */
+ bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+                /* While loading a saved state this is fixed up in cpumR3LoadDone(). */
+ if (fSupportsLongMode)
+ pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
+ }
+
+ /* Register statistic counters for MSRs. */
+ cpumR3MsrRegStats(pVM);
+
+ /* There shouldn't be any more calls to CPUMR3SetGuestCpuIdFeature and
+ CPUMR3ClearGuestCpuIdFeature now, so do some final CPUID polishing (NX). */
+ cpumR3CpuIdRing3InitDone(pVM);
+
+ /* Create VMX-preemption timer for nested guests if required. Must be
+ done here as CPUM is initialized before TM. */
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ char szName[32];
+ RTStrPrintf(szName, sizeof(szName), "Nested VMX-preemption %u", idCpu);
+ int rc = TMR3TimerCreate(pVM, TMCLOCK_VIRTUAL_SYNC, cpumR3VmxPreemptTimerCallback, pVCpu,
+ TMTIMER_FLAGS_RING0, szName, &pVCpu->cpum.s.hNestedVmxPreemptTimer);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when the ring-0 init phases have completed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM)
+{
+ /*
+ * Enable log buffering as we're going to log a lot of lines.
+ */
+ bool const fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+
+ /*
+ * Log the cpuid.
+ */
+ RTCPUSET OnlineSet;
+ LogRel(("CPUM: Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
+ (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
+ RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
+ RTCPUID cCores = RTMpGetCoreCount();
+ if (cCores)
+ LogRel(("CPUM: Physical host cores: %u\n", (unsigned)cCores));
+ LogRel(("************************* CPUID dump ************************\n"));
+ DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
+ LogRel(("\n"));
+ DBGFR3_INFO_LOG_SAFE(pVM, "cpuid", "verbose"); /* macro */
+ LogRel(("******************** End of CPUID dump **********************\n"));
+
+ /*
+ * Log VT-x extended features.
+ *
+ * SVM features are currently all covered under CPUID so there is nothing
+ * to do here for SVM.
+ */
+ if (pVM->cpum.s.HostFeatures.fVmx)
+ {
+ LogRel(("*********************** VT-x features ***********************\n"));
+ DBGFR3Info(pVM->pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
+ LogRel(("\n"));
+ LogRel(("******************* End of VT-x features ********************\n"));
+ }
+
+ /*
+ * Restore the log buffering state to what it was previously.
+ */
+ RTLogRelSetBuffering(fOldBuffered);
+}
+
+
+/**
+ * Marks the guest debug state as active.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ *
+ * @note This is used solely by NEM (hence the name) to set the correct flags here
+ * without loading the host's DRx registers, which is not possible from ring-3 anyway.
+ * The specific NEM backends have to make sure to load the correct values.
+ */
+VMMR3_INT_DECL(void) CPUMR3NemActivateGuestDebugState(PVMCPUCC pVCpu)
+{
+ ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HYPER);
+ ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
+}
+
+
+/**
+ * Marks the hyper debug state as active.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ *
+ * @note This is used solely by NEM (hence the name) to set the correct flags here
+ * without loading the host's DRx registers, which is not possible from ring-3 anyway.
+ * The specific NEM backends have to make sure to load the correct values.
+ */
+VMMR3_INT_DECL(void) CPUMR3NemActivateHyperDebugState(PVMCPUCC pVCpu)
+{
+ /*
+ * Make sure the hypervisor values are up to date.
+ */
+ CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */);
+
+ ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_GUEST);
+ ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
+}
diff --git a/src/VBox/VMM/VMMR3/CPUMDbg.cpp b/src/VBox/VMM/VMMR3/CPUMDbg.cpp
new file mode 100644
index 00000000..91406858
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUMDbg.cpp
@@ -0,0 +1,1288 @@
+/* $Id: CPUMDbg.cpp $ */
+/** @file
+ * CPUM - CPU Monitor / Manager, Debugger & Debugging APIs.
+ */
+
+/*
+ * Copyright (C) 2010-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/apic.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/thread.h>
+#include <iprt/string.h>
+#include <iprt/uint128.h>
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_Generic(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
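+    /* pDesc->offRegister is the byte offset of the register within CPUMCPU
+       (see the RT_UOFFSETOF uses in the descriptor tables below). */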
+ void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = *(uint8_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U16: pValue->u16 = *(uint16_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U32: pValue->u32 = *(uint32_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U64: pValue->u64 = *(uint64_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U128: pValue->u128 = *(PCRTUINT128U )pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U256: pValue->u256 = *(PCRTUINT256U )pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U512: pValue->u512 = *(PCRTUINT512U )pv; return VINF_SUCCESS;
+ default:
+ AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_Generic(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ void *pv = (uint8_t *)&pVCpu->cpum + pDesc->offRegister;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
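+    /* Masked read-modify-write: only the bits set in *pfMask are taken from
+       *pValue; all other bits of the register are preserved. */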
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ *(uint8_t *)pv &= ~pfMask->u8;
+ *(uint8_t *)pv |= pValue->u8 & pfMask->u8;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U16:
+ *(uint16_t *)pv &= ~pfMask->u16;
+ *(uint16_t *)pv |= pValue->u16 & pfMask->u16;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U32:
+ *(uint32_t *)pv &= ~pfMask->u32;
+ *(uint32_t *)pv |= pValue->u32 & pfMask->u32;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U64:
+ *(uint64_t *)pv &= ~pfMask->u64;
+ *(uint64_t *)pv |= pValue->u64 & pfMask->u64;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U128:
+ {
+ RTUINT128U Val;
+ RTUInt128AssignAnd((PRTUINT128U)pv, RTUInt128AssignBitwiseNot(RTUInt128Assign(&Val, &pfMask->u128)));
+ RTUInt128AssignOr((PRTUINT128U)pv, RTUInt128AssignAnd(RTUInt128Assign(&Val, &pValue->u128), &pfMask->u128));
+ return VINF_SUCCESS;
+ }
+
+ default:
+ AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_XStateGeneric(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ void const *pv = (uint8_t const *)&pVCpu->cpum.s.Guest.XState + pDesc->offRegister;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = *(uint8_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U16: pValue->u16 = *(uint16_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U32: pValue->u32 = *(uint32_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U64: pValue->u64 = *(uint64_t const *)pv; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U128: pValue->u128 = *(PCRTUINT128U )pv; return VINF_SUCCESS;
+ default:
+ AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_XStateGeneric(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ void *pv = (uint8_t *)&pVCpu->cpum.s.Guest.XState + pDesc->offRegister;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ *(uint8_t *)pv &= ~pfMask->u8;
+ *(uint8_t *)pv |= pValue->u8 & pfMask->u8;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U16:
+ *(uint16_t *)pv &= ~pfMask->u16;
+ *(uint16_t *)pv |= pValue->u16 & pfMask->u16;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U32:
+ *(uint32_t *)pv &= ~pfMask->u32;
+ *(uint32_t *)pv |= pValue->u32 & pfMask->u32;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U64:
+ *(uint64_t *)pv &= ~pfMask->u64;
+ *(uint64_t *)pv |= pValue->u64 & pfMask->u64;
+ return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_U128:
+ {
+ RTUINT128U Val;
+ RTUInt128AssignAnd((PRTUINT128U)pv, RTUInt128AssignBitwiseNot(RTUInt128Assign(&Val, &pfMask->u128)));
+ RTUInt128AssignOr((PRTUINT128U)pv, RTUInt128AssignAnd(RTUInt128Assign(&Val, &pValue->u128), &pfMask->u128));
+ return VINF_SUCCESS;
+ }
+
+ default:
+ AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+}
+
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_seg(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+    /** @todo Perform a proper selector load, updating the hidden selector parts as well. */
+ NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_gdtr(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VBOXGDTR const *pGdtr = (VBOXGDTR const *)((uint8_t const *)&pVCpu->cpum + pDesc->offRegister);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_DTR);
+
+ pValue->dtr.u32Limit = pGdtr->cbGdt;
+ pValue->dtr.u64Base = pGdtr->pGdt;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_gdtr(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ RT_NOREF(pfMask);
+
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VBOXGDTR *pGdtr = (VBOXGDTR *)((uint8_t *)&pVCpu->cpum + pDesc->offRegister);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_DTR);
+
+ pGdtr->cbGdt = pValue->dtr.u32Limit;
+ pGdtr->pGdt = pValue->dtr.u64Base;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_idtr(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VBOXIDTR const *pIdtr = (VBOXIDTR const *)((uint8_t const *)&pVCpu->cpum + pDesc->offRegister);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_DTR);
+
+ pValue->dtr.u32Limit = pIdtr->cbIdt;
+ pValue->dtr.u64Base = pIdtr->pIdt;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_idtr(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ RT_NOREF(pfMask);
+
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VBOXIDTR *pIdtr = (VBOXIDTR *)((uint8_t *)&pVCpu->cpum + pDesc->offRegister);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_DTR);
+
+ pIdtr->cbIdt = pValue->dtr.u32Limit;
+ pIdtr->pIdt = pValue->dtr.u64Base;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Determines the tag register value for an FPU register when the FPU state
+ * format is FXSAVE.
+ *
+ * @returns The tag register value.
+ * @param pFpu Pointer to the guest FPU.
+ * @param iReg The register number (0..7).
+ */
+DECLINLINE(uint16_t) cpumR3RegCalcFpuTagFromFxSave(PCX86FXSTATE pFpu, unsigned iReg)
+{
+ /*
+ * See table 11-1 in the AMD docs.
+ */
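+    /* Note: the FXSAVE image only stores an abridged tag word (one valid bit
+       per register); the full two-bit tag is reconstructed from the register
+       contents below. */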
+ if (!(pFpu->FTW & RT_BIT_32(iReg)))
+ return 3; /* b11 - empty */
+
+ uint16_t const uExp = pFpu->aRegs[iReg].au16[4];
+ if (uExp == 0)
+ {
+ if (pFpu->aRegs[iReg].au64[0] == 0) /* J & M == 0 */
+ return 1; /* b01 - zero */
+ return 2; /* b10 - special */
+ }
+
+ if (uExp == UINT16_C(0xffff))
+ return 2; /* b10 - special */
+
+ if (!(pFpu->aRegs[iReg].au64[0] >> 63)) /* J == 0 */
+ return 2; /* b10 - special */
+
+ return 0; /* b00 - valid (normal) */
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_ftw(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ PCX86FXSTATE pFpu = (PCX86FXSTATE)((uint8_t const *)&pVCpu->cpum + pDesc->offRegister);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_U16);
+
+ pValue->u16 = cpumR3RegCalcFpuTagFromFxSave(pFpu, 0)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 1) << 2)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 2) << 4)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 3) << 6)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 4) << 8)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 5) << 10)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 6) << 12)
+ | (cpumR3RegCalcFpuTagFromFxSave(pFpu, 7) << 14);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_ftw(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask);
+ return VERR_DBGF_READ_ONLY_REGISTER;
+}
+
+#if 0 /* unused */
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_Dummy(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ RT_NOREF_PV(pvUser);
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = 0; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U16: pValue->u16 = 0; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U32: pValue->u32 = 0; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U64: pValue->u64 = 0; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U128:
+ RT_ZERO(pValue->u128);
+ return VINF_SUCCESS;
+ case DBGFREGVALTYPE_DTR:
+ pValue->dtr.u32Limit = 0;
+ pValue->dtr.u64Base = 0;
+ return VINF_SUCCESS;
+ case DBGFREGVALTYPE_R80:
+ RT_ZERO(pValue->r80Ex);
+ return VINF_SUCCESS;
+ default:
+ AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_Dummy(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask);
+ return VERR_DBGF_READ_ONLY_REGISTER;
+}
+
+#endif /* unused */
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGet_ymm(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ uint32_t iReg = pDesc->offRegister;
+
+ Assert(pDesc->enmType == DBGFREGVALTYPE_U256);
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ if (iReg < 16)
+ {
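+        /* A 256-bit YMM register is split across the XSAVE area: the low
+           128 bits live in the legacy XMM array, the high 128 bits in the
+           YMM_Hi128 component. */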
+ pValue->u256.DQWords.dqw0 = pVCpu->cpum.s.Guest.XState.x87.aXMM[iReg].uXmm;
+ pValue->u256.DQWords.dqw1 = pVCpu->cpum.s.Guest.XState.u.YmmHi.aYmmHi[iReg].uXmm;
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegSet_ymm(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ uint32_t iReg = pDesc->offRegister;
+
+ Assert(pDesc->enmType == DBGFREGVALTYPE_U256);
+ VMCPU_ASSERT_EMT(pVCpu);
+
+    if (iReg < 16)
+    {
+        /* Merge both 128-bit lanes under the mask: low lane in the legacy XMM
+           array, high lane in the YMM_Hi128 block (see cpumR3RegGet_ymm). */
+        RTUINT128U Val;
+        RTUInt128AssignAnd(&pVCpu->cpum.s.Guest.XState.x87.aXMM[iReg].uXmm,
+                           RTUInt128AssignBitwiseNot(RTUInt128Assign(&Val, &pfMask->u256.DQWords.dqw0)));
+        RTUInt128AssignOr(&pVCpu->cpum.s.Guest.XState.x87.aXMM[iReg].uXmm,
+                          RTUInt128AssignAnd(RTUInt128Assign(&Val, &pValue->u256.DQWords.dqw0), &pfMask->u256.DQWords.dqw0));
+        RTUInt128AssignAnd(&pVCpu->cpum.s.Guest.XState.u.YmmHi.aYmmHi[iReg].uXmm,
+                           RTUInt128AssignBitwiseNot(RTUInt128Assign(&Val, &pfMask->u256.DQWords.dqw1)));
+        RTUInt128AssignOr(&pVCpu->cpum.s.Guest.XState.u.YmmHi.aYmmHi[iReg].uXmm,
+                          RTUInt128AssignAnd(RTUInt128Assign(&Val, &pValue->u256.DQWords.dqw1), &pfMask->u256.DQWords.dqw1));
+        return VINF_SUCCESS;
+    }
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/*
+ *
+ * Guest register access functions.
+ *
+ */
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstGet_crX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ uint64_t u64Value;
+ int rc = CPUMGetGuestCRx(pVCpu, pDesc->offRegister, &u64Value);
+ if (rc == VERR_PDM_NO_APIC_INSTANCE) /* CR8 might not be available, see @bugref{8868}.*/
+ u64Value = 0;
+ else
+ AssertRCReturn(rc, rc);
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64: pValue->u64 = u64Value; break;
+ case DBGFREGVALTYPE_U32: pValue->u32 = (uint32_t)u64Value; break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstSet_crX(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ int rc;
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * Calculate the new value.
+ */
+ uint64_t u64Value;
+ uint64_t fMask;
+ uint64_t fMaskMax;
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64:
+ u64Value = pValue->u64;
+ fMask = pfMask->u64;
+ fMaskMax = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U32:
+ u64Value = pValue->u32;
+ fMask = pfMask->u32;
+ fMaskMax = UINT32_MAX;
+ break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ if (fMask != fMaskMax)
+ {
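+        /* Partial write: fetch the current value first so the bits outside
+           the mask are preserved. */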
+ uint64_t u64FullValue;
+ rc = CPUMGetGuestCRx(pVCpu, pDesc->offRegister, &u64FullValue);
+ if (RT_FAILURE(rc))
+ return rc;
+ u64Value = (u64FullValue & ~fMask)
+ | (u64Value & fMask);
+ }
+
+ /*
+ * Perform the assignment.
+ */
+ switch (pDesc->offRegister)
+ {
+ case 0: rc = CPUMSetGuestCR0(pVCpu, u64Value); break;
+ case 2: rc = CPUMSetGuestCR2(pVCpu, u64Value); break;
+ case 3: rc = CPUMSetGuestCR3(pVCpu, u64Value); break;
+ case 4: rc = CPUMSetGuestCR4(pVCpu, u64Value); break;
+ case 8: rc = APICSetTpr(pVCpu, (uint8_t)(u64Value << 4)); break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ return rc;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstGet_drX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ uint64_t u64Value;
+ int rc = CPUMGetGuestDRx(pVCpu, pDesc->offRegister, &u64Value);
+ AssertRCReturn(rc, rc);
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64: pValue->u64 = u64Value; break;
+ case DBGFREGVALTYPE_U32: pValue->u32 = (uint32_t)u64Value; break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstSet_drX(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ int rc;
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * Calculate the new value.
+ */
+ uint64_t u64Value;
+ uint64_t fMask;
+ uint64_t fMaskMax;
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64:
+ u64Value = pValue->u64;
+ fMask = pfMask->u64;
+ fMaskMax = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U32:
+ u64Value = pValue->u32;
+ fMask = pfMask->u32;
+ fMaskMax = UINT32_MAX;
+ break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ if (fMask != fMaskMax)
+ {
+ uint64_t u64FullValue;
+ rc = CPUMGetGuestDRx(pVCpu, pDesc->offRegister, &u64FullValue);
+ if (RT_FAILURE(rc))
+ return rc;
+ u64Value = (u64FullValue & ~fMask)
+ | (u64Value & fMask);
+ }
+
+ /*
+ * Perform the assignment.
+ */
+ return CPUMSetGuestDRx(pVCpu, pDesc->offRegister, u64Value);
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstGet_msr(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ uint64_t u64Value;
+ VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pDesc->offRegister, &u64Value);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64: pValue->u64 = u64Value; break;
+ case DBGFREGVALTYPE_U32: pValue->u32 = (uint32_t)u64Value; break;
+ case DBGFREGVALTYPE_U16: pValue->u16 = (uint16_t)u64Value; break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+
+ /** @todo what to do about errors? */
+ Assert(RT_FAILURE_NP(rcStrict));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstSet_msr(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * Calculate the new value.
+ */
+ uint64_t u64Value;
+ uint64_t fMask;
+ uint64_t fMaskMax;
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U64:
+ u64Value = pValue->u64;
+ fMask = pfMask->u64;
+ fMaskMax = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U32:
+ u64Value = pValue->u32;
+ fMask = pfMask->u32;
+ fMaskMax = UINT32_MAX;
+ break;
+ case DBGFREGVALTYPE_U16:
+ u64Value = pValue->u16;
+ fMask = pfMask->u16;
+ fMaskMax = UINT16_MAX;
+ break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ if (fMask != fMaskMax)
+ {
+ uint64_t u64FullValue;
+ VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pDesc->offRegister, &u64FullValue);
+ if (rcStrict != VINF_SUCCESS)
+ {
+            Assert(RT_FAILURE_NP(rcStrict));
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+ u64Value = (u64FullValue & ~fMask)
+ | (u64Value & fMask);
+ }
+
+ /*
+ * Perform the assignment.
+ */
+ VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pDesc->offRegister, u64Value);
+ if (rcStrict == VINF_SUCCESS)
+ return VINF_SUCCESS;
+    Assert(RT_FAILURE_NP(rcStrict));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnGet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstGet_stN(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
+{
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pDesc->enmType == DBGFREGVALTYPE_R80);
+
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.s.Guest.XState.x87;
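+    /* ST(n) is addressed relative to the FPU stack top, kept in FSW bits 11..13. */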
+ unsigned iReg = (pFpuCtx->FSW >> 11) & 7;
+ iReg += pDesc->offRegister;
+ iReg &= 7;
+ pValue->r80Ex = pFpuCtx->aRegs[iReg].r80Ex;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{DBGFREGDESC,pfnSet}
+ */
+static DECLCALLBACK(int) cpumR3RegGstSet_stN(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)
+{
+ NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+
+/*
+ * Register aliases: each 64-bit register exposes its 32-, 16- and 8-bit views under the usual x86 names.
+ */
+#define CPUMREGALIAS_STD(Name, psz32, psz16, psz8) \
+ static DBGFREGALIAS const g_aCpumRegAliases_##Name[] = \
+ { \
+ { psz32, DBGFREGVALTYPE_U32 }, \
+ { psz16, DBGFREGVALTYPE_U16 }, \
+ { psz8, DBGFREGVALTYPE_U8 }, \
+ { NULL, DBGFREGVALTYPE_INVALID } \
+ }
+CPUMREGALIAS_STD(rax, "eax", "ax", "al");
+CPUMREGALIAS_STD(rcx, "ecx", "cx", "cl");
+CPUMREGALIAS_STD(rdx, "edx", "dx", "dl");
+CPUMREGALIAS_STD(rbx, "ebx", "bx", "bl");
+CPUMREGALIAS_STD(rsp, "esp", "sp", NULL);
+CPUMREGALIAS_STD(rbp, "ebp", "bp", NULL);
+CPUMREGALIAS_STD(rsi, "esi", "si", "sil");
+CPUMREGALIAS_STD(rdi, "edi", "di", "dil");
+CPUMREGALIAS_STD(r8, "r8d", "r8w", "r8b");
+CPUMREGALIAS_STD(r9, "r9d", "r9w", "r9b");
+CPUMREGALIAS_STD(r10, "r10d", "r10w", "r10b");
+CPUMREGALIAS_STD(r11, "r11d", "r11w", "r11b");
+CPUMREGALIAS_STD(r12, "r12d", "r12w", "r12b");
+CPUMREGALIAS_STD(r13, "r13d", "r13w", "r13b");
+CPUMREGALIAS_STD(r14, "r14d", "r14w", "r14b");
+CPUMREGALIAS_STD(r15, "r15d", "r15w", "r15b");
+CPUMREGALIAS_STD(rip, "eip", "ip", NULL);
+CPUMREGALIAS_STD(rflags, "eflags", "flags", NULL);
+#undef CPUMREGALIAS_STD
+
+static DBGFREGALIAS const g_aCpumRegAliases_fpuip[] =
+{
+ { "fpuip16", DBGFREGVALTYPE_U16 },
+ { NULL, DBGFREGVALTYPE_INVALID }
+};
+
+static DBGFREGALIAS const g_aCpumRegAliases_fpudp[] =
+{
+ { "fpudp16", DBGFREGVALTYPE_U16 },
+ { NULL, DBGFREGVALTYPE_INVALID }
+};
+
+static DBGFREGALIAS const g_aCpumRegAliases_cr0[] =
+{
+ { "msw", DBGFREGVALTYPE_U16 },
+ { NULL, DBGFREGVALTYPE_INVALID }
+};
+
+/*
+ * Sub fields.
+ */
+/** Sub-fields for the (hidden) segment attribute register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_seg[] =
+{
+ DBGFREGSUBFIELD_RW("type", 0, 4, 0),
+ DBGFREGSUBFIELD_RW("s", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("dpl", 5, 2, 0),
+ DBGFREGSUBFIELD_RW("p", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("avl", 12, 1, 0),
+ DBGFREGSUBFIELD_RW("l", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("d", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("g", 15, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the flags register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_rflags[] =
+{
+ DBGFREGSUBFIELD_RW("cf", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("pf", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("af", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("zf", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("sf", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("tf", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("if", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("df", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("of", 11, 1, 0),
+ DBGFREGSUBFIELD_RW("iopl", 12, 2, 0),
+ DBGFREGSUBFIELD_RW("nt", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("rf", 16, 1, 0),
+ DBGFREGSUBFIELD_RW("vm", 17, 1, 0),
+ DBGFREGSUBFIELD_RW("ac", 18, 1, 0),
+ DBGFREGSUBFIELD_RW("vif", 19, 1, 0),
+ DBGFREGSUBFIELD_RW("vip", 20, 1, 0),
+ DBGFREGSUBFIELD_RW("id", 21, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the FPU control word register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_fcw[] =
+{
+ DBGFREGSUBFIELD_RW("im", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("dm", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("zm", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("om", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("um", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("pm", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("pc", 8, 2, 0),
+ DBGFREGSUBFIELD_RW("rc", 10, 2, 0),
+ DBGFREGSUBFIELD_RW("x", 12, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the FPU status word register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_fsw[] =
+{
+ DBGFREGSUBFIELD_RW("ie", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("de", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("ze", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("oe", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("ue", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("pe", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("se", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("es", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("c0", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("c1", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("c2", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("top", 11, 3, 0),
+ DBGFREGSUBFIELD_RW("c3", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("b", 15, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the FPU tag word register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_ftw[] =
+{
+ DBGFREGSUBFIELD_RW("tag0", 0, 2, 0),
+ DBGFREGSUBFIELD_RW("tag1", 2, 2, 0),
+ DBGFREGSUBFIELD_RW("tag2", 4, 2, 0),
+ DBGFREGSUBFIELD_RW("tag3", 6, 2, 0),
+ DBGFREGSUBFIELD_RW("tag4", 8, 2, 0),
+ DBGFREGSUBFIELD_RW("tag5", 10, 2, 0),
+ DBGFREGSUBFIELD_RW("tag6", 12, 2, 0),
+ DBGFREGSUBFIELD_RW("tag7", 14, 2, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the Multimedia Extensions Control and Status Register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_mxcsr[] =
+{
+ DBGFREGSUBFIELD_RW("ie", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("de", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("ze", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("oe", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("ue", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("pe", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("daz", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("im", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("dm", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("zm", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("om", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("um", 11, 1, 0),
+ DBGFREGSUBFIELD_RW("pm", 12, 1, 0),
+ DBGFREGSUBFIELD_RW("rc", 13, 2, 0),
+ DBGFREGSUBFIELD_RW("fz", 14, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the FPU data (ST) registers. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_stN[] =
+{
+ DBGFREGSUBFIELD_RW("man", 0, 64, 0),
+ DBGFREGSUBFIELD_RW("exp", 64, 15, 0),
+ DBGFREGSUBFIELD_RW("sig", 79, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the MMX registers. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_mmN[] =
+{
+ DBGFREGSUBFIELD_RW("dw0", 0, 32, 0),
+ DBGFREGSUBFIELD_RW("dw1", 32, 32, 0),
+ DBGFREGSUBFIELD_RW("w0", 0, 16, 0),
+ DBGFREGSUBFIELD_RW("w1", 16, 16, 0),
+ DBGFREGSUBFIELD_RW("w2", 32, 16, 0),
+ DBGFREGSUBFIELD_RW("w3", 48, 16, 0),
+ DBGFREGSUBFIELD_RW("b0", 0, 8, 0),
+ DBGFREGSUBFIELD_RW("b1", 8, 8, 0),
+ DBGFREGSUBFIELD_RW("b2", 16, 8, 0),
+ DBGFREGSUBFIELD_RW("b3", 24, 8, 0),
+ DBGFREGSUBFIELD_RW("b4", 32, 8, 0),
+ DBGFREGSUBFIELD_RW("b5", 40, 8, 0),
+ DBGFREGSUBFIELD_RW("b6", 48, 8, 0),
+ DBGFREGSUBFIELD_RW("b7", 56, 8, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the XMM registers. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_xmmN[] =
+{
+ DBGFREGSUBFIELD_RW("r0", 0, 32, 0),
+ DBGFREGSUBFIELD_RW("r0.man", 0+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r0.exp", 0+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r0.sig", 0+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r1", 32, 32, 0),
+ DBGFREGSUBFIELD_RW("r1.man", 32+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r1.exp", 32+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r1.sig", 32+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r2", 64, 32, 0),
+ DBGFREGSUBFIELD_RW("r2.man", 64+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r2.exp", 64+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r2.sig", 64+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r3", 96, 32, 0),
+ DBGFREGSUBFIELD_RW("r3.man", 96+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r3.exp", 96+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r3.sig", 96+31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+#if 0 /* needs special accessor, too lazy for that now. */
+/** Sub-fields for the YMM registers. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_ymmN[] =
+{
+ DBGFREGSUBFIELD_RW("r0", 0, 32, 0),
+ DBGFREGSUBFIELD_RW("r0.man", 0+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r0.exp", 0+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r0.sig", 0+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r1", 32, 32, 0),
+ DBGFREGSUBFIELD_RW("r1.man", 32+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r1.exp", 32+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r1.sig", 32+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r2", 64, 32, 0),
+ DBGFREGSUBFIELD_RW("r2.man", 64+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r2.exp", 64+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r2.sig", 64+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r3", 96, 32, 0),
+ DBGFREGSUBFIELD_RW("r3.man", 96+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r3.exp", 96+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r3.sig", 96+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r4", 128, 32, 0),
+ DBGFREGSUBFIELD_RW("r4.man", 128+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r4.exp", 128+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r4.sig", 128+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r5", 160, 32, 0),
+ DBGFREGSUBFIELD_RW("r5.man", 160+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r5.exp", 160+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r5.sig", 160+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r6", 192, 32, 0),
+ DBGFREGSUBFIELD_RW("r6.man", 192+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r6.exp", 192+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r6.sig", 192+31, 1, 0),
+ DBGFREGSUBFIELD_RW("r7", 224, 32, 0),
+ DBGFREGSUBFIELD_RW("r7.man", 224+ 0, 23, 0),
+ DBGFREGSUBFIELD_RW("r7.exp", 224+23, 8, 0),
+ DBGFREGSUBFIELD_RW("r7.sig", 224+31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+#endif
+
+/** Sub-fields for the CR0 register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_cr0[] =
+{
+ DBGFREGSUBFIELD_RW("pe", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("mp", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("em", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("ts", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("et", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("ne", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("wp", 16, 1, 0),
+ DBGFREGSUBFIELD_RW("am", 18, 1, 0),
+ DBGFREGSUBFIELD_RW("nw", 29, 1, 0),
+ DBGFREGSUBFIELD_RW("cd", 30, 1, 0),
+ DBGFREGSUBFIELD_RW("pg", 31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the CR3 register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_cr3[] =
+{
+ DBGFREGSUBFIELD_RW("pwt", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("pcd", 4, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the CR4 register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_cr4[] =
+{
+ DBGFREGSUBFIELD_RW("vme", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("pvi", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("tsd", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("de", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("pse", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("pae", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("mce", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("pge", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("pce", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("osfxsr", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("osxmmeexcpt", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("vmxe", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("smxe", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("pcide", 17, 1, 0),
+ DBGFREGSUBFIELD_RW("osxsave", 18, 1, 0),
+ DBGFREGSUBFIELD_RW("smep", 20, 1, 0),
+ DBGFREGSUBFIELD_RW("smap", 21, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the DR6 register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_dr6[] =
+{
+ DBGFREGSUBFIELD_RW("b0", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("b1", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("b2", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("b3", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("bd", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("bs", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("bt", 15, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the DR7 register. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_dr7[] =
+{
+ DBGFREGSUBFIELD_RW("l0", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("g0", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("l1", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("g1", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("l2", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("g2", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("l3", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("g3", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("le", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("ge", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("gd", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("rw0", 16, 2, 0),
+ DBGFREGSUBFIELD_RW("len0", 18, 2, 0),
+ DBGFREGSUBFIELD_RW("rw1", 20, 2, 0),
+ DBGFREGSUBFIELD_RW("len1", 22, 2, 0),
+ DBGFREGSUBFIELD_RW("rw2", 24, 2, 0),
+ DBGFREGSUBFIELD_RW("len2", 26, 2, 0),
+ DBGFREGSUBFIELD_RW("rw3", 28, 2, 0),
+ DBGFREGSUBFIELD_RW("len3", 30, 2, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the APIC_BASE MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_apic_base[] =
+{
+ DBGFREGSUBFIELD_RW("bsp", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("ge", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("base", 12, 20, 12),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the CR_PAT MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_cr_pat[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the PERF_STATUS MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_perf_status[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the EFER MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_efer[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the STAR MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_star[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the CSTAR MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_cstar[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** Sub-fields for the LSTAR MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_lstar[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+#if 0 /** @todo */
+/** Sub-fields for the SF_MASK MSR. */
+static DBGFREGSUBFIELD const g_aCpumRegFields_sf_mask[] =
+{
+ /** @todo */
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+#endif
+
+
+/** @name Macros for producing register descriptor table entries.
+ * @{ */
+#define CPU_REG_EX_AS(a_szName, a_RegSuff, a_TypeSuff, a_offRegister, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \
+ { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, 0 /*fFlags*/, a_offRegister, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields }
+
+#define CPU_REG_REG(UName, LName) \
+ CPU_REG_RW_AS(#LName, UName, U64, LName, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_##LName, NULL)
+
+#define CPU_REG_SEG(UName, LName) \
+ CPU_REG_RW_AS(#LName, UName, U16, LName.Sel, cpumR3RegGet_Generic, cpumR3RegSet_seg, NULL, NULL ), \
+ CPU_REG_RW_AS(#LName "_attr", UName##_ATTR, U32, LName.Attr.u, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_seg), \
+ CPU_REG_RW_AS(#LName "_base", UName##_BASE, U64, LName.u64Base, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ), \
+ CPU_REG_RW_AS(#LName "_lim", UName##_LIMIT, U32, LName.u32Limit, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL )
+
+#define CPU_REG_MM(n) \
+ CPU_REG_XS_RW_AS("mm" #n, MM##n, U64, x87.aRegs[n].mmx, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mmN)
+
+#define CPU_REG_XMM(n) \
+ CPU_REG_XS_RW_AS("xmm" #n, XMM##n, U128, x87.aXMM[n].xmm, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_xmmN)
+
+#define CPU_REG_YMM(n) \
+ { "ymm" #n, DBGFREG_YMM##n, DBGFREGVALTYPE_U256, 0 /*fFlags*/, n, cpumR3RegGet_ymm, cpumR3RegSet_ymm, NULL /*paAliases*/, NULL /*paSubFields*/ }
+
+/** @} */
+
+
+/**
+ * The guest register descriptors.
+ */
+static DBGFREGDESC const g_aCpumRegGstDescs[] =
+{
+#define CPU_REG_RW_AS(a_szName, a_RegSuff, a_TypeSuff, a_CpumCtxMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \
+ { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, 0 /*fFlags*/, (uint32_t)RT_UOFFSETOF(CPUMCPU, Guest.a_CpumCtxMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields }
+#define CPU_REG_RO_AS(a_szName, a_RegSuff, a_TypeSuff, a_CpumCtxMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \
+ { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, (uint32_t)RT_UOFFSETOF(CPUMCPU, Guest.a_CpumCtxMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields }
+#define CPU_REG_XS_RW_AS(a_szName, a_RegSuff, a_TypeSuff, a_XStateMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \
+ { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, 0 /*fFlags*/, (uint32_t)RT_UOFFSETOF(X86XSAVEAREA, a_XStateMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields }
+#define CPU_REG_XS_RO_AS(a_szName, a_RegSuff, a_TypeSuff, a_XStateMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \
+ { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, (uint32_t)RT_UOFFSETOF(X86XSAVEAREA, a_XStateMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields }
+#define CPU_REG_MSR(a_szName, UName, a_TypeSuff, a_paSubFields) \
+ CPU_REG_EX_AS(a_szName, MSR_##UName, a_TypeSuff, MSR_##UName, cpumR3RegGstGet_msr, cpumR3RegGstSet_msr, NULL, a_paSubFields)
+#define CPU_REG_ST(n) \
+ CPU_REG_EX_AS("st" #n, ST##n, R80, n, cpumR3RegGstGet_stN, cpumR3RegGstSet_stN, NULL, g_aCpumRegFields_stN)
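+/* Note: the plain CPU_REG_* offsets are relative to CPUMCPU, while the
+   CPU_REG_XS_* offsets are relative to X86XSAVEAREA; the corresponding
+   getters/setters (cpumR3RegGet_Generic vs cpumR3RegGet_XStateGeneric)
+   apply them to the matching base structure. */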
+
+ CPU_REG_REG(RAX, rax),
+ CPU_REG_REG(RCX, rcx),
+ CPU_REG_REG(RDX, rdx),
+ CPU_REG_REG(RBX, rbx),
+ CPU_REG_REG(RSP, rsp),
+ CPU_REG_REG(RBP, rbp),
+ CPU_REG_REG(RSI, rsi),
+ CPU_REG_REG(RDI, rdi),
+ CPU_REG_REG(R8, r8),
+ CPU_REG_REG(R9, r9),
+ CPU_REG_REG(R10, r10),
+ CPU_REG_REG(R11, r11),
+ CPU_REG_REG(R12, r12),
+ CPU_REG_REG(R13, r13),
+ CPU_REG_REG(R14, r14),
+ CPU_REG_REG(R15, r15),
+ CPU_REG_SEG(CS, cs),
+ CPU_REG_SEG(DS, ds),
+ CPU_REG_SEG(ES, es),
+ CPU_REG_SEG(FS, fs),
+ CPU_REG_SEG(GS, gs),
+ CPU_REG_SEG(SS, ss),
+ CPU_REG_REG(RIP, rip),
+ CPU_REG_RW_AS("rflags", RFLAGS, U64, rflags, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_rflags, g_aCpumRegFields_rflags ),
+ CPU_REG_XS_RW_AS("fcw", FCW, U16, x87.FCW, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_fcw ),
+ CPU_REG_XS_RW_AS("fsw", FSW, U16, x87.FSW, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_fsw ),
+ CPU_REG_XS_RO_AS("ftw", FTW, U16, x87, cpumR3RegGet_ftw, cpumR3RegSet_ftw, NULL, g_aCpumRegFields_ftw ),
+ CPU_REG_XS_RW_AS("fop", FOP, U16, x87.FOP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ),
+ CPU_REG_XS_RW_AS("fpuip", FPUIP, U32, x87.FPUIP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, g_aCpumRegAliases_fpuip, NULL ),
+ CPU_REG_XS_RW_AS("fpucs", FPUCS, U16, x87.CS, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ),
+ CPU_REG_XS_RW_AS("fpudp", FPUDP, U32, x87.FPUDP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, g_aCpumRegAliases_fpudp, NULL ),
+ CPU_REG_XS_RW_AS("fpuds", FPUDS, U16, x87.DS, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ),
+ CPU_REG_XS_RW_AS("mxcsr", MXCSR, U32, x87.MXCSR, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mxcsr ),
+ CPU_REG_XS_RW_AS("mxcsr_mask", MXCSR_MASK, U32, x87.MXCSR_MASK, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mxcsr ),
+ CPU_REG_ST(0),
+ CPU_REG_ST(1),
+ CPU_REG_ST(2),
+ CPU_REG_ST(3),
+ CPU_REG_ST(4),
+ CPU_REG_ST(5),
+ CPU_REG_ST(6),
+ CPU_REG_ST(7),
+ CPU_REG_MM(0),
+ CPU_REG_MM(1),
+ CPU_REG_MM(2),
+ CPU_REG_MM(3),
+ CPU_REG_MM(4),
+ CPU_REG_MM(5),
+ CPU_REG_MM(6),
+ CPU_REG_MM(7),
+ CPU_REG_XMM(0),
+ CPU_REG_XMM(1),
+ CPU_REG_XMM(2),
+ CPU_REG_XMM(3),
+ CPU_REG_XMM(4),
+ CPU_REG_XMM(5),
+ CPU_REG_XMM(6),
+ CPU_REG_XMM(7),
+ CPU_REG_XMM(8),
+ CPU_REG_XMM(9),
+ CPU_REG_XMM(10),
+ CPU_REG_XMM(11),
+ CPU_REG_XMM(12),
+ CPU_REG_XMM(13),
+ CPU_REG_XMM(14),
+ CPU_REG_XMM(15),
+ CPU_REG_YMM(0),
+ CPU_REG_YMM(1),
+ CPU_REG_YMM(2),
+ CPU_REG_YMM(3),
+ CPU_REG_YMM(4),
+ CPU_REG_YMM(5),
+ CPU_REG_YMM(6),
+ CPU_REG_YMM(7),
+ CPU_REG_YMM(8),
+ CPU_REG_YMM(9),
+ CPU_REG_YMM(10),
+ CPU_REG_YMM(11),
+ CPU_REG_YMM(12),
+ CPU_REG_YMM(13),
+ CPU_REG_YMM(14),
+ CPU_REG_YMM(15),
+ CPU_REG_RW_AS("gdtr_base", GDTR_BASE, U64, gdtr.pGdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_RW_AS("gdtr_lim", GDTR_LIMIT, U16, gdtr.cbGdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_RW_AS("idtr_base", IDTR_BASE, U64, idtr.pIdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_RW_AS("idtr_lim", IDTR_LIMIT, U16, idtr.cbIdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_SEG(LDTR, ldtr),
+ CPU_REG_SEG(TR, tr),
+ CPU_REG_EX_AS("cr0", CR0, U32, 0, cpumR3RegGstGet_crX, cpumR3RegGstSet_crX, g_aCpumRegAliases_cr0, g_aCpumRegFields_cr0 ),
+ CPU_REG_EX_AS("cr2", CR2, U64, 2, cpumR3RegGstGet_crX, cpumR3RegGstSet_crX, NULL, NULL ),
+ CPU_REG_EX_AS("cr3", CR3, U64, 3, cpumR3RegGstGet_crX, cpumR3RegGstSet_crX, NULL, g_aCpumRegFields_cr3 ),
+ CPU_REG_EX_AS("cr4", CR4, U32, 4, cpumR3RegGstGet_crX, cpumR3RegGstSet_crX, NULL, g_aCpumRegFields_cr4 ),
+ CPU_REG_EX_AS("cr8", CR8, U32, 8, cpumR3RegGstGet_crX, cpumR3RegGstSet_crX, NULL, NULL ),
+ CPU_REG_EX_AS("dr0", DR0, U64, 0, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, NULL ),
+ CPU_REG_EX_AS("dr1", DR1, U64, 1, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, NULL ),
+ CPU_REG_EX_AS("dr2", DR2, U64, 2, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, NULL ),
+ CPU_REG_EX_AS("dr3", DR3, U64, 3, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, NULL ),
+ CPU_REG_EX_AS("dr6", DR6, U32, 6, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, g_aCpumRegFields_dr6 ),
+ CPU_REG_EX_AS("dr7", DR7, U32, 7, cpumR3RegGstGet_drX, cpumR3RegGstSet_drX, NULL, g_aCpumRegFields_dr7 ),
+ CPU_REG_MSR("apic_base", IA32_APICBASE, U32, g_aCpumRegFields_apic_base ),
+ CPU_REG_MSR("pat", IA32_CR_PAT, U64, g_aCpumRegFields_cr_pat ),
+ CPU_REG_MSR("perf_status", IA32_PERF_STATUS, U64, g_aCpumRegFields_perf_status),
+ CPU_REG_MSR("sysenter_cs", IA32_SYSENTER_CS, U16, NULL ),
+ CPU_REG_MSR("sysenter_eip", IA32_SYSENTER_EIP, U64, NULL ),
+ CPU_REG_MSR("sysenter_esp", IA32_SYSENTER_ESP, U64, NULL ),
+ CPU_REG_MSR("tsc", IA32_TSC, U32, NULL ),
+ CPU_REG_MSR("efer", K6_EFER, U32, g_aCpumRegFields_efer ),
+ CPU_REG_MSR("star", K6_STAR, U64, g_aCpumRegFields_star ),
+ CPU_REG_MSR("cstar", K8_CSTAR, U64, g_aCpumRegFields_cstar ),
+ CPU_REG_MSR("msr_fs_base", K8_FS_BASE, U64, NULL ),
+ CPU_REG_MSR("msr_gs_base", K8_GS_BASE, U64, NULL ),
+ CPU_REG_MSR("krnl_gs_base", K8_KERNEL_GS_BASE, U64, NULL ),
+ CPU_REG_MSR("lstar", K8_LSTAR, U64, g_aCpumRegFields_lstar ),
+ CPU_REG_MSR("sf_mask", K8_SF_MASK, U64, NULL ),
+ CPU_REG_MSR("tsc_aux", K8_TSC_AUX, U64, NULL ),
+ CPU_REG_EX_AS("ah", AH, U8, RT_OFFSETOF(CPUMCPU, Guest.rax) + 1, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_EX_AS("ch", CH, U8, RT_OFFSETOF(CPUMCPU, Guest.rcx) + 1, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_EX_AS("dh", DH, U8, RT_OFFSETOF(CPUMCPU, Guest.rdx) + 1, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_EX_AS("bh", BH, U8, RT_OFFSETOF(CPUMCPU, Guest.rbx) + 1, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),
+ CPU_REG_RW_AS("gdtr", GDTR, DTR, gdtr, cpumR3RegGet_gdtr, cpumR3RegSet_gdtr, NULL, NULL ),
+ CPU_REG_RW_AS("idtr", IDTR, DTR, idtr, cpumR3RegGet_idtr, cpumR3RegSet_idtr, NULL, NULL ),
+ DBGFREGDESC_TERMINATOR()
+
+#undef CPU_REG_RW_AS
+#undef CPU_REG_RO_AS
+#undef CPU_REG_MSR
+#undef CPU_REG_ST
+};
+
+
+/**
+ * Initializes the debugger related sides of the CPUM component.
+ *
+ * Called by CPUMR3Init.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int cpumR3DbgInit(PVM pVM)
+{
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ int rc = DBGFR3RegRegisterCpu(pVM, pVM->apCpusR3[idCpu], g_aCpumRegGstDescs, true /*fGuestRegs*/);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ return VINF_SUCCESS;
+}
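+
+/* Once registered, the descriptors above are reachable through the DBGF
+   register API; e.g. reading the guest RIP of VCPU 0 (usage sketch):
+
+       uint64_t uRip = 0;
+       int rc = DBGFR3RegCpuQueryU64(pVM->pUVM, 0 /*idCpu*/, DBGFREG_RIP, &uRip);
+*/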
+
diff --git a/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
new file mode 100644
index 00000000..a2288250
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
@@ -0,0 +1,5902 @@
+/* $Id: CPUMR3CpuId.cpp $ */
+/** @file
+ * CPUM - CPU ID part.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/ssm.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/sup.h>
+
+#include <VBox/err.h>
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/ctype.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/x86-helpers.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** For sanity, and to avoid wasting hyper heap on buggy config / saved state. */
+#define CPUM_CPUID_MAX_LEAVES 2048
+
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+/**
+ * Determines the host CPU MXCSR mask.
+ *
+ * @returns MXCSR mask.
+ */
+VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void)
+{
+ if ( ASMHasCpuId()
+ && RTX86IsValidStdRange(ASMCpuId_EAX(0))
+ && ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_FXSR)
+ {
+ uint8_t volatile abBuf[sizeof(X86FXSTATE) + 64];
+ PX86FXSTATE pState = (PX86FXSTATE)&abBuf[64 - ((uintptr_t)&abBuf[0] & 63)];
+ RT_ZERO(*pState);
+ ASMFxSave(pState);
+ if (pState->MXCSR_MASK == 0)
+ return 0xffbf;
+ return pState->MXCSR_MASK;
+ }
+ return 0;
+}
+#endif
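+
+/* Note on the buffer gymnastics above: FXSAVE requires a 16-byte aligned
+   512 byte area, so the function over-allocates by 64 bytes and rounds the
+   pointer up to the next 64 byte boundary. A minimal sketch of the same
+   alignment idiom (standalone, names are illustrative):
+
+       uint8_t abRaw[sizeof(X86FXSTATE) + 64];
+       PX86FXSTATE pAligned = (PX86FXSTATE)(((uintptr_t)&abRaw[0] + 63) & ~(uintptr_t)63);
+
+   The 0xffbf fallback is the architectural default mask reported when the
+   saved MXCSR_MASK field reads as zero; it clears bit 6 (DAZ), which the
+   oldest SSE-capable CPUs do not implement. */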
+
+
+
+#ifndef IN_VBOX_CPU_REPORT
+/**
+ * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
+ *
+ * @returns true if found, false if not.
+ * @param paLeaves The CPUID leaves to search. This is sorted.
+ * @param cLeaves The number of leaves in the array.
+ * @param uLeaf The leaf to locate.
+ * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
+ * @param pLegacy The legacy output leaf.
+ */
+static bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
+ PCPUMCPUID pLegacy)
+{
+ PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, uLeaf, uSubLeaf);
+ if (pLeaf)
+ {
+ pLegacy->uEax = pLeaf->uEax;
+ pLegacy->uEbx = pLeaf->uEbx;
+ pLegacy->uEcx = pLeaf->uEcx;
+ pLegacy->uEdx = pLeaf->uEdx;
+ return true;
+ }
+ return false;
+}
+#endif /* IN_VBOX_CPU_REPORT */
+
+
+/**
+ * Inserts a CPU ID leaf, replacing any existing ones.
+ *
+ * When inserting a simple leaf where we've already got a series of sub-leaves with
+ * the same leaf number (eax), the simple leaf will replace the whole series.
+ *
+ * When pVM is NULL, this ASSUMES that the leaves array is still on the normal
+ * host-context heap and has only been allocated/reallocated by the
+ * cpumCpuIdEnsureSpace function.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure. If NULL, use
+ * the process heap, otherwise the VM's hyper heap.
+ * @param ppaLeaves Pointer to the pointer to the array of sorted
+ * CPUID leaves and sub-leaves. Must be NULL if using
+ * the hyper heap.
+ * @param pcLeaves Where we keep the leaf count for *ppaLeaves. Must
+ * be NULL if using the hyper heap.
+ * @param pNewLeaf Pointer to the data of the new leaf we're about to
+ * insert.
+ */
+static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
+{
+ /*
+     * Validate the input parameters if we are using the hyper heap, and switch to the VM's CPUID arrays.
+ */
+ if (pVM)
+ {
+ AssertReturn(!ppaLeaves, VERR_INVALID_PARAMETER);
+ AssertReturn(!pcLeaves, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3 == pVM->cpum.s.GuestInfo.aCpuIdLeaves, VERR_INVALID_PARAMETER);
+
+ ppaLeaves = &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
+ pcLeaves = &pVM->cpum.s.GuestInfo.cCpuIdLeaves;
+ }
+
+ PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
+ uint32_t cLeaves = *pcLeaves;
+
+ /*
+ * Validate the new leaf a little.
+ */
+ AssertLogRelMsgReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_VALID_MASK),
+ ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fFlags),
+ VERR_INVALID_FLAGS);
+ AssertLogRelMsgReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0,
+ ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
+ VERR_INVALID_PARAMETER);
+ AssertLogRelMsgReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1),
+ ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
+ VERR_INVALID_PARAMETER);
+ AssertLogRelMsgReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf,
+ ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Find insertion point. The lazy bird uses the same excuse as in
+ * cpumCpuIdGetLeaf(), but optimizes for linear insertion (saved state).
+ */
+ uint32_t i;
+ if ( cLeaves > 0
+ && paLeaves[cLeaves - 1].uLeaf < pNewLeaf->uLeaf)
+ {
+ /* Add at end. */
+ i = cLeaves;
+ }
+ else if ( cLeaves > 0
+ && paLeaves[cLeaves - 1].uLeaf == pNewLeaf->uLeaf)
+ {
+ /* Either replacing the last leaf or dealing with sub-leaves. Spool
+ back to the first sub-leaf to pretend we did the linear search. */
+ i = cLeaves - 1;
+ while ( i > 0
+ && paLeaves[i - 1].uLeaf == pNewLeaf->uLeaf)
+ i--;
+ }
+ else
+ {
+ /* Linear search from the start. */
+ i = 0;
+ while ( i < cLeaves
+ && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
+ i++;
+ }
+ if ( i < cLeaves
+ && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
+ {
+ if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
+ {
+ /*
+ * The sub-leaf mask differs, replace all existing leaves with the
+ * same leaf number.
+ */
+ uint32_t c = 1;
+ while ( i + c < cLeaves
+ && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf)
+ c++;
+ if (c > 1 && i + c < cLeaves)
+ {
+ memmove(&paLeaves[i + c], &paLeaves[i + 1], (cLeaves - i - c) * sizeof(paLeaves[0]));
+ *pcLeaves = cLeaves -= c - 1;
+ }
+
+ paLeaves[i] = *pNewLeaf;
+#ifdef VBOX_STRICT
+ cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
+#endif
+ return VINF_SUCCESS;
+ }
+
+ /* Find sub-leaf insertion point. */
+ while ( i < cLeaves
+ && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf
+ && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
+ i++;
+
+ /*
+ * If we've got an exactly matching leaf, replace it.
+ */
+ if ( i < cLeaves
+ && paLeaves[i].uLeaf == pNewLeaf->uLeaf
+ && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
+ {
+ paLeaves[i] = *pNewLeaf;
+#ifdef VBOX_STRICT
+ cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
+#endif
+ return VINF_SUCCESS;
+ }
+ }
+
+ /*
+ * Adding a new leaf at 'i'.
+ */
+ AssertLogRelReturn(cLeaves < CPUM_CPUID_MAX_LEAVES, VERR_TOO_MANY_CPUID_LEAVES);
+ paLeaves = cpumCpuIdEnsureSpace(pVM, ppaLeaves, cLeaves);
+ if (!paLeaves)
+ return VERR_NO_MEMORY;
+
+ if (i < cLeaves)
+ memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
+ *pcLeaves += 1;
+ paLeaves[i] = *pNewLeaf;
+
+#ifdef VBOX_STRICT
+ cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
+#endif
+ return VINF_SUCCESS;
+}
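+
+/* Usage sketch for the insert routine above (the leaf number and register
+   values are illustrative only):
+
+       CPUMCPUIDLEAF NewLeaf;
+       RT_ZERO(NewLeaf);
+       NewLeaf.uLeaf = UINT32_C(0x40000010); /* hypothetical leaf */
+       NewLeaf.uEax  = UINT32_C(0x12345678);
+       int rc = cpumR3CpuIdInsert(pVM, NULL /*ppaLeaves*/, NULL /*pcLeaves*/, &NewLeaf);
+
+   With uSubLeaf, fSubLeafMask and fFlags all zero this passes the validation
+   above and, being a simple leaf, replaces any existing series of sub-leaves
+   sharing the same leaf number. */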
+
+
+#ifndef IN_VBOX_CPU_REPORT
+/**
+ * Removes a range of CPUID leaves.
+ *
+ * This will not reallocate the array.
+ *
+ * @param paLeaves The array of sorted CPUID leaves and sub-leaves.
+ * @param pcLeaves Where we keep the leaf count for @a paLeaves.
+ * @param uFirst The first leaf.
+ * @param uLast The last leaf.
+ */
+static void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
+{
+ uint32_t cLeaves = *pcLeaves;
+
+ Assert(uFirst <= uLast);
+
+ /*
+ * Find the first one.
+ */
+ uint32_t iFirst = 0;
+ while ( iFirst < cLeaves
+ && paLeaves[iFirst].uLeaf < uFirst)
+ iFirst++;
+
+ /*
+ * Find the end (last + 1).
+ */
+ uint32_t iEnd = iFirst;
+ while ( iEnd < cLeaves
+ && paLeaves[iEnd].uLeaf <= uLast)
+ iEnd++;
+
+ /*
+ * Adjust the array if anything needs removing.
+ */
+ if (iFirst < iEnd)
+ {
+ if (iEnd < cLeaves)
+ memmove(&paLeaves[iFirst], &paLeaves[iEnd], (cLeaves - iEnd) * sizeof(paLeaves[0]));
+ *pcLeaves = cLeaves -= (iEnd - iFirst);
+ }
+
+# ifdef VBOX_STRICT
+ cpumCpuIdAssertOrder(paLeaves, *pcLeaves);
+# endif
+}
+#endif /* IN_VBOX_CPU_REPORT */
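+
+/* E.g. cpumR3CpuIdRemoveRange(paLeaves, &cLeaves, UINT32_C(0x80000005),
+   UINT32_C(0x80000006)) would drop the AMD cache information leaves while
+   leaving the array allocation itself untouched. */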
+
+
+/**
+ * Gets a CPU ID leaf.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pLeaf Where to store the found leaf.
+ * @param uLeaf The leaf to locate.
+ * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
+ */
+VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf)
+{
+ PCPUMCPUIDLEAF pcLeaf = cpumCpuIdGetLeafInt(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
+ uLeaf, uSubLeaf);
+ if (pcLeaf)
+ {
+ memcpy(pLeaf, pcLeaf, sizeof(*pLeaf));
+ return VINF_SUCCESS;
+ }
+
+ return VERR_NOT_FOUND;
+}
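+
+/* Usage sketch (assuming an initialized pVM): fetch a copy of standard leaf 1
+   and test a feature bit on it:
+
+       CPUMCPUIDLEAF Leaf;
+       int rc = CPUMR3CpuIdGetLeaf(pVM, &Leaf, 1 /*uLeaf*/, 0 /*uSubLeaf*/);
+       if (RT_SUCCESS(rc) && (Leaf.uEdx & X86_CPUID_FEATURE_EDX_SSE2))
+           LogRel(("CPUM: guest CPUID reports SSE2\n"));
+
+   The caller gets a copy, so editing it has no effect until it is fed back
+   via CPUMR3CpuIdInsert. */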
+
+
+/**
+ * Gets all the leaves.
+ *
+ * This only works after the CPUID leaves have been initialized. The interface
+ * is intended for NEM and configuring CPUID leaves for the native hypervisor.
+ *
+ * @returns Pointer to the array of leaves. NULL on failure.
+ * @param pVM The cross context VM structure.
+ * @param pcLeaves Where to return the number of leaves.
+ */
+VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves)
+{
+ *pcLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
+ return pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
+}
+
+
+/**
+ * Inserts a CPU ID leaf, replacing any existing ones.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pNewLeaf Pointer to the leaf being inserted.
+ */
+VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf)
+{
+ /*
+ * Validate parameters.
+ */
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pNewLeaf, VERR_INVALID_PARAMETER);
+
+ /*
+ * Disallow replacing CPU ID leaves that this API currently cannot manage.
+ * These leaves have dependencies on saved-states, see PATMCpuidReplacement().
+ * If you want to modify these leaves, use CPUMSetGuestCpuIdFeature().
+ */
+ if ( pNewLeaf->uLeaf == UINT32_C(0x00000000) /* Standard */
+ || pNewLeaf->uLeaf == UINT32_C(0x00000001)
+ || pNewLeaf->uLeaf == UINT32_C(0x80000000) /* Extended */
+ || pNewLeaf->uLeaf == UINT32_C(0x80000001)
+ || pNewLeaf->uLeaf == UINT32_C(0xc0000000) /* Centaur */
+ || pNewLeaf->uLeaf == UINT32_C(0xc0000001) )
+ {
+ return VERR_NOT_SUPPORTED;
+ }
+
+ return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
+}
+
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+/**
+ * Determines the method the CPU uses to handle unknown CPUID leaves.
+ *
+ * @returns VBox status code.
+ * @param penmUnknownMethod Where to return the method.
+ * @param pDefUnknown Where to return default unknown values. This
+ * will be set, even if the resulting method
+ * doesn't actually need it.
+ */
+VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
+{
+ uint32_t uLastStd = ASMCpuId_EAX(0);
+ uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
+ if (!RTX86IsValidExtRange(uLastExt))
+ uLastExt = 0x80000000;
+
+ uint32_t auChecks[] =
+ {
+ uLastStd + 1,
+ uLastStd + 5,
+ uLastStd + 8,
+ uLastStd + 32,
+ uLastStd + 251,
+ uLastExt + 1,
+ uLastExt + 8,
+ uLastExt + 15,
+ uLastExt + 63,
+ uLastExt + 255,
+ 0x7fbbffcc,
+ 0x833f7872,
+ 0xefff2353,
+ 0x35779456,
+ 0x1ef6d33e,
+ };
+
+ static const uint32_t s_auValues[] =
+ {
+ 0xa95d2156,
+ 0x00000001,
+ 0x00000002,
+ 0x00000008,
+ 0x00000000,
+ 0x55773399,
+ 0x93401769,
+ 0x12039587,
+ };
+
+ /*
+ * Simple method, all zeros.
+ */
+ *penmUnknownMethod = CPUMUNKNOWNCPUID_DEFAULTS;
+ pDefUnknown->uEax = 0;
+ pDefUnknown->uEbx = 0;
+ pDefUnknown->uEcx = 0;
+ pDefUnknown->uEdx = 0;
+
+ /*
+ * Intel has been observed returning the last standard leaf.
+ */
+ uint32_t auLast[4];
+ ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
+
+ uint32_t cChecks = RT_ELEMENTS(auChecks);
+ while (cChecks > 0)
+ {
+ uint32_t auCur[4];
+ ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+ if (memcmp(auCur, auLast, sizeof(auCur)))
+ break;
+ cChecks--;
+ }
+ if (cChecks == 0)
+ {
+ /* Now, what happens when the input changes? Esp. ECX. */
+ uint32_t cTotal = 0;
+ uint32_t cSame = 0;
+ uint32_t cLastWithEcx = 0;
+ uint32_t cNeither = 0;
+ uint32_t cValues = RT_ELEMENTS(s_auValues);
+ while (cValues > 0)
+ {
+ uint32_t uValue = s_auValues[cValues - 1];
+ uint32_t auLastWithEcx[4];
+ ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
+ &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
+
+ cChecks = RT_ELEMENTS(auChecks);
+ while (cChecks > 0)
+ {
+ uint32_t auCur[4];
+ ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+ if (!memcmp(auCur, auLast, sizeof(auCur)))
+ {
+ cSame++;
+ if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+ cLastWithEcx++;
+ }
+ else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+ cLastWithEcx++;
+ else
+ cNeither++;
+ cTotal++;
+ cChecks--;
+ }
+ cValues--;
+ }
+
+ Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
+ if (cSame == cTotal)
+ *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
+ else if (cLastWithEcx == cTotal)
+ *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
+ else
+ *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
+ pDefUnknown->uEax = auLast[0];
+ pDefUnknown->uEbx = auLast[1];
+ pDefUnknown->uEcx = auLast[2];
+ pDefUnknown->uEdx = auLast[3];
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Unchanged register values?
+ */
+ cChecks = RT_ELEMENTS(auChecks);
+ while (cChecks > 0)
+ {
+ uint32_t const uLeaf = auChecks[cChecks - 1];
+ uint32_t cValues = RT_ELEMENTS(s_auValues);
+ while (cValues > 0)
+ {
+ uint32_t uValue = s_auValues[cValues - 1];
+ uint32_t auCur[4];
+ ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+ if ( auCur[0] != uLeaf
+ || auCur[1] != uValue
+ || auCur[2] != uValue
+ || auCur[3] != uValue)
+ break;
+ cValues--;
+ }
+ if (cValues != 0)
+ break;
+ cChecks--;
+ }
+ if (cChecks == 0)
+ {
+ *penmUnknownMethod = CPUMUNKNOWNCPUID_PASSTHRU;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Just go with the simple method.
+ */
+ return VINF_SUCCESS;
+}
+#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */
+
+
+/**
+ * Translates an unknown CPUID leaf method into the constant name (sans prefix).
+ *
+ * @returns Read-only name string.
+ * @param enmUnknownMethod The method to translate.
+ */
+VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod)
+{
+ switch (enmUnknownMethod)
+ {
+ case CPUMUNKNOWNCPUID_DEFAULTS: return "DEFAULTS";
+ case CPUMUNKNOWNCPUID_LAST_STD_LEAF: return "LAST_STD_LEAF";
+ case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: return "LAST_STD_LEAF_WITH_ECX";
+ case CPUMUNKNOWNCPUID_PASSTHRU: return "PASSTHRU";
+
+ case CPUMUNKNOWNCPUID_INVALID:
+ case CPUMUNKNOWNCPUID_END:
+ case CPUMUNKNOWNCPUID_32BIT_HACK:
+ break;
+ }
+ return "Invalid-unknown-CPUID-method";
+}
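+
+/* Sketch of how the two APIs above combine (illustrative logging only):
+
+       CPUMUNKNOWNCPUID enmMethod;
+       CPUMCPUID        DefUnknown;
+       int rc = CPUMR3CpuIdDetectUnknownLeafMethod(&enmMethod, &DefUnknown);
+       if (RT_SUCCESS(rc))
+           LogRel(("CPUM: Unknown CPUID leaf method: %s\n",
+                   CPUMR3CpuIdUnknownLeafMethodName(enmMethod)));
+
+   In short: probes that echo the last standard leaf regardless of input give
+   LAST_STD_LEAF / LAST_STD_LEAF_WITH_ECX, probes that return their inputs
+   unchanged give PASSTHRU, and anything else falls back to DEFAULTS. */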
+
+
+/*
+ *
+ * Init related code.
+ * Init related code.
+ * Init related code.
+ *
+ *
+ */
+#ifndef IN_VBOX_CPU_REPORT
+
+
+/**
+ * Gets an exactly matching leaf + sub-leaf in the CPUID leaf array.
+ *
+ * This ignores the fSubLeafMask.
+ *
+ * @returns Pointer to the matching leaf, or NULL if not found.
+ * @param pCpum The CPUM instance data.
+ * @param uLeaf The leaf to locate.
+ * @param uSubLeaf The subleaf to locate.
+ */
+static PCPUMCPUIDLEAF cpumR3CpuIdGetExactLeaf(PCPUM pCpum, uint32_t uLeaf, uint32_t uSubLeaf)
+{
+ uint64_t uNeedle = RT_MAKE_U64(uSubLeaf, uLeaf);
+ PCPUMCPUIDLEAF paLeaves = pCpum->GuestInfo.paCpuIdLeavesR3;
+ uint32_t iEnd = pCpum->GuestInfo.cCpuIdLeaves;
+ if (iEnd)
+ {
+ uint32_t iBegin = 0;
+ for (;;)
+ {
+ uint32_t const i = (iEnd - iBegin) / 2 + iBegin;
+ uint64_t const uCur = RT_MAKE_U64(paLeaves[i].uSubLeaf, paLeaves[i].uLeaf);
+ if (uNeedle < uCur)
+ {
+ if (i > iBegin)
+ iEnd = i;
+ else
+ break;
+ }
+ else if (uNeedle > uCur)
+ {
+ if (i + 1 < iEnd)
+ iBegin = i + 1;
+ else
+ break;
+ }
+ else
+ return &paLeaves[i];
+ }
+ }
+ return NULL;
+}
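+
+/* The binary search above folds (leaf, sub-leaf) into one 64-bit key with the
+   leaf in the high half, matching the array's sort order. E.g.:
+
+       leaf 0x00000007, sub-leaf 0x00000001  ->  key 0x0000000700000001
+
+   so all sub-leaves of a leaf are contiguous and ordered by sub-leaf. */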
+
+
+/**
+ * Loads MSR range overrides.
+ *
+ * This must be called before the MSR ranges are moved from the normal heap to
+ * the hyper heap!
+ *
+ * @returns VBox status code (VMSetError called).
+ * @param pVM The cross context VM structure.
+ * @param pMsrNode The CFGM node with the MSR overrides.
+ */
+static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
+{
+ for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+ {
+ /*
+ * Assemble a valid MSR range.
+ */
+ CPUMMSRRANGE MsrRange;
+ MsrRange.offCpumCpu = 0;
+ MsrRange.fReserved = 0;
+
+ int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
+
+ rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ char szType[32];
+ rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
+ MsrRange.szName, rc);
+ if (!RTStrICmp(szType, "FixedValue"))
+ {
+ MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
+ MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
+
+ rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
+ MsrRange.szName, rc);
+ }
+ else
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+ "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
+
+ /*
+ * Insert the range into the table (replaces/splits/shrinks existing
+ * MSR ranges).
+ */
+ rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
+ &MsrRange);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
+ }
+
+ return VINF_SUCCESS;
+}
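+
+/* Config sketch: the MSR override node is supplied by the caller (the CFGM
+   path /CPUM/MSRs is assumed here). A fixed-value MSR with ignored writes
+   could then be wired up from extradata like this (VM name and MSR number
+   are illustrative):
+
+       VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/Example/First" 0x00000123
+       VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/Example/Type"  "FixedValue"
+       VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/Example/Value" 0x1
+
+   'Last' defaults to 'First', and the GP/ignore write masks default to zero,
+   per the CFGMR3Query*Def calls above. */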
+
+
+/**
+ * Loads CPUID leaf overrides.
+ *
+ * This must be called before the CPUID leaves are moved from the normal
+ * heap to the hyper heap!
+ *
+ * @returns VBox status code (VMSetError called).
+ * @param pVM The cross context VM structure.
+ * @param pParentNode The CFGM node with the CPUID leaves.
+ * @param pszLabel How to label the overrides we're loading.
+ */
+static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
+{
+ for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+ {
+ /*
+ * Get the leaf and subleaf numbers.
+ */
+ char szName[128];
+ int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
+
+ /* The leaf number is either specified directly or thru the node name. */
+ uint32_t uLeaf;
+ rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
+ rc = RTStrToUInt32Full(szName, 16, &uLeaf);
+ if (rc != VINF_SUCCESS)
+ return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
+ "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
+ }
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ uint32_t uSubLeaf;
+ rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ uint32_t fSubLeafMask;
+ rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ /*
+ * Look up the specified leaf, since the output register values
+ * default to any existing values. This allows overriding a single
+ * register without needing to know the other values.
+ */
+ PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, uLeaf, uSubLeaf);
+ CPUMCPUIDLEAF Leaf;
+ if (pLeaf)
+ Leaf = *pLeaf;
+ else
+ RT_ZERO(Leaf);
+ Leaf.uLeaf = uLeaf;
+ Leaf.uSubLeaf = uSubLeaf;
+ Leaf.fSubLeafMask = fSubLeafMask;
+
+ rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ /*
+ * Insert the leaf into the table (replaces existing ones).
+ */
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
+ &Leaf);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
+ }
+
+ return VINF_SUCCESS;
+}
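+
+/* Config sketch (the parent node comes from the caller, e.g. /CPUM/HostCPUID/
+   or /CPUM/CPUID/ as noted below): overriding just ECX of leaf 1 while
+   inheriting the other registers from the existing leaf could look like:
+
+       VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/HostCPUID/1/ecx" 0x80000000
+
+   The child node name ('1') doubles as the hex leaf number when no explicit
+   'Leaf' value is given, per the code above. */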
+
+
+
+/**
+ * Fetches overrides for a CPUID leaf.
+ *
+ * @returns VBox status code.
+ * @param pLeaf The leaf to load the overrides into.
+ * @param pCfgNode The CFGM node containing the overrides
+ * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
+ * @param iLeaf The CPUID leaf number.
+ */
+static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
+{
+ PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
+ if (pLeafNode)
+ {
+ uint32_t u32;
+ int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
+ if (RT_SUCCESS(rc))
+ pLeaf->uEax = u32;
+ else
+ AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
+
+ rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
+ if (RT_SUCCESS(rc))
+ pLeaf->uEbx = u32;
+ else
+ AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
+
+ rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
+ if (RT_SUCCESS(rc))
+ pLeaf->uEcx = u32;
+ else
+ AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
+
+ rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
+ if (RT_SUCCESS(rc))
+ pLeaf->uEdx = u32;
+ else
+ AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
+
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Load the overrides for a set of CPUID leaves.
+ *
+ * @returns VBox status code.
+ * @param paLeaves The leaf array.
+ * @param cLeaves The number of leaves.
+ * @param uStart The start leaf number.
+ * @param pCfgNode The CFGM node containing the overrides
+ * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
+ */
+static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
+{
+ for (uint32_t i = 0; i < cLeaves; i++)
+ {
+ int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Installs the CPUID leaves and explodes the data into structures like
+ * GuestFeatures and CPUMCTX::aoffXState.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCpum The CPUM part of @a pVM.
+ * @param paLeaves The leaves. These will be copied (but not freed).
+ * @param cLeaves The number of leaves.
+ * @param pMsrs The MSRs.
+ */
+static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
+{
+# ifdef VBOX_STRICT
+ cpumCpuIdAssertOrder(paLeaves, cLeaves);
+# endif
+
+ /*
+ * Install the CPUID information.
+ */
+ AssertLogRelMsgReturn(cLeaves <= RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves),
+ ("cLeaves=%u - max %u\n", cLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves)),
+ VERR_CPUM_IPE_1); /** @todo better status! */
+ if (paLeaves != pCpum->GuestInfo.aCpuIdLeaves)
+ memcpy(pCpum->GuestInfo.aCpuIdLeaves, paLeaves, cLeaves * sizeof(paLeaves[0]));
+ pCpum->GuestInfo.paCpuIdLeavesR3 = pCpum->GuestInfo.aCpuIdLeaves;
+ pCpum->GuestInfo.cCpuIdLeaves = cLeaves;
+
+ /*
+ * Update the default CPUID leaf if necessary.
+ */
+ switch (pCpum->GuestInfo.enmUnknownCpuIdMethod)
+ {
+ case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
+ case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
+ {
+ /* We don't use CPUID(0).eax here because of the NT hack that only
+ changes that value without actually removing any leaves. */
+ uint32_t i = 0;
+ if ( pCpum->GuestInfo.cCpuIdLeaves > 0
+ && pCpum->GuestInfo.paCpuIdLeavesR3[0].uLeaf <= UINT32_C(0xff))
+ {
+ while ( i + 1 < pCpum->GuestInfo.cCpuIdLeaves
+ && pCpum->GuestInfo.paCpuIdLeavesR3[i + 1].uLeaf <= UINT32_C(0xff))
+ i++;
+ pCpum->GuestInfo.DefCpuId.uEax = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEax;
+ pCpum->GuestInfo.DefCpuId.uEbx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEbx;
+ pCpum->GuestInfo.DefCpuId.uEcx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEcx;
+ pCpum->GuestInfo.DefCpuId.uEdx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEdx;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * Explode the guest CPU features.
+ */
+ int rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
+ &pCpum->GuestFeatures);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Adjust the scalable bus frequency according to the CPUID information
+ * we're now using.
+ */
+ if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
+ pCpum->GuestInfo.uScalableBusFreq = pCpum->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+ ? UINT64_C(100000000) /* 100MHz */
+ : UINT64_C(133333333); /* 133MHz */
+
+ /*
+ * Populate the legacy arrays. Currently used for everything, later only
+ * for patch manager.
+ */
+ struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
+ {
+ { pCpum->aGuestCpuIdPatmStd, RT_ELEMENTS(pCpum->aGuestCpuIdPatmStd), 0x00000000 },
+ { pCpum->aGuestCpuIdPatmExt, RT_ELEMENTS(pCpum->aGuestCpuIdPatmExt), 0x80000000 },
+ { pCpum->aGuestCpuIdPatmCentaur, RT_ELEMENTS(pCpum->aGuestCpuIdPatmCentaur), 0xc0000000 },
+ };
+ for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
+ {
+ uint32_t cLeft = aOldRanges[i].cCpuIds;
+ uint32_t uLeaf = aOldRanges[i].uBase + cLeft;
+ PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
+ while (cLeft-- > 0)
+ {
+ uLeaf--;
+ pLegacyLeaf--;
+
+ PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, 0 /* uSubLeaf */);
+ if (pLeaf)
+ {
+ pLegacyLeaf->uEax = pLeaf->uEax;
+ pLegacyLeaf->uEbx = pLeaf->uEbx;
+ pLegacyLeaf->uEcx = pLeaf->uEcx;
+ pLegacyLeaf->uEdx = pLeaf->uEdx;
+ }
+ else
+ *pLegacyLeaf = pCpum->GuestInfo.DefCpuId;
+ }
+ }
+
+ /*
+ * Configure XSAVE offsets according to the CPUID info and set the feature flags.
+ */
+ PVMCPU pVCpu0 = pVM->apCpusR3[0];
+ AssertCompile(sizeof(pVCpu0->cpum.s.Guest.abXState) == CPUM_MAX_XSAVE_AREA_SIZE);
+ memset(&pVCpu0->cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVCpu0->cpum.s.Guest.aoffXState));
+ pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_X87_BIT] = 0;
+ pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_SSE_BIT] = 0;
+ for (uint32_t iComponent = XSAVE_C_SSE_BIT + 1; iComponent < 63; iComponent++)
+ if (pCpum->fXStateGuestMask & RT_BIT_64(iComponent))
+ {
+ PCPUMCPUIDLEAF pSubLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0xd, iComponent);
+ AssertLogRelMsgReturn(pSubLeaf, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
+ AssertLogRelMsgReturn(pSubLeaf->fSubLeafMask >= iComponent, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
+ AssertLogRelMsgReturn( pSubLeaf->uEax > 0
+ && pSubLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
+ && pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState
+ && pSubLeaf->uEbx <= pCpum->GuestFeatures.cbMaxExtendedState
+ && pSubLeaf->uEbx + pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState,
+ ("iComponent=%#x eax=%#x ebx=%#x cbMax=%#x\n", iComponent, pSubLeaf->uEax, pSubLeaf->uEbx,
+ pCpum->GuestFeatures.cbMaxExtendedState),
+ VERR_CPUM_IPE_1);
+ pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEbx;
+ }
+
+ /* Copy the CPU #0 data to the other CPUs. */
+ for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ memcpy(&pVCpu->cpum.s.Guest.aoffXState[0], &pVCpu0->cpum.s.Guest.aoffXState[0], sizeof(pVCpu0->cpum.s.Guest.aoffXState));
+ }
+
+ return VINF_SUCCESS;
+}
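+
+/* Consumer sketch for the aoffXState table set up above (assuming the array
+   entries are 16-bit offsets with the 0xffff fill marking absent components,
+   and the X86XSAVEYMMHI layout from iprt/x86.h):
+
+       uint32_t const offYmm = pVCpu->cpum.s.Guest.aoffXState[XSAVE_C_YMM_BIT];
+       PX86XSAVEYMMHI pYmmHi = offYmm != 0xffff
+                             ? (PX86XSAVEYMMHI)&pVCpu->cpum.s.Guest.abXState[offYmm]
+                             : NULL;
+*/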
+
+
+/** @name Instruction Set Extension Options
+ * @{ */
+/** Configuration option type (extended boolean, really). */
+typedef uint8_t CPUMISAEXTCFG;
+/** Always disable the extension. */
+#define CPUMISAEXTCFG_DISABLED false
+/** Enable the extension if it's supported by the host CPU. */
+#define CPUMISAEXTCFG_ENABLED_SUPPORTED true
+/** Enable the extension if it's supported by the host CPU, but don't let
+ * the portable CPUID feature disable it. */
+#define CPUMISAEXTCFG_ENABLED_PORTABLE UINT8_C(127)
+/** Always enable the extension. */
+#define CPUMISAEXTCFG_ENABLED_ALWAYS UINT8_C(255)
+/** @} */
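+
+/* Semantics sketch: these values feed the PASSTHRU_FEATURE* and
+   PORTABLE_DISABLE_FEATURE_BIT_CFG macros in cpumR3CpuIdSanitize below.
+   For a given feature bit:
+
+       CPUMISAEXTCFG_DISABLED           -> bit always cleared
+       CPUMISAEXTCFG_ENABLED_SUPPORTED  -> bit kept only if the host has it
+       CPUMISAEXTCFG_ENABLED_PORTABLE   -> as SUPPORTED, but immune to the
+                                           portable-CPUID level stripping
+       CPUMISAEXTCFG_ENABLED_ALWAYS     -> bit forced on regardless of host
+*/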
+
+/**
+ * CPUID Configuration (from CFGM).
+ *
+ * @remarks The members aren't documented since we would only be duplicating the
+ * \@cfgm entries in cpumR3CpuIdReadConfig.
+ */
+typedef struct CPUMCPUIDCONFIG
+{
+ bool fNt4LeafLimit;
+ bool fInvariantTsc;
+ bool fInvariantApic;
+ bool fForceVme;
+ bool fNestedHWVirt;
+
+ CPUMISAEXTCFG enmCmpXchg16b;
+ CPUMISAEXTCFG enmMonitor;
+ CPUMISAEXTCFG enmMWaitExtensions;
+ CPUMISAEXTCFG enmSse41;
+ CPUMISAEXTCFG enmSse42;
+ CPUMISAEXTCFG enmAvx;
+ CPUMISAEXTCFG enmAvx2;
+ CPUMISAEXTCFG enmXSave;
+ CPUMISAEXTCFG enmAesNi;
+ CPUMISAEXTCFG enmPClMul;
+ CPUMISAEXTCFG enmPopCnt;
+ CPUMISAEXTCFG enmMovBe;
+ CPUMISAEXTCFG enmRdRand;
+ CPUMISAEXTCFG enmRdSeed;
+ CPUMISAEXTCFG enmCLFlushOpt;
+ CPUMISAEXTCFG enmFsGsBase;
+ CPUMISAEXTCFG enmPcid;
+ CPUMISAEXTCFG enmInvpcid;
+ CPUMISAEXTCFG enmFlushCmdMsr;
+ CPUMISAEXTCFG enmMdsClear;
+ CPUMISAEXTCFG enmArchCapMsr;
+
+ CPUMISAEXTCFG enmAbm;
+ CPUMISAEXTCFG enmSse4A;
+ CPUMISAEXTCFG enmMisAlnSse;
+ CPUMISAEXTCFG enm3dNowPrf;
+ CPUMISAEXTCFG enmAmdExtMmx;
+
+ uint32_t uMaxStdLeaf;
+ uint32_t uMaxExtLeaf;
+ uint32_t uMaxCentaurLeaf;
+ uint32_t uMaxIntelFamilyModelStep;
+ char szCpuName[128];
+} CPUMCPUIDCONFIG;
+/** Pointer to CPUID config (from CFGM). */
+typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
+
+
+/**
+ * Mini CPU selection support for making Mac OS X happy.
+ *
+ * Executes the /CPUM/MaxIntelFamilyModelStep config.
+ *
+ * @param pCpum The CPUM instance data.
+ * @param pConfig The CPUID configuration we've read from CFGM.
+ */
+static void cpumR3CpuIdLimitIntelFamModStep(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
+{
+ if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ {
+ PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
+ uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(RTX86GetCpuStepping(pStdFeatureLeaf->uEax),
+ RTX86GetCpuModelIntel(pStdFeatureLeaf->uEax),
+ RTX86GetCpuFamily(pStdFeatureLeaf->uEax),
+ 0);
+ uint32_t uMaxIntelFamilyModelStep = pConfig->uMaxIntelFamilyModelStep;
+ if (pConfig->uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
+ {
+ uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
+ uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
+ uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
+ uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */
+ uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */
+ if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */
+ uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
+ LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
+ pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
+ pStdFeatureLeaf->uEax = uNew;
+ }
+ }
+}
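+
+/* Worked example for the encoding above: capping at family 6, model 0x2A,
+   stepping 7 (a Sandy Bridge signature) means
+
+       /CPUM/MaxIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(7, 0x2A, 6, 0)
+                                     = 0x00062a07
+
+   and a newer host's CPUID(1).EAX is rewritten to 0x000206a7 (assuming the
+   preserved type/reserved bits are zero): stepping 7 in bits 3:0, model low
+   nibble 0xA in bits 7:4, family 6 in bits 11:8 and extended model 2 in
+   bits 19:16; the extended family field stays zero for families <= 0xf. */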
+
+
+
+/**
+ * Limits the number of entries, zapping the remainder.
+ *
+ * The limits are masking off stuff about power saving and similar; this
+ * is perhaps a bit crudely done as there is probably some relatively harmless
+ * info too in these leaves (like words about having a constant TSC).
+ *
+ * @param pCpum The CPUM instance data.
+ * @param pConfig The CPUID configuration we've read from CFGM.
+ */
+static void cpumR3CpuIdLimitLeaves(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
+{
+ /*
+ * Standard leaves.
+ */
+ uint32_t uSubLeaf = 0;
+ PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0, uSubLeaf);
+ if (pCurLeaf)
+ {
+ uint32_t uLimit = pCurLeaf->uEax;
+ if (uLimit <= UINT32_C(0x000fffff))
+ {
+ if (uLimit > pConfig->uMaxStdLeaf)
+ {
+ pCurLeaf->uEax = uLimit = pConfig->uMaxStdLeaf;
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ uLimit + 1, UINT32_C(0x000fffff));
+ }
+
+ /* NT4 hack, no zapping of extra leaves here. */
+ if (pConfig->fNt4LeafLimit && uLimit > 3)
+ pCurLeaf->uEax = uLimit = 3;
+
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x00000000), ++uSubLeaf)) != NULL)
+ pCurLeaf->uEax = uLimit;
+ }
+ else
+ {
+ LogRel(("CPUID: Invalid standard range: %#x\n", uLimit));
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x00000000), UINT32_C(0x0fffffff));
+ }
+ }
+
+ /*
+ * Extended leaves.
+ */
+ uSubLeaf = 0;
+ pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), uSubLeaf);
+ if (pCurLeaf)
+ {
+ uint32_t uLimit = pCurLeaf->uEax;
+ if ( uLimit >= UINT32_C(0x80000000)
+ && uLimit <= UINT32_C(0x800fffff))
+ {
+ if (uLimit > pConfig->uMaxExtLeaf)
+ {
+ pCurLeaf->uEax = uLimit = pConfig->uMaxExtLeaf;
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ uLimit + 1, UINT32_C(0x800fffff));
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), ++uSubLeaf)) != NULL)
+ pCurLeaf->uEax = uLimit;
+ }
+ }
+ else
+ {
+ LogRel(("CPUID: Invalid extended range: %#x\n", uLimit));
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x80000000), UINT32_C(0x8ffffffd));
+ }
+ }
+
+ /*
+ * Centaur leaves (VIA).
+ */
+ uSubLeaf = 0;
+ pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), uSubLeaf);
+ if (pCurLeaf)
+ {
+ uint32_t uLimit = pCurLeaf->uEax;
+ if ( uLimit >= UINT32_C(0xc0000000)
+ && uLimit <= UINT32_C(0xc00fffff))
+ {
+ if (uLimit > pConfig->uMaxCentaurLeaf)
+ {
+ pCurLeaf->uEax = uLimit = pConfig->uMaxCentaurLeaf;
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ uLimit + 1, UINT32_C(0xcfffffff));
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), ++uSubLeaf)) != NULL)
+ pCurLeaf->uEax = uLimit;
+ }
+ }
+ else
+ {
+ LogRel(("CPUID: Invalid centaur range: %#x\n", uLimit));
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0xc0000000), UINT32_C(0xcfffffff));
+ }
+ }
+}
+
+
+/**
+ * Clears a CPUID leaf and all sub-leaves (to zero).
+ *
+ * @param pCpum The CPUM instance data.
+ * @param uLeaf The leaf to clear.
+ */
+static void cpumR3CpuIdZeroLeaf(PCPUM pCpum, uint32_t uLeaf)
+{
+ uint32_t uSubLeaf = 0;
+ PCPUMCPUIDLEAF pCurLeaf;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax = 0;
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ pCurLeaf->uEdx = 0;
+ uSubLeaf++;
+ }
+}
+
+
+/**
+ * Used by cpumR3CpuIdSanitize to ensure that we don't have any sub-leaves for
+ * the given leaf.
+ *
+ * @returns pLeaf.
+ * @param pCpum The CPUM instance data.
+ * @param pLeaf The leaf to ensure is alone with its EAX input value.
+ */
+static PCPUMCPUIDLEAF cpumR3CpuIdMakeSingleLeaf(PCPUM pCpum, PCPUMCPUIDLEAF pLeaf)
+{
+ Assert((uintptr_t)(pLeaf - pCpum->GuestInfo.paCpuIdLeavesR3) < pCpum->GuestInfo.cCpuIdLeaves);
+ if (pLeaf->fSubLeafMask != 0)
+ {
+ /*
+ * Figure out how many sub-leaves are in need of removal (we'll keep the first).
+ * Log everything while we're at it.
+ */
+ LogRel(("CPUM:\n"
+ "CPUM: Unexpected CPUID sub-leaves for leaf %#x; fSubLeafMask=%#x\n", pLeaf->uLeaf, pLeaf->fSubLeafMask));
+ PCPUMCPUIDLEAF pLast = &pCpum->GuestInfo.paCpuIdLeavesR3[pCpum->GuestInfo.cCpuIdLeaves - 1];
+ PCPUMCPUIDLEAF pSubLeaf = pLeaf;
+ for (;;)
+ {
+ LogRel(("CPUM: %08x/%08x: %08x %08x %08x %08x; flags=%#x mask=%#x\n",
+ pSubLeaf->uLeaf, pSubLeaf->uSubLeaf,
+ pSubLeaf->uEax, pSubLeaf->uEbx, pSubLeaf->uEcx, pSubLeaf->uEdx,
+ pSubLeaf->fFlags, pSubLeaf->fSubLeafMask));
+ if (pSubLeaf == pLast || pSubLeaf[1].uLeaf != pLeaf->uLeaf)
+ break;
+ pSubLeaf++;
+ }
+ LogRel(("CPUM:\n"));
+
+ /*
+ * Remove the offending sub-leaves.
+ */
+ if (pSubLeaf != pLeaf)
+ {
+ if (pSubLeaf != pLast)
+ memmove(pLeaf + 1, pSubLeaf + 1, (uintptr_t)pLast - (uintptr_t)pSubLeaf);
+ pCpum->GuestInfo.cCpuIdLeaves -= (uint32_t)(pSubLeaf - pLeaf);
+ }
+
+ /*
+ * Convert the first sub-leaf into a single leaf.
+ */
+ pLeaf->uSubLeaf = 0;
+ pLeaf->fSubLeafMask = 0;
+ }
+ return pLeaf;
+}
+
+
+/**
+ * Sanitizes and adjust the CPUID leaves.
+ *
+ * Drop features that aren't virtualized (or virtualizable). Adjust information
+ * and capabilities to fit the virtualized hardware. Remove information the
+ * guest shouldn't have (because it's wrong in the virtual world or because it
+ * gives away host details) or that we don't have documentation for and no idea
+ * what it means.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure (for cCpus).
+ * @param pCpum The CPUM instance data.
+ * @param pConfig The CPUID configuration we've read from CFGM.
+ */
+static int cpumR3CpuIdSanitize(PVM pVM, PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
+{
+#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
+ if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
+ { \
+ LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
+ (a_pLeafReg) &= ~(uint32_t)(fMask); \
+ }
+#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
+ if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
+ { \
+ LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
+ (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
+ }
+#define PORTABLE_DISABLE_FEATURE_BIT_CFG(Lvl, a_pLeafReg, FeatNm, fBitMask, enmConfig) \
+ if ( pCpum->u8PortableCpuIdLevel >= (Lvl) \
+ && ((a_pLeafReg) & (fBitMask)) \
+ && (enmConfig) != CPUMISAEXTCFG_ENABLED_PORTABLE ) \
+ { \
+ LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
+ (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
+ }
+ Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
+
+ /* The CPUID entries we start with here aren't necessarily the ones of the host, so we
+ must consult HostFeatures when processing CPUMISAEXTCFG variables. */
+ PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
+#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
+ ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
+#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
+ ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
+#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
+
+ /* Cpuid 1:
+ * EAX: CPU model, family and stepping.
+ *
+ * ECX + EDX: Supported features. Only report features we can support.
+ * Note! When enabling new features the Synthetic CPU and Portable CPUID
+ * options may require adjusting (i.e. stripping what was enabled).
+ *
+ * EBX: Branding, CLFLUSH line size, logical processors per package and
+ * initial APIC ID.
+ */
+ PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0); /* Note! Must refetch when used later. */
+ AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
+ pStdFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pStdFeatureLeaf);
+
+ pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
+ | X86_CPUID_FEATURE_EDX_VME
+ | X86_CPUID_FEATURE_EDX_DE
+ | X86_CPUID_FEATURE_EDX_PSE
+ | X86_CPUID_FEATURE_EDX_TSC
+ | X86_CPUID_FEATURE_EDX_MSR
+ //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
+ | X86_CPUID_FEATURE_EDX_MCE
+ | X86_CPUID_FEATURE_EDX_CX8
+ //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
+ //| RT_BIT_32(10) - not defined
+ | X86_CPUID_FEATURE_EDX_SEP
+ | X86_CPUID_FEATURE_EDX_MTRR
+ | X86_CPUID_FEATURE_EDX_PGE
+ | X86_CPUID_FEATURE_EDX_MCA
+ | X86_CPUID_FEATURE_EDX_CMOV
+ | X86_CPUID_FEATURE_EDX_PAT /* 16 */
+ | X86_CPUID_FEATURE_EDX_PSE36
+ //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
+ | X86_CPUID_FEATURE_EDX_CLFSH
+ //| RT_BIT_32(20) - not defined
+ //| X86_CPUID_FEATURE_EDX_DS - no debug store.
+ //| X86_CPUID_FEATURE_EDX_ACPI - not supported (not DevAcpi, right?).
+ | X86_CPUID_FEATURE_EDX_MMX
+ | X86_CPUID_FEATURE_EDX_FXSR
+ | X86_CPUID_FEATURE_EDX_SSE
+ | X86_CPUID_FEATURE_EDX_SSE2
+ //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
+ | X86_CPUID_FEATURE_EDX_HTT
+ //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
+ //| RT_BIT_32(30) - not defined
+ //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
+ ;
+ pStdFeatureLeaf->uEcx &= X86_CPUID_FEATURE_ECX_SSE3
+ | PASSTHRU_FEATURE_TODO(pConfig->enmPClMul, X86_CPUID_FEATURE_ECX_PCLMUL)
+ //| X86_CPUID_FEATURE_ECX_DTES64 - not implemented yet.
+ /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
+ | PASSTHRU_FEATURE_EX(pConfig->enmMonitor, pHstFeat->fMonitorMWait, pVM->cCpus == 1, X86_CPUID_FEATURE_ECX_MONITOR)
+ //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
+ | (pConfig->fNestedHWVirt ? X86_CPUID_FEATURE_ECX_VMX : 0)
+ //| X86_CPUID_FEATURE_ECX_SMX - not virtualized yet.
+ //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
+ //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
+ | X86_CPUID_FEATURE_ECX_SSSE3
+ //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
+ //| X86_CPUID_FEATURE_ECX_FMA - not implemented yet.
+ | PASSTHRU_FEATURE(pConfig->enmCmpXchg16b, pHstFeat->fMovCmpXchg16b, X86_CPUID_FEATURE_ECX_CX16)
+ /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
+ //| X86_CPUID_FEATURE_ECX_TPRUPDATE
+ //| X86_CPUID_FEATURE_ECX_PDCM - not implemented yet.
+ | PASSTHRU_FEATURE(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
+ //| X86_CPUID_FEATURE_ECX_DCA - not implemented yet.
+ | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
+ | PASSTHRU_FEATURE(pConfig->enmSse42, pHstFeat->fSse42, X86_CPUID_FEATURE_ECX_SSE4_2)
+ //| X86_CPUID_FEATURE_ECX_X2APIC - turned on later by the device if enabled.
+ | PASSTHRU_FEATURE_TODO(pConfig->enmMovBe, X86_CPUID_FEATURE_ECX_MOVBE)
+ | PASSTHRU_FEATURE(pConfig->enmPopCnt, pHstFeat->fPopCnt, X86_CPUID_FEATURE_ECX_POPCNT)
+ //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
+ | PASSTHRU_FEATURE_TODO(pConfig->enmAesNi, X86_CPUID_FEATURE_ECX_AES)
+ | PASSTHRU_FEATURE(pConfig->enmXSave, pHstFeat->fXSaveRstor, X86_CPUID_FEATURE_ECX_XSAVE)
+ //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
+ | PASSTHRU_FEATURE(pConfig->enmAvx, pHstFeat->fAvx, X86_CPUID_FEATURE_ECX_AVX)
+ //| X86_CPUID_FEATURE_ECX_F16C - not implemented yet.
+ | PASSTHRU_FEATURE_TODO(pConfig->enmRdRand, X86_CPUID_FEATURE_ECX_RDRAND)
+ //| X86_CPUID_FEATURE_ECX_HVP - Set explicitly later.
+ ;
+
+ /* Mask out PCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
+ if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
+ && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_PCID))
+ {
+ pStdFeatureLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_PCID;
+ LogRel(("CPUM: Disabled PCID without FSGSBASE to workaround buggy guests\n"));
+ }
+
+ if (pCpum->u8PortableCpuIdLevel > 0)
+ {
+ PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCID, X86_CPUID_FEATURE_ECX_PCID, pConfig->enmPcid);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1, pConfig->enmSse41);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2, pConfig->enmSse42);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, MOVBE, X86_CPUID_FEATURE_ECX_MOVBE, pConfig->enmMovBe);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, AES, X86_CPUID_FEATURE_ECX_AES);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, VMX, X86_CPUID_FEATURE_ECX_VMX);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCLMUL, X86_CPUID_FEATURE_ECX_PCLMUL, pConfig->enmPClMul);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, F16C, X86_CPUID_FEATURE_ECX_F16C);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE, X86_CPUID_FEATURE_ECX_XSAVE, pConfig->enmXSave);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX, X86_CPUID_FEATURE_ECX_AVX, pConfig->enmAvx);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16, pConfig->enmCmpXchg16b);
+ PORTABLE_DISABLE_FEATURE_BIT( 2, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
+ PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
+ PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE);
+ PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
+ PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
+
+ Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP ///??
+ | X86_CPUID_FEATURE_EDX_PSN
+ | X86_CPUID_FEATURE_EDX_DS
+ | X86_CPUID_FEATURE_EDX_ACPI
+ | X86_CPUID_FEATURE_EDX_SS
+ | X86_CPUID_FEATURE_EDX_TM
+ | X86_CPUID_FEATURE_EDX_PBE
+ )));
+ Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_DTES64
+ | X86_CPUID_FEATURE_ECX_CPLDS
+ | X86_CPUID_FEATURE_ECX_AES
+ | X86_CPUID_FEATURE_ECX_VMX
+ | X86_CPUID_FEATURE_ECX_SMX
+ | X86_CPUID_FEATURE_ECX_EST
+ | X86_CPUID_FEATURE_ECX_TM2
+ | X86_CPUID_FEATURE_ECX_CNTXID
+ | X86_CPUID_FEATURE_ECX_FMA
+ | X86_CPUID_FEATURE_ECX_TPRUPDATE
+ | X86_CPUID_FEATURE_ECX_PDCM
+ | X86_CPUID_FEATURE_ECX_DCA
+ | X86_CPUID_FEATURE_ECX_OSXSAVE
+ )));
+ }
+
+ /* Set up APIC ID for CPU 0, configure multi core/threaded smp. */
+ pStdFeatureLeaf->uEbx &= UINT32_C(0x0000ffff); /* (APIC-ID := 0 and #LogCpus := 0) */
+
+ /* The HTT bit is architectural and does not directly indicate hyper-threading or multiple cores;
+ * it was set even on single-core/non-HT Northwood P4s for example. The HTT bit only means that the
+ * information in EBX[23:16] (max number of addressable logical processor IDs) is valid.
+ */
+#ifdef VBOX_WITH_MULTI_CORE
+ if (pVM->cCpus > 1)
+ pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* Force if emulating a multi-core CPU. */
+#endif
+ if (pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_HTT)
+ {
+ /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU
+ core times the number of CPU cores per processor */
+#ifdef VBOX_WITH_MULTI_CORE
+ pStdFeatureLeaf->uEbx |= pVM->cCpus <= 0xff ? (pVM->cCpus << 16) : UINT32_C(0x00ff0000);
+#else
+ /* Single logical processor in a package. */
+ pStdFeatureLeaf->uEbx |= (1 << 16);
+#endif
+ }
+
+ uint32_t uMicrocodeRev;
+ int rc = SUPR3QueryMicrocodeRev(&uMicrocodeRev);
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("CPUM: Microcode revision 0x%08X\n", uMicrocodeRev));
+ }
+ else
+ {
+ uMicrocodeRev = 0;
+ LogRel(("CPUM: Failed to query microcode revision. rc=%Rrc\n", rc));
+ }
+
+ /* Mask out the VME capability on certain CPUs, unless overridden by fForceVme.
+ * VME bug was fixed in AGESA 1.0.0.6, microcode patch level 8001126.
+ */
+ if ( ( pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen
+ /** @todo The following ASSUMES that Hygon uses the same version numbering
+ * as AMD and that they shipped buggy firmware. */
+ || pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Hygon_Dhyana)
+ && uMicrocodeRev < 0x8001126
+ && !pConfig->fForceVme)
+ {
+ /** @todo The above is a very coarse test but at the moment we don't know any better (see @bugref{8852}). */
+ LogRel(("CPUM: Zen VME workaround engaged\n"));
+ pStdFeatureLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_VME;
+ }
+
+ /* Force standard feature bits. */
+ if (pConfig->enmPClMul == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_PCLMUL;
+ if (pConfig->enmMonitor == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MONITOR;
+ if (pConfig->enmCmpXchg16b == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_CX16;
+ if (pConfig->enmSse41 == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_1;
+ if (pConfig->enmSse42 == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_2;
+ if (pConfig->enmMovBe == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MOVBE;
+ if (pConfig->enmPopCnt == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_POPCNT;
+ if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
+ if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
+ if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
+ if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;
+
+ pStdFeatureLeaf = NULL; /* Must refetch! */
+
+ /* Cpuid 0x80000001: (Similar, but in no way identical to 0x00000001.)
+ * AMD:
+ * EAX: CPU model, family and stepping.
+ *
+ * ECX + EDX: Supported features. Only report features we can support.
+ * Note! When enabling new features the Synthetic CPU and Portable CPUID
+ * options may require adjusting (i.e. stripping what was enabled).
+ * ASSUMES that this is ALWAYS the AMD defined feature set if present.
+ *
+ * EBX: Branding ID and package type (or reserved).
+ *
+ * Intel and probably most others:
+ * EAX: 0
+ * EBX: 0
+ * ECX + EDX: Subset of AMD features, mainly for AMD64 support.
+ */
+ PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
+ if (pExtFeatureLeaf)
+ {
+ pExtFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pExtFeatureLeaf);
+
+ pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
+ | X86_CPUID_AMD_FEATURE_EDX_VME
+ | X86_CPUID_AMD_FEATURE_EDX_DE
+ | X86_CPUID_AMD_FEATURE_EDX_PSE
+ | X86_CPUID_AMD_FEATURE_EDX_TSC
+ | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
+ //| X86_CPUID_AMD_FEATURE_EDX_PAE - turned on when necessary
+ //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
+ | X86_CPUID_AMD_FEATURE_EDX_CX8
+ //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
+ //| RT_BIT_32(10) - reserved
+ | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
+ | X86_CPUID_AMD_FEATURE_EDX_MTRR
+ | X86_CPUID_AMD_FEATURE_EDX_PGE
+ | X86_CPUID_AMD_FEATURE_EDX_MCA
+ | X86_CPUID_AMD_FEATURE_EDX_CMOV
+ | X86_CPUID_AMD_FEATURE_EDX_PAT
+ | X86_CPUID_AMD_FEATURE_EDX_PSE36
+ //| RT_BIT_32(18) - reserved
+ //| RT_BIT_32(19) - reserved
+ | X86_CPUID_EXT_FEATURE_EDX_NX
+ //| RT_BIT_32(21) - reserved
+ | PASSTHRU_FEATURE(pConfig->enmAmdExtMmx, pHstFeat->fAmdMmxExts, X86_CPUID_AMD_FEATURE_EDX_AXMMX)
+ | X86_CPUID_AMD_FEATURE_EDX_MMX
+ | X86_CPUID_AMD_FEATURE_EDX_FXSR
+ | X86_CPUID_AMD_FEATURE_EDX_FFXSR
+ //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
+ | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
+ //| RT_BIT_32(28) - reserved
+ //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
+ | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
+ | X86_CPUID_AMD_FEATURE_EDX_3DNOW
+ ;
+ pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
+ //| X86_CPUID_AMD_FEATURE_ECX_CMPL - set below if applicable.
+ | (pConfig->fNestedHWVirt ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
+ //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
+ /* Note: This could prevent teleporting from AMD to Intel CPUs! */
+ | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
+ | PASSTHRU_FEATURE(pConfig->enmAbm, pHstFeat->fAbm, X86_CPUID_AMD_FEATURE_ECX_ABM)
+ | PASSTHRU_FEATURE_TODO(pConfig->enmSse4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A)
+ | PASSTHRU_FEATURE_TODO(pConfig->enmMisAlnSse, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE)
+ | PASSTHRU_FEATURE(pConfig->enm3dNowPrf, pHstFeat->f3DNowPrefetch, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
+ //| X86_CPUID_AMD_FEATURE_ECX_OSVW
+ //| X86_CPUID_AMD_FEATURE_ECX_IBS
+ //| X86_CPUID_AMD_FEATURE_ECX_XOP
+ //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
+ //| X86_CPUID_AMD_FEATURE_ECX_WDT
+ //| RT_BIT_32(14) - reserved
+ //| X86_CPUID_AMD_FEATURE_ECX_LWP - not supported
+ //| X86_CPUID_AMD_FEATURE_ECX_FMA4 - not yet virtualized.
+ //| RT_BIT_32(17) - reserved
+ //| RT_BIT_32(18) - reserved
+ //| X86_CPUID_AMD_FEATURE_ECX_NODEID - not yet virtualized.
+ //| RT_BIT_32(20) - reserved
+ //| X86_CPUID_AMD_FEATURE_ECX_TBM - not yet virtualized.
+ //| X86_CPUID_AMD_FEATURE_ECX_TOPOEXT - not yet virtualized.
+ //| RT_BIT_32(23) - reserved
+ //| RT_BIT_32(24) - reserved
+ //| RT_BIT_32(25) - reserved
+ //| RT_BIT_32(26) - reserved
+ //| RT_BIT_32(27) - reserved
+ //| RT_BIT_32(28) - reserved
+ //| RT_BIT_32(29) - reserved
+ //| RT_BIT_32(30) - reserved
+ //| RT_BIT_32(31) - reserved
+ ;
+#ifdef VBOX_WITH_MULTI_CORE
+ if ( pVM->cCpus > 1
+ && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; /* CmpLegacy */
+#endif
+
+ if (pCpum->u8PortableCpuIdLevel > 0)
+ {
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, SVM, X86_CPUID_AMD_FEATURE_ECX_SVM);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM, X86_CPUID_AMD_FEATURE_ECX_ABM, pConfig->enmAbm);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A, pConfig->enmSse4A);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, MISALNSSE, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE, pConfig->enmMisAlnSse);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, 3DNOWPRF, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF, pConfig->enm3dNowPrf);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, XOP, X86_CPUID_AMD_FEATURE_ECX_XOP);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, TBM, X86_CPUID_AMD_FEATURE_ECX_TBM);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, FMA4, X86_CPUID_AMD_FEATURE_ECX_FMA4);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEdx, AXMMX, X86_CPUID_AMD_FEATURE_EDX_AXMMX, pConfig->enmAmdExtMmx);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+ PORTABLE_DISABLE_FEATURE_BIT( 2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+ PORTABLE_DISABLE_FEATURE_BIT( 3, pExtFeatureLeaf->uEdx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
+
+ Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_SVM
+ | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
+ | X86_CPUID_AMD_FEATURE_ECX_OSVW
+ | X86_CPUID_AMD_FEATURE_ECX_IBS
+ | X86_CPUID_AMD_FEATURE_ECX_SKINIT
+ | X86_CPUID_AMD_FEATURE_ECX_WDT
+ | X86_CPUID_AMD_FEATURE_ECX_LWP
+ | X86_CPUID_AMD_FEATURE_ECX_NODEID
+ | X86_CPUID_AMD_FEATURE_ECX_TOPOEXT
+ | UINT32_C(0xff964000)
+ )));
+ Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
+ | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
+ | RT_BIT(18)
+ | RT_BIT(19)
+ | RT_BIT(21)
+ | X86_CPUID_AMD_FEATURE_EDX_AXMMX
+ | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
+ | RT_BIT(28)
+ )));
+ }
+
+ /* Force extended feature bits. */
+ if (pConfig->enmAbm == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_ABM;
+ if (pConfig->enmSse4A == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
+ if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
+ if (pConfig->enm3dNowPrf == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF;
+ if (pConfig->enmAmdExtMmx == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
+ }
+ pExtFeatureLeaf = NULL; /* Must refetch! */
+
+
+ /* Cpuid 2:
+ * Intel: (Nondeterministic) Cache and TLB information
+ * AMD: Reserved
+ * VIA: Reserved
+ * Safe to expose.
+ */
+ uint32_t uSubLeaf = 0;
+ PCPUMCPUIDLEAF pCurLeaf;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 2, uSubLeaf)) != NULL)
+ {
+ if ((pCurLeaf->uEax & 0xff) > 1)
+ {
+ LogRel(("CpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
+ pCurLeaf->uEax &= UINT32_C(0xffffff01);
+ }
+ uSubLeaf++;
+ }
+
+ /* Cpuid 3:
+ * Intel: EAX, EBX - reserved (Transmeta uses these)
+ * ECX, EDX - Processor Serial Number if available, otherwise reserved
+ * AMD: Reserved
+ * VIA: Reserved
+ * Safe to expose
+ */
+ pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
+ if (!(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN))
+ {
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 3, uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ if (pCpum->u8PortableCpuIdLevel > 0)
+ pCurLeaf->uEax = pCurLeaf->uEbx = 0;
+ uSubLeaf++;
+ }
+ }
+
+ /* Cpuid 4 + ECX:
+ * Intel: Deterministic Cache Parameters Leaf.
+ * AMD: Reserved
+ * VIA: Reserved
+ * Safe to expose, except for EAX:
+ * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
+ * Bits 31-26: Maximum number of processor cores in this physical package**
+ * Note: These SMP values are constant regardless of ECX
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 4, uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax &= UINT32_C(0x00003fff); /* Clear the #maxcores, #threads-sharing-cache (both are #-1).*/
+#ifdef VBOX_WITH_MULTI_CORE
+ if ( pVM->cCpus > 1
+ && pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ {
+ AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
+ /* One logical processor with possibly multiple cores. */
+ /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
+ pCurLeaf->uEax |= pVM->cCpus <= 0x40 ? ((pVM->cCpus - 1) << 26) : UINT32_C(0xfc000000); /* 6 bits only -> 64 cores! */
+ }
+#endif
+ uSubLeaf++;
+ }
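+ /* Illustration only (hypothetical snippet, never compiled): decoding the
+ * SMP fields written above the way a guest would. For a 4-vCPU VM the
+ * loop stores (4 - 1) << 26 in EAX[31:26]. */
+#if 0
+ uint32_t const uExampleEax = (4 - 1) << 26; /* what the loop writes for cCpus=4 */
+ uint32_t const cExampleCores = ((uExampleEax >> 26) & 0x3f) + 1; /* EAX[31:26] + 1 = 4 */
+ uint32_t const cExampleShare = ((uExampleEax >> 14) & 0xfff) + 1; /* EAX[25:14] + 1 = 1 */
+#endif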
+
+ /* Cpuid 5: Monitor/mwait Leaf
+ * Intel: ECX, EDX - reserved
+ * EAX, EBX - Smallest and largest monitor line size
+ * AMD: EDX - reserved
+ * EAX, EBX - Smallest and largest monitor line size
+ * ECX - extensions (ignored for now)
+ * VIA: Reserved
+ * Safe to expose
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 5, uSubLeaf)) != NULL)
+ {
+ pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
+ if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
+ pCurLeaf->uEax = pCurLeaf->uEbx = 0;
+
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ if (pConfig->enmMWaitExtensions)
+ {
+ pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
+ /** @todo for now we just expose host's MWAIT C-states, although conceptually
+ it shall be part of our power management virtualization model */
+#if 0
+ /* MWAIT sub C-states */
+ pCurLeaf->uEdx =
+ (0 << 0) /* 0 in C0 */ |
+ (2 << 4) /* 2 in C1 */ |
+ (2 << 8) /* 2 in C2 */ |
+ (2 << 12) /* 2 in C3 */ |
+ (0 << 16) /* 0 in C4 */
+ ;
+#endif
+ }
+ else
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ uSubLeaf++;
+ }
+
+ /* Cpuid 6: Digital Thermal Sensor and Power Management Parameters.
+ * Intel: Various thermal and power management related stuff.
+ * AMD: EBX, EDX - reserved.
+ * EAX - Bit two is ARAT, indicating that APIC timers run at a constant
+ * rate regardless of processor P-states. Same as Intel.
+ * ECX - Bit zero is EffFreq, indicating MSR_0000_00e7 and MSR_0000_00e8
+ * present. Same as Intel.
+ * VIA: ??
+ *
+ * We clear everything except for the ARAT bit which is important for Windows 11.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 6, uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ pCurLeaf->uEax &= 0
+ | X86_CPUID_POWER_EAX_ARAT
+ ;
+
+ /* Since we emulate the APIC timers, we can normally set the ARAT bit
+ * regardless of whether the host CPU sets it or not. Intel has set the ARAT
+ * bit roughly since the Westmere generation, AMD probably only since Zen.
+ * See @bugref{10567}.
+ */
+ if (pConfig->fInvariantApic)
+ pCurLeaf->uEax |= X86_CPUID_POWER_EAX_ARAT;
+
+ uSubLeaf++;
+ }
+
+ /* Cpuid 7 + ECX: Structured Extended Feature Flags Enumeration
+ * EAX: Number of sub leaves.
+ * EBX+ECX+EDX: Feature flags
+ *
+ * We only have documentation for one sub-leaf, so clear all others (no need
+ * to remove them as such, just set them to zero).
+ *
+ * Note! When enabling new features the Synthetic CPU and Portable CPUID
+ * options may require adjusting (i.e. stripping what was enabled).
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, uSubLeaf)) != NULL)
+ {
+ switch (uSubLeaf)
+ {
+ case 0:
+ {
+ pCurLeaf->uEax = 0; /* Max ECX input is 0. */
+ pCurLeaf->uEbx &= 0
+ | PASSTHRU_FEATURE(pConfig->enmFsGsBase, pHstFeat->fFsGsBase, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE)
+ //| X86_CPUID_STEXT_FEATURE_EBX_TSC_ADJUST RT_BIT(1)
+ //| X86_CPUID_STEXT_FEATURE_EBX_SGX RT_BIT(2)
+ | X86_CPUID_STEXT_FEATURE_EBX_BMI1
+ //| X86_CPUID_STEXT_FEATURE_EBX_HLE RT_BIT(4)
+ | PASSTHRU_FEATURE(pConfig->enmAvx2, pHstFeat->fAvx2, X86_CPUID_STEXT_FEATURE_EBX_AVX2)
+ | X86_CPUID_STEXT_FEATURE_EBX_FDP_EXCPTN_ONLY
+ //| X86_CPUID_STEXT_FEATURE_EBX_SMEP RT_BIT(7)
+ | X86_CPUID_STEXT_FEATURE_EBX_BMI2
+ //| X86_CPUID_STEXT_FEATURE_EBX_ERMS RT_BIT(9)
+ | PASSTHRU_FEATURE(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
+ //| X86_CPUID_STEXT_FEATURE_EBX_RTM RT_BIT(11)
+ //| X86_CPUID_STEXT_FEATURE_EBX_PQM RT_BIT(12)
+ | X86_CPUID_STEXT_FEATURE_EBX_DEPR_FPU_CS_DS
+ //| X86_CPUID_STEXT_FEATURE_EBX_MPE RT_BIT(14)
+ //| X86_CPUID_STEXT_FEATURE_EBX_PQE RT_BIT(15)
+ //| X86_CPUID_STEXT_FEATURE_EBX_AVX512F RT_BIT(16)
+ //| RT_BIT(17) - reserved
+ | PASSTHRU_FEATURE_TODO(pConfig->enmRdSeed, X86_CPUID_STEXT_FEATURE_EBX_RDSEED)
+ //| X86_CPUID_STEXT_FEATURE_EBX_ADX RT_BIT(19)
+ //| X86_CPUID_STEXT_FEATURE_EBX_SMAP RT_BIT(20)
+ //| RT_BIT(21) - reserved
+ //| RT_BIT(22) - reserved
+ | PASSTHRU_FEATURE(pConfig->enmCLFlushOpt, pHstFeat->fClFlushOpt, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT)
+ //| RT_BIT(24) - reserved
+ //| X86_CPUID_STEXT_FEATURE_EBX_INTEL_PT RT_BIT(25)
+ //| X86_CPUID_STEXT_FEATURE_EBX_AVX512PF RT_BIT(26)
+ //| X86_CPUID_STEXT_FEATURE_EBX_AVX512ER RT_BIT(27)
+ //| X86_CPUID_STEXT_FEATURE_EBX_AVX512CD RT_BIT(28)
+ //| X86_CPUID_STEXT_FEATURE_EBX_SHA RT_BIT(29)
+ //| RT_BIT(30) - reserved
+ //| RT_BIT(31) - reserved
+ ;
+ pCurLeaf->uEcx &= 0
+ //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
+ ;
+ pCurLeaf->uEdx &= 0
+ | PASSTHRU_FEATURE(pConfig->enmMdsClear, pHstFeat->fMdsClear, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
+ //| X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT(26)
+ //| X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT(27)
+ | PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
+ | PASSTHRU_FEATURE(pConfig->enmArchCapMsr, pHstFeat->fArchCap, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
+ ;
+
+ /* Mask out INVPCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
+ if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
+ && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_INVPCID))
+ {
+ pCurLeaf->uEbx &= ~X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
+ LogRel(("CPUM: Disabled INVPCID without FSGSBASE to work around buggy guests\n"));
+ }
+
+ if (pCpum->u8PortableCpuIdLevel > 0)
+ {
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, FSGSBASE, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE, pConfig->enmFsGsBase);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SGX, X86_CPUID_STEXT_FEATURE_EBX_SGX);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2, X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMEP, X86_CPUID_STEXT_FEATURE_EBX_SMEP);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, BMI2, X86_CPUID_STEXT_FEATURE_EBX_BMI2);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, INVPCID, X86_CPUID_STEXT_FEATURE_EBX_INVPCID, pConfig->enmInvpcid);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512F, X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, RDSEED, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmRdSeed);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, CLFLUSHOPT, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT, pConfig->enmCLFlushOpt);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512PF, X86_CPUID_STEXT_FEATURE_EBX_AVX512PF);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512ER, X86_CPUID_STEXT_FEATURE_EBX_AVX512ER);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512CD, X86_CPUID_STEXT_FEATURE_EBX_AVX512CD);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMAP, X86_CPUID_STEXT_FEATURE_EBX_SMAP);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA);
+ PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, MD_CLEAR, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR, pConfig->enmMdsClear);
+ PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, ARCHCAP, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP, pConfig->enmArchCapMsr);
+ }
+
+ /* Dependencies. */
+ if (!(pCurLeaf->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD))
+ pCurLeaf->uEdx &= ~X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
+
+ /* Force standard feature bits. */
+ if (pConfig->enmFsGsBase == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE;
+ if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
+ if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
+ if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
+ if (pConfig->enmInvpcid == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
+ if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
+ if (pConfig->enmMdsClear == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
+ if (pConfig->enmArchCapMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
+ break;
+ }
+
+ default:
+ /* Invalid index, all values are zero. */
+ pCurLeaf->uEax = 0;
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ pCurLeaf->uEdx = 0;
+ break;
+ }
+ uSubLeaf++;
+ }
+
+ /* Cpuid 8: Marked as reserved by Intel and AMD.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 8);
+
+ /* Cpuid 9: Direct Cache Access (DCA) Parameters
+ * Intel: EAX - Value of PLATFORM_DCA_CAP bits.
+ * EBX, ECX, EDX - reserved.
+ * AMD: Reserved
+ * VIA: ??
+ *
+ * We zero this.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 9);
+
+ /* Cpuid 0xa: Architectural Performance Monitor Features
+ * Intel: EAX - Version ID plus general-purpose counter count/width info.
+ * EBX - Architectural event availability bits.
+ * ECX, EDX - Fixed-function counter information.
+ * AMD: Reserved
+ * VIA: ??
+ *
+ * We zero this, for now at least.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 10);
+
+ /* Cpuid 0xb+ECX: x2APIC Features / Processor Topology.
+ * Intel: EAX - APIC ID shift right for next level.
+ * EBX - Factory configured cores/threads at this level.
+ * ECX - Level number (same as input) and level type (1,2,0).
+ * EDX - Extended initial APIC ID.
+ * AMD: Reserved
+ * VIA: ??
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 11, uSubLeaf)) != NULL)
+ {
+ if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
+ {
+ uint8_t bLevelType = RT_BYTE2(pCurLeaf->uEcx);
+ if (bLevelType == 1)
+ {
+ /* Thread level - we don't do threads at the moment. */
+ pCurLeaf->uEax = 0; /** @todo is this correct? Real CPUs never do 0 here, I think... */
+ pCurLeaf->uEbx = 1;
+ }
+ else if (bLevelType == 2)
+ {
+ /* Core level. */
+ pCurLeaf->uEax = 1; /** @todo real CPUs are supposed to be in the 4-6 range, not 1. Our APIC ID assignments are a little special... */
+#ifdef VBOX_WITH_MULTI_CORE
+ while (RT_BIT_32(pCurLeaf->uEax) < pVM->cCpus)
+ pCurLeaf->uEax++;
+#endif
+ pCurLeaf->uEbx = pVM->cCpus;
+ }
+ else
+ {
+ AssertLogRelMsg(bLevelType == 0, ("bLevelType=%#x uSubLeaf=%#x\n", bLevelType, uSubLeaf));
+ pCurLeaf->uEax = 0;
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ }
+ pCurLeaf->uEcx = (pCurLeaf->uEcx & UINT32_C(0xffffff00)) | (uSubLeaf & 0xff);
+ pCurLeaf->uEdx = 0; /* APIC ID is filled in by CPUMGetGuestCpuId() at runtime. Init for EMT(0) as usual. */
+ }
+ else
+ {
+ pCurLeaf->uEax = 0;
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ pCurLeaf->uEdx = 0;
+ }
+ uSubLeaf++;
+ }
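+ /* Worked example: the core-level shift computed above is the smallest
+ * power of two covering all vCPUs, e.g. for cCpus=6 the loop ends with
+ * EAX=3 since RT_BIT_32(3) = 8 >= 6, while EBX reports all 6 cores. */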
+
+ /* Cpuid 0xc: Marked as reserved by Intel and AMD.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 12);
+
+ /* Cpuid 0xd + ECX: Processor Extended State Enumeration
+ * ECX=0: EAX - Valid bits in XCR0[31:0].
+ * EBX - Maximum state size as per current XCR0 value.
+ * ECX - Maximum state size for all supported features.
+ * EDX - Valid bits in XCR0[63:32].
+ * ECX=1: EAX - Various X-features.
+ * EBX - Maximum state size as per current XCR0|IA32_XSS value.
+ * ECX - Valid bits in IA32_XSS[31:0].
+ * EDX - Valid bits in IA32_XSS[63:32].
+ * ECX=N, where N is in 2..63 and indicates a bit in XCR0 and/or IA32_XSS;
+ * if the bit is invalid all four registers are set to zero.
+ * EAX - The state size for this feature.
+ * EBX - The state byte offset of this feature.
+ * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
+ * EDX - Reserved, but is set to zero if invalid sub-leaf index.
+ *
+ * Clear them all as we don't currently implement extended CPU state.
+ */
+ /* Figure out the supported XCR0/XSS mask component and make sure CPUID[1].ECX[27] = CR4.OSXSAVE. */
+ uint64_t fGuestXcr0Mask = 0;
+ pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
+ if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
+ {
+ fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
+ if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX))
+ fGuestXcr0Mask |= XSAVE_C_YMM;
+ pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
+ if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
+ fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
+ fGuestXcr0Mask &= pCpum->fXStateHostMask;
+
+ pStdFeatureLeaf->fFlags |= CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE;
+ }
+ pStdFeatureLeaf = NULL;
+ pCpum->fXStateGuestMask = fGuestXcr0Mask;
+
+ /* Work the sub-leaves. */
+ uint32_t cbXSaveMaxActual = CPUM_MIN_XSAVE_AREA_SIZE;
+ uint32_t cbXSaveMaxReport = CPUM_MIN_XSAVE_AREA_SIZE;
+ for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
+ {
+ pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
+ if (pCurLeaf)
+ {
+ if (fGuestXcr0Mask)
+ {
+ switch (uSubLeaf)
+ {
+ case 0:
+ pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
+ pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
+ AssertLogRelMsgReturn((pCurLeaf->uEax & (XSAVE_C_X87 | XSAVE_C_SSE)) == (XSAVE_C_X87 | XSAVE_C_SSE),
+ ("CPUID(0xd/0).EAX missing mandatory X87 or SSE bits: %#RX32", pCurLeaf->uEax),
+ VERR_CPUM_IPE_1);
+ cbXSaveMaxActual = pCurLeaf->uEcx;
+ AssertLogRelMsgReturn(cbXSaveMaxActual <= CPUM_MAX_XSAVE_AREA_SIZE && cbXSaveMaxActual >= CPUM_MIN_XSAVE_AREA_SIZE,
+ ("%#x max=%#x\n", cbXSaveMaxActual, CPUM_MAX_XSAVE_AREA_SIZE), VERR_CPUM_IPE_2);
+ AssertLogRelMsgReturn(pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE && pCurLeaf->uEbx <= cbXSaveMaxActual,
+ ("ebx=%#x cbXSaveMaxActual=%#x\n", pCurLeaf->uEbx, cbXSaveMaxActual),
+ VERR_CPUM_IPE_2);
+ continue;
+ case 1:
+ pCurLeaf->uEax &= 0;
+ pCurLeaf->uEcx &= 0;
+ pCurLeaf->uEdx &= 0;
+ /** @todo what about checking ebx? */
+ continue;
+ default:
+ if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
+ {
+ AssertLogRelMsgReturn( pCurLeaf->uEax <= cbXSaveMaxActual
+ && pCurLeaf->uEax > 0
+ && pCurLeaf->uEbx < cbXSaveMaxActual
+ && pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
+ && pCurLeaf->uEbx + pCurLeaf->uEax <= cbXSaveMaxActual,
+ ("%#x: eax=%#x ebx=%#x cbMax=%#x\n",
+ uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, cbXSaveMaxActual),
+ VERR_CPUM_IPE_2);
+ AssertLogRel(!(pCurLeaf->uEcx & 1));
+ pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the rest are reserved... */
+ pCurLeaf->uEdx = 0; /* it's reserved... */
+ if (pCurLeaf->uEbx + pCurLeaf->uEax > cbXSaveMaxReport)
+ cbXSaveMaxReport = pCurLeaf->uEbx + pCurLeaf->uEax;
+ continue;
+ }
+ break;
+ }
+ }
+
+ /* Clear the leaf. */
+ pCurLeaf->uEax = 0;
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ pCurLeaf->uEdx = 0;
+ }
+ }
+
+ /* Update the max and current feature sizes to shut up annoying Linux kernels. */
+ if (cbXSaveMaxReport != cbXSaveMaxActual && fGuestXcr0Mask)
+ {
+ pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, 0);
+ if (pCurLeaf)
+ {
+ LogRel(("CPUM: Changing leaf 13[0]: EBX=%#RX32 -> %#RX32, ECX=%#RX32 -> %#RX32\n",
+ pCurLeaf->uEbx, cbXSaveMaxReport, pCurLeaf->uEcx, cbXSaveMaxReport));
+ pCurLeaf->uEbx = cbXSaveMaxReport;
+ pCurLeaf->uEcx = cbXSaveMaxReport;
+ }
+ }
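+ /* Worked example (assuming a host with x87/SSE/AVX but no AVX-512): the
+ * mask composed above is XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM = 0x7,
+ * so leaf 0xd[0].EAX is masked down to 0x7 and EBX/ECX end up covering
+ * the YMM state following the legacy XSAVE area. */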
+
+ /* Cpuid 0xe: Marked as reserved by Intel and AMD.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 14);
+
+ /* Cpuid 0xf + ECX: Platform quality of service monitoring (PQM),
+ * also known as Intel Resource Director Technology (RDT) Monitoring
+ * We zero this as we don't currently virtualize PQM.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 15);
+
+ /* Cpuid 0x10 + ECX: Platform quality of service enforcement (PQE),
+ * also known as Intel Resource Director Technology (RDT) Allocation
+ * We zero this as we don't currently virtualize PQE.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 16);
+
+ /* Cpuid 0x11: Marked as reserved by Intel and AMD.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 17);
+
+ /* Cpuid 0x12 + ECX: SGX resource enumeration.
+ * We zero this as we don't currently virtualize this.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 18);
+
+ /* Cpuid 0x13: Marked as reserved by Intel and AMD.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 19);
+
+ /* Cpuid 0x14 + ECX: Processor Trace (PT) capability enumeration.
+ * We zero this as we don't currently virtualize this.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 20);
+
+ /* Cpuid 0x15: Timestamp Counter / Core Crystal Clock info.
+ * Intel: uTscFrequency = uCoreCrystalClockFrequency * EBX / EAX.
+ * EAX - denominator (unsigned).
+ * EBX - numerator (unsigned).
+ * ECX, EDX - reserved.
+ * AMD: Reserved / undefined / not implemented.
+ * VIA: Reserved / undefined / not implemented.
+ * We zero this as we don't currently virtualize this.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 21);
+
+ /* Cpuid 0x16: Processor frequency info
+ * Intel: EAX - Core base frequency in MHz.
+ * EBX - Core maximum frequency in MHz.
+ * ECX - Bus (reference) frequency in MHz.
+ * EDX - Reserved.
+ * AMD: Reserved / undefined / not implemented.
+ * VIA: Reserved / undefined / not implemented.
+ * We zero this as we don't currently virtualize this.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, 22);
+
+ /* Cpuid 0x17..0x10000000: Unknown.
+ * We don't know these and what they mean, so remove them. */
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x00000017), UINT32_C(0x0fffffff));
+
+
+ /* CpuId 0x40000000..0x4fffffff: Reserved for hypervisor/emulator.
+ * We remove all these as we're a hypervisor and must provide our own.
+ */
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x40000000), UINT32_C(0x4fffffff));
+
+
+ /* Cpuid 0x80000000 is harmless. */
+
+ /* Cpuid 0x80000001 is handled with cpuid 1 way up above. */
+
+ /* Cpuid 0x80000002...0x80000004 contains the processor name and is considered harmless. */
+
+ /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
+ * Safe to pass on to the guest.
+ *
+ * AMD: 0x80000005 L1 cache information
+ * 0x80000006 L2/L3 cache information
+ * Intel: 0x80000005 reserved
+ * 0x80000006 L2 cache information
+ * VIA: 0x80000005 TLB and L1 cache information
+ * 0x80000006 L2 cache information
+ */
+
+ /* Cpuid 0x80000007: Advanced Power Management Information.
+ * AMD: EAX: Processor feedback capabilities.
+ * EBX: RAS capabilities.
+ * ECX: Advanced power monitoring interface.
+ * EDX: Enhanced power management capabilities.
+ * Intel: EAX, EBX, ECX - reserved.
+ * EDX - Invariant TSC indicator supported (bit 8), the rest is reserved.
+ * VIA: Reserved
+ * We let the guest see EDX_TSCINVAR (and later maybe EDX_EFRO); it is cleared below when /CPUM/InvariantTsc is false.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000007), uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
+ if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+ {
+ /*
+ * Older 64-bit linux kernels blindly assume that the AMD performance counters work
+ * if X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR is set, see @bugref{7243#c85}. Exposing this
+ * bit is now configurable.
+ */
+ pCurLeaf->uEdx &= 0
+ //| X86_CPUID_AMD_ADVPOWER_EDX_TS
+ //| X86_CPUID_AMD_ADVPOWER_EDX_FID
+ //| X86_CPUID_AMD_ADVPOWER_EDX_VID
+ //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
+ //| X86_CPUID_AMD_ADVPOWER_EDX_TM
+ //| X86_CPUID_AMD_ADVPOWER_EDX_STC
+ //| X86_CPUID_AMD_ADVPOWER_EDX_MC
+ //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
+ | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
+ //| X86_CPUID_AMD_ADVPOWER_EDX_CPB RT_BIT(9)
+ //| X86_CPUID_AMD_ADVPOWER_EDX_EFRO RT_BIT(10)
+ //| X86_CPUID_AMD_ADVPOWER_EDX_PFI RT_BIT(11)
+ //| X86_CPUID_AMD_ADVPOWER_EDX_PA RT_BIT(12)
+ | 0;
+ }
+ else
+ pCurLeaf->uEdx &= X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
+ if (!pConfig->fInvariantTsc)
+ pCurLeaf->uEdx &= ~X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
+ uSubLeaf++;
+ }
+
+ /* Cpuid 0x80000008:
+ * AMD: EBX, EDX - reserved
+ * EAX: Virtual/Physical/Guest address Size
+ * ECX: Number of cores + APICIdCoreIdSize
+ * Intel: EAX: Virtual/Physical address Size
+ * EBX, ECX, EDX - reserved
+ * VIA: EAX: Virtual/Physical address Size
+ * EBX, ECX, EDX - reserved
+ *
+ * We only expose the virtual+physical address size to the guest at the moment.
+ * On AMD we set the core count, but not the APIC ID stuff, as we're
+ * currently not doing the APIC ID assignments in a compatible manner.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */
+ pCurLeaf->uEbx = 0; /* reserved - [12] == IBPB */
+ pCurLeaf->uEdx = 0; /* reserved */
+
+ /* Set APICIdCoreIdSize to zero (use the legacy method to determine the number of cores per CPU).
+ * Set the core count to 0, indicating 1 core. Adjust if we're in multi-core mode on AMD. */
+ pCurLeaf->uEcx = 0;
+#ifdef VBOX_WITH_MULTI_CORE
+ if ( pVM->cCpus > 1
+ && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
+ pCurLeaf->uEcx |= (pVM->cCpus - 1) & UINT32_C(0xff);
+#endif
+ uSubLeaf++;
+ }
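+ /* Worked example: an 8-vCPU VM with an AMD guest profile leaves the loop
+ * above with ECX[7:0] = 7 (core count minus one) and ECX[15:12] = 0
+ * (APICIdCoreIdSize), steering the guest to the legacy counting method. */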
+
+ /* Cpuid 0x80000009: Reserved
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000009));
+
+ /* Cpuid 0x8000000a: SVM information on AMD, invalid on Intel.
+ * AMD: EAX - SVM revision.
+ * EBX - Number of ASIDs.
+ * ECX - Reserved.
+ * EDX - SVM Feature identification.
+ */
+ if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+ {
+ pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
+ if ( pExtFeatureLeaf
+ && (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM))
+ {
+ PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
+ if (pSvmFeatureLeaf)
+ {
+ pSvmFeatureLeaf->uEax = 0x1;
+ pSvmFeatureLeaf->uEbx = 0x8000; /** @todo figure out virtual NASID. */
+ pSvmFeatureLeaf->uEcx = 0;
+ pSvmFeatureLeaf->uEdx &= ( X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE /** @todo Support other SVM features */
+ | X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID
+ | X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
+ }
+ else
+ {
+ /* Should never happen. */
+ LogRel(("CPUM: Warning! Expected CPUID leaf 0x8000000a not present! SVM features not exposed to the guest\n"));
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
+ }
+ }
+ else
+ {
+ /* If SVM is not supported, this is reserved, zero out. */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
+ }
+ }
+ else
+ {
+ /* Cpuid 0x8000000a: Reserved on Intel.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
+ }
+
+ /* Cpuid 0x8000000b thru 0x80000018: Reserved
+ * We clear these as we don't know what purpose they might have. */
+ for (uint32_t uLeaf = UINT32_C(0x8000000b); uLeaf <= UINT32_C(0x80000018); uLeaf++)
+ cpumR3CpuIdZeroLeaf(pCpum, uLeaf);
+
+ /* Cpuid 0x80000019: TLB configuration
+ * Seems to be harmless, pass them thru as is. */
+
+ /* Cpuid 0x8000001a: Performance optimization identifiers.
+ * Strip anything we don't recognize or that addresses features we don't implement. */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001a), uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax &= RT_BIT_32(0) /* FP128 - use 1x128-bit instead of 2x64-bit. */
+ | RT_BIT_32(1) /* MOVU - Prefer unaligned MOV over MOVL + MOVH. */
+ //| RT_BIT_32(2) /* FP256 - use 1x256-bit instead of 2x128-bit. */
+ ;
+ pCurLeaf->uEbx = 0; /* reserved */
+ pCurLeaf->uEcx = 0; /* reserved */
+ pCurLeaf->uEdx = 0; /* reserved */
+ uSubLeaf++;
+ }
+
+ /* Cpuid 0x8000001b: Instruction based sampling (IBS) information.
+ * Clear this as we don't currently virtualize this feature. */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001b));
+
+ /* Cpuid 0x8000001c: Lightweight profiling (LWP) information.
+ * Clear this as we don't currently virtualize this feature. */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001c));
+
+ /* Cpuid 0x8000001d+ECX: Get cache configuration descriptors.
+ * We need to sanitize the cores per cache (EAX[25:14]).
+ *
+ * This is very much the same as Intel's CPUID(4) leaf, except EAX[31:26]
+ * and EDX[2] are reserved here, and EAX[25:14] is documented as having a
+ * slightly different meaning.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uSubLeaf)) != NULL)
+ {
+#ifdef VBOX_WITH_MULTI_CORE
+ uint32_t cCores = ((pCurLeaf->uEax >> 14) & 0xfff) + 1;
+ if (cCores > pVM->cCpus)
+ cCores = pVM->cCpus;
+ pCurLeaf->uEax &= UINT32_C(0x00003fff);
+ pCurLeaf->uEax |= ((cCores - 1) & 0xfff) << 14;
+#else
+ pCurLeaf->uEax &= UINT32_C(0x00003fff);
+#endif
+ uSubLeaf++;
+ }
+
+ /* Cpuid 0x8000001e: Get APIC / unit / node information.
+ * If AMD, we configure it for our layout (on EMT(0)). In the multi-core
+ * setup, we have one compute unit with all the cores in it. Single node.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001e), uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEax = 0; /* Extended APIC ID = EMT(0).idApic (== 0). */
+ if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
+ {
+#ifdef VBOX_WITH_MULTI_CORE
+ pCurLeaf->uEbx = pVM->cCpus < 0x100
+ ? (pVM->cCpus - 1) << 8 : UINT32_C(0x0000ff00); /* Compute unit ID 0, all cores in one unit. */
+#else
+ pCurLeaf->uEbx = 0; /* Compute unit ID 0, 1 core per unit. */
+#endif
+ pCurLeaf->uEcx = 0; /* Node ID 0, 1 node per CPU. */
+ }
+ else
+ {
+ Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_AMD);
+ Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_HYGON);
+ pCurLeaf->uEbx = 0; /* Reserved. */
+ pCurLeaf->uEcx = 0; /* Reserved. */
+ }
+ pCurLeaf->uEdx = 0; /* Reserved. */
+ uSubLeaf++;
+ }
+
+ /* Cpuid 0x8000001f...0x8ffffffd: Unknown.
+ * We don't know these and what they mean, so remove them. */
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x8000001f), UINT32_C(0x8ffffffd));
+
+ /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
+ * Just pass it thru for now. */
+
+ /* Cpuid 0x8fffffff: Mystery hammer time leaf!
+ * Just pass it thru for now. */
+
+ /* Cpuid 0xc0000000: Centaur stuff.
+ * Harmless, pass it thru. */
+
+ /* Cpuid 0xc0000001: Centaur features.
+ * VIA: EAX - Family, model, stepping.
+ * EDX - Centaur extended feature flags. Nothing interesting, except maybe
+ * FEMMS (bit 5), but VIA marks it as 'reserved', so never mind.
+ * EBX, ECX - reserved.
+ * We keep EAX but strip the rest.
+ */
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000001), uSubLeaf)) != NULL)
+ {
+ pCurLeaf->uEbx = 0;
+ pCurLeaf->uEcx = 0;
+ pCurLeaf->uEdx = 0; /* Bits 0 thru 9 are documented on sandpile.org, but we don't want them, except maybe 5 (FEMMS). */
+ uSubLeaf++;
+ }
+
+ /* Cpuid 0xc0000002: Old Centaur Current Performance Data.
+ * We only have fixed stale values, but they should be harmless. */
+
+ /* Cpuid 0xc0000003: Reserved.
+ * We zero this since we don't know what it may have been used for.
+ */
+ cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0xc0000003));
+
+ /* Cpuid 0xc0000004: Centaur Performance Info.
+ * We only have fixed stale values, but they should be harmless. */
+
+
+ /* Cpuid 0xc0000005...0xcfffffff: Unknown.
+ * We don't know these and what they mean, so remove them. */
+ cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0xc0000005), UINT32_C(0xcfffffff));
+
+ return VINF_SUCCESS;
+#undef PORTABLE_DISABLE_FEATURE_BIT
+#undef PORTABLE_CLEAR_BITS_WHEN
+}
+
+
+/**
+ * Reads a value in /CPUM/IsaExts/ node.
+ *
+ * @returns VBox status code (error message raised).
+ * @param pVM The cross context VM structure. (For errors.)
+ * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
+ * @param pszValueName The value / extension name.
+ * @param penmValue Where to return the choice.
+ * @param enmDefault The default choice.
+ */
+static int cpumR3CpuIdReadIsaExtCfg(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
+ CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
+{
+ /*
+ * Try integer encoding first.
+ */
+ uint64_t uValue;
+ int rc = CFGMR3QueryInteger(pIsaExts, pszValueName, &uValue);
+ if (RT_SUCCESS(rc))
+ switch (uValue)
+ {
+ case 0: *penmValue = CPUMISAEXTCFG_DISABLED; break;
+ case 1: *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED; break;
+ case 2: *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS; break;
+ case 9: *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE; break;
+ default:
+ return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
+ "Invalid config value for '/CPUM/IsaExts/%s': %llu (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
+ pszValueName, uValue);
+ }
+ /*
+ * If missing, use default.
+ */
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ *penmValue = enmDefault;
+ else
+ {
+ if (rc == VERR_CFGM_NOT_INTEGER)
+ {
+ /*
+ * Not an integer, try read it as a string.
+ */
+ char szValue[32];
+ rc = CFGMR3QueryString(pIsaExts, pszValueName, szValue, sizeof(szValue));
+ if (RT_SUCCESS(rc))
+ {
+ RTStrToLower(szValue);
+ size_t cchValue = strlen(szValue);
+#define EQ(a_str) (cchValue == sizeof(a_str) - 1U && !memcmp(szValue, a_str, sizeof(a_str) - 1))
+ if ( EQ("disabled") || EQ("disable") || EQ("off") || EQ("no"))
+ *penmValue = CPUMISAEXTCFG_DISABLED;
+ else if (EQ("enabled") || EQ("enable") || EQ("on") || EQ("yes"))
+ *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED;
+ else if (EQ("forced") || EQ("force") || EQ("always"))
+ *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS;
+ else if (EQ("portable"))
+ *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE;
+ else if (EQ("default") || EQ("def"))
+ *penmValue = enmDefault;
+ else
+ return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
+ "Invalid config value for '/CPUM/IsaExts/%s': '%s' (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
+ pszValueName, uValue);
+#undef EQ
+ }
+ }
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Error reading config value '/CPUM/IsaExts/%s': %Rrc", pszValueName, rc);
+ }
+ return VINF_SUCCESS;
+}
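+
+/* Usage sketch (hypothetical VM name, not from this source): through the
+ * VBoxInternal extradata mapping the parser above can be driven with e.g.
+ * VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/IsaExts/RDRAND" 2
+ * where the integer path maps 2 to CPUMISAEXTCFG_ENABLED_ALWAYS, or with
+ * the string "portable", which maps to CPUMISAEXTCFG_ENABLED_PORTABLE. */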
+
+
+/**
+ * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
+ *
+ * @returns VBox status code (error message raised).
+ * @param pVM The cross context VM structure. (For errors.)
+ * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
+ * @param pszValueName The value / extension name.
+ * @param penmValue Where to return the choice.
+ * @param enmDefault The default choice.
+ * @param fAllowed Allowed choice. Applied both to the result and to
+ * the default value.
+ */
+static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
+ CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
+{
+ int rc;
+ if (fAllowed)
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
+ else
+ {
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, CPUMISAEXTCFG_DISABLED /*enmDefault*/);
+ if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
+ LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
+ *penmValue = CPUMISAEXTCFG_DISABLED;
+ }
+ return rc;
+}
+
+
+/**
+ * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
+ *
+ * @returns VBox status code (error message raised).
+ * @param pVM The cross context VM structure. (For errors.)
+ * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
+ * @param pCpumCfg The /CPUM node (can be NULL).
+ * @param pszValueName The value / extension name.
+ * @param penmValue Where to return the choice.
+ * @param enmDefault The default choice.
+ */
+static int cpumR3CpuIdReadIsaExtCfgLegacy(PVM pVM, PCFGMNODE pIsaExts, PCFGMNODE pCpumCfg, const char *pszValueName,
+ CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
+{
+ if (CFGMR3Exists(pCpumCfg, pszValueName))
+ {
+ if (!CFGMR3Exists(pIsaExts, pszValueName))
+ LogRel(("Warning: /CPUM/%s is deprecated, use /CPUM/IsaExts/%s instead.\n", pszValueName, pszValueName));
+ else
+ return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS,
+ "Duplicate config values '/CPUM/%s' and '/CPUM/IsaExts/%s' - please remove the former!",
+ pszValueName, pszValueName);
+
+ bool fLegacy;
+ int rc = CFGMR3QueryBoolDef(pCpumCfg, pszValueName, &fLegacy, enmDefault != CPUMISAEXTCFG_DISABLED);
+ if (RT_SUCCESS(rc))
+ {
+ *penmValue = fLegacy;
+ return VINF_SUCCESS;
+ }
+ return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS, "Error querying '/CPUM/%s': %Rrc", pszValueName, rc);
+ }
+
+ return cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
+}
+
+
+static int cpumR3CpuIdReadConfig(PVM pVM, PCPUMCPUIDCONFIG pConfig, PCFGMNODE pCpumCfg, bool fNestedPagingAndFullGuestExec)
+{
+ int rc;
+
+ /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
+ * When non-zero, CPUID features that could cause portability issues will be
+ * stripped. The higher the value, the more features get stripped. Higher
+ * values should only be used when older CPUs are involved since it may
+ * harm performance and maybe also cause problems with specific guests. */
+ rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pVM->cpum.s.u8PortableCpuIdLevel, 0);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/GuestCpuName, string}
+ * The name of the CPU we're to emulate. The default is the host CPU.
+ * Note! CPUs other than "host" one is currently unsupported. */
+ rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", pConfig->szCpuName, sizeof(pConfig->szCpuName), "host");
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
+ * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
+ * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
+ * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &pConfig->fNt4LeafLimit, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/InvariantTsc, boolean, true}
+ * Pass-through the invariant TSC flag in 0x80000007 if available on the host
+ * CPU. On AMD CPUs, users may wish to suppress it to avoid trouble from older
+ * 64-bit linux guests which assume the presence of AMD performance counters
+ * that we do not virtualize.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantTsc", &pConfig->fInvariantTsc, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/InvariantApic, boolean, true}
+ * Set the Always Running APIC Timer (ARAT) flag in CPUID leaf 6 if true; otherwise
+ * pass through the host setting. The Windows 10/11 HAL won't use APIC timers
+ * unless the ARAT bit is set. Note that both Intel and AMD set this bit.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantApic", &pConfig->fInvariantApic, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/ForceVme, boolean, false}
+ * Always expose the VME (Virtual-8086 Mode Extensions) capability if true.
+ * By default the flag is passed thru as is from the host CPU, except
+ * on AMD Ryzen CPUs where it's masked to avoid trouble with XP/Server 2003
+ * guests and DOS boxes in general.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "ForceVme", &pConfig->fForceVme, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
+ * Restrict the reported CPU family+model+stepping of Intel CPUs. This is
+ * probably going to be a temporary hack, so don't depend on this.
+ * The 1st byte of the value is the stepping, the 2nd byte the model, the
+ * 3rd byte the family, and the 4th byte must be zero.
+ */
+ rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &pConfig->uMaxIntelFamilyModelStep, UINT32_MAX);
+ AssertLogRelRCReturn(rc, rc);
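+ /* Illustration only (hypothetical values, never compiled): packing a cap
+ * per the byte layout described above, e.g. family 6, model 0x3a,
+ * stepping 9: */
+#if 0
+ uint32_t const uExampleCap = (UINT32_C(6) << 16) | (UINT32_C(0x3a) << 8) | UINT32_C(9); /* 0x00063a09 */
+#endif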
+
+ /** @cfgm{/CPUM/MaxStdLeaf, uint32_t, 0x00000016}
+ * The last standard leaf to keep. The actual last value that is stored in EAX
+ * is RT_MAX(CPUID[0].EAX,/CPUM/MaxStdLeaf). Leaves beyond the max leaf are
+ * removed. (This works independently of and differently from NT4LeafLimit.)
+ * The default is usually set to what we're able to reasonably sanitize.
+ */
+ rc = CFGMR3QueryU32Def(pCpumCfg, "MaxStdLeaf", &pConfig->uMaxStdLeaf, UINT32_C(0x00000016));
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MaxExtLeaf, uint32_t, 0x8000001e}
+ * The last extended leaf to keep. The actual last value that is stored in EAX
+ * is RT_MAX(CPUID[0x80000000].EAX,/CPUM/MaxExtLeaf). Leaves beyond the max
+ * leaf are removed. The default is set to what we're able to sanitize.
+ */
+ rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x8000001e));
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MaxCentaurLeaf, uint32_t, 0xc0000004}
+ * The last Centaur leaf to keep. The actual last value that is stored in EAX
+ * is RT_MAX(CPUID[0xc0000000].EAX,/CPUM/MaxCentaurLeaf). Leaves beyond the max
+ * leaf are removed. The default is set to what we're able to sanitize.
+ */
+ rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
+ AssertLogRelRCReturn(rc, rc);
+
+ bool fQueryNestedHwvirt = false
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
+ || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA
+#endif
+ ;
+ if (fQueryNestedHwvirt)
+ {
+ /** @cfgm{/CPUM/NestedHWVirt, bool, false}
+ * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
+ * The default is false, and when enabled requires a 64-bit CPU with support for
+ * nested-paging and AMD-V or unrestricted guest mode.
+ */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
+ AssertLogRelRCReturn(rc, rc);
+ if (pConfig->fNestedHWVirt)
+ {
+ /** @todo Think about enabling this later with NEM/KVM. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ LogRel(("CPUM: Warning! Can't turn on nested VT-x/AMD-V when NEM is used! (later)\n"));
+ pConfig->fNestedHWVirt = false;
+ }
+ else if (!fNestedPagingAndFullGuestExec)
+ return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
+ "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
+ }
+ }
+
+ /*
+ * Instruction Set Architecture (ISA) Extensions.
+ */
+ PCFGMNODE pIsaExts = CFGMR3GetChild(pCpumCfg, "IsaExts");
+ if (pIsaExts)
+ {
+ rc = CFGMR3ValidateConfig(pIsaExts, "/CPUM/IsaExts/",
+ "CMPXCHG16B"
+ "|MONITOR"
+ "|MWaitExtensions"
+ "|SSE4.1"
+ "|SSE4.2"
+ "|XSAVE"
+ "|AVX"
+ "|AVX2"
+ "|AESNI"
+ "|PCLMUL"
+ "|POPCNT"
+ "|MOVBE"
+ "|RDRAND"
+ "|RDSEED"
+ "|CLFLUSHOPT"
+ "|FSGSBASE"
+ "|PCID"
+ "|INVPCID"
+ "|FlushCmdMsr"
+ "|ABM"
+ "|SSE4A"
+ "|MISALNSSE"
+ "|3DNOWPRF"
+ "|AXMMX"
+ , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /** @cfgm{/CPUM/IsaExts/CMPXCHG16B, boolean, true}
+ * Expose CMPXCHG16B to the guest if available. All host CPUs which support
+ * hardware virtualization have it.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "CMPXCHG16B", &pConfig->enmCmpXchg16b, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/MONITOR, boolean, true}
+ * Expose MONITOR/MWAIT instructions to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MONITOR", &pConfig->enmMonitor, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/MWaitExtensions, boolean, false}
+ * Expose MWAIT extended features to the guest. For now we expose just MWAIT
+ * break on interrupt feature (bit 1).
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MWaitExtensions", &pConfig->enmMWaitExtensions, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/SSE4.1, boolean, true}
+ * Expose SSE4.1 to the guest if available.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.1", &pConfig->enmSse41, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/SSE4.2, boolean, true}
+ * Expose SSE4.2 to the guest if available.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor
+ && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
+ && ( VM_IS_NEM_ENABLED(pVM)
+ ? NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR
+ : VM_IS_EXEC_ENGINE_IEM(pVM)
+ ? false /** @todo IEM and XSAVE @bugref{9898} */
+ : fNestedPagingAndFullGuestExec);
+ uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
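+ /* Note: fMayHaveXSave above requires host XSAVE/XRSTOR support in both
+ * hardware and the OS, plus an execution engine that can handle it: the
+ * NEM feature flag when NEM is used, never for pure IEM (see the @bugref
+ * above), otherwise nested paging with unrestricted guest execution. */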
+
+ /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
+ * Expose XSAVE/XRSTOR to the guest if available. For the time being the
+ * default is to only expose this to VMs with nested paging and AMD-V or
+ * unrestricted guest execution mode. Not possible to force this one without
+ * host support at the moment.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
+ fMayHaveXSave /*fAllowed*/);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
+ * Expose the AVX instruction set extensions to the guest if available and
+ * XSAVE is exposed too. For the time being the default is to only expose this
+ * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
+ fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
+ * Expose the AVX2 instruction set extensions to the guest if available and
+ * XSAVE is exposed too. For the time being the default is to only expose this
+ * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec /* temporarily */,
+ fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/AESNI, isaextcfg, depends}
+ * Whether to expose the AES instructions to the guest. For the time being the
+ * default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AESNI", &pConfig->enmAesNi, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/PCLMUL, isaextcfg, depends}
+ * Whether to expose the PCLMULQDQ instructions to the guest. For the time
+ * being the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCLMUL", &pConfig->enmPClMul, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/POPCNT, isaextcfg, true}
+ * Whether to expose the POPCNT instructions to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "POPCNT", &pConfig->enmPopCnt, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/MOVBE, isaextcfg, depends}
+ * Whether to expose the MOVBE instructions to the guest. For the time
+ * being the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MOVBE", &pConfig->enmMovBe, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/RDRAND, isaextcfg, depends}
+ * Whether to expose the RDRAND instructions to the guest. For the time being
+ * the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDRAND", &pConfig->enmRdRand, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/RDSEED, isaextcfg, depends}
+ * Whether to expose the RDSEED instructions to the guest. For the time being
+ * the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDSEED", &pConfig->enmRdSeed, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/CLFLUSHOPT, isaextcfg, depends}
+ * Whether to expose the CLFLUSHOPT instructions to the guest. For the time
+ * being the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "CLFLUSHOPT", &pConfig->enmCLFlushOpt, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/FSGSBASE, isaextcfg, true}
+ * Whether to expose the read/write FSGSBASE instructions to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FSGSBASE", &pConfig->enmFsGsBase, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/PCID, isaextcfg, true}
+ * Whether to expose the PCID feature to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCID", &pConfig->enmPcid, pConfig->enmFsGsBase);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/INVPCID, isaextcfg, true}
+ * Whether to expose the INVPCID instruction to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "INVPCID", &pConfig->enmInvpcid, pConfig->enmFsGsBase);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/FlushCmdMsr, isaextcfg, true}
+ * Whether to expose the IA32_FLUSH_CMD MSR to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FlushCmdMsr", &pConfig->enmFlushCmdMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/MdsClear, isaextcfg, true}
+ * Whether to advertise the VERW and MDS related IA32_FLUSH_CMD MSR bits to
+ * the guest. Requires FlushCmdMsr to be present too.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MdsClear", &pConfig->enmMdsClear, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/ArchCapMsr, isaextcfg, true}
+ * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+ AssertLogRelRCReturn(rc, rc);
+
+
+ /* AMD: */
+
+ /** @cfgm{/CPUM/IsaExts/ABM, isaextcfg, true}
+ * Whether to expose the AMD ABM instructions to the guest.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ABM", &pConfig->enmAbm, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/SSE4A, isaextcfg, depends}
+ * Whether to expose the AMD SSE4A instructions to the guest. For the time
+ * being the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/MISALNSSE, isaextcfg, depends}
+ * Whether to expose the AMD MisAlSse feature (MXCSR flag 17) to the guest. For
+ * the time being the default is to only do this for VMs with nested paging and
+ * AMD-V or unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MISALNSSE", &pConfig->enmMisAlnSse, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/3DNOWPRF, isaextcfg, depends}
+ * Whether to expose the AMD 3D Now! prefetch instructions to the guest.
+ * For the time being the default is to only do this for VMs with nested paging
+ * and AMD-V or unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "3DNOWPRF", &pConfig->enm3dNowPrf, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/IsaExts/AXMMX, isaextcfg, depends}
+ * Whether to expose AMD's MMX extensions to the guest. For the time being
+ * the default is to only do this for VMs with nested paging and AMD-V or
+ * unrestricted guest mode.
+ */
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
+ AssertLogRelRCReturn(rc, rc);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes the emulated CPU's CPUID & MSR information.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pHostMsrs Pointer to the host MSRs.
+ */
+int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
+{
+ Assert(pHostMsrs);
+
+ PCPUM pCpum = &pVM->cpum.s;
+ PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
+
+ /*
+ * Set the fCpuIdApicFeatureVisible flags so the APIC can assume visibility
+ * on construction and manage everything from here on.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.fCpuIdApicFeatureVisible = true;
+ }
+
+ /*
+ * Read the configuration.
+ */
+ CPUMCPUIDCONFIG Config;
+ RT_ZERO(Config);
+
+ bool const fNestedPagingAndFullGuestExec = VM_IS_NEM_ENABLED(pVM)
+ || HMAreNestedPagingAndFullGuestExecEnabled(pVM);
+ int rc = cpumR3CpuIdReadConfig(pVM, &Config, pCpumCfg, fNestedPagingAndFullGuestExec);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Get the guest CPU data from the database and/or the host.
+ *
+ * The CPUID and MSRs are currently living on the regular heap to avoid
+ * fragmenting the hyper heap (and because there isn't/wasn't any realloc
+ * API for the hyper heap). This means special cleanup considerations.
+ */
+ /** @todo The hyper heap will be removed ASAP, so the final destination is
+ * now fixed-size arrays in the VM structure. Maybe we can simplify
+ * this allocation fun a little now? Or maybe it's too convenient for
+ * the CPU reporter code... No time to figure that out now. */
+ rc = cpumR3DbGetCpuInfo(Config.szCpuName, &pCpum->GuestInfo);
+ if (RT_FAILURE(rc))
+ return rc == VERR_CPUM_DB_CPU_NOT_FOUND
+ ? VMSetError(pVM, rc, RT_SRC_POS,
+ "Info on guest CPU '%s' could not be found. Please, select a different CPU.", Config.szCpuName)
+ : rc;
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ if (pCpum->GuestInfo.fMxCsrMask & ~pVM->cpum.s.fHostMxCsrMask)
+ {
+ LogRel(("Stripping unsupported MXCSR bits from guest mask: %#x -> %#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask,
+ pCpum->GuestInfo.fMxCsrMask & pVM->cpum.s.fHostMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
+ pCpum->GuestInfo.fMxCsrMask &= pVM->cpum.s.fHostMxCsrMask;
+ }
+ LogRel(("CPUM: MXCSR_MASK=%#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
+#else
+ LogRel(("CPUM: MXCSR_MASK=%#x\n", pCpum->GuestInfo.fMxCsrMask));
+#endif
+
+ /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
+ * Overrides the guest MSRs.
+ */
+ rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
+
+ /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
+ * Overrides the CPUID leaf values (from the host CPU usually) used for
+ * calculating the guest CPUID leaves. This can be used to preserve the CPUID
+ * values when moving a VM to a different machine. Another use is restricting
+ * (or extending) the feature set exposed to the guest. */
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
+
+ if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
+ rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
+ "Found unsupported configuration node '/CPUM/CPUID/'. "
+ "Please use IMachine::setCPUIDLeaf() instead.");
+
+ CPUMMSRS GuestMsrs;
+ RT_ZERO(GuestMsrs);
+
+ /*
+ * Pre-explode the CPUID info.
+ */
+ if (RT_SUCCESS(rc))
+ rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
+ &pCpum->GuestFeatures);
+
+ /*
+ * Sanitize the CPUID information passed on to the guest.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ rc = cpumR3CpuIdSanitize(pVM, pCpum, &Config);
+ if (RT_SUCCESS(rc))
+ {
+ cpumR3CpuIdLimitLeaves(pCpum, &Config);
+ cpumR3CpuIdLimitIntelFamModStep(pCpum, &Config);
+ }
+ }
+
+ /*
+ * Set up MSRs introduced in microcode updates or that are otherwise not in
+ * the CPU profile, but are advertised in the CPUID info we just sanitized.
+ */
+ if (RT_SUCCESS(rc))
+ rc = cpumR3MsrReconcileWithCpuId(pVM);
+ /*
+ * MSR fudging.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
+ * Fudges some common MSRs if not present in the selected CPU database entry.
+ * This is for trying to keep VMs running when moved between different hosts
+ * and different CPU vendors. */
+ bool fEnable;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
+ if (RT_SUCCESS(rc) && fEnable)
+ {
+ rc = cpumR3MsrApplyFudge(pVM);
+ AssertLogRelRC(rc);
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Move the MSR and CPUID arrays over to the static VM structure allocations
+ * and explode guest CPU features again.
+ */
+ void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
+ rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
+ pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
+ RTMemFree(pvFree);
+
+ AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
+ ("%u\n", pCpum->GuestInfo.cMsrRanges));
+ memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
+ sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
+ RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
+ pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
+
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Some more configuration that we're applying at the end of everything
+ * via the CPUMR3SetGuestCpuIdFeature API.
+ */
+
+ /* Check if 64-bit guest support was enabled. */
+ bool fEnable64bit;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "Enable64bit", &fEnable64bit, false);
+ AssertRCReturn(rc, rc);
+ if (fEnable64bit)
+ {
+ /* In case of a CPU upgrade: */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* (Long mode only on Intel CPUs.) */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+
+ /* The actual feature: */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+ }
+
+ /* Check if PAE was explicitly enabled by the user. */
+ bool fEnable;
+ rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, fEnable64bit);
+ AssertRCReturn(rc, rc);
+ if (fEnable && !pVM->cpum.s.GuestFeatures.fPae)
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+
+ /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, fEnable64bit);
+ AssertRCReturn(rc, rc);
+ if (fEnable && !pVM->cpum.s.GuestFeatures.fNoExecute)
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+
+ /* Check if speculation control is enabled. */
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
+ AssertRCReturn(rc, rc);
+ if (fEnable)
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
+ else
+ {
+ /*
+ * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTL
+ * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
+ * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
+ *
+ * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
+ * EIP: _raw_spin_lock+0x14/0x30
+ * EFLAGS: 00010046 CPU: 0
+ * EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
+ * ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
+ * DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
+ * CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
+ * Call Trace:
+ * speculative_store_bypass_update+0x8e/0x180
+ * ssb_prctl_set+0xc0/0xe0
+ * arch_seccomp_spec_mitigate+0x1d/0x20
+ * do_seccomp+0x3cb/0x610
+ * SyS_seccomp+0x16/0x20
+ * do_fast_syscall_32+0x7f/0x1d0
+ * entry_SYSENTER_32+0x4e/0x7c
+ *
+ * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
+ * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
+ *
+ * As speculation control features are completely disabled anyway by the time we get here, there is no harm in informing the
+ * guest to not even try.
+ */
+ if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+ {
+ PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
+ if (pLeaf)
+ {
+ pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
+ LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
+ }
+ }
+ }
+
+ /*
+ * Finally, initialize guest VMX MSRs.
+ *
+ * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
+ * as constructing the VMX capability MSRs relies on CPU feature bits like long mode,
+ * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
+ */
+ /** @todo r=bird: given that long mode never used to be enabled before the
+ * VMINITCOMPLETED_RING0 state, and we're a lot earlier here in ring-3
+ * init, the above comment cannot be entirely accurate. */
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ {
+ Assert(Config.fNestedHWVirt);
+ cpumR3InitVmxGuestFeaturesAndMsrs(pVM, pCpumCfg, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
+
+ /* Copy MSRs to all VCPUs */
+ PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
+ }
+ }
+
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Failed before the CPUID leaves and MSR ranges were moved into the VM structure.
+ */
+ RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
+ pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
+ RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
+ pCpum->GuestInfo.paMsrRangesR3 = NULL;
+ return rc;
+}
+
+
+/**
+ * Sets a CPUID feature bit during VM initialization.
+ *
+ * Since the CPUID feature bits are generally related to CPU features, other
+ * CPUM configuration like MSRs can also be modified by calls to this API.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmFeature The feature to set.
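+ *
+ * Typical usage during VM construction, as seen in cpumR3InitCpuIdAndMsrs
+ * above:
+ * @code
+ *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+ * @endcode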
+ */
+VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
+{
+ PCPUMCPUIDLEAF pLeaf;
+ PCPUMMSRRANGE pMsrRange;
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
+ if (!pVM->cpum.s.HostFeatures. a_fFeature) \
+ { \
+ LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \
+ return; \
+ } else do { } while (0)
+#else
+# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) do { } while (0)
+#endif
+
+#define GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
+ do \
+ { \
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); \
+ if (!pLeaf) \
+ { \
+ LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when no 0x80000001 CPUID leaf!\n")); \
+ return; \
+ } \
+ CHECK_X86_HOST_FEATURE_RET(a_fFeature,a_szFeature); \
+ } while (0)
+
+ switch (enmFeature)
+ {
+ /*
+ * Set the APIC bit in both feature masks.
+ */
+ case CPUMCPUIDFEATURE_APIC:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
+
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
+
+ pVM->cpum.s.GuestFeatures.fApic = 1;
+
+ /* Make sure we've got the APICBASE MSR present. */
+ pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
+ if (!pMsrRange)
+ {
+ static CPUMMSRRANGE const s_ApicBase =
+ {
+ /*.uFirst =*/ MSR_IA32_APICBASE, /*.uLast =*/ MSR_IA32_APICBASE,
+ /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ApicBase, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32ApicBase,
+ /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
+ /*.szName = */ "IA32_APIC_BASE"
+ };
+ int rc = CPUMR3MsrRangesInsert(pVM, &s_ApicBase);
+ AssertLogRelRC(rc);
+ }
+
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
+ break;
+
+ /*
+ * Set the x2APIC bit in the standard feature mask.
+ * Note! ASSUMES CPUMCPUIDFEATURE_APIC is called first.
+ */
+ case CPUMCPUIDFEATURE_X2APIC:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
+ pVM->cpum.s.GuestFeatures.fX2Apic = 1;
+
+ /* Make sure the MSR doesn't GP or ignore the EXTD bit. */
+ pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
+ if (pMsrRange)
+ {
+ pMsrRange->fWrGpMask &= ~MSR_IA32_APICBASE_EXTD;
+ pMsrRange->fWrIgnMask &= ~MSR_IA32_APICBASE_EXTD;
+ }
+
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
+ break;
+
+ /*
+ * Set the sysenter/sysexit bit in the standard feature mask.
+ * Assumes the caller knows what it's doing! (host must support these)
+ */
+ case CPUMCPUIDFEATURE_SEP:
+ CHECK_X86_HOST_FEATURE_RET(fSysEnter, "SEP");
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
+ pVM->cpum.s.GuestFeatures.fSysEnter = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
+ break;
+
+ /*
+ * Set the syscall/sysret bit in the extended feature mask.
+ * Assumes the caller knows what it's doing! (host must support these)
+ */
+ case CPUMCPUIDFEATURE_SYSCALL:
+ GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fSysCall, "SYSCALL/SYSRET");
+
+ /* Valid for both Intel and AMD CPUs, although only in 64-bit mode on Intel. */
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
+ pVM->cpum.s.GuestFeatures.fSysCall = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
+ break;
+
+ /*
+ * Set the PAE bit in both feature masks.
+ * Assumes the caller knows what it's doing! (host must support these)
+ */
+ case CPUMCPUIDFEATURE_PAE:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
+
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if ( pLeaf
+ && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
+
+ pVM->cpum.s.GuestFeatures.fPae = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
+ break;
+
+ /*
+ * Set the LONG MODE bit in the extended feature mask.
+ * Assumes the caller knows what it's doing! (host must support these)
+ */
+ case CPUMCPUIDFEATURE_LONG_MODE:
+ GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLongMode, "LONG MODE");
+
+ /* Valid for both Intel and AMD. */
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+ pVM->cpum.s.GuestFeatures.fLongMode = 1;
+ pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic &= ~VMX_BASIC_PHYSADDR_WIDTH_32BIT;
+ }
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
+ break;
+
+ /*
+ * Set the NX/XD bit in the extended feature mask.
+ * Assumes the caller knows what it's doing! (host must support these)
+ */
+ case CPUMCPUIDFEATURE_NX:
+ GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fNoExecute, "NX/XD");
+
+ /* Valid for both Intel and AMD. */
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
+ pVM->cpum.s.GuestFeatures.fNoExecute = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
+ break;
+
+
+ /*
+ * Set the LAHF/SAHF support in 64-bit mode.
+ * Assumes the caller knows what it's doing! (host must support this)
+ */
+ case CPUMCPUIDFEATURE_LAHF:
+ GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLahfSahf, "LAHF/SAHF");
+
+ /* Valid for both Intel and AMD. */
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+ pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
+ break;
+
+ /*
+ * Set the RDTSCP support bit.
+ * Assumes the caller knows what it's doing! (host must support this)
+ */
+ case CPUMCPUIDFEATURE_RDTSCP:
+ if (pVM->cpum.s.u8PortableCpuIdLevel > 0)
+ return;
+ GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fRdTscP, "RDTSCP");
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+
+ /* Valid for both Intel and AMD. */
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+ pVM->cpum.s.GuestFeatures.fRdTscP = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
+ break;
+
+ /*
+ * Set the Hypervisor Present bit in the standard feature mask.
+ */
+ case CPUMCPUIDFEATURE_HVP:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
+ pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
+ break;
+
+ /*
+ * Set up the speculation control CPUID bits and MSRs. This is quite complicated
+ * on Intel CPUs, and different on AMD CPUs.
+ */
+ case CPUMCPUIDFEATURE_SPEC_CTRL:
+ if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ {
+ pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
+ if ( !pLeaf
+ || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))
+ {
+ LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
+ return;
+ }
+
+ /* The feature can be enabled. Let's see what we can actually do. */
+ pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
+
+ /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
+ if (pVM->cpum.s.HostFeatures.fIbrs)
+ {
+ pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
+ pVM->cpum.s.GuestFeatures.fIbrs = 1;
+ if (pVM->cpum.s.HostFeatures.fStibp)
+ {
+ pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_STIBP;
+ pVM->cpum.s.GuestFeatures.fStibp = 1;
+ }
+
+ /* Make sure we have the speculation control MSR... */
+ pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_SPEC_CTRL);
+ if (!pMsrRange)
+ {
+ static CPUMMSRRANGE const s_SpecCtrl =
+ {
+ /*.uFirst =*/ MSR_IA32_SPEC_CTRL, /*.uLast =*/ MSR_IA32_SPEC_CTRL,
+ /*.enmRdFn =*/ kCpumMsrRdFn_Ia32SpecCtrl, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32SpecCtrl,
+ /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
+ /*.szName = */ "IA32_SPEC_CTRL"
+ };
+ int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
+ AssertLogRelRC(rc);
+ }
+
+ /* ... and the predictor command MSR. */
+ pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_PRED_CMD);
+ if (!pMsrRange)
+ {
+ /** @todo incorrect fWrGpMask. */
+ static CPUMMSRRANGE const s_PredCmd =
+ {
+ /*.uFirst =*/ MSR_IA32_PRED_CMD, /*.uLast =*/ MSR_IA32_PRED_CMD,
+ /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32PredCmd,
+ /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
+ /*.szName = */ "IA32_PRED_CMD"
+ };
+ int rc = CPUMR3MsrRangesInsert(pVM, &s_PredCmd);
+ AssertLogRelRC(rc);
+ }
+
+ }
+
+ if (pVM->cpum.s.HostFeatures.fArchCap)
+ {
+ /* Install the architectural capabilities MSR. */
+ pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
+ if (!pMsrRange)
+ {
+ static CPUMMSRRANGE const s_ArchCaps =
+ {
+ /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES, /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
+ /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities, /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
+ /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ UINT64_MAX,
+ /*.szName = */ "IA32_ARCH_CAPABILITIES"
+ };
+ int rc = CPUMR3MsrRangesInsert(pVM, &s_ArchCaps);
+ AssertLogRelRC(rc);
+ }
+
+ /* Advertise IBRS_ALL if present at this point... */
+ if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
+ VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
+ }
+
+ LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
+ }
+ else if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+ {
+ /* The precise details of AMD's implementation are not yet clear. */
+ }
+ break;
+
+ default:
+ AssertMsgFailed(("enmFeature=%d\n", enmFeature));
+ break;
+ }
+
+ /** @todo can probably kill this as this API is now init time only... */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
+ }
+
+#undef GET_8000_0001_CHECK_X86_HOST_FEATURE_RET
+#undef CHECK_X86_HOST_FEATURE_RET
+}
+
+
+/**
+ * Queries a CPUID feature bit.
+ *
+ * @returns boolean for feature presence
+ * @param pVM The cross context VM structure.
+ * @param enmFeature The feature to query.
+ * @deprecated Use the cpum.ro.GuestFeatures directly instead.
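+ *
+ * For example, instead of
+ * @code
+ *     bool fNx = CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+ * @endcode
+ * new code should read the exploded feature flag directly:
+ * @code
+ *     bool fNx = pVM->cpum.ro.GuestFeatures.fNoExecute;
+ * @endcode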
+ */
+VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
+{
+ switch (enmFeature)
+ {
+ case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
+ case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
+ case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
+ case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
+ case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
+ case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
+ case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
+ case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
+ case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
+ case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
+ case CPUMCPUIDFEATURE_SPEC_CTRL: return pVM->cpum.s.GuestFeatures.fSpeculationControl;
+ case CPUMCPUIDFEATURE_INVALID:
+ case CPUMCPUIDFEATURE_32BIT_HACK:
+ break;
+ }
+ AssertFailed();
+ return false;
+}
+
+
+/**
+ * Clears a CPUID feature bit.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmFeature The feature to clear.
+ *
+ * @deprecated Probably better to default the feature to disabled and only allow
+ * setting (enabling) it during construction.
+ */
+VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
+{
+ PCPUMCPUIDLEAF pLeaf;
+ switch (enmFeature)
+ {
+ case CPUMCPUIDFEATURE_APIC:
+ Assert(!pVM->cpum.s.GuestFeatures.fApic); /* We only expect this call during init. No MSR adjusting needed. */
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
+
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
+
+ pVM->cpum.s.GuestFeatures.fApic = 0;
+ Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
+ break;
+
+ case CPUMCPUIDFEATURE_X2APIC:
+ Assert(!pVM->cpum.s.GuestFeatures.fX2Apic); /* We only expect this call during init. No MSR adjusting needed. */
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
+ pVM->cpum.s.GuestFeatures.fX2Apic = 0;
+ Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
+ break;
+
+#if 0
+ case CPUMCPUIDFEATURE_PAE:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
+
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if ( pLeaf
+ && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
+
+ pVM->cpum.s.GuestFeatures.fPae = 0;
+ Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
+ break;
+
+ case CPUMCPUIDFEATURE_LONG_MODE:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+ pVM->cpum.s.GuestFeatures.fLongMode = 0;
+ pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = 32;
+ if (pVM->cpum.s.GuestFeatures.fVmx)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic |= VMX_BASIC_PHYSADDR_WIDTH_32BIT;
+ }
+ break;
+
+ case CPUMCPUIDFEATURE_LAHF:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+ pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
+ break;
+#endif
+ case CPUMCPUIDFEATURE_RDTSCP:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+ pVM->cpum.s.GuestFeatures.fRdTscP = 0;
+ Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
+ break;
+
+#if 0
+ case CPUMCPUIDFEATURE_HVP:
+ pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ if (pLeaf)
+ pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
+ pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
+ break;
+
+ case CPUMCPUIDFEATURE_SPEC_CTRL:
+ pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
+ if (pLeaf)
+ pLeaf->uEdx &= ~(X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP);
+ VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps &= ~MSR_IA32_ARCH_CAP_F_IBRS_ALL);
+ Log(("CPUM: ClearGuestCpuIdFeature: Disabled speculation control!\n"));
+ break;
+#endif
+ default:
+ AssertMsgFailed(("enmFeature=%d\n", enmFeature));
+ break;
+ }
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
+ }
+}
+
+
+/**
+ * Do some final polishing after all calls to CPUMR3SetGuestCpuIdFeature and
+ * CPUMR3ClearGuestCpuIdFeature are (probably) done.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void cpumR3CpuIdRing3InitDone(PVM pVM)
+{
+ /*
+ * Do not advertise NX w/o PAE as it seems to confuse Windows 7 (black screen very
+ * early in real mode).
+ */
+ PCPUMCPUIDLEAF pStdLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+ PCPUMCPUIDLEAF pExtLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
+ if (pStdLeaf && pExtLeaf)
+ {
+ if ( !(pStdLeaf->uEdx & X86_CPUID_FEATURE_EDX_PAE)
+ && (pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX))
+ pExtLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_NX;
+ }
+}
+
+
+/*
+ *
+ *
+ * Saved state related code.
+ * Saved state related code.
+ * Saved state related code.
+ *
+ *
+ */
+
+/**
+ * Called both in pass 0 and the final pass.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Save all the CPU ID leaves.
+ */
+ SSMR3PutU32(pSSM, sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]));
+ SSMR3PutU32(pSSM, pVM->cpum.s.GuestInfo.cCpuIdLeaves);
+ SSMR3PutMem(pSSM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3,
+ sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]) * pVM->cpum.s.GuestInfo.cCpuIdLeaves);
+
+ SSMR3PutMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
+
+ /*
+ * Save a good portion of the raw CPU IDs as well, as they may come in
+ * handy when validating features for raw mode.
+ */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ CPUMCPUID aRawStd[16];
+ for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
+ ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
+ SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
+ SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
+
+ CPUMCPUID aRawExt[32];
+ for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
+ SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
+ SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
+
+#else
+ /* Two zero counts on non-x86 hosts. */
+ SSMR3PutU32(pSSM, 0);
+ SSMR3PutU32(pSSM, 0);
+#endif
+}
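+
+/*
+ * For reference, the unit layout written above (and consumed by
+ * cpumR3LoadGuestCpuIdArray and cpumR3LoadCpuIdInner below) is:
+ *     uint32_t        cbLeaf;                 - sizeof(CPUMCPUIDLEAF)
+ *     uint32_t        cLeaves;
+ *     CPUMCPUIDLEAF   aLeaves[cLeaves];
+ *     CPUMCPUID       DefCpuId;
+ *     uint32_t        cRawStd;                - 16 on x86/AMD64 hosts, else 0
+ *     CPUMCPUID       aRawStd[cRawStd];
+ *     uint32_t        cRawExt;                - 32 on x86/AMD64 hosts, else 0
+ *     CPUMCPUID       aRawExt[cRawExt];
+ */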
+
+
+static int cpumR3LoadOneOldGuestCpuIdArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+ uint32_t cCpuIds;
+ int rc = SSMR3GetU32(pSSM, &cCpuIds);
+ if (RT_SUCCESS(rc))
+ {
+ if (cCpuIds < 64)
+ {
+ for (uint32_t i = 0; i < cCpuIds; i++)
+ {
+ CPUMCPUID CpuId;
+ rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
+ if (RT_FAILURE(rc))
+ break;
+
+ CPUMCPUIDLEAF NewLeaf;
+ NewLeaf.uLeaf = uBase + i;
+ NewLeaf.uSubLeaf = 0;
+ NewLeaf.fSubLeafMask = 0;
+ NewLeaf.uEax = CpuId.uEax;
+ NewLeaf.uEbx = CpuId.uEbx;
+ NewLeaf.uEcx = CpuId.uEcx;
+ NewLeaf.uEdx = CpuId.uEdx;
+ NewLeaf.fFlags = 0;
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
+ }
+ }
+ else
+ rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(*ppaLeaves);
+ *ppaLeaves = NULL;
+ *pcLeaves = 0;
+ }
+ return rc;
+}
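+
+/*
+ * Note: the old (pre CPUM_SAVED_STATE_VERSION_PUT_STRUCT) format stores
+ * three such count-prefixed arrays back to back, based at leaves
+ * 0x00000000, 0x80000000 and 0xc0000000; see cpumR3LoadGuestCpuIdArray
+ * below.
+ */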
+
+
+static int cpumR3LoadGuestCpuIdArray(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+ *ppaLeaves = NULL;
+ *pcLeaves = 0;
+
+ int rc;
+ if (uVersion > CPUM_SAVED_STATE_VERSION_PUT_STRUCT)
+ {
+ /*
+ * The new format. Starts by declaring the leaf size and count.
+ */
+ uint32_t cbLeaf;
+ SSMR3GetU32(pSSM, &cbLeaf);
+ uint32_t cLeaves;
+ rc = SSMR3GetU32(pSSM, &cLeaves);
+ if (RT_SUCCESS(rc))
+ {
+ if (cbLeaf == sizeof(**ppaLeaves))
+ {
+ if (cLeaves <= CPUM_CPUID_MAX_LEAVES)
+ {
+ /*
+ * Load the leaves one by one.
+ *
+ * The uPrev stuff is a kludge for working around a week's worth of bad saved
+ * states during the CPUID revamp in March 2015. We saved too many leaves
+ * due to a bug in cpumR3CpuIdInstallAndExplodeLeaves, thus ending up with
+ * garbage entries at the end of the array when restoring. We also had
+ * a subleaf insertion bug that triggered with the leaf 4 stuff below;
+ * this kludge doesn't deal correctly with that, but who cares...
+ */
+ uint32_t uPrev = 0;
+ for (uint32_t i = 0; i < cLeaves && RT_SUCCESS(rc); i++)
+ {
+ CPUMCPUIDLEAF Leaf;
+ rc = SSMR3GetMem(pSSM, &Leaf, sizeof(Leaf));
+ if (RT_SUCCESS(rc))
+ {
+ if ( uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
+ || Leaf.uLeaf >= uPrev)
+ {
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
+ uPrev = Leaf.uLeaf;
+ }
+ else
+ uPrev = UINT32_MAX;
+ }
+ }
+ }
+ else
+ rc = SSMR3SetLoadError(pSSM, VERR_TOO_MANY_CPUID_LEAVES, RT_SRC_POS,
+ "Too many CPUID leaves: %#x, max %#x", cLeaves, CPUM_CPUID_MAX_LEAVES);
+ }
+ else
+ rc = SSMR3SetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
+ "CPUMCPUIDLEAF size differs: saved=%#x, our=%#x", cbLeaf, sizeof(**ppaLeaves));
+ }
+ }
+ else
+ {
+ /*
+ * The old format with its three inflexible arrays.
+ */
+ rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Fake up leaf 4 on Intel like we used to do in CPUMGetGuestCpuId earlier.
+ */
+ PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(*ppaLeaves, *pcLeaves, 0, 0);
+ if ( pLeaf
+ && RTX86IsIntelCpu(pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx))
+ {
+ CPUMCPUIDLEAF Leaf;
+ Leaf.uLeaf = 4;
+ Leaf.fSubLeafMask = UINT32_MAX;
+ Leaf.uSubLeaf = 0;
+ Leaf.uEdx = UINT32_C(0); /* 3 flags, 0 is fine. */
+ Leaf.uEcx = UINT32_C(63); /* sets - 1 */
+ Leaf.uEbx = (UINT32_C(7) << 22) /* associativity -1 */
+ | (UINT32_C(0) << 12) /* phys line partitions - 1 */
+ | UINT32_C(63); /* system coherency line size - 1 */
+ Leaf.uEax = (RT_MIN(pVM->cCpus - 1, UINT32_C(0x3f)) << 26) /* cores per package - 1 */
+ | (UINT32_C(0) << 14) /* threads per cache - 1 */
+ | (UINT32_C(1) << 5) /* cache level */
+ | UINT32_C(1); /* cache type (data) */
+ Leaf.fFlags = 0;
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
+ if (RT_SUCCESS(rc))
+ {
+ Leaf.uSubLeaf = 1; /* Should've been cache type 2 (code), but buggy code made it data. */
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ Leaf.uSubLeaf = 2; /* Should've been cache type 3 (unified), but buggy code made it data. */
+ Leaf.uEcx = 4095; /* sets - 1 */
+ Leaf.uEbx &= UINT32_C(0x003fffff); /* associativity - 1 */
+ Leaf.uEbx |= UINT32_C(23) << 22;
+ Leaf.uEax &= UINT32_C(0xfc003fff); /* threads per cache - 1 */
+ Leaf.uEax |= RT_MIN(pVM->cCpus - 1, UINT32_C(0xfff)) << 14;
+ Leaf.uEax &= UINT32_C(0xffffff1f); /* level */
+ Leaf.uEax |= UINT32_C(2) << 5;
+ rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
+ }
+ }
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Loads the CPU ID leaves saved by pass 0, inner worker.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uVersion The format version.
+ * @param paLeaves Guest CPUID leaves loaded from the state.
+ * @param cLeaves The number of leaves in @a paLeaves.
+ * @param pMsrs The guest MSRs.
+ */
+int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
+{
+ AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
+ AssertMsgFailed(("Port me!"));
+#endif
+
+ /*
+ * Continue loading the state into stack buffers.
+ */
+ CPUMCPUID GuestDefCpuId;
+ int rc = SSMR3GetMem(pSSM, &GuestDefCpuId, sizeof(GuestDefCpuId));
+ AssertRCReturn(rc, rc);
+
+ CPUMCPUID aRawStd[16];
+ uint32_t cRawStd;
+ rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
+ if (cRawStd > RT_ELEMENTS(aRawStd))
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
+ AssertRCReturn(rc, rc);
+ for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
+#else
+ RT_ZERO(aRawStd[i]);
+#endif
+
+ CPUMCPUID aRawExt[32];
+ uint32_t cRawExt;
+ rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
+ if (cRawExt > RT_ELEMENTS(aRawExt))
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
+ AssertRCReturn(rc, rc);
+ for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
+#else
+ RT_ZERO(aRawExt[i]);
+#endif
+
+ /*
+ * Get the raw CPU IDs for the current host.
+ */
+ CPUMCPUID aHostRawStd[16];
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
+ ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
+#else
+ RT_ZERO(aHostRawStd);
+#endif
+
+ CPUMCPUID aHostRawExt[32];
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
+ &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
+#else
+ RT_ZERO(aHostRawExt);
+#endif
+
+ /*
+ * Get the host and guest overrides so we don't reject the state because
+ * some feature was enabled through these interfaces.
+ * Note! We currently only need the feature leaves, so we skip the rest.
+ */
+ PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
+ CPUMCPUID aHostOverrideStd[2];
+ memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
+ cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
+
+ CPUMCPUID aHostOverrideExt[2];
+ memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
+ cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
+
+ /*
+ * Read the StrictCpuIdChecks config value; failure is harmless here as we default to strict checking.
+ */
+ bool fStrictCpuIdChecks;
+ CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
+
+ /*
+ * Define a bunch of macros for simplifying the sanitizing/checking code below.
+ */
+ /* Generic expression + failure message. */
+#define CPUID_CHECK_RET(expr, fmt) \
+ do { \
+ if (!(expr)) \
+ { \
+ char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
+ if (fStrictCpuIdChecks) \
+ { \
+ int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
+ RTStrFree(pszMsg); \
+ return rcCpuid; \
+ } \
+ LogRel(("CPUM: %s\n", pszMsg)); \
+ RTStrFree(pszMsg); \
+ } \
+ } while (0)
+#define CPUID_CHECK_WRN(expr, fmt) \
+ do { \
+ if (!(expr)) \
+ LogRel(fmt); \
+ } while (0)
+
+ /* For comparing two values and bitching if they differ. */
+#define CPUID_CHECK2_RET(what, host, saved) \
+ do { \
+ if ((host) != (saved)) \
+ { \
+ if (fStrictCpuIdChecks) \
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
+ N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
+ LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
+ } \
+ } while (0)
+#define CPUID_CHECK2_WRN(what, host, saved) \
+ do { \
+ if ((host) != (saved)) \
+ LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
+ } while (0)
+
+ /* For checking raw cpu features (raw mode). */
+#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
+ do { \
+ if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
+ { \
+ if (fStrictCpuIdChecks) \
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
+ N_(#bit " mismatch: host=%d saved=%d"), \
+ !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
+ LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
+ !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
+ } \
+ } while (0)
+#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
+ do { \
+ if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
+ LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
+ !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
+ } while (0)
+#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
+
+ /* For checking guest features. */
+#define CPUID_GST_FEATURE_RET(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && !(aHostRaw##set [1].reg & bit) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ { \
+ if (fStrictCpuIdChecks) \
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
+ N_(#bit " is not supported by the host but has already exposed to the guest")); \
+ LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
+ } \
+ } while (0)
+#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && !(aHostRaw##set [1].reg & bit) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
+ } while (0)
+#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && !(aHostRaw##set [1].reg & bit) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
+ } while (0)
+#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
+
+ /* For checking guest features if AMD guest CPU. */
+#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && fGuestAmd \
+ && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ { \
+ if (fStrictCpuIdChecks) \
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
+ N_(#bit " is not supported by the host but has already exposed to the guest")); \
+ LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
+ } \
+ } while (0)
+#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && fGuestAmd \
+ && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
+ } while (0)
+#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
+ do { \
+ if ( (aGuestCpuId##set [1].reg & bit) \
+ && fGuestAmd \
+ && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
+ && !(aHostOverride##set [1].reg & bit) \
+ ) \
+ LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
+ } while (0)
+#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
+
+ /* For checking AMD features which have a corresponding bit in the standard
+ range. (Intel defines very few bits in the extended feature sets.) */
+#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
+ do { \
+ if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
+ && !(fHostAmd \
+ ? aHostRawExt[1].reg & (ExtBit) \
+ : aHostRawStd[1].reg & (StdBit)) \
+ && !(aHostOverrideExt[1].reg & (ExtBit)) \
+ ) \
+ { \
+ if (fStrictCpuIdChecks) \
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
+ N_(#ExtBit " is not supported by the host but has already exposed to the guest")); \
+ LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
+ } \
+ } while (0)
+#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
+ do { \
+ if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
+ && !(fHostAmd \
+ ? aHostRawExt[1].reg & (ExtBit) \
+ : aHostRawStd[1].reg & (StdBit)) \
+ && !(aHostOverrideExt[1].reg & (ExtBit)) \
+ ) \
+ LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
+ } while (0)
+#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
+ do { \
+ if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
+ && !(fHostAmd \
+ ? aHostRawExt[1].reg & (ExtBit) \
+ : aHostRawStd[1].reg & (StdBit)) \
+ && !(aHostOverrideExt[1].reg & (ExtBit)) \
+ ) \
+ LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
+ } while (0)
+#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
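+
+ /*
+ * To make the pattern concrete: CPUID_GST_FEATURE_RET(Std, uEcx, X) fails
+ * the load (or, when StrictCpuIdChecks is off, merely logs) if bit X is
+ * set in the saved aGuestCpuIdStd[1].uEcx but clear in both
+ * aHostRawStd[1].uEcx and aHostOverrideStd[1].uEcx.
+ */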
+
+
+ /*
+ * Verify that we can support the features already exposed to the guest on
+ * this host.
+ *
+ * Most of the features we're emulating require intercepting instructions
+ * and doing it the slow way, so there is no need to warn when they aren't
+ * present in the host CPU. Thus we use IGN instead of EMU on these.
+ *
+ * Trailing comments:
+ * "EMU" - Possible to emulate, could be lots of work and very slow.
+ * "EMU?" - Can this be emulated?
+ */
+ CPUMCPUID aGuestCpuIdStd[2];
+ RT_ZERO(aGuestCpuIdStd);
+ cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
+
+ /* CPUID(1).ecx */
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
+ CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
+ CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SDBG);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
+ CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
+ CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
+ CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
+ CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
+
+ /* CPUID(1).edx */
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
+ CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
+ CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
+ CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
+ CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
+
+ /* CPUID(0x80000000). */
+ CPUMCPUID aGuestCpuIdExt[2];
+ RT_ZERO(aGuestCpuIdExt);
+ if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
+ {
+ /** @todo deal with no 0x80000001 on the host. */
+ bool const fHostAmd = RTX86IsAmdCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx)
+ || RTX86IsHygonCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
+ bool const fGuestAmd = RTX86IsAmdCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx)
+ || RTX86IsHygonCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
+
+ /* CPUID(0x80000001).ecx */
+ CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_XOP); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
+
+ /* CPUID(0x80000001).edx */
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
+ CPUID_GST_FEATURE_IGN( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
+ CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
+ CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
+ CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
+ CPUID_GST_FEATURE_WRN( Ext, uEdx, RT_BIT_32(21) /*reserved*/);
+ CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
+ CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+ CPUID_GST_FEATURE_IGN( Ext, uEdx, RT_BIT_32(28) /*reserved*/);
+ CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+ CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+ }
+
+ /** @todo check leaf 7 */
+
+ /* CPUID(d) - XCR0 stuff - takes ECX as input.
+ * ECX=0: EAX - Valid bits in XCR0[31:0].
+ * EBX - Maximum state size as per current XCR0 value.
+ * ECX - Maximum state size for all supported features.
+ * EDX - Valid bits in XCR0[63:32].
+ * ECX=1: EAX - Various X-features.
+ * EBX - Maximum state size as per current XCR0|IA32_XSS value.
+ * ECX - Valid bits in IA32_XSS[31:0].
+ * EDX - Valid bits in IA32_XSS[63:32].
+ * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
+ * if the bit is invalid all four registers are set to zero.
+ * EAX - The state size for this feature.
+ * EBX - The state byte offset of this feature.
+ * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
+ * EDX - Reserved, but is set to zero if invalid sub-leaf index.
+ */
+ uint64_t fGuestXcr0Mask = 0;
+ PCPUMCPUIDLEAF pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
+ if ( pCurLeaf
+ && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
+ && ( pCurLeaf->uEax
+ || pCurLeaf->uEbx
+ || pCurLeaf->uEcx
+ || pCurLeaf->uEdx) )
+ {
+ fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
+ if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
+ fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
+ if ((fGuestXcr0Mask & (XSAVE_C_X87 | XSAVE_C_SSE)) != (XSAVE_C_X87 | XSAVE_C_SSE))
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("CPUID(0xd/0).EDX:EAX missing mandatory X87 or SSE bits: %#RX64"), fGuestXcr0Mask);
+
+ /* We don't support any additional features yet. */
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
+ if (pCurLeaf && pCurLeaf->uEax)
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
+ if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
+ RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
+
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
+ {
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
+ if (pCurLeaf)
+ {
+ /* If advertised, the state component offset and size must match the one used by host. */
+ if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
+ {
+ CPUMCPUID RawHost;
+ ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
+ &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
+ if ( RawHost.uEbx != pCurLeaf->uEbx
+ || RawHost.uEax != pCurLeaf->uEax)
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
+ uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
+ }
+ }
+ }
+#endif
+ }
+ /* Clear leaf 0xd just in case we're loading an old state... */
+ else if (pCurLeaf)
+ {
+ for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
+ {
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
+ if (pCurLeaf)
+ {
+ AssertLogRelMsg( uVersion <= CPUM_SAVED_STATE_VERSION_PUT_STRUCT
+ || ( pCurLeaf->uEax == 0
+ && pCurLeaf->uEbx == 0
+ && pCurLeaf->uEcx == 0
+ && pCurLeaf->uEdx == 0),
+ ("uVersion=%#x; %#x %#x %#x %#x\n",
+ uVersion, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx));
+ pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ }
+ }
+ }
+
+ /* Update the fXStateGuestMask value for the VM. */
+ if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
+ {
+ LogRel(("CPUM: fXStateGuestMask=%#llx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
+ pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
+ if (!fGuestXcr0Mask && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
+ return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+ N_("Internal Processing Error: XSAVE feature bit enabled, but leaf 0xd is empty."));
+ }
+
+#undef CPUID_CHECK_RET
+#undef CPUID_CHECK_WRN
+#undef CPUID_CHECK2_RET
+#undef CPUID_CHECK2_WRN
+#undef CPUID_RAW_FEATURE_RET
+#undef CPUID_RAW_FEATURE_WRN
+#undef CPUID_RAW_FEATURE_IGN
+#undef CPUID_GST_FEATURE_RET
+#undef CPUID_GST_FEATURE_WRN
+#undef CPUID_GST_FEATURE_EMU
+#undef CPUID_GST_FEATURE_IGN
+#undef CPUID_GST_FEATURE2_RET
+#undef CPUID_GST_FEATURE2_WRN
+#undef CPUID_GST_FEATURE2_EMU
+#undef CPUID_GST_FEATURE2_IGN
+#undef CPUID_GST_AMD_FEATURE_RET
+#undef CPUID_GST_AMD_FEATURE_WRN
+#undef CPUID_GST_AMD_FEATURE_EMU
+#undef CPUID_GST_AMD_FEATURE_IGN
+
+ /*
+ * We're good, commit the CPU ID leaves.
+ */
+ pVM->cpum.s.GuestInfo.DefCpuId = GuestDefCpuId;
+ rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
+ AssertLogRelRCReturn(rc, rc);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads the CPU ID leaves saved by pass 0.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uVersion The format version.
+ * @param pMsrs The guest MSRs.
+ */
+int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
+{
+ AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+
+ /*
+ * Load the CPUID leaves array first and call worker to do the rest, just so
+ * we can free the memory when we need to without ending up in column 1000.
+ */
+ PCPUMCPUIDLEAF paLeaves;
+ uint32_t cLeaves;
+ int rc = cpumR3LoadGuestCpuIdArray(pVM, pSSM, uVersion, &paLeaves, &cLeaves);
+ AssertRC(rc);
+ if (RT_SUCCESS(rc))
+ {
+ rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
+ RTMemFree(paLeaves);
+ }
+ return rc;
+}
+
+
+
+/**
+ * Loads the CPU ID leaves saved by pass 0 in a pre-3.2 saved state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uVersion The format version.
+ */
+int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ AssertMsgReturn(uVersion < CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+
+ /*
+ * Restore the CPUID leaves.
+ *
+ * Note that we support restoring fewer than the current number of standard
+ * leaves because newer versions of VBox have been allowed more of them.
+ */
+ uint32_t cElements;
+ int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
+ if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
+
+ rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
+ if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
+
+ rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
+ if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
+
+ SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
+
+ /*
+ * Check that the basic cpuid id information is unchanged.
+ */
+    /** @todo we should check the 64-bit capabilities too! */
+ uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
+ ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
+#endif
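+    /* Note: on non-x86 hosts au32CpuId stays all zero, so restoring a pre-3.2
+       x86 saved state there will fail the comparison below unless the unit is
+       loaded with SSMAFTER_DEBUG_IT. */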
+ uint32_t au32CpuIdSaved[8];
+ rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
+ if (RT_SUCCESS(rc))
+ {
+ /* Ignore CPU stepping. */
+ au32CpuId[4] &= 0xfffffff0;
+ au32CpuIdSaved[4] &= 0xfffffff0;
+
+ /* Ignore APIC ID (AMD specs). */
+ au32CpuId[5] &= ~0xff000000;
+ au32CpuIdSaved[5] &= ~0xff000000;
+
+ /* Ignore the number of Logical CPUs (AMD specs). */
+ au32CpuId[5] &= ~0x00ff0000;
+ au32CpuIdSaved[5] &= ~0x00ff0000;
+
+        /* Ignore some advanced capability bits that we don't expose to the guest. */
+ au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
+ | X86_CPUID_FEATURE_ECX_VMX
+ | X86_CPUID_FEATURE_ECX_SMX
+ | X86_CPUID_FEATURE_ECX_EST
+ | X86_CPUID_FEATURE_ECX_TM2
+ | X86_CPUID_FEATURE_ECX_CNTXID
+ | X86_CPUID_FEATURE_ECX_TPRUPDATE
+ | X86_CPUID_FEATURE_ECX_PDCM
+ | X86_CPUID_FEATURE_ECX_DCA
+ | X86_CPUID_FEATURE_ECX_X2APIC
+ );
+ au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
+ | X86_CPUID_FEATURE_ECX_VMX
+ | X86_CPUID_FEATURE_ECX_SMX
+ | X86_CPUID_FEATURE_ECX_EST
+ | X86_CPUID_FEATURE_ECX_TM2
+ | X86_CPUID_FEATURE_ECX_CNTXID
+ | X86_CPUID_FEATURE_ECX_TPRUPDATE
+ | X86_CPUID_FEATURE_ECX_PDCM
+ | X86_CPUID_FEATURE_ECX_DCA
+ | X86_CPUID_FEATURE_ECX_X2APIC
+ );
+
+ /* Make sure we don't forget to update the masks when enabling
+ * features in the future.
+ */
+ AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
+ ( X86_CPUID_FEATURE_ECX_DTES64
+ | X86_CPUID_FEATURE_ECX_VMX
+ | X86_CPUID_FEATURE_ECX_SMX
+ | X86_CPUID_FEATURE_ECX_EST
+ | X86_CPUID_FEATURE_ECX_TM2
+ | X86_CPUID_FEATURE_ECX_CNTXID
+ | X86_CPUID_FEATURE_ECX_TPRUPDATE
+ | X86_CPUID_FEATURE_ECX_PDCM
+ | X86_CPUID_FEATURE_ECX_DCA
+ | X86_CPUID_FEATURE_ECX_X2APIC
+ )));
+ /* do the compare */
+ if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
+ {
+ if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
+ LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
+ "Saved=%.*Rhxs\n"
+ "Real =%.*Rhxs\n",
+ sizeof(au32CpuIdSaved), au32CpuIdSaved,
+ sizeof(au32CpuId), au32CpuId));
+ else
+ {
+ LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
+ "Saved=%.*Rhxs\n"
+ "Real =%.*Rhxs\n",
+ sizeof(au32CpuIdSaved), au32CpuIdSaved,
+ sizeof(au32CpuId), au32CpuId));
+ rc = VERR_SSM_LOAD_CPUID_MISMATCH;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+
+/*
+ *
+ *
+ * CPUID Info Handler.
+ * CPUID Info Handler.
+ * CPUID Info Handler.
+ *
+ *
+ */
+
+
+
+/**
+ * Get L1 cache / TLB associativity.
+ */
+static const char *getCacheAss(unsigned u, char *pszBuf)
+{
+ if (u == 0)
+ return "res0 ";
+ if (u == 1)
+ return "direct";
+ if (u == 255)
+ return "fully";
+ if (u >= 256)
+ return "???";
+
+ RTStrPrintf(pszBuf, 16, "%d way", u);
+ return pszBuf;
+}
+
+
+/**
+ * Get L2 cache associativity.
+ */
+static const char *getL2CacheAss(unsigned u)
+{
+ switch (u)
+ {
+ case 0: return "off ";
+ case 1: return "direct";
+ case 2: return "2 way ";
+ case 3: return "res3 ";
+ case 4: return "4 way ";
+ case 5: return "res5 ";
+ case 6: return "8 way ";
+ case 7: return "res7 ";
+ case 8: return "16 way";
+ case 9: return "res9 ";
+ case 10: return "res10 ";
+ case 11: return "res11 ";
+ case 12: return "res12 ";
+ case 13: return "res13 ";
+ case 14: return "res14 ";
+ case 15: return "fully ";
+ default: return "????";
+ }
+}
+
+
+/** CPUID(1).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf1EdxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("MSR\0" "Model Specific Registers", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("SEP\0" "SYSENTER and SYSEXIT Present", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
+ DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("PSN\0" "Processor Serial Number", 18, 1, 0),
+ DBGFREGSUBFIELD_RO("CLFSH\0" "CLFLUSH instruction", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("DS\0" "Debug Store", 21, 1, 0),
+ DBGFREGSUBFIELD_RO("ACPI\0" "Thermal Mon. & Soft. Clock Ctrl.", 22, 1, 0),
+ DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR instructions", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE\0" "SSE support", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE2\0" "SSE2 support", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("SS\0" "Self Snoop", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("HTT\0" "Hyper-Threading Technology", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("TM\0" "Therm. Monitor", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("PBE\0" "Pending Break Enabled", 31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(1).ECX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf1EcxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("SSE3\0" "SSE3 support", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("PCLMUL\0" "PCLMULQDQ support (for AES-GCM)", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("DTES64\0" "DS Area 64-bit Layout", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("MONITOR\0" "MONITOR/MWAIT instructions", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("CPL-DS\0" "CPL Qualified Debug Store", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("VMX\0" "Virtual Machine Extensions", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("SMX\0" "Safer Mode Extensions", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("EST\0" "Enhanced SpeedStep Technology", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("TM2\0" "Terminal Monitor 2", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("SSSE3\0" "Supplemental Streaming SIMD Extensions 3", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("CNTX-ID\0" "L1 Context ID", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("SDBG\0" "Silicon Debug interface", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("FMA\0" "Fused Multiply Add extensions", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("CX16\0" "CMPXCHG16B instruction", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("TPRUPDATE\0" "xTPR Update Control", 14, 1, 0),
+ DBGFREGSUBFIELD_RO("PDCM\0" "Perf/Debug Capability MSR", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("PCID\0" "Process Context Identifiers", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("DCA\0" "Direct Cache Access", 18, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE4_1\0" "SSE4_1 support", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE4_2\0" "SSE4_2 support", 20, 1, 0),
+ DBGFREGSUBFIELD_RO("X2APIC\0" "x2APIC support", 21, 1, 0),
+ DBGFREGSUBFIELD_RO("MOVBE\0" "MOVBE instruction", 22, 1, 0),
+ DBGFREGSUBFIELD_RO("POPCNT\0" "POPCNT instruction", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("TSCDEADL\0" "Time Stamp Counter Deadline", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("AES\0" "AES instructions", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("XSAVE\0" "XSAVE instruction", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("OSXSAVE\0" "OSXSAVE instruction", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX\0" "AVX support", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("F16C\0" "16-bit floating point conversion instructions", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("RDRAND\0" "RDRAND instruction", 30, 1, 0),
+ DBGFREGSUBFIELD_RO("HVP\0" "Hypervisor Present (we're a guest)", 31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(7,0).EBX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf7Sub0EbxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("FSGSBASE\0" "RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE instr.", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("TSCADJUST\0" "Supports MSR_IA32_TSC_ADJUST", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("SGX\0" "Supports Software Guard Extensions", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("BMI1\0" "Advanced Bit Manipulation extension 1", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("HLE\0" "Hardware Lock Elision", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX2\0" "Advanced Vector Extensions 2", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("FDP_EXCPTN_ONLY\0" "FPU DP only updated on exceptions", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("SMEP\0" "Supervisor Mode Execution Prevention", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("BMI2\0" "Advanced Bit Manipulation extension 2", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("ERMS\0" "Enhanced REP MOVSB/STOSB instructions", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("INVPCID\0" "INVPCID instruction", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("RTM\0" "Restricted Transactional Memory", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("PQM\0" "Platform Quality of Service Monitoring", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("DEPFPU_CS_DS\0" "Deprecates FPU CS, FPU DS values if set", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("MPE\0" "Intel Memory Protection Extensions", 14, 1, 0),
+ DBGFREGSUBFIELD_RO("PQE\0" "Platform Quality of Service Enforcement", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX512F\0" "AVX512 Foundation instructions", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("RDSEED\0" "RDSEED instruction", 18, 1, 0),
+ DBGFREGSUBFIELD_RO("ADX\0" "ADCX/ADOX instructions", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("SMAP\0" "Supervisor Mode Access Prevention", 20, 1, 0),
+ DBGFREGSUBFIELD_RO("CLFLUSHOPT\0" "CLFLUSHOPT (Cache Line Flush) instruction", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("CLWB\0" "CLWB instruction", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("INTEL_PT\0" "Intel Processor Trace", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX512PF\0" "AVX512 Prefetch instructions", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX512ER\0" "AVX512 Exponential & Reciprocal instructions", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("AVX512CD\0" "AVX512 Conflict Detection instructions", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("SHA\0" "Secure Hash Algorithm extensions", 29, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(7,0).ECX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf7Sub0EcxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("UMIP\0" "User mode insturction prevention", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("PKU\0" "Protection Key for Usermode pages", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("OSPKE\0" "CR4.PKU mirror", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("MAWAU\0" "Value used by BNDLDX & BNDSTX", 17, 5, 0),
+ DBGFREGSUBFIELD_RO("RDPID\0" "Read processor ID support", 22, 1, 0),
+ DBGFREGSUBFIELD_RO("SGX_LC\0" "Supports SGX Launch Configuration", 30, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(7,0).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("MD_CLEAR\0" "Supports MDS related buffer clearing", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("IBRS_IBPB\0" "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("STIBP\0" "Supports IA32_SPEC_CTRL.STIBP", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("FLUSH_CMD\0" "Supports IA32_FLUSH_CMD", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("ARCHCAP\0" "Supports IA32_ARCH_CAP", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("CORECAP\0" "Supports IA32_CORE_CAP", 30, 1, 0),
+ DBGFREGSUBFIELD_RO("SSBD\0" "Supports IA32_SPEC_CTRL.SSBD", 31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+
+/** CPUID(13,0).EAX+EDX, XCR0, ++ bit descriptions. */
+static DBGFREGSUBFIELD const g_aXSaveStateBits[] =
+{
+ DBGFREGSUBFIELD_RO("x87\0" "Legacy FPU state", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE\0" "128-bit SSE state", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("YMM_Hi128\0" "Upper 128 bits of YMM0-15 (AVX)", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("BNDREGS\0" "MPX bound register state", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("BNDCSR\0" "MPX bound config and status state", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("Opmask\0" "opmask state", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("ZMM_Hi256\0" "Upper 256 bits of ZMM0-15 (AVX-512)", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("Hi16_ZMM\0" "512-bits ZMM16-31 state (AVX-512)", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling (AMD)", 62, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(13,1).EAX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf13Sub1EaxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("XSAVEOPT\0" "XSAVEOPT is available", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("XSAVEC\0" "XSAVEC and compacted XRSTOR supported", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("XGETBC1\0" "XGETBV with ECX=1 supported", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("XSAVES\0" "XSAVES/XRSTORS and IA32_XSS supported", 3, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+
+/** CPUID(0x80000001,0).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeaf1EdxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("MSR\0" "K86 Model Specific Registers", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("SEP\0" "SYSCALL/SYSRET", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
+ DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("NX\0" "No-Execute/Execute-Disable", 20, 1, 0),
+ DBGFREGSUBFIELD_RO("AXMMX\0" "AMD Extensions to MMX instructions", 22, 1, 0),
+ DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR Instructions", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("FFXSR\0" "AMD fast FXSAVE and FXRSTOR instructions", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("Page1GB\0" "1 GB large page", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("RDTSCP\0" "RDTSCP instruction", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("LM\0" "AMD64 Long Mode", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("3DNOWEXT\0" "AMD Extensions to 3DNow", 30, 1, 0),
+ DBGFREGSUBFIELD_RO("3DNOW\0" "AMD 3DNow", 31, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(0x80000001,0).ECX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeaf1EcxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("LahfSahf\0" "LAHF/SAHF support in 64-bit mode", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("CmpLegacy\0" "Core multi-processing legacy mode", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("SVM\0" "AMD Secure Virtual Machine extensions", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("EXTAPIC\0" "AMD Extended APIC registers", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("CR8L\0" "AMD LOCK MOV CR0 means MOV CR8", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("ABM\0" "AMD Advanced Bit Manipulation", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("SSE4A\0" "SSE4A instructions", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("MISALIGNSSE\0" "AMD Misaligned SSE mode", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("3DNOWPRF\0" "AMD PREFETCH and PREFETCHW instructions", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("OSVW\0" "AMD OS Visible Workaround", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("IBS\0" "Instruct Based Sampling", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("XOP\0" "Extended Operation support", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("SKINIT\0" "SKINIT, STGI, and DEV support", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("WDT\0" "AMD Watchdog Timer support", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling support", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("FMA4\0" "Four operand FMA instruction support", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("TCE\0" "Translation Cache Extension support", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("NodeId\0" "NodeId in MSR C001_100C", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("TBM\0" "Trailing Bit Manipulation instructions", 21, 1, 0),
+ DBGFREGSUBFIELD_RO("TOPOEXT\0" "Topology Extensions", 22, 1, 0),
+ DBGFREGSUBFIELD_RO("PRFEXTCORE\0" "Performance Counter Extensions support", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("PRFEXTNB\0" "NB Performance Counter Extensions support", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("DATABPEXT\0" "Data-access Breakpoint Extension", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("PERFTSC\0" "Performance Time Stamp Counter", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("PCX_L2I\0" "L2I/L3 Performance Counter Extensions", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("MONITORX\0" "MWAITX and MONITORX instructions", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("AddrMaskExt\0" "BP Addressing masking extended to bit 31", 30, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(0x8000000a,0).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeafAEdxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("NP\0" "Nested Paging", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("LbrVirt\0" "Last Branch Record Virtualization", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("SVML\0" "SVM Lock", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("NRIPS\0" "NextRIP Save", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("TscRateMsr\0" "MSR based TSC rate control", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("VmcbClean\0" "VMCB clean bits", 5, 1, 0),
+ DBGFREGSUBFIELD_RO("FlushByASID\0" "Flush by ASID", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("DecodeAssists\0" "Decode Assists", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("PauseFilter\0" "Pause intercept filter", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("PauseFilterThreshold\0" "Pause filter threshold", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("AVIC\0" "Advanced Virtual Interrupt Controller", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("VMSAVEVirt\0" "VMSAVE and VMLOAD Virtualization", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("VGIF\0" "Virtual Global-Interrupt Flag", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("GMET\0" "Guest Mode Execute Trap Extension", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("x2AVIC\0" "AVIC support for x2APIC mode", 18, 1, 0),
+ DBGFREGSUBFIELD_RO("SSSCheck\0" "SVM supervisor shadow stack restrictions", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("SpecCtrl\0" "SPEC_CTRL virtualization", 20, 1, 0),
+ DBGFREGSUBFIELD_RO("ROGPT\0" "Read-Only Guest Page Table feature support", 21, 1, 0),
+ DBGFREGSUBFIELD_RO("HOST_MCE_OVERRIDE\0" "Guest #MC can be intercepted", 23, 1, 0),
+ DBGFREGSUBFIELD_RO("TlbiCtl\0" "INVLPGB/TLBSYNC enable and intercept", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("VNMI\0" "NMI Virtualization", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("IbsVirt\0" "IBS Virtualization", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("ExtLvtAvicAccessChg\0" "Extended LVT access changes", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("NestedVirtVmcbAddrChk\0""Guest VMCB address check", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("BusLockThreshold\0" "Bus Lock Threshold", 29, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+
+/** CPUID(0x80000007,0).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeaf7EdxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("TS\0" "Temperature Sensor", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("FID\0" "Frequency ID control", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("TTP\0" "Thermal Trip", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("TM\0" "Hardware Thermal Control (HTC)", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("100MHzSteps\0" "100 MHz Multiplier control", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("HwPstate\0" "Hardware P-state control", 7, 1, 0),
+ DBGFREGSUBFIELD_RO("TscInvariant\0" "Invariant Time Stamp Counter", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("CPB\0" "Core Performance Boost", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("EffFreqRO\0" "Read-only Effective Frequency Interface", 10, 1, 0),
+ DBGFREGSUBFIELD_RO("ProcFdbkIf\0" "Processor Feedback Interface", 11, 1, 0),
+ DBGFREGSUBFIELD_RO("ProcPwrRep\0" "Core power reporting interface support", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("ConnectedStandby\0" "Connected Standby", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("RAPL\0" "Running average power limit", 14, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(0x80000008,0).EBX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
+{
+ DBGFREGSUBFIELD_RO("CLZERO\0" "Clear zero instruction (cacheline)", 0, 1, 0),
+ DBGFREGSUBFIELD_RO("IRPerf\0" "Instructions retired count support", 1, 1, 0),
+ DBGFREGSUBFIELD_RO("XSaveErPtr\0" "Save/restore error pointers (FXSAVE/RSTOR*)", 2, 1, 0),
+ DBGFREGSUBFIELD_RO("INVLPGB\0" "INVLPGB and TLBSYNC instructions", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("RDPRU\0" "RDPRU instruction", 4, 1, 0),
+ DBGFREGSUBFIELD_RO("BE\0" "Bandwidth Enforcement extension", 6, 1, 0),
+ DBGFREGSUBFIELD_RO("MCOMMIT\0" "MCOMMIT instruction", 8, 1, 0),
+ DBGFREGSUBFIELD_RO("WBNOINVD\0" "WBNOINVD instruction", 9, 1, 0),
+ DBGFREGSUBFIELD_RO("IBPB\0" "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0),
+ DBGFREGSUBFIELD_RO("INT_WBINVD\0" "WBINVD/WBNOINVD interruptible", 13, 1, 0),
+ DBGFREGSUBFIELD_RO("IBRS\0" "Indirect Branch Restricted Speculation", 14, 1, 0),
+ DBGFREGSUBFIELD_RO("STIBP\0" "Single Thread Indirect Branch Prediction", 15, 1, 0),
+ DBGFREGSUBFIELD_RO("IbrsAlwaysOn\0" "Processor prefers that IBRS be left on", 16, 1, 0),
+ DBGFREGSUBFIELD_RO("StibpAlwaysOn\0""Processor prefers that STIBP be left on", 17, 1, 0),
+ DBGFREGSUBFIELD_RO("IbrsPreferred\0""IBRS preferred over software solution", 18, 1, 0),
+ DBGFREGSUBFIELD_RO("IbrsSameMode\0" "IBRS limits same mode speculation", 19, 1, 0),
+ DBGFREGSUBFIELD_RO("EferLmsleUnsupported\0" "EFER.LMSLE is unsupported", 20, 1, 0),
+ DBGFREGSUBFIELD_RO("INVLPGBnestedPages\0" "INVLPGB for nested translation", 21, 1, 0),
+ DBGFREGSUBFIELD_RO("SSBD\0" "Speculative Store Bypass Disable", 24, 1, 0),
+ DBGFREGSUBFIELD_RO("SsbdVirtSpecCtrl\0" "Use VIRT_SPEC_CTL for SSBD", 25, 1, 0),
+ DBGFREGSUBFIELD_RO("SsbdNotRequired\0" "SSBD not needed on this processor", 26, 1, 0),
+ DBGFREGSUBFIELD_RO("CPPC\0" "Collaborative Processor Performance Control", 27, 1, 0),
+ DBGFREGSUBFIELD_RO("PSFD\0" "Predictive Store Forward Disable", 28, 1, 0),
+ DBGFREGSUBFIELD_RO("BTC_NO\0" "Unaffected by branch type confusion", 29, 1, 0),
+ DBGFREGSUBFIELD_RO("IBPB_RET\0" "Clears RA predictor when PRED_CMD.IBPB set", 30, 1, 0),
+ DBGFREGSUBFIELD_TERMINATOR()
+};
+
+
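+/**
+ * Prints a space-separated list of mnemonics for the bits set in @a uVal.
+ *
+ * Single-bit fields print as their mnemonic, wider fields as mnemonic=value,
+ * and set bits without a descriptor as the bare bit number.
+ *
+ * @param   pHlp        The info helper functions.
+ * @param   uVal        The 32-bit register value to decode.
+ * @param   pDesc       The field descriptor table (ascending bit order).
+ * @param   pszLeadIn   Optional lead-in text; also controls whether a
+ *                      trailing newline is printed.
+ * @param   cchWidth    The lead-in field width.
+ */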
+static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,
+ const char *pszLeadIn, uint32_t cchWidth)
+{
+ if (pszLeadIn)
+ pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
+
+ for (uint32_t iBit = 0; iBit < 32; iBit++)
+ if (RT_BIT_32(iBit) & uVal)
+ {
+ while ( pDesc->pszName != NULL
+ && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
+ pDesc++;
+ if ( pDesc->pszName != NULL
+ && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
+ {
+ if (pDesc->cBits == 1)
+ pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
+ else
+ {
+ uint32_t uFieldValue = uVal >> pDesc->iFirstBit;
+ if (pDesc->cBits < 32)
+ uFieldValue &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
+ pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%u" : " %s=%#x", pDesc->pszName, uFieldValue);
+ iBit = pDesc->iFirstBit + pDesc->cBits - 1;
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, " %u", iBit);
+ }
+ if (pszLeadIn)
+ pHlp->pfnPrintf(pHlp, "\n");
+}
+
+
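+/**
+ * The 64-bit variant of cpumR3CpuIdInfoMnemonicListU32, used for values
+ * spanning a register pair (XCR0, IA32_XSS and friends).
+ */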
+static void cpumR3CpuIdInfoMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
+ const char *pszLeadIn, uint32_t cchWidth)
+{
+ if (pszLeadIn)
+ pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
+
+ for (uint32_t iBit = 0; iBit < 64; iBit++)
+ if (RT_BIT_64(iBit) & uVal)
+ {
+ while ( pDesc->pszName != NULL
+ && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
+ pDesc++;
+ if ( pDesc->pszName != NULL
+ && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
+ {
+ if (pDesc->cBits == 1)
+ pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
+ else
+ {
+ uint64_t uFieldValue = uVal >> pDesc->iFirstBit;
+ if (pDesc->cBits < 64)
+ uFieldValue &= RT_BIT_64(pDesc->cBits) - UINT64_C(1);
+ pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%llu" : " %s=%#llx", pDesc->pszName, uFieldValue);
+ iBit = pDesc->iFirstBit + pDesc->cBits - 1;
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, " %u", iBit);
+ }
+ if (pszLeadIn)
+ pHlp->pfnPrintf(pHlp, "\n");
+}
+
+
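+/**
+ * Prints @a uVal as a 64-bit hex value and, when any bits are set, appends
+ * the decoded mnemonic list in parentheses.
+ */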
+static void cpumR3CpuIdInfoValueWithMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
+ const char *pszLeadIn, uint32_t cchWidth)
+{
+ if (!uVal)
+ pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x\n", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
+ else
+ {
+ pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x (", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
+ cpumR3CpuIdInfoMnemonicListU64(pHlp, uVal, pDesc, NULL, 0);
+ pHlp->pfnPrintf(pHlp, " )\n");
+ }
+}
+
+
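+/**
+ * Prints a verbose bit-by-bit guest (@a uVal1) vs host (@a uVal2) comparison,
+ * one line per known field and per set reserved bit.
+ */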
+static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
+ uint32_t cchWidth)
+{
+ uint32_t uCombined = uVal1 | uVal2;
+ for (uint32_t iBit = 0; iBit < 32; iBit++)
+ if ( (RT_BIT_32(iBit) & uCombined)
+ || (iBit == pDesc->iFirstBit && pDesc->pszName) )
+ {
+ while ( pDesc->pszName != NULL
+ && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
+ pDesc++;
+
+ if ( pDesc->pszName != NULL
+ && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
+ {
+ size_t cchMnemonic = strlen(pDesc->pszName);
+ const char *pszDesc = pDesc->pszName + cchMnemonic + 1;
+ size_t cchDesc = strlen(pszDesc);
+ uint32_t uFieldValue1 = uVal1 >> pDesc->iFirstBit;
+ uint32_t uFieldValue2 = uVal2 >> pDesc->iFirstBit;
+ if (pDesc->cBits < 32)
+ {
+ uFieldValue1 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
+ uFieldValue2 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
+ }
+
+ pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s - %s%*s= %u (%u)\n" : " %s - %s%*s= %#x (%#x)\n",
+ pDesc->pszName, pszDesc,
+ cchMnemonic + 3 + cchDesc < cchWidth ? cchWidth - (cchMnemonic + 3 + cchDesc) : 1, "",
+ uFieldValue1, uFieldValue2);
+
+ iBit = pDesc->iFirstBit + pDesc->cBits - 1U;
+ pDesc++;
+ }
+ else
+ pHlp->pfnPrintf(pHlp, " %2u - Reserved%*s= %u (%u)\n", iBit, 13 < cchWidth ? cchWidth - 13 : 1, "",
+ RT_BOOL(uVal1 & RT_BIT_32(iBit)), RT_BOOL(uVal2 & RT_BIT_32(iBit)));
+ }
+}
+
+
+/**
+ * Produces a detailed summary of standard leaf 0x00000001.
+ *
+ * @param pHlp The info helper functions.
+ * @param pCurLeaf The 0x00000001 leaf.
+ * @param fVerbose Whether to be very verbose or not.
+ * @param   fIntel      Set if Intel CPU.
+ */
+static void cpumR3CpuIdInfoStdLeaf1Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose, bool fIntel)
+{
+ Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 1);
+ static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
+ uint32_t uEAX = pCurLeaf->uEax;
+ uint32_t uEBX = pCurLeaf->uEbx;
+
+ pHlp->pfnPrintf(pHlp,
+ "%36s %2d \tExtended: %d \tEffective: %d\n"
+ "%36s %2d \tExtended: %d \tEffective: %d\n"
+ "%36s %d\n"
+ "%36s %d (%s)\n"
+ "%36s %#04x\n"
+ "%36s %d\n"
+ "%36s %d\n"
+ "%36s %#04x\n"
+ ,
+ "Family:", (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
+ "Model:", (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
+ "Stepping:", RTX86GetCpuStepping(uEAX),
+ "Type:", (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
+ "APIC ID:", (uEBX >> 24) & 0xff,
+ "Logical CPUs:",(uEBX >> 16) & 0xff,
+ "CLFLUSH Size:",(uEBX >> 8) & 0xff,
+ "Brand ID:", (uEBX >> 0) & 0xff);
+ if (fVerbose)
+ {
+ CPUMCPUID Host = {0};
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ pHlp->pfnPrintf(pHlp, "Features\n");
+ pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, 56);
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, 56);
+ }
+ else
+ {
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf1EdxSubFields, "Features EDX:", 36);
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf1EcxSubFields, "Features ECX:", 36);
+ }
+}
+
+
+/**
+ * Produces a detailed summary of standard leaf 0x00000007.
+ *
+ * @param pHlp The info helper functions.
+ * @param paLeaves The CPUID leaves array.
+ * @param cLeaves The number of leaves in the array.
+ * @param pCurLeaf The first 0x00000007 leaf.
+ * @param fVerbose Whether to be very verbose or not.
+ */
+static void cpumR3CpuIdInfoStdLeaf7Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
+ PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
+{
+ Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 7);
+ pHlp->pfnPrintf(pHlp, "Structured Extended Feature Flags Enumeration (leaf 7):\n");
+ for (;;)
+ {
+ CPUMCPUID Host = {0};
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+
+ switch (pCurLeaf->uSubLeaf)
+ {
+ case 0:
+ if (fVerbose)
+ {
+ pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, 56);
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
+ if (pCurLeaf->uEdx || Host.uEdx)
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
+ }
+ else
+ {
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aLeaf7Sub0EbxSubFields, "Ext Features EBX:", 36);
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36);
+ if (pCurLeaf->uEdx)
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36);
+ }
+ break;
+
+ default:
+ if (pCurLeaf->uEdx || pCurLeaf->uEcx || pCurLeaf->uEbx)
+ pHlp->pfnPrintf(pHlp, "Unknown extended feature sub-leaf #%u: EAX=%#x EBX=%#x ECX=%#x EDX=%#x\n",
+ pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx);
+ break;
+
+ }
+
+ /* advance. */
+ pCurLeaf++;
+ if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
+ || pCurLeaf->uLeaf != 0x7)
+ break;
+ }
+}
+
+
+/**
+ * Produces a detailed summary of standard leaf 0x0000000d.
+ *
+ * @param pHlp The info helper functions.
+ * @param paLeaves The CPUID leaves array.
+ * @param cLeaves The number of leaves in the array.
+ * @param   pCurLeaf    The first 0x0000000d leaf.
+ * @param fVerbose Whether to be very verbose or not.
+ */
+static void cpumR3CpuIdInfoStdLeaf13Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
+ PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
+{
+ RT_NOREF_PV(fVerbose);
+ Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 13);
+ pHlp->pfnPrintf(pHlp, "Processor Extended State Enumeration (leaf 0xd):\n");
+ for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
+ {
+ CPUMCPUID Host = {0};
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+
+ switch (uSubLeaf)
+ {
+ case 0:
+ if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
+ pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, guest:",
+ pCurLeaf->uEbx, pCurLeaf->uEcx);
+ pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, host:", Host.uEbx, Host.uEcx);
+
+ if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
+ cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx), g_aXSaveStateBits,
+ "Valid XCR0 bits, guest:", 42);
+ cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEax, Host.uEdx), g_aXSaveStateBits,
+ "Valid XCR0 bits, host:", 42);
+ break;
+
+ case 1:
+ if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, guest:", 42);
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, Host.uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, host:", 42);
+
+ if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
+ pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, guest:", pCurLeaf->uEbx);
+ pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, host:", Host.uEbx);
+
+ if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
+ cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEcx, pCurLeaf->uEdx), g_aXSaveStateBits,
+ " Valid IA32_XSS bits, guest:", 42);
+                cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEcx, Host.uEdx), g_aXSaveStateBits,
+ " Valid IA32_XSS bits, host:", 42);
+ break;
+
+ default:
+ if ( pCurLeaf
+ && pCurLeaf->uSubLeaf == uSubLeaf
+ && (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx) )
+ {
+ pHlp->pfnPrintf(pHlp, " State #%u, guest: off=%#06x, cb=%#06x %s", uSubLeaf, pCurLeaf->uEbx,
+ pCurLeaf->uEax, pCurLeaf->uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
+ if (pCurLeaf->uEcx & ~RT_BIT_32(0))
+ pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", pCurLeaf->uEcx & ~RT_BIT_32(0));
+ if (pCurLeaf->uEdx)
+ pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", pCurLeaf->uEdx);
+ pHlp->pfnPrintf(pHlp, " --");
+ cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ if (Host.uEax || Host.uEbx || Host.uEcx || Host.uEdx)
+ {
+ pHlp->pfnPrintf(pHlp, " State #%u, host: off=%#06x, cb=%#06x %s", uSubLeaf, Host.uEbx,
+ Host.uEax, Host.uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
+ if (Host.uEcx & ~RT_BIT_32(0))
+ pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", Host.uEcx & ~RT_BIT_32(0));
+ if (Host.uEdx)
+ pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", Host.uEdx);
+ pHlp->pfnPrintf(pHlp, " --");
+ cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ break;
+
+ }
+
+ /* advance. */
+ if (pCurLeaf)
+ {
+ while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
+ && pCurLeaf->uSubLeaf <= uSubLeaf
+ && pCurLeaf->uLeaf == UINT32_C(0x0000000d))
+ pCurLeaf++;
+ if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
+ || pCurLeaf->uLeaf != UINT32_C(0x0000000d))
+ pCurLeaf = NULL;
+ }
+ }
+}
+
+
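+/**
+ * Dumps the raw guest and host values of all leaves from @a pCurLeaf up to
+ * and including @a uUpToLeaf under the @a pszTitle heading.
+ *
+ * @returns Pointer to the first leaf beyond the dumped range.
+ */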
+static PCCPUMCPUIDLEAF cpumR3CpuIdInfoRawRange(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
+ PCCPUMCPUIDLEAF pCurLeaf, uint32_t uUpToLeaf, const char *pszTitle)
+{
+ if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
+ && pCurLeaf->uLeaf <= uUpToLeaf)
+ {
+ pHlp->pfnPrintf(pHlp,
+ " %s\n"
+ " Leaf/sub-leaf eax ebx ecx edx\n", pszTitle);
+ while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
+ && pCurLeaf->uLeaf <= uUpToLeaf)
+ {
+ CPUMCPUID Host = {0};
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ pHlp->pfnPrintf(pHlp,
+ "Gst: %08x/%04x %08x %08x %08x %08x\n"
+ "Hst: %08x %08x %08x %08x\n",
+ pCurLeaf->uLeaf, pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
+ Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
+ pCurLeaf++;
+ }
+ }
+
+ return pCurLeaf;
+}
+
+
+/**
+ * Display the guest CpuId leaves.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs "terse", "default" or "verbose".
+ */
+DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /*
+ * Parse the argument.
+ */
+ unsigned iVerbosity = 1;
+ if (pszArgs)
+ {
+ pszArgs = RTStrStripL(pszArgs);
+ if (!strcmp(pszArgs, "terse"))
+ iVerbosity--;
+ else if (!strcmp(pszArgs, "verbose"))
+ iVerbosity++;
+ }
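+    /* Resulting levels: 0 = terse (raw leaves only), 1 = default (adds the
+       decoded summaries), 2 = verbose (adds guest vs. host field compares). */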
+
+ uint32_t uLeaf;
+ CPUMCPUID Host = {0};
+ uint32_t cLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
+ PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
+ PCCPUMCPUIDLEAF pCurLeaf;
+ PCCPUMCPUIDLEAF pNextLeaf;
+ bool const fIntel = RTX86IsIntelCpu(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
+ pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
+ pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
+
+ /*
+ * Standard leaves. Custom raw dump here due to ECX sub-leaves host handling.
+ */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ uint32_t cHstMax = ASMCpuId_EAX(0);
+#else
+ uint32_t cHstMax = 0;
+#endif
+ uint32_t cGstMax = paLeaves[0].uLeaf == 0 ? paLeaves[0].uEax : 0;
+ uint32_t cMax = RT_MAX(cGstMax, cHstMax);
+ pHlp->pfnPrintf(pHlp,
+ " Raw Standard CPUID Leaves\n"
+ " Leaf/sub-leaf eax ebx ecx edx\n");
+ for (uLeaf = 0, pCurLeaf = paLeaves; uLeaf <= cMax; uLeaf++)
+ {
+ uint32_t cMaxSubLeaves = 1;
+ if (uLeaf == 4 || uLeaf == 7 || uLeaf == 0xb)
+ cMaxSubLeaves = 16;
+ else if (uLeaf == 0xd)
+ cMaxSubLeaves = 128;
+
+ for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
+ && pCurLeaf->uLeaf == uLeaf
+ && pCurLeaf->uSubLeaf == uSubLeaf)
+ {
+ pHlp->pfnPrintf(pHlp,
+ "Gst: %08x/%04x %08x %08x %08x %08x\n"
+ "Hst: %08x %08x %08x %08x\n",
+ uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
+ Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
+ pCurLeaf++;
+ }
+ else if ( uLeaf != 0xd
+ || uSubLeaf <= 1
+ || Host.uEbx != 0 )
+ pHlp->pfnPrintf(pHlp,
+ "Hst: %08x/%04x %08x %08x %08x %08x\n",
+ uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
+
+ /* Done? */
+ if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
+ || pCurLeaf->uLeaf != uLeaf)
+ && ( (uLeaf == 0x4 && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8))
+ || (uLeaf == 0x7 && Host.uEax == 0)
+ || (uLeaf == 0xb && ((Host.uEcx & 0xff00) == 0 || (Host.uEcx & 0xff00) >= 8))
+ || (uLeaf == 0xb && (Host.uEcx & 0xff) != uSubLeaf)
+ || (uLeaf == 0xd && uSubLeaf >= 128)
+ )
+ )
+ break;
+ }
+ }
+ pNextLeaf = pCurLeaf;
+
+ /*
+ * If verbose, decode it.
+ */
+ if (iVerbosity && paLeaves[0].uLeaf == 0)
+ pHlp->pfnPrintf(pHlp,
+ "%36s %.04s%.04s%.04s\n"
+ "%36s 0x00000000-%#010x\n"
+ ,
+ "Name:", &paLeaves[0].uEbx, &paLeaves[0].uEdx, &paLeaves[0].uEcx,
+ "Supports:", paLeaves[0].uEax);
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000001), 0)) != NULL)
+ cpumR3CpuIdInfoStdLeaf1Details(pHlp, pCurLeaf, iVerbosity > 1, fIntel);
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000007), 0)) != NULL)
+ cpumR3CpuIdInfoStdLeaf7Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0)) != NULL)
+ cpumR3CpuIdInfoStdLeaf13Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
+
+ pCurLeaf = pNextLeaf;
+
+ /*
+ * Hypervisor leaves.
+ *
+ * Unlike most of the other leaves reported, the guest hypervisor leaves
+ * aren't a subset of the host CPUID bits.
+ */
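+    /* (E.g. the guest may see the Hyper-V style leaves synthesized by our
+       GIM provider while the host reports a different hypervisor's leaves,
+       or none at all.) */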
+ pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x3fffffff), "Unknown CPUID Leaves");
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0x40000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ cHstMax = Host.uEax >= UINT32_C(0x40000001) && Host.uEax <= UINT32_C(0x40000fff) ? Host.uEax : 0;
+ cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x40000000)
+ ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x40000fff)) : 0;
+ cMax = RT_MAX(cHstMax, cGstMax);
+ if (cMax >= UINT32_C(0x40000000))
+ {
+ pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Hypervisor CPUID Leaves");
+
+ /** @todo dump these in more detail. */
+
+ pCurLeaf = pNextLeaf;
+ }
+
+
+ /*
+ * Extended. Custom raw dump here due to ECX sub-leaves host handling.
+     * Implemented according to the AMD specs.
+ */
+ pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x7fffffff), "Unknown CPUID Leaves");
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0x80000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ cHstMax = RTX86IsValidExtRange(Host.uEax) ? RT_MIN(Host.uEax, UINT32_C(0x80000fff)) : 0;
+ cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x80000000)
+ ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x80000fff)) : 0;
+ cMax = RT_MAX(cHstMax, cGstMax);
+ if (cMax >= UINT32_C(0x80000000))
+ {
+
+ pHlp->pfnPrintf(pHlp,
+ " Raw Extended CPUID Leaves\n"
+ " Leaf/sub-leaf eax ebx ecx edx\n");
+ PCCPUMCPUIDLEAF pExtLeaf = pCurLeaf;
+ for (uLeaf = UINT32_C(0x80000000); uLeaf <= cMax; uLeaf++)
+ {
+ uint32_t cMaxSubLeaves = 1;
+ if (uLeaf == UINT32_C(0x8000001d))
+ cMaxSubLeaves = 16;
+
+ for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
+ && pCurLeaf->uLeaf == uLeaf
+ && pCurLeaf->uSubLeaf == uSubLeaf)
+ {
+ pHlp->pfnPrintf(pHlp,
+ "Gst: %08x/%04x %08x %08x %08x %08x\n"
+ "Hst: %08x %08x %08x %08x\n",
+ uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
+ Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
+ pCurLeaf++;
+ }
+ else if ( uLeaf != 0xd
+ || uSubLeaf <= 1
+ || Host.uEbx != 0 )
+ pHlp->pfnPrintf(pHlp,
+ "Hst: %08x/%04x %08x %08x %08x %08x\n",
+ uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
+
+ /* Done? */
+ if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
+ || pCurLeaf->uLeaf != uLeaf)
+ && (uLeaf == UINT32_C(0x8000001d) && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8)) )
+ break;
+ }
+ }
+ pNextLeaf = pCurLeaf;
+
+ /*
+ * Understandable output
+ */
+ if (iVerbosity)
+ pHlp->pfnPrintf(pHlp,
+ "Ext Name: %.4s%.4s%.4s\n"
+ "Ext Supports: 0x80000000-%#010x\n",
+ &pExtLeaf->uEbx, &pExtLeaf->uEdx, &pExtLeaf->uEcx, pExtLeaf->uEax);
+
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000001), 0);
+ if (iVerbosity && pCurLeaf)
+ {
+ uint32_t uEAX = pCurLeaf->uEax;
+ pHlp->pfnPrintf(pHlp,
+ "Family: %d \tExtended: %d \tEffective: %d\n"
+ "Model: %d \tExtended: %d \tEffective: %d\n"
+ "Stepping: %d\n"
+ "Brand ID: %#05x\n",
+ (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
+ (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
+ RTX86GetCpuStepping(uEAX),
+ pCurLeaf->uEbx & 0xfff);
+
+ if (iVerbosity == 1)
+ {
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf1EdxSubFields, "Ext Features EDX:", 34);
+                cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aExtLeaf1EcxSubFields, "Ext Features ECX:", 34);
+ }
+ else
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ pHlp->pfnPrintf(pHlp, "Ext Features\n");
+ pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, 56);
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, 56);
+ if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
+ {
+ pHlp->pfnPrintf(pHlp, "SVM Feature Identification (leaf A):\n");
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
+ uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields, 56);
+ }
+ }
+ }
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000002), 0)) != NULL)
+ {
+ char szString[4*4*3+1] = {0};
+ uint32_t *pu32 = (uint32_t *)szString;
+ *pu32++ = pCurLeaf->uEax;
+ *pu32++ = pCurLeaf->uEbx;
+ *pu32++ = pCurLeaf->uEcx;
+ *pu32++ = pCurLeaf->uEdx;
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000003), 0);
+ if (pCurLeaf)
+ {
+ *pu32++ = pCurLeaf->uEax;
+ *pu32++ = pCurLeaf->uEbx;
+ *pu32++ = pCurLeaf->uEcx;
+ *pu32++ = pCurLeaf->uEdx;
+ }
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000004), 0);
+ if (pCurLeaf)
+ {
+ *pu32++ = pCurLeaf->uEax;
+ *pu32++ = pCurLeaf->uEbx;
+ *pu32++ = pCurLeaf->uEcx;
+ *pu32++ = pCurLeaf->uEdx;
+ }
+ pHlp->pfnPrintf(pHlp, "Full Name: \"%s\"\n", szString);
+ }
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000005), 0)) != NULL)
+ {
+ uint32_t uEAX = pCurLeaf->uEax;
+ uint32_t uEBX = pCurLeaf->uEbx;
+ uint32_t uECX = pCurLeaf->uEcx;
+ uint32_t uEDX = pCurLeaf->uEdx;
+ char sz1[32];
+ char sz2[32];
+
+ pHlp->pfnPrintf(pHlp,
+ "TLB 2/4M Instr/Uni: %s %3d entries\n"
+ "TLB 2/4M Data: %s %3d entries\n",
+ getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
+ getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
+ pHlp->pfnPrintf(pHlp,
+ "TLB 4K Instr/Uni: %s %3d entries\n"
+ "TLB 4K Data: %s %3d entries\n",
+ getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
+ getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
+ pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
+ "L1 Instr Cache Lines Per Tag: %d\n"
+ "L1 Instr Cache Associativity: %s\n"
+ "L1 Instr Cache Size: %d KB\n",
+ (uEDX >> 0) & 0xff,
+ (uEDX >> 8) & 0xff,
+ getCacheAss((uEDX >> 16) & 0xff, sz1),
+ (uEDX >> 24) & 0xff);
+ pHlp->pfnPrintf(pHlp,
+ "L1 Data Cache Line Size: %d bytes\n"
+ "L1 Data Cache Lines Per Tag: %d\n"
+ "L1 Data Cache Associativity: %s\n"
+ "L1 Data Cache Size: %d KB\n",
+ (uECX >> 0) & 0xff,
+ (uECX >> 8) & 0xff,
+ getCacheAss((uECX >> 16) & 0xff, sz1),
+ (uECX >> 24) & 0xff);
+ }
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000006), 0)) != NULL)
+ {
+ uint32_t uEAX = pCurLeaf->uEax;
+ uint32_t uEBX = pCurLeaf->uEbx;
+ uint32_t uEDX = pCurLeaf->uEdx;
+
+ pHlp->pfnPrintf(pHlp,
+ "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
+ "L2 TLB 2/4M Data: %s %4d entries\n",
+ getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
+ getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
+ pHlp->pfnPrintf(pHlp,
+ "L2 TLB 4K Instr/Uni: %s %4d entries\n"
+ "L2 TLB 4K Data: %s %4d entries\n",
+ getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
+        default: return "??????";
+ pHlp->pfnPrintf(pHlp,
+ "L2 Cache Line Size: %d bytes\n"
+ "L2 Cache Lines Per Tag: %d\n"
+ "L2 Cache Associativity: %s\n"
+ "L2 Cache Size: %d KB\n",
+ (uEDX >> 0) & 0xff,
+ (uEDX >> 8) & 0xf,
+ getL2CacheAss((uEDX >> 12) & 0xf),
+ (uEDX >> 16) & 0xffff);
+ }
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000007), 0)) != NULL)
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0x80000007), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ if (pCurLeaf->uEdx || (Host.uEdx && iVerbosity))
+ {
+ if (iVerbosity < 1)
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
+ else
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields, 56);
+ }
+ }
+
+ pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
+ if (pCurLeaf != NULL)
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
+ {
+ if (iVerbosity < 1)
+ cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
+ else
+ cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
+ }
+
+ if (iVerbosity)
+ {
+ uint32_t uEAX = pCurLeaf->uEax;
+ uint32_t uECX = pCurLeaf->uEcx;
+
+ /** @todo 0x80000008:EAX[23:16] is only defined for AMD. We'll get 0 on Intel. On
+ * AMD if we get 0, the guest physical address width should be taken from
+             *        0x80000008:EAX[7:0] instead.  The guest physical address width is relevant
+ * for guests using nested paging. */
+ pHlp->pfnPrintf(pHlp,
+ "Physical Address Width: %d bits\n"
+ "Virtual Address Width: %d bits\n"
+ "Guest Physical Address Width: %d bits\n",
+ (uEAX >> 0) & 0xff,
+ (uEAX >> 8) & 0xff,
+ (uEAX >> 16) & 0xff);
+
+ /** @todo 0x80000008:ECX is reserved on Intel (we'll get incorrect physical core
+ * count here). */
+ pHlp->pfnPrintf(pHlp,
+ "Physical Core Count: %d\n",
+ ((uECX >> 0) & 0xff) + 1);
+ }
+ }
+
+ pCurLeaf = pNextLeaf;
+ }
+
+
+
+ /*
+ * Centaur.
+ */
+ pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xbfffffff), "Unknown CPUID Leaves");
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(UINT32_C(0xc0000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ cHstMax = Host.uEax >= UINT32_C(0xc0000001) && Host.uEax <= UINT32_C(0xc0000fff)
+ ? RT_MIN(Host.uEax, UINT32_C(0xc0000fff)) : 0;
+ cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0xc0000000)
+ ? RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000fff)) : 0;
+ cMax = RT_MAX(cHstMax, cGstMax);
+ if (cMax >= UINT32_C(0xc0000000))
+ {
+ pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Centaur CPUID Leaves");
+
+ /*
+ * Understandable output
+ */
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000000), 0)) != NULL)
+ pHlp->pfnPrintf(pHlp,
+ "Centaur Supports: 0xc0000000-%#010x\n",
+ pCurLeaf->uEax);
+
+ if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000001), 0)) != NULL)
+ {
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+#endif
+ uint32_t uEdxGst = pCurLeaf->uEdx;
+ uint32_t uEdxHst = Host.uEdx;
+
+ if (iVerbosity == 1)
+ {
+ pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
+ if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
+ if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
+ if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
+ if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
+ if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
+ if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
+ if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
+ if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
+ /* possibly indicating MM/HE and MM/HE-E on older chips... */
+ if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
+ if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
+ if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
+ if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
+ if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
+ if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
+ for (unsigned iBit = 14; iBit < 32; iBit++)
+ if (uEdxGst & RT_BIT(iBit))
+ pHlp->pfnPrintf(pHlp, " %d", iBit);
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ else
+ {
+ pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
+ pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
+ pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
+ pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
+ pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
+ pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
+ pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
+ pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
+ pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
+ /* possibly indicating MM/HE and MM/HE-E on older chips... */
+ pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
+ pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
+ pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
+ pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
+ pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
+ pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
+ pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
+ pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
+ pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
+ pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
+ pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
+ pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
+ pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
+ pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
+ pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
+ pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
+ pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
+ pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
+ pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
+ for (unsigned iBit = 27; iBit < 32; iBit++)
+ if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
+ pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ }
+
+ pCurLeaf = pNextLeaf;
+ }
+
+ /*
+ * The remainder.
+ */
+ pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xffffffff), "Unknown CPUID Leaves");
+}
+
+#endif /* !IN_VBOX_CPU_REPORT */
+
diff --git a/src/VBox/VMM/VMMR3/CPUMR3Db.cpp b/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
new file mode 100644
index 00000000..b5e7499e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
@@ -0,0 +1,1149 @@
+/* $Id: CPUMR3Db.cpp $ */
+/** @file
+ * CPUM - CPU database part.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/mm.h>
+
+#include <VBox/err.h>
+#if !defined(RT_ARCH_ARM64)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @def NULL_ALONE
+ * For eliminating an unnecessary data dependency in standalone builds (for
+ * VBoxSVC). */
+/** @def ZERO_ALONE
+ * For eliminating an unnecessary data size dependency in standalone builds (for
+ * VBoxSVC). */
+#ifndef CPUM_DB_STANDALONE
+# define NULL_ALONE(a_aTable) a_aTable
+# define ZERO_ALONE(a_cTable) a_cTable
+#else
+# define NULL_ALONE(a_aTable) NULL
+# define ZERO_ALONE(a_cTable) 0
+#endif
+
+
+/** @name Short macros for the MSR range entries.
+ *
+ * These are rather cryptic, but this is to reduce the attack on the right
+ * margin.
+ *
+ * @{ */
+/** Alias one MSR onto another (a_uTarget). */
+#define MAL(a_uMsr, a_szName, a_uTarget) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
+/** Functions handle everything. */
+#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
+/** Functions handle everything, with GP mask. */
+#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
+/** Function handlers, read-only. */
+#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
+/** Function handlers, ignore all writes. */
+#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
+/** Function handlers, with value. */
+#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
+/** Function handlers, with write ignore mask. */
+#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
+/** Function handlers, extended version. */
+#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** Function handlers, with CPUMCPU storage variable. */
+#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+ RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
+/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
+#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+ RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** Read-only fixed value. */
+#define MVO(a_uMsr, a_szName, a_uValue) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
+/** Read-only fixed value, ignores all writes. */
+#define MVI(a_uMsr, a_szName, a_uValue) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
+/** Read fixed value, ignore writes outside GP mask. */
+#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
+/** Read fixed value, extended version with both GP and ignore masks. */
+#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** The short form, no CPUM backing. */
+#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
+ a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+
+/** Range: Functions handle everything. */
+#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
+ RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
+/** Range: Read fixed value, read-only. */
+#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
+ RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
+/** Range: Read fixed value, ignore writes. */
+#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
+ RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
+/** Range: The short form, no CPUM backing. */
+#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
+ a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+
+/** Internal form used by the macros. */
+#ifdef VBOX_WITH_STATISTICS
+# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
+ { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
+ { 0 }, { 0 }, { 0 }, { 0 } }
+#else
+# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
+ { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
+#endif
+/** @} */
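+
+/*
+ * Illustrative example (not one of the entries used below): a hypothetical
+ * read-as-zero MSR at index 0x00000042 that swallows all writes would be
+ * declared with MVI as
+ *
+ *      MVI(0x00000042, "EXAMPLE_MSR", 0),
+ *
+ * which expands via RINT to an entry using kCpumMsrRdFn_FixedValue and
+ * kCpumMsrWrFn_IgnoreWrite with a write-ignore mask of UINT64_MAX.
+ */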
+
+#ifndef CPUM_DB_STANDALONE
+
+#include "cpus/Intel_Core_i7_6700K.h"
+#include "cpus/Intel_Core_i7_5600U.h"
+#include "cpus/Intel_Core_i7_3960X.h"
+#include "cpus/Intel_Core_i5_3570.h"
+#include "cpus/Intel_Core_i7_2635QM.h"
+#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
+#include "cpus/Intel_Core2_X6800_2_93GHz.h"
+#include "cpus/Intel_Core2_T7600_2_33GHz.h"
+#include "cpus/Intel_Core_Duo_T2600_2_16GHz.h"
+#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
+#include "cpus/Intel_Pentium_4_3_00GHz.h"
+#include "cpus/Intel_Pentium_N3530_2_16GHz.h"
+#include "cpus/Intel_Atom_330_1_60GHz.h"
+#include "cpus/Intel_80486.h"
+#include "cpus/Intel_80386.h"
+#include "cpus/Intel_80286.h"
+#include "cpus/Intel_80186.h"
+#include "cpus/Intel_8086.h"
+
+#include "cpus/AMD_Ryzen_7_1800X_Eight_Core.h"
+#include "cpus/AMD_FX_8150_Eight_Core.h"
+#include "cpus/AMD_Phenom_II_X6_1100T.h"
+#include "cpus/Quad_Core_AMD_Opteron_2384.h"
+#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
+#include "cpus/AMD_Athlon_64_3200.h"
+
+#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"
+
+#include "cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h"
+
+#include "cpus/Hygon_C86_7185_32_core.h"
+
+
+/**
+ * The database entries.
+ *
+ * 1. The first entry is special. It is the fallback for unknown
+ *    processors. Thus, it had better be pretty representative.
+ *
+ * 2. The first entry for a CPU vendor is likewise important as it is
+ * the default entry for that vendor.
+ *
+ * Generally we put the most recent CPUs first, since these tend to have the
+ * most complicated and backwards compatible list of MSRs.
+ */
+static CPUMDBENTRY const * const g_apCpumDbEntries[] =
+{
+#ifdef VBOX_CPUDB_Intel_Core_i7_6700K_h
+ &g_Entry_Intel_Core_i7_6700K,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i7_5600U_h
+ &g_Entry_Intel_Core_i7_5600U,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i5_3570_h
+ &g_Entry_Intel_Core_i5_3570,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i7_3960X_h
+ &g_Entry_Intel_Core_i7_3960X,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM_h
+ &g_Entry_Intel_Core_i7_2635QM,
+#endif
+#ifdef VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h
+ &g_Entry_Intel_Pentium_N3530_2_16GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Atom_330_1_60GHz_h
+ &g_Entry_Intel_Atom_330_1_60GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz_h
+ &g_Entry_Intel_Pentium_M_processor_2_00GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz_h
+ &g_Entry_Intel_Xeon_X5482_3_20GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core2_X6800_2_93GHz_h
+ &g_Entry_Intel_Core2_X6800_2_93GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core2_T7600_2_33GHz_h
+ &g_Entry_Intel_Core2_T7600_2_33GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_Duo_T2600_2_16GHz_h
+ &g_Entry_Intel_Core_Duo_T2600_2_16GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz_h
+ &g_Entry_Intel_Pentium_4_3_00GHz,
+#endif
+/** @todo pentium, pentium mmx, pentium pro, pentium II, pentium III */
+#ifdef VBOX_CPUDB_Intel_80486_h
+ &g_Entry_Intel_80486,
+#endif
+#ifdef VBOX_CPUDB_Intel_80386_h
+ &g_Entry_Intel_80386,
+#endif
+#ifdef VBOX_CPUDB_Intel_80286_h
+ &g_Entry_Intel_80286,
+#endif
+#ifdef VBOX_CPUDB_Intel_80186_h
+ &g_Entry_Intel_80186,
+#endif
+#ifdef VBOX_CPUDB_Intel_8086_h
+ &g_Entry_Intel_8086,
+#endif
+
+#ifdef VBOX_CPUDB_AMD_Ryzen_7_1800X_Eight_Core_h
+ &g_Entry_AMD_Ryzen_7_1800X_Eight_Core,
+#endif
+#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core_h
+ &g_Entry_AMD_FX_8150_Eight_Core,
+#endif
+#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T_h
+ &g_Entry_AMD_Phenom_II_X6_1100T,
+#endif
+#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384_h
+ &g_Entry_Quad_Core_AMD_Opteron_2384,
+#endif
+#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200_h
+ &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
+#endif
+#ifdef VBOX_CPUDB_AMD_Athlon_64_3200_h
+ &g_Entry_AMD_Athlon_64_3200,
+#endif
+
+#ifdef VBOX_CPUDB_ZHAOXIN_KaiXian_KX_U5581_1_8GHz_h
+ &g_Entry_ZHAOXIN_KaiXian_KX_U5581_1_8GHz,
+#endif
+
+#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz_h
+ &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
+#endif
+
+#ifdef VBOX_CPUDB_NEC_V20_h
+ &g_Entry_NEC_V20,
+#endif
+
+#ifdef VBOX_CPUDB_Hygon_C86_7185_32_core_h
+ &g_Entry_Hygon_C86_7185_32_core,
+#endif
+};
+
+
+/**
+ * Returns the number of entries in the CPU database.
+ *
+ * @returns Number of entries.
+ * @sa PFNCPUMDBGETENTRIES
+ */
+VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void)
+{
+ return RT_ELEMENTS(g_apCpumDbEntries);
+}
+
+
+/**
+ * Returns CPU database entry for the given index.
+ *
+ * @returns Pointer to the CPU database entry, NULL if the index is out of bounds.
+ * @param   idxCpuDb    The index (0 .. CPUMR3DbGetEntries() - 1).
+ * @sa PFNCPUMDBGETENTRYBYINDEX
+ */
+VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb)
+{
+    AssertReturn(idxCpuDb < RT_ELEMENTS(g_apCpumDbEntries), NULL);
+ return g_apCpumDbEntries[idxCpuDb];
+}
+
+
+/**
+ * Returns CPU database entry with the given name.
+ *
+ * @returns Pointer to the CPU database entry, NULL if not found.
+ * @param pszName The name of the profile to return.
+ * @sa PFNCPUMDBGETENTRYBYNAME
+ */
+VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName)
+{
+ AssertPtrReturn(pszName, NULL);
+ AssertReturn(*pszName, NULL);
+ for (size_t i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
+ if (strcmp(g_apCpumDbEntries[i]->pszName, pszName) == 0)
+ return g_apCpumDbEntries[i];
+ return NULL;
+}
+
+
+
+/**
+ * Binary search used by cpumR3MsrRangesInsert. It has special behavior on
+ * mismatches: rather than indicating failure, it returns the location where
+ * the new MSR range should be inserted.
+ *
+ * @returns Insert location.
+ * @param paMsrRanges The MSR ranges to search.
+ * @param cMsrRanges The number of MSR ranges.
+ * @param uMsr What to search for.
+ */
+static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
+{
+ if (!cMsrRanges)
+ return 0;
+
+ uint32_t iStart = 0;
+ uint32_t iLast = cMsrRanges - 1;
+ for (;;)
+ {
+ uint32_t i = iStart + (iLast - iStart + 1) / 2;
+ if ( uMsr >= paMsrRanges[i].uFirst
+ && uMsr <= paMsrRanges[i].uLast)
+ return i;
+ if (uMsr < paMsrRanges[i].uFirst)
+ {
+ if (i <= iStart)
+ return i;
+ iLast = i - 1;
+ }
+ else
+ {
+ if (i >= iLast)
+ {
+ if (i < cMsrRanges)
+ i++;
+ return i;
+ }
+ iStart = i + 1;
+ }
+ }
+}
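+
+/*
+ * Worked example of the mismatch behaviour (hypothetical table): with two
+ * ranges, [0] = 0x10..0x1f and [1] = 0x30..0x3f, searching for 0x15 returns 0
+ * (a hit in range 0), searching for 0x25 returns 1 (the insert location
+ * between the two ranges), and searching for 0x45 returns 2 (append at the
+ * end).
+ */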
+
+
+/**
+ * Ensures that there is space for at least @a cNewRanges in the table,
+ * reallocating the table if necessary.
+ *
+ * @returns Pointer to the MSR ranges on success, NULL on failure. On failure
+ * @a *ppaMsrRanges is freed and set to NULL.
+ * @param pVM The cross context VM structure. If NULL,
+ * use the process heap, otherwise the VM's hyper heap.
+ * @param ppaMsrRanges The variable pointing to the ranges (input/output).
+ * @param cMsrRanges The current number of ranges.
+ * @param cNewRanges The number of ranges to be added.
+ */
+static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
+{
+ if ( cMsrRanges + cNewRanges
+ > RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges) + (pVM ? 0 : 128 /* Catch too many MSRs in CPU reporter! */))
+ {
+ LogRel(("CPUM: Too many MSR ranges! %#x, max %#x\n",
+ cMsrRanges + cNewRanges, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aMsrRanges)));
+ return NULL;
+ }
+ if (pVM)
+ {
+ Assert(cMsrRanges == pVM->cpum.s.GuestInfo.cMsrRanges);
+ Assert(*ppaMsrRanges == pVM->cpum.s.GuestInfo.aMsrRanges);
+ }
+ else
+ {
+ if (cMsrRanges + cNewRanges > RT_ALIGN_32(cMsrRanges, 16))
+ {
+
+ uint32_t const cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
+ void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
+ if (pvNew)
+ *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
+ else
+ {
+ RTMemFree(*ppaMsrRanges);
+ *ppaMsrRanges = NULL;
+ return NULL;
+ }
+ }
+ }
+
+ return *ppaMsrRanges;
+}
+
+
+/**
+ * Inserts a new MSR range into a sorted MSR range array.
+ *
+ * If the new MSR range overlaps existing ranges, the existing ones will be
+ * adjusted/removed to fit in the new one.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_NO_MEMORY
+ *
+ * @param pVM The cross context VM structure. If NULL,
+ * use the process heap, otherwise the VM's hyper heap.
+ * @param ppaMsrRanges The variable pointing to the ranges (input/output).
+ * Must be NULL if using the hyper heap.
+ * @param pcMsrRanges The variable holding number of ranges. Must be NULL
+ * if using the hyper heap.
+ * @param pNewRange The new range.
+ */
+int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
+{
+ Assert(pNewRange->uLast >= pNewRange->uFirst);
+ Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
+ Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);
+
+ /*
+ * Validate and use the VM's MSR ranges array if we are using the hyper heap.
+ */
+ if (pVM)
+ {
+ AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
+ AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM->cpum.s.GuestInfo.paMsrRangesR3 == pVM->cpum.s.GuestInfo.aMsrRanges, VERR_INTERNAL_ERROR_3);
+
+ ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
+ pcMsrRanges = &pVM->cpum.s.GuestInfo.cMsrRanges;
+ }
+ else
+ {
+ AssertReturn(ppaMsrRanges, VERR_INVALID_POINTER);
+ AssertReturn(pcMsrRanges, VERR_INVALID_POINTER);
+ }
+
+ uint32_t cMsrRanges = *pcMsrRanges;
+ PCPUMMSRRANGE paMsrRanges = *ppaMsrRanges;
+
+ /*
+ * Optimize the linear insertion case where we add new entries at the end.
+ */
+ if ( cMsrRanges > 0
+ && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
+ {
+ paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
+ if (!paMsrRanges)
+ return VERR_NO_MEMORY;
+ paMsrRanges[cMsrRanges] = *pNewRange;
+ *pcMsrRanges += 1;
+ }
+ else
+ {
+ uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
+ Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
+ Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);
+
+ /*
+ * Adding an entirely new entry?
+ */
+ if ( i >= cMsrRanges
+ || pNewRange->uLast < paMsrRanges[i].uFirst)
+ {
+ paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
+ if (!paMsrRanges)
+ return VERR_NO_MEMORY;
+ if (i < cMsrRanges)
+ memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+ paMsrRanges[i] = *pNewRange;
+ *pcMsrRanges += 1;
+ }
+ /*
+ * Replace existing entry?
+ */
+ else if ( pNewRange->uFirst == paMsrRanges[i].uFirst
+ && pNewRange->uLast == paMsrRanges[i].uLast)
+ paMsrRanges[i] = *pNewRange;
+ /*
+ * Splitting an existing entry?
+ */
+ else if ( pNewRange->uFirst > paMsrRanges[i].uFirst
+ && pNewRange->uLast < paMsrRanges[i].uLast)
+ {
+ paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
+ if (!paMsrRanges)
+ return VERR_NO_MEMORY;
+ if (i < cMsrRanges)
+ memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+ paMsrRanges[i + 1] = *pNewRange;
+ paMsrRanges[i + 2] = paMsrRanges[i];
+ paMsrRanges[i ].uLast = pNewRange->uFirst - 1;
+ paMsrRanges[i + 2].uFirst = pNewRange->uLast + 1;
+ *pcMsrRanges += 2;
+ }
+ /*
+ * Complicated scenarios that can affect more than one range.
+ *
+ * The current code does not optimize memmove calls when replacing
+ * one or more existing ranges, because it's tedious to deal with and
+ * not expected to be a frequent usage scenario.
+ */
+ else
+ {
+ /* Adjust start of first match? */
+ if ( pNewRange->uFirst <= paMsrRanges[i].uFirst
+ && pNewRange->uLast < paMsrRanges[i].uLast)
+ paMsrRanges[i].uFirst = pNewRange->uLast + 1;
+ else
+ {
+ /* Adjust end of first match? */
+ if (pNewRange->uFirst > paMsrRanges[i].uFirst)
+ {
+ Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
+ paMsrRanges[i].uLast = pNewRange->uFirst - 1;
+ i++;
+ }
+ /* Replace the whole first match (lazy bird). */
+ else
+ {
+ if (i + 1 < cMsrRanges)
+ memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
+ cMsrRanges = *pcMsrRanges -= 1;
+ }
+
+                /* Does the new range affect more ranges? */
+ while ( i < cMsrRanges
+ && pNewRange->uLast >= paMsrRanges[i].uFirst)
+ {
+ if (pNewRange->uLast < paMsrRanges[i].uLast)
+ {
+ /* Adjust the start of it, then we're done. */
+ paMsrRanges[i].uFirst = pNewRange->uLast + 1;
+ break;
+ }
+
+ /* Remove it entirely. */
+ if (i + 1 < cMsrRanges)
+ memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
+ cMsrRanges = *pcMsrRanges -= 1;
+ }
+ }
+
+ /* Now, perform a normal insertion. */
+ paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
+ if (!paMsrRanges)
+ return VERR_NO_MEMORY;
+ if (i < cMsrRanges)
+ memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+ paMsrRanges[i] = *pNewRange;
+ *pcMsrRanges += 1;
+ }
+ }
+
+ return VINF_SUCCESS;
+}
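+
+/*
+ * Worked example of the splitting case (hypothetical table): inserting the
+ * range 0x15..0x16 into a table holding the single range 0x10..0x1f yields
+ * three ranges, 0x10..0x14, the new 0x15..0x16 and 0x17..0x1f. The table
+ * grows by two entries, which is why cpumR3MsrRangesEnsureSpace is asked for
+ * two slots in that path.
+ */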
+
+
+/**
+ * Reconciles CPUID info with MSRs (selected ones).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int cpumR3MsrReconcileWithCpuId(PVM pVM)
+{
+ PCCPUMMSRRANGE papToAdd[10];
+ uint32_t cToAdd = 0;
+
+ /*
+     * The IA32_FLUSH_CMD MSR was introduced in MCUs for CVE-2018-3646 and related issues.
+ */
+ if (pVM->cpum.s.GuestFeatures.fFlushCmd && !cpumLookupMsrRange(pVM, MSR_IA32_FLUSH_CMD))
+ {
+ static CPUMMSRRANGE const s_FlushCmd =
+ {
+ /*.uFirst =*/ MSR_IA32_FLUSH_CMD,
+ /*.uLast =*/ MSR_IA32_FLUSH_CMD,
+ /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly,
+ /*.enmWrFn =*/ kCpumMsrWrFn_Ia32FlushCmd,
+ /*.offCpumCpu =*/ UINT16_MAX,
+ /*.fReserved =*/ 0,
+ /*.uValue =*/ 0,
+ /*.fWrIgnMask =*/ 0,
+ /*.fWrGpMask =*/ ~MSR_IA32_FLUSH_CMD_F_L1D,
+ /*.szName = */ "IA32_FLUSH_CMD"
+ };
+ papToAdd[cToAdd++] = &s_FlushCmd;
+ }
+
+ /*
+ * The MSR_IA32_ARCH_CAPABILITIES was introduced in various spectre MCUs, or at least
+ * documented in relation to such.
+ */
+ if (pVM->cpum.s.GuestFeatures.fArchCap && !cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES))
+ {
+ static CPUMMSRRANGE const s_ArchCaps =
+ {
+ /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES,
+ /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
+ /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities,
+ /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
+ /*.offCpumCpu =*/ UINT16_MAX,
+ /*.fReserved =*/ 0,
+ /*.uValue =*/ 0,
+ /*.fWrIgnMask =*/ 0,
+ /*.fWrGpMask =*/ UINT64_MAX,
+ /*.szName = */ "IA32_ARCH_CAPABILITIES"
+ };
+ papToAdd[cToAdd++] = &s_ArchCaps;
+ }
+
+ /*
+ * Do the adding.
+ */
+ for (uint32_t i = 0; i < cToAdd; i++)
+ {
+ PCCPUMMSRRANGE pRange = papToAdd[i];
+ LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
+ int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
+ pRange);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for cpumR3MsrApplyFudge that applies one table.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param paRanges Array of MSRs to fudge.
+ * @param cRanges Number of MSRs in the array.
+ */
+static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
+{
+ for (uint32_t i = 0; i < cRanges; i++)
+ if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
+ {
+ LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
+ int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
+ &paRanges[i]);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Fudges the MSRs that guests are known to access in some odd cases.
+ *
+ * A typical example is a VM that has been moved between different hosts where
+ * for instance the cpu vendor differs.
+ *
+ * Another example is older CPU profiles (e.g. Atom Bonnell) for newer CPUs (e.g.
+ * Atom Silvermont), where features reported thru CPUID aren't present in the
+ * MSRs (e.g. AMD64_TSC_AUX).
+ *
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int cpumR3MsrApplyFudge(PVM pVM)
+{
+ /*
+ * Basic.
+ */
+ static CPUMMSRRANGE const s_aFudgeMsrs[] =
+ {
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr),
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX),
+ MVO(0x00000017, "IA32_PLATFORM_ID", 0),
+ MFN(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase),
+ MVI(0x0000008b, "BIOS_SIGN", 0),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0),
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x005, 0, 0),
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0),
+ MFN(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable),
+ MFN(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl),
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp),
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp),
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp),
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
+ MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ };
+ int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * XP might mistake opterons and other newer CPUs for P4s.
+ */
+ if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
+ {
+ static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
+ {
+ MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
+ };
+ rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ if (pVM->cpum.s.GuestFeatures.fRdTscP)
+ {
+ static CPUMMSRRANGE const s_aRdTscPFudgeMsrs[] =
+ {
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX),
+ };
+ rc = cpumR3MsrApplyFudgeTable(pVM, &s_aRdTscPFudgeMsrs[0], RT_ELEMENTS(s_aRdTscPFudgeMsrs));
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Windows 10 incorrectly writes to MSR_IA32_TSX_CTRL without checking
+ * CPUID.ARCH_CAP(EAX=7h,ECX=0):EDX[bit 29] or the MSR feature bits in
+ * MSR_IA32_ARCH_CAPABILITIES[bit 7], see @bugref{9630}.
+ * Ignore writes to this MSR and return 0 on reads.
+ */
+ if (pVM->cpum.s.GuestFeatures.fArchCap)
+ {
+ static CPUMMSRRANGE const s_aTsxCtrl[] =
+ {
+ MVI(MSR_IA32_TSX_CTRL, "IA32_TSX_CTRL", 0),
+ };
+ rc = cpumR3MsrApplyFudgeTable(pVM, &s_aTsxCtrl[0], RT_ELEMENTS(s_aTsxCtrl));
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ return rc;
+}
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+
+/**
+ * Do we consider @a enmConsider a better match for @a enmTarget than
+ * @a enmFound?
+ *
+ * Only called when @a enmConsider isn't exactly what we're looking for.
+ *
+ * @returns true/false.
+ * @param enmConsider The new microarch to consider.
+ * @param enmTarget The target microarch.
+ * @param enmFound The best microarch match we've found thus far.
+ */
+DECLINLINE(bool) cpumR3DbIsBetterMarchMatch(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
+{
+ Assert(enmConsider != enmTarget);
+
+ /*
+     * If we've already got a march match, don't bother with enmConsider.
+ */
+ if (enmFound == enmTarget)
+ return false;
+
+ /*
+ * Found is below: Pick 'consider' if it's closer to the target or above it.
+ */
+ if (enmFound < enmTarget)
+ return enmConsider > enmFound;
+
+ /*
+ * Found is above: Pick 'consider' if it's also above (paranoia: or equal)
+     * but closer to the target.
+ */
+ return enmConsider >= enmTarget && enmConsider < enmFound;
+}
+
+
+/**
+ * Do we consider @a enmConsider a better match for @a enmTarget than
+ * @a enmFound?
+ *
+ * Only called for intel family 06h CPUs.
+ *
+ * @returns true/false.
+ * @param enmConsider The new microarch to consider.
+ * @param enmTarget The target microarch.
+ * @param enmFound The best microarch match we've found thus far.
+ */
+static bool cpumR3DbIsBetterIntelFam06Match(CPUMMICROARCH enmConsider, CPUMMICROARCH enmTarget, CPUMMICROARCH enmFound)
+{
+ /* Check intel family 06h claims. */
+ AssertReturn(enmConsider >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmConsider <= kCpumMicroarch_Intel_P6_Core_Atom_End,
+ false);
+ AssertReturn( (enmTarget >= kCpumMicroarch_Intel_P6_Core_Atom_First && enmTarget <= kCpumMicroarch_Intel_P6_Core_Atom_End)
+ || enmTarget == kCpumMicroarch_Intel_Unknown,
+ false);
+
+ /* Put matches out of the way. */
+ if (enmConsider == enmTarget)
+ return true;
+ if (enmFound == enmTarget)
+ return false;
+
+ /* If found isn't a family 06h march, whatever we're considering must be a better choice. */
+ if ( enmFound < kCpumMicroarch_Intel_P6_Core_Atom_First
+ || enmFound > kCpumMicroarch_Intel_P6_Core_Atom_End)
+ return true;
+
+ /*
+ * The family 06h stuff is split into three categories:
+ * - Common P6 heritage
+ * - Core
+ * - Atom
+ *
+     * Determine which of the three arguments are Atom marchs, because that's
+ * all we need to make the right choice.
+ */
+ bool const fConsiderAtom = enmConsider >= kCpumMicroarch_Intel_Atom_First;
+ bool const fTargetAtom = enmTarget >= kCpumMicroarch_Intel_Atom_First;
+ bool const fFoundAtom = enmFound >= kCpumMicroarch_Intel_Atom_First;
+
+ /*
+ * Want atom:
+ */
+ if (fTargetAtom)
+ {
+        /* Pick the atom if we've got one of each. */
+ if (fConsiderAtom != fFoundAtom)
+ return fConsiderAtom;
+ /* If we haven't got any atoms under consideration, pick a P6 or the earlier core.
+ Note! Not entirely sure Dothan is the best choice, but it'll do for now. */
+ if (!fConsiderAtom)
+ {
+ if (enmConsider > enmFound)
+ return enmConsider <= kCpumMicroarch_Intel_P6_M_Dothan;
+ return enmFound > kCpumMicroarch_Intel_P6_M_Dothan;
+ }
+ /* else: same category, default comparison rules. */
+ Assert(fConsiderAtom && fFoundAtom);
+ }
+ /*
+ * Want non-atom:
+ */
+ /* Pick the non-atom if we've got one of each. */
+ else if (fConsiderAtom != fFoundAtom)
+ return fFoundAtom;
+ /* If we've only got atoms under consideration, pick the older one just to pick something. */
+ else if (fConsiderAtom)
+ return enmConsider < enmFound;
+ else
+ Assert(!fConsiderAtom && !fFoundAtom);
+
+ /*
+ * Same basic category. Do same compare as caller.
+ */
+ return cpumR3DbIsBetterMarchMatch(enmConsider, enmTarget, enmFound);
+}
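+
+/*
+ * Example of the category rule above (microarch enum names assumed): with an
+ * Atom target such as kCpumMicroarch_Intel_Atom_Goldmont, a candidate
+ * kCpumMicroarch_Intel_Atom_Bonnell beats a previously found
+ * kCpumMicroarch_Intel_Core7_Haswell, because the Atom vs non-Atom category
+ * is decided before closeness to the target is considered.
+ */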
+
+#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */
+
+int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
+{
+ CPUMDBENTRY const *pEntry = NULL;
+ int rc;
+
+ if (!strcmp(pszName, "host"))
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ {
+ /*
+ * Create a CPU database entry for the host CPU. This means getting
+ * the CPUID bits from the real CPU and grabbing the closest matching
+ * database entry for MSRs.
+ */
+ rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = CPUMCpuIdCollectLeavesX86(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
+ if (RT_FAILURE(rc))
+ return rc;
+ pInfo->fMxCsrMask = CPUMR3DeterminHostMxCsrMask();
+
+ /* Lookup database entry for MSRs. */
+ CPUMCPUVENDOR const enmVendor = CPUMCpuIdDetectX86VendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
+ pInfo->paCpuIdLeavesR3[0].uEbx,
+ pInfo->paCpuIdLeavesR3[0].uEcx,
+ pInfo->paCpuIdLeavesR3[0].uEdx);
+ uint32_t const uStd1Eax = pInfo->paCpuIdLeavesR3[1].uEax;
+ uint8_t const uFamily = RTX86GetCpuFamily(uStd1Eax);
+ uint8_t const uModel = RTX86GetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
+ uint8_t const uStepping = RTX86GetCpuStepping(uStd1Eax);
+ CPUMMICROARCH const enmMicroarch = CPUMCpuIdDetermineX86MicroarchEx(enmVendor, uFamily, uModel, uStepping);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
+ {
+ CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
+ if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
+ {
+                /* Match against family, microarch, model and stepping. Except
+                   for family, always take the closest match, with preference
+                   given to the later/older ones. */
+ if (pCur->uFamily == uFamily)
+ {
+ if (pCur->enmMicroarch == enmMicroarch)
+ {
+ if (pCur->uModel == uModel)
+ {
+ if (pCur->uStepping == uStepping)
+ {
+ /* Perfect match. */
+ pEntry = pCur;
+ break;
+ }
+
+ if ( !pEntry
+ || pEntry->uModel != uModel
+ || pEntry->enmMicroarch != enmMicroarch
+ || pEntry->uFamily != uFamily)
+ pEntry = pCur;
+ else if ( pCur->uStepping >= uStepping
+ ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
+ : pCur->uStepping > pEntry->uStepping)
+ pEntry = pCur;
+ }
+ else if ( !pEntry
+ || pEntry->enmMicroarch != enmMicroarch
+ || pEntry->uFamily != uFamily)
+ pEntry = pCur;
+ else if ( pCur->uModel >= uModel
+ ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
+ : pCur->uModel > pEntry->uModel)
+ pEntry = pCur;
+ }
+ else if ( !pEntry
+ || pEntry->uFamily != uFamily)
+ pEntry = pCur;
+                /* Special march matching rules apply to Intel family 06h. */
+ else if ( enmVendor == CPUMCPUVENDOR_INTEL
+ && uFamily == 6
+ ? cpumR3DbIsBetterIntelFam06Match(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch)
+ : cpumR3DbIsBetterMarchMatch(pCur->enmMicroarch, enmMicroarch, pEntry->enmMicroarch))
+ pEntry = pCur;
+ }
+ /* We don't do closeness matching on family, we use the first
+ entry for the CPU vendor instead. (P4 workaround.) */
+ else if (!pEntry)
+ pEntry = pCur;
+ }
+ }
+
+ if (pEntry)
+ LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
+ CPUMCpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMMicroarchName(enmMicroarch),
+ pEntry->pszName, CPUMCpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
+ pEntry->uStepping, CPUMMicroarchName(pEntry->enmMicroarch) ));
+ else
+ {
+ pEntry = g_apCpumDbEntries[0];
+ LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'\n",
+ CPUMCpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMMicroarchName(enmMicroarch),
+ pEntry->pszName));
+ }
+ }
+ else
+#else
+ pszName = g_apCpumDbEntries[0]->pszName; /* Just pick the first entry for non-x86 hosts. */
+#endif
+ {
+ /*
+ * We're supposed to be emulating a specific CPU that is included in
+         * our CPU database. The CPUID tables need to be copied onto the
+ * heap so the caller can modify them and so they can be freed like
+ * in the host case above.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
+ if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
+ {
+ pEntry = g_apCpumDbEntries[i];
+ break;
+ }
+ if (!pEntry)
+ {
+ LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
+ return VERR_CPUM_DB_CPU_NOT_FOUND;
+ }
+
+ pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
+ if (pEntry->cCpuIdLeaves)
+ {
+ /* Must allocate a multiple of 16 here, matching cpumR3CpuIdEnsureSpace. */
+ size_t cbExtra = sizeof(pEntry->paCpuIdLeaves[0]) * (RT_ALIGN(pEntry->cCpuIdLeaves, 16) - pEntry->cCpuIdLeaves);
+ pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDupEx(pEntry->paCpuIdLeaves,
+ sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves,
+ cbExtra);
+ if (!pInfo->paCpuIdLeavesR3)
+ return VERR_NO_MEMORY;
+ }
+ else
+ pInfo->paCpuIdLeavesR3 = NULL;
+
+ pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
+ pInfo->DefCpuId = pEntry->DefUnknownCpuId;
+ pInfo->fMxCsrMask = pEntry->fMxCsrMask;
+
+ LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
+ pEntry->pszName, CPUMCpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
+ pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMMicroarchName(pEntry->enmMicroarch) ));
+ }
+
+ pInfo->fMsrMask = pEntry->fMsrMask;
+ pInfo->iFirstExtCpuIdLeaf = 0; /* Set by caller. */
+ pInfo->uScalableBusFreq = pEntry->uScalableBusFreq;
+
+ /*
+ * Copy the MSR range.
+ */
+ uint32_t cMsrs = 0;
+ PCPUMMSRRANGE paMsrs = NULL;
+
+ PCCPUMMSRRANGE pCurMsr = pEntry->paMsrRanges;
+ uint32_t cLeft = pEntry->cMsrRanges;
+ while (cLeft-- > 0)
+ {
+ rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
+ if (RT_FAILURE(rc))
+ {
+ Assert(!paMsrs); /* The above function frees this. */
+ RTMemFree(pInfo->paCpuIdLeavesR3);
+ pInfo->paCpuIdLeavesR3 = NULL;
+ return rc;
+ }
+ pCurMsr++;
+ }
+
+ pInfo->paMsrRangesR3 = paMsrs;
+ pInfo->cMsrRanges = cMsrs;
+ return VINF_SUCCESS;
+}
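+
+/*
+ * Usage sketch for the lookup above: passing "host" selects a profile derived
+ * from the real CPU (on x86/amd64 hosts), while any other name must match a
+ * database entry's pszName exactly, e.g. (entry name assumed):
+ *
+ *      CPUMINFO Info;
+ *      int rc = cpumR3DbGetCpuInfo("Intel Core i7-6700K", &Info);
+ */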
+
+
+/**
+ * Insert an MSR range into the VM.
+ *
+ * If the new MSR range overlaps existing ranges, the existing ones will be
+ * adjusted/removed to fit in the new one.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pNewRange Pointer to the MSR range being inserted.
+ */
+VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pNewRange, VERR_INVALID_PARAMETER);
+
+ return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
+}
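+
+/*
+ * Usage sketch (hypothetical MSR index and name): code that wants the guest to
+ * see an extra read-as-zero, write-ignored MSR could build the range with the
+ * MVI macro from this file and insert it:
+ *
+ *      static CPUMMSRRANGE const s_ExampleMsr = MVI(0x00004711, "EXAMPLE_MSR", 0);
+ *      int rc = CPUMR3MsrRangesInsert(pVM, &s_ExampleMsr);
+ *
+ * Overlaps with existing ranges are resolved as described for
+ * cpumR3MsrRangesInsert above.
+ */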
+
+
+/**
+ * Register statistics for the MSRs.
+ *
+ * This must not be called before the MSRs have been finalized and moved to the
+ * hyper heap.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int cpumR3MsrRegStats(PVM pVM)
+{
+ /*
+ * Global statistics.
+ */
+ PCPUM pCpum = &pVM->cpum.s;
+ STAM_REL_REG(pVM, &pCpum->cMsrReads, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Reads",
+ STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
+ STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsRaisingGP",
+ STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
+ STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsUnknown",
+ STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
+ STAM_REL_REG(pVM, &pCpum->cMsrWrites, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Writes",
+ STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesRaisingGP",
+ STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
+ STAMUNIT_OCCURENCES, "Writing of ignored bits.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesUnknown",
+ STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");
+
+
+# ifdef VBOX_WITH_STATISTICS
+ /*
+ * Per range.
+ */
+ PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
+ uint32_t cRanges = pVM->cpum.s.GuestInfo.cMsrRanges;
+ for (uint32_t i = 0; i < cRanges; i++)
+ {
+ char szName[160];
+ ssize_t cchName;
+
+ if (paRanges[i].uFirst == paRanges[i].uLast)
+ cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
+ paRanges[i].uFirst, paRanges[i].szName);
+ else
+ cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
+ paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
+ STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
+ STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
+ STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
+ STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
+ }
+# endif /* VBOX_WITH_STATISTICS */
+
+ return VINF_SUCCESS;
+}
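+
+/*
+ * For example, a single-MSR range for IA32_PAT (0x00000277) would, given the
+ * name formats above, register the counters "/CPUM/MSRs/0x00000277-IA32_PAT-reads",
+ * "...-writes", "...-GPs" and "...-ign-bits-writes".
+ */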
+
+#endif /* !CPUM_DB_STANDALONE */
+
diff --git a/src/VBox/VMM/VMMR3/DBGF.cpp b/src/VBox/VMM/VMMR3/DBGF.cpp
new file mode 100644
index 00000000..1563ac0c
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGF.cpp
@@ -0,0 +1,2352 @@
+/* $Id: DBGF.cpp $ */
+/** @file
+ * DBGF - Debugger Facility.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf DBGF - The Debugger Facility
+ *
+ * The purpose of the DBGF is to provide an interface for debuggers to
+ * manipulate the VMM without having to mess up the source code for each of
+ * them. The DBGF is always built in and will always work when a debugger
+ * attaches to the VM. The DBGF provides the basic debugger features, such as
+ * halting execution, handling breakpoints, single step execution, instruction
+ * disassembly, info querying, OS specific diggers, symbol and module
+ * management.
+ *
+ * The interface is working in a manner similar to the win32, linux and os2
+ * debugger interfaces. The interface has an asynchronous nature. This comes
+ * from the fact that the VMM and the Debugger are running in different threads.
+ * They are referred to as the "emulation thread" and the "debugger thread", or
+ * as the "ping thread" and the "pong thread", respectively. (The last set of
+ * names comes from the use of the Ping-Pong synchronization construct from the
+ * RTSem API.)
+ *
+ * @see grp_dbgf
+ *
+ *
+ * @section sec_dbgf_scenario Usage Scenario
+ *
+ * The debugger starts by attaching to the VM. For practical reasons we limit the
+ * number of concurrently attached debuggers to 1 per VM. The action of
+ * attaching to the VM causes the VM to check and generate debug events.
+ *
+ * The debugger then will wait/poll for debug events and issue commands.
+ *
+ * The waiting and polling is done by the DBGFEventWait() function. It will wait
+ * for the emulation thread to send a ping, thus indicating that there is an
+ * event waiting to be processed.
+ *
+ * An event can be a response to a command issued previously, the hitting of a
+ * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
+ * the ping and must respond to the event at hand - the VMM is waiting. This
+ * usually means that the user of the debugger must do something, but it doesn't
+ * have to. The debugger is free to call any DBGF function (nearly at least)
+ * while processing the event.
+ *
+ * Typically the user will issue a request for the execution to be resumed, so
+ * the debugger calls DBGFResume() and goes back to waiting/polling for events.
+ *
+ * When the user eventually terminates the debugging session or selects another
+ * VM, the debugger detaches from the VM. This means that breakpoints are
+ * disabled and that the emulation thread no longer polls for debugger commands.
+ *
+ */
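+
+/*
+ * A minimal sketch of the debugger-side loop described above (error handling
+ * omitted; the ring-3 entry point names DBGFR3Attach, DBGFR3EventWait,
+ * DBGFR3Resume and DBGFR3Detach are assumed here):
+ *
+ *      DBGFR3Attach(pUVM);
+ *      for (;;)
+ *      {
+ *          DBGFEVENT Event;
+ *          if (RT_FAILURE(DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event)))
+ *              break;
+ *          ... inspect state, manage breakpoints, issue commands ...
+ *          DBGFR3Resume(pUVM, Event.idCpu);
+ *      }
+ *      DBGFR3Detach(pUVM);
+ */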
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/nem.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/semaphore.h>
+#include <iprt/thread.h>
+#include <iprt/asm.h>
+#include <iprt/time.h>
+#include <iprt/assert.h>
+#include <iprt/stream.h>
+#include <iprt/env.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Instruction type returned by dbgfStepGetCurInstrType.
+ */
+typedef enum DBGFSTEPINSTRTYPE
+{
+ DBGFSTEPINSTRTYPE_INVALID = 0,
+ DBGFSTEPINSTRTYPE_OTHER,
+ DBGFSTEPINSTRTYPE_RET,
+ DBGFSTEPINSTRTYPE_CALL,
+ DBGFSTEPINSTRTYPE_END,
+ DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
+} DBGFSTEPINSTRTYPE;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
+DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
+static int dbgfR3CpuWait(PVMCPU pVCpu);
+static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
+static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
+static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
+static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
+
+
+
+/**
+ * Initializes the DBGF.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
+ AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
+
+ pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
+
+ /*
+ * The usual sideways mountain climbing style of init:
+ */
+    int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3TraceInit(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3RegInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3AsInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3BpInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3OSInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3PlugInInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3BugCheckInit(pVM);
+ if (RT_SUCCESS(rc))
+ {
+#ifdef VBOX_WITH_DBGF_TRACING
+ rc = dbgfR3TracerInit(pVM);
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ return VINF_SUCCESS;
+ }
+ }
+ dbgfR3PlugInTerm(pUVM);
+ }
+ dbgfR3OSTermPart1(pUVM);
+ dbgfR3OSTermPart2(pUVM);
+ }
+ dbgfR3BpTerm(pUVM);
+ }
+ dbgfR3AsTerm(pUVM);
+ }
+ dbgfR3RegTerm(pUVM);
+ }
+ dbgfR3TraceTerm(pVM);
+ }
+ dbgfR3InfoTerm(pUVM);
+ }
+ return rc;
+}
+
+
+/**
+ * Terminates and cleans up resources allocated by the DBGF.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+
+#ifdef VBOX_WITH_DBGF_TRACING
+ dbgfR3TracerTerm(pVM);
+#endif
+ dbgfR3OSTermPart1(pUVM);
+ dbgfR3PlugInTerm(pUVM);
+ dbgfR3OSTermPart2(pUVM);
+ dbgfR3BpTerm(pUVM);
+ dbgfR3AsTerm(pUVM);
+ dbgfR3RegTerm(pUVM);
+ dbgfR3TraceTerm(pVM);
+ dbgfR3InfoTerm(pUVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This is for tstCFGM and others to avoid triggering leak detection.
+ *
+ * @param pUVM The user mode VM structure.
+ */
+VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
+{
+ dbgfR3InfoTerm(pUVM);
+}
+
+
+/**
+ * Called when the VM is powered off to detach debuggers.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
+{
+ /*
+ * Send a termination event to any attached debugger.
+ */
+ if (pVM->dbgf.s.fAttached)
+ {
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
+ AssertLogRelRC(rc);
+
+ /*
+ * Clear the FF so we won't get confused later on.
+ */
+ VM_FF_CLEAR(pVM, VM_FF_DBGF);
+ }
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ dbgfR3TraceRelocate(pVM);
+ dbgfR3AsRelocate(pVM->pUVM, offDelta);
+}
+
+
+/**
+ * Waits a little while for a debugger to attach.
+ *
+ * @returns True if a debugger has attached.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per CPU structure.
+ * @param enmEvent Event.
+ *
+ * @thread EMT(pVCpu)
+ */
+bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
+{
+ /*
+ * First a message.
+ */
+#if !defined(DEBUG)
+ int cWait = 10;
+#else
+ int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
+ || ( ( enmEvent == DBGFEVENT_ASSERTION_HYPER
+ || enmEvent == DBGFEVENT_FATAL_ERROR)
+ && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
+ ? 10
+ : 150;
+#endif
+ RTStrmPrintf(g_pStdErr,
+ "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
+#ifdef DEBUG
+                 "   Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for a short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 for a longer one.\n"
+#endif
+ ,
+ cWait / 10, cWait != 10 ? "s" : "", enmEvent);
+ RTStrmFlush(g_pStdErr);
+ while (cWait > 0)
+ {
+ RTThreadSleep(100);
+ if (pVM->dbgf.s.fAttached)
+ {
+ RTStrmPrintf(g_pStdErr, "Attached!\n");
+ RTStrmFlush(g_pStdErr);
+ return true;
+ }
+
+ /* Process rendezvous (debugger attaching involves such). */
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
+ if (rc != VINF_SUCCESS)
+ {
+ /** @todo Ignoring these could be bad. */
+ RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
+ RTStrmFlush(g_pStdErr);
+ }
+ }
+
+ /* Process priority stuff. */
+ if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
+ {
+ int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
+ if (rc == VINF_SUCCESS)
+ rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
+ if (rc != VINF_SUCCESS)
+ {
+ /** @todo Ignoring these could be bad. */
+ RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
+ RTStrmFlush(g_pStdErr);
+ }
+ }
+
+ /* next */
+ if (!(cWait % 10))
+ {
+ RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
+ RTStrmFlush(g_pStdErr);
+ }
+ cWait--;
+ }
+
+ RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
+ RTStrmFlush(g_pStdErr);
+ return false;
+}
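+
+/*
+ * Reading the wait logic above: release builds always wait 1 second (10 ticks
+ * of 100 ms). Debug builds wait 15 seconds, except that assertion and fatal
+ * error events only get the 1 second wait unless VBOX_DBGF_WAIT_FOR_ATTACH=1
+ * is set; VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 forces the short wait in all cases.
+ */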
+
+
+/**
+ * Forced action callback.
+ *
+ * The VMM will call this from its main loop when either VM_FF_DBGF or
+ * VMCPU_FF_DBGF are set.
+ *
+ * The function checks for and executes pending commands from the debugger.
+ * Then it checks for pending debug events and serves these.
+ *
+ * @returns VINF_SUCCESS normally.
+ * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per CPU structure.
+ */
+VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
+{
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+ /*
+ * Dispatch pending events.
+ */
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
+ {
+ if ( pVCpu->dbgf.s.cEvents > 0
+ && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
+ {
+ rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
+ /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
+ }
+
+ /*
+ * Command pending? Process it.
+ */
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
+ if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
+ {
+ bool fResumeExecution;
+ DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
+ DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
+ VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
+ if (!fResumeExecution)
+ rcStrict2 = dbgfR3CpuWait(pVCpu);
+ if ( rcStrict2 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || RT_FAILURE(rcStrict2)
+ || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
+ rcStrict = rcStrict2;
+ }
+ }
+
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
+
+/**
+ * Try to determine the event context.
+ *
+ * @returns debug event context.
+ * @param pVCpu The cross context vCPU structure.
+ */
+static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
+{
+ switch (EMGetState(pVCpu))
+ {
+ case EMSTATE_HM:
+ case EMSTATE_NEM:
+ case EMSTATE_DEBUG_GUEST_HM:
+ case EMSTATE_DEBUG_GUEST_NEM:
+ return DBGFEVENTCTX_HM;
+
+ case EMSTATE_IEM:
+ case EMSTATE_RAW:
+ case EMSTATE_IEM_THEN_REM:
+ case EMSTATE_DEBUG_GUEST_IEM:
+ case EMSTATE_DEBUG_GUEST_RAW:
+ return DBGFEVENTCTX_RAW;
+
+ case EMSTATE_REM:
+ case EMSTATE_DEBUG_GUEST_REM:
+ return DBGFEVENTCTX_REM;
+
+ case EMSTATE_DEBUG_HYPER:
+ case EMSTATE_GURU_MEDITATION:
+ return DBGFEVENTCTX_HYPER;
+
+ default:
+ return DBGFEVENTCTX_OTHER;
+ }
+}
+
+
+/**
+ * Sends the event to the debugger (i.e. adds it to the event ring buffer).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The CPU sending the event.
+ * @param enmType The event type to send.
+ * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
+ * @param pvPayload Event payload (DBGFEVENT::u data), optional.
+ * @param cbPayload The size of the event payload, optional.
+ */
+static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
+ void const *pvPayload, size_t cbPayload)
+{
+ PUVM pUVM = pVM->pUVM;
+ pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
+
+ /*
+ * Massage the input a little.
+ */
+ AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
+ if (enmCtx == DBGFEVENTCTX_INVALID)
+ enmCtx = dbgfR3FigureEventCtx(pVCpu);
+
+ /*
+ * Put the event into the ring buffer.
+ */
+ RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
+
+ uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
+ uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
+ uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
+ /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
+
+ PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
+
+#ifdef DEBUG
+ ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
+#endif
+ pEvent->enmType = enmType;
+ pEvent->enmCtx = enmCtx;
+ pEvent->idCpu = pVCpu->idCpu;
+ pEvent->uReserved = 0;
+ if (cbPayload)
+ memcpy(&pEvent->u, pvPayload, cbPayload);
+
+ ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
+
+ RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
+
+ /*
+ * Signal the debugger.
+ */
+ return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
+}
+
+
+/**
+ * Send event and wait for the debugger to respond.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The CPU sending the event.
+ * @param enmType The event type to send.
+ * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
+ */
+DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
+{
+ int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3CpuWait(pVCpu);
+ return rc;
+}
+
+
+/**
+ * Send event and wait for the debugger to respond, extended version.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The CPU sending the event.
+ * @param enmType The event type to send.
+ * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
+ * @param pvPayload Event payload (DBGFEVENT::u data), optional.
+ * @param cbPayload The size of the event payload, optional.
+ */
+DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
+ void const *pvPayload, size_t cbPayload)
+{
+ int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3CpuWait(pVCpu);
+ return rc;
+}
+
+
+/**
+ * Send event but do NOT wait for the debugger.
+ *
+ * Currently only used by dbgfR3CpuCmd().
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The CPU sending the event.
+ * @param enmType The event type to send.
+ * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
+ */
+DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
+{
+ return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
+}
+
+
+/**
+ * The common event prologue code.
+ *
+ * It will make sure someone is attached, and perhaps process any high priority
+ * pending actions (none yet).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The vCPU cross context structure.
+ * @param enmEvent The event to be sent.
+ */
+static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
+{
+ /*
+ * Check if a debugger is attached.
+ */
+ if ( !pVM->dbgf.s.fAttached
+ && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
+ {
+ Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
+ return VERR_DBGF_NOT_ATTACHED;
+ }
+
+ /*
+ * Look through pending commands and finish those which make sense now.
+ */
+ /** @todo Process/purge pending commands. */
+ //int rc = DBGFR3VMMForcedAction(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Processes a pending event on the current CPU.
+ *
+ * This is called by EM in response to VINF_EM_DBG_EVENT.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per CPU structure.
+ *
+ * @thread EMT(pVCpu)
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
+
+ /*
+ * Check that we've got an event first.
+ */
+ AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
+ AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
+ PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
+
+ /*
+ * Make sure we've got a debugger and are allowed to speak to it.
+ */
+ int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
+ if (RT_FAILURE(rc))
+ {
+ /** @todo drop these events? */
+ return rc; /** @todo this will cause trouble if we're here via an FF! */
+ }
+
+ /*
+ * Send the event and mark it as ignored.
+ * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
+ */
+ VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
+ pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
+ return rcStrict;
+}
+
+
+/**
+ * Send a generic debugger event which takes no data.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmEvent The event to send.
+ * @internal
+ */
+VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Do stepping filtering.
+ */
+ /** @todo Would be better if we did some of this inside the execution
+ * engines. */
+ if ( enmEvent == DBGFEVENT_STEPPED
+ || enmEvent == DBGFEVENT_STEPPED_HYPER)
+ {
+ if (!dbgfStepAreWeThereYet(pVM, pVCpu))
+ return VINF_EM_DBG_STEP;
+ }
+
+ int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Send the event and process the reply communication.
+ */
+ return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
+}
+
+
+/**
+ * Send a debugger event which takes the full source file location.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmEvent The event to send.
+ * @param pszFile Source file.
+ * @param uLine Line number in source file.
+ * @param pszFunction Function name.
+ * @param pszFormat Message which accompanies the event.
+ * @param ... Message arguments.
+ * @internal
+ */
+VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
+{
+ va_list args;
+ va_start(args, pszFormat);
+ int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
+ va_end(args);
+ return rc;
+}
+
+
+/**
+ * Send a debugger event which takes the full source file location.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmEvent The event to send.
+ * @param pszFile Source file.
+ * @param uLine Line number in source file.
+ * @param pszFunction Function name.
+ * @param pszFormat Message which accompanies the event.
+ * @param args Message arguments.
+ * @internal
+ */
+VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Format the message.
+ */
+ char *pszMessage = NULL;
+ char szMessage[8192];
+ if (pszFormat && *pszFormat)
+ {
+ pszMessage = &szMessage[0];
+ RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
+ }
+
+ /*
+ * Send the event and process the reply communication.
+ */
+ DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
+ DbgEvent.u.Src.pszFile = pszFile;
+ DbgEvent.u.Src.uLine = uLine;
+ DbgEvent.u.Src.pszFunction = pszFunction;
+ DbgEvent.u.Src.pszMessage = pszMessage;
+ return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
+}
+
+
+/**
+ * Send a debugger event which takes the two assertion messages.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmEvent The event to send.
+ * @param pszMsg1 First assertion message.
+ * @param pszMsg2 Second assertion message.
+ */
+VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Send the event and process the reply communication.
+ */
+ DBGFEVENT DbgEvent;
+ DbgEvent.u.Assert.pszMsg1 = pszMsg1;
+ DbgEvent.u.Assert.pszMsg2 = pszMsg2;
+ return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
+}
+
+
+/**
+ * Breakpoint was hit somewhere.
+ * Figure out which breakpoint it is and notify the debugger.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
+ */
+VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Halt all the other vCPUs too, so the user gets the ability to inspect
+ * their state as well.
+ */
+ rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Send the event and process the reply communication.
+ */
+ DBGFEVENT DbgEvent;
+ DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
+ pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
+ if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
+ {
+ DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
+ return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
+ }
+
+ return VERR_DBGF_IPE_1;
+}
+
+
+/**
+ * Returns whether the given vCPU is waiting for the debugger.
+ *
+ * @returns Flag whether the vCPU is currently waiting for the debugger.
+ * @param pUVCpu The user mode vCPU structure.
+ */
+DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
+{
+ return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
+}
+
+
+/**
+ * Checks whether the given vCPU is waiting in the debugger.
+ *
+ * @returns Flag whether the indicated vCPU is halted; when VMCPUID_ALL is
+ * given, true is returned if at least one vCPU is halted.
+ * @param pUVM The user mode VM structure.
+ * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
+ */
+DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
+{
+ AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
+
+ /* Check that either the given vCPU or all are actually halted. */
+ if (idCpu != VMCPUID_ALL)
+ return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
+
+ for (VMCPUID i = 0; i < pUVM->cCpus; i++)
+ if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
+ return true;
+ return false;
+}
+
+
+/**
+ * Gets the pending debug command for this EMT/CPU, replacing it with
+ * DBGFCMD_NO_COMMAND.
+ *
+ * @returns Pending command.
+ * @param pUVCpu The user mode virtual CPU structure.
+ * @thread EMT(pUVCpu)
+ */
+DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
+{
+ DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
+ Log2(("DBGF: Getting command: %d\n", enmCmd));
+ return enmCmd;
+}
+
+
+/**
+ * Send a debug command to a CPU, making sure to notify it.
+ *
+ * @returns VBox status code.
+ * @param pUVCpu The user mode virtual CPU structure.
+ * @param enmCmd The command to submit to the CPU.
+ */
+DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
+{
+ Log2(("DBGF: Setting command to %d\n", enmCmd));
+ Assert(enmCmd != DBGFCMD_NO_COMMAND);
+ AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
+
+ ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
+ VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
+
+ VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pvUser);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
+ if ( pVCpu != (PVMCPU)pvUser
+ && !dbgfR3CpuIsHalted(pUVCpu))
+ dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Halts all vCPUs of the given VM except for the given one.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
+ */
+static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
+{
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
+}
+
+
+/**
+ * Waits for the debugger to respond.
+ *
+ * @returns VBox status code: VINF_SUCCESS, a VINF_EM_XXX status to pass on
+ * to EM, or a failure status.
+ * @param pVCpu The cross context vCPU structure.
+ */
+static int dbgfR3CpuWait(PVMCPU pVCpu)
+{
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
+
+ LogFlow(("dbgfR3CpuWait:\n"));
+ int rcRet = VINF_SUCCESS;
+
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
+
+ /*
+ * Waits for the debugger to reply (i.e. issue a command).
+ */
+ for (;;)
+ {
+ /*
+ * Wait.
+ */
+ for (;;)
+ {
+ /*
+ * Process forced flags before we go sleep.
+ */
+ if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
+ || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE))
+ {
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
+ break;
+
+ int rc;
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
+ else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
+ {
+ LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
+ rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
+ if (rc == VINF_SUCCESS)
+ rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
+ LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
+ }
+ else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
+ {
+ VMSTATE enmState = VMR3GetState(pVM);
+ switch (enmState)
+ {
+ case VMSTATE_FATAL_ERROR:
+ case VMSTATE_FATAL_ERROR_LS:
+ case VMSTATE_GURU_MEDITATION:
+ case VMSTATE_GURU_MEDITATION_LS:
+ rc = VINF_EM_SUSPEND;
+ break;
+ case VMSTATE_DESTROYING:
+ rc = VINF_EM_TERMINATE;
+ break;
+ default:
+ rc = VERR_DBGF_IPE_1;
+ AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
+ }
+ }
+ else
+ rc = VINF_SUCCESS;
+ if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
+ {
+ switch (rc)
+ {
+ case VINF_EM_DBG_BREAKPOINT:
+ case VINF_EM_DBG_STEPPED:
+ case VINF_EM_DBG_STEP:
+ case VINF_EM_DBG_STOP:
+ case VINF_EM_DBG_EVENT:
+ AssertMsgFailed(("rc=%Rrc\n", rc));
+ break;
+
+ /* return straight away */
+ case VINF_EM_TERMINATE:
+ case VINF_EM_OFF:
+ LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
+ return rc;
+
+ /* remember return code. */
+ default:
+ AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
+ RT_FALL_THRU();
+ case VINF_EM_RESET:
+ case VINF_EM_SUSPEND:
+ case VINF_EM_HALT:
+ case VINF_EM_RESUME:
+ case VINF_EM_RESCHEDULE:
+ case VINF_EM_RESCHEDULE_REM:
+ case VINF_EM_RESCHEDULE_RAW:
+ if (rc < rcRet || rcRet == VINF_SUCCESS)
+ rcRet = rc;
+ break;
+ }
+ }
+ else if (RT_FAILURE(rc))
+ {
+ LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
+ return rc;
+ }
+ }
+ else if (pVM->dbgf.s.fAttached)
+ {
+ int rc = VMR3WaitU(pUVCpu);
+ if (RT_FAILURE(rc))
+ {
+ LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
+ return rc;
+ }
+ }
+ else
+ {
+ LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
+ return rcRet;
+ }
+ }
+
+ /*
+ * Process the command.
+ */
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
+ bool fResumeExecution;
+ DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
+ DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
+ int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
+ if (fResumeExecution)
+ {
+ if (RT_FAILURE(rc))
+ rcRet = rc;
+ else if ( rc >= VINF_EM_FIRST
+ && rc <= VINF_EM_LAST
+ && (rc < rcRet || rcRet == VINF_SUCCESS))
+ rcRet = rc;
+ LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
+ ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
+ return rcRet;
+ }
+ }
+}
+
+
+/**
+ * Executes command from debugger.
+ *
+ * The caller is responsible for waiting or resuming execution based on the
+ * value returned in the *pfResumeExecution indicator.
+ *
+ * @returns VBox status code: VINF_SUCCESS, VINF_EM_DBG_STEP, or the status
+ * of sending the reply event.
+ * @param pVCpu The cross context vCPU structure.
+ * @param enmCmd The command in question.
+ * @param pCmdData Pointer to the command data.
+ * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
+ */
+static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
+{
+ RT_NOREF(pCmdData); /* for later */
+
+ /*
+ * The cases in this switch return directly if there is no event to send.
+ */
+ DBGFEVENTTYPE enmEvent;
+ DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
+ switch (enmCmd)
+ {
+ /*
+ * Halt is answered by an event saying that we've halted.
+ */
+ case DBGFCMD_HALT:
+ {
+ *pfResumeExecution = false;
+ enmEvent = DBGFEVENT_HALT_DONE;
+ break;
+ }
+
+
+ /*
+ * Resume is not answered, we just resume execution.
+ */
+ case DBGFCMD_GO:
+ {
+ pVCpu->dbgf.s.fSingleSteppingRaw = false;
+ *pfResumeExecution = true;
+ return VINF_SUCCESS;
+ }
+
+ /** @todo implement (and define) the rest of the commands. */
+
+ /*
+ * Single step, with trace into.
+ */
+ case DBGFCMD_SINGLE_STEP:
+ {
+ Log2(("Single step\n"));
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
+ {
+ if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
+ pVM->dbgf.s.SteppingFilter.uCallDepth++;
+ }
+ if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
+ {
+ pVCpu->dbgf.s.fSingleSteppingRaw = true;
+ *pfResumeExecution = true;
+ return VINF_EM_DBG_STEP;
+ }
+ /* Stop after zero steps. Nonsense, but whatever. */
+ pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
+ *pfResumeExecution = false;
+ enmCtx = dbgfR3FigureEventCtx(pVCpu);
+ enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
+ break;
+ }
+
+ /*
+ * Default is to send an invalid command event.
+ */
+ default:
+ {
+ *pfResumeExecution = false;
+ enmEvent = DBGFEVENT_INVALID_COMMAND;
+ break;
+ }
+ }
+
+ /*
+ * Send the pending event.
+ */
+ Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
+ int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
+ AssertRCStmt(rc, *pfResumeExecution = true);
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS,
+ * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ PUVM pUVM = pVM->pUVM;
+ int *prcAttach = (int *)pvUser;
+ RT_NOREF(pVCpu);
+
+ if (pVM->dbgf.s.fAttached)
+ {
+ Log(("dbgfR3Attach: Debugger already attached\n"));
+ *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * The per-CPU bits.
+ */
+ for (uint32_t i = 0; i < pUVM->cCpus; i++)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[i];
+
+ pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
+ RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
+ }
+
+ /*
+ * Init of the VM -> Debugger communication part living in the global VM structure.
+ */
+ pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of the event ring buffer, increased when it gets full. */
+ pUVM->dbgf.s.idxDbgEvtWrite = 0;
+ pUVM->dbgf.s.idxDbgEvtRead = 0;
+ pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
+ pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
+ pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
+ int rc;
+ pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
+ if (pUVM->dbgf.s.paDbgEvts)
+ {
+ rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * At last, set the attached flag.
+ */
+ ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
+ *prcAttach = VINF_SUCCESS;
+ return VINF_SUCCESS;
+ }
+
+ RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
+ pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
+ }
+ RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
+ pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
+ }
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ *prcAttach = rc;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Attaches a debugger to the specified VM.
+ *
+ * Only one debugger at a time.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
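+ *
+ * Typical bring-up from a debugger front-end might look like this
+ * (illustrative sketch; pair with DBGFR3Detach when the session ends):
+ * @code
+ *     int rc = DBGFR3Attach(pUVM);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         // ... halt, step, wait for events ...
+ *         DBGFR3Detach(pUVM);
+ *     }
+ * @endcode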
+ */
+VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Call the VM, use EMT rendezvous for serialization.
+ */
+ int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
+ if (RT_SUCCESS(rc))
+ rc = rcAttach;
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS,
+ * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ if (pVCpu->idCpu == 0)
+ {
+ PUVM pUVM = (PUVM)pvUser;
+
+ /*
+ * Per-CPU cleanup.
+ */
+ for (VMCPUID i = 0; i < pUVM->cCpus; i++)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[i];
+
+ pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
+ RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
+ }
+
+ /*
+ * De-init of the VM -> Debugger communication part living in the global VM structure.
+ */
+ if (pUVM->dbgf.s.paDbgEvts)
+ {
+ MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
+ pUVM->dbgf.s.paDbgEvts = NULL;
+ }
+
+ if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
+ {
+ RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
+ pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
+ }
+
+ if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
+ {
+ RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
+ pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
+ }
+
+ if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
+ {
+ RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
+ pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
+ }
+
+ pUVM->dbgf.s.cDbgEvtMax = 0;
+ pUVM->dbgf.s.idxDbgEvtWrite = 0;
+ pUVM->dbgf.s.idxDbgEvtRead = 0;
+ pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
+ pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
+ pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
+
+ ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Detaches a debugger from the specified VM.
+ *
+ * The caller must currently be attached to the VM.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
+{
+ LogFlow(("DBGFR3Detach:\n"));
+
+ /*
+ * Validate input. The UVM handle shall be valid, the VM handle might be
+ * in the process of being destroyed already, so deal quietly with that.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ if (!VM_IS_VALID_EXT(pVM))
+ return VERR_INVALID_VM_HANDLE;
+
+ /*
+ * Check if attached.
+ */
+ if (!pVM->dbgf.s.fAttached)
+ return VERR_DBGF_NOT_ATTACHED;
+
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
+}
+
+
+/**
+ * Wait for a debug event.
+ *
+ * @returns VBox status code. Will not return VERR_INTERRUPTED.
+ * @param pUVM The user mode VM handle.
+ * @param cMillies Number of millis to wait.
+ * @param pEvent Where to store the event data.
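+ *
+ * A minimal debugger event loop could be sketched like this (illustrative
+ * only; the event handling is a placeholder):
+ * @code
+ *     DBGFEVENT Event;
+ *     for (;;)
+ *     {
+ *         int rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
+ *         if (RT_FAILURE(rc))
+ *             break;
+ *         if (Event.enmType == DBGFEVENT_HALT_DONE)
+ *         {
+ *             // inspect state, then let the vCPU run again
+ *             DBGFR3Resume(pUVM, Event.idCpu);
+ *         }
+ *     }
+ * @endcode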
+ */
+VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
+{
+ /*
+ * Check state.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
+
+ RT_BZERO(pEvent, sizeof(*pEvent));
+
+ /*
+ * Wait for an event to arrive if there are none.
+ */
+ int rc = VINF_SUCCESS;
+ uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
+ if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
+ {
+ do
+ {
+ rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
+ } while ( RT_SUCCESS(rc)
+ && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
+
+ uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
+ memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
+ ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
+ }
+
+ Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
+ return rc;
+}
+
+
+/**
+ * Halts VM execution.
+ *
+ * After calling this the VM isn't actually halted till a DBGFEVENT_HALT_DONE
+ * event arrives. Until that time it's not possible to issue any new commands.
+ *
+ * @returns VBox status code.
+ * @retval VWRN_DBGF_ALREADY_HALTED if the specified vCPU, or all vCPUs when
+ * VMCPUID_ALL is given, are already halted.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
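+ *
+ * A halt/resume round trip might look like this (illustrative sketch; error
+ * handling elided):
+ * @code
+ *     int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
+ *     // ... wait for the DBGFEVENT_HALT_DONE event via DBGFR3EventWait ...
+ *     rc = DBGFR3Resume(pUVM, VMCPUID_ALL);
+ * @endcode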
+ */
+VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
+{
+ /*
+ * Check state.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
+ AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /*
+ * Halt the requested CPUs as needed.
+ */
+ int rc;
+ if (idCpu != VMCPUID_ALL)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
+ if (!dbgfR3CpuIsHalted(pUVCpu))
+ {
+ dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VWRN_DBGF_ALREADY_HALTED;
+ }
+ else
+ {
+ rc = VWRN_DBGF_ALREADY_HALTED;
+ for (VMCPUID i = 0; i < pUVM->cCpus; i++)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[i];
+ if (!dbgfR3CpuIsHalted(pUVCpu))
+ {
+ dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
+ rc = VINF_SUCCESS;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Checks if any of the specified vCPUs have been halted by the debugger.
+ *
+ * @returns True if at least one of the specified vCPUs is halted.
+ * @returns False if none of them are halted.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
+ * at least a single vCPU is halted in the debugger.
+ */
+VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ AssertReturn(pVM->dbgf.s.fAttached, false);
+
+ return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
+}
+
+
+/**
+ * Checks if the debugger can wait for events or not.
+ *
+ * This function is only used by lazy, multiplexing debuggers. :-)
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if waitable.
+ * @retval VERR_SEM_OUT_OF_TURN if not waitable.
+ * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
+ * (not asserted) or if the handle is invalid (asserted).
+ * @retval VERR_DBGF_NOT_ATTACHED if not attached.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* Note! There is a slight race here, unfortunately. */
+ PVM pVM = pUVM->pVM;
+ if (!RT_VALID_PTR(pVM))
+ return VERR_INVALID_VM_HANDLE;
+ if (pVM->enmVMState >= VMSTATE_DESTROYING)
+ return VERR_INVALID_VM_HANDLE;
+ if (!pVM->dbgf.s.fAttached)
+ return VERR_DBGF_NOT_ATTACHED;
+
+ /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resumes VM execution.
+ *
+ * There is no receipt event on this command.
+ *
+ * @returns VBox status code.
+ * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
+ */
+VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
+{
+ /*
+ * Validate input and attachment state.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
+
+ /*
+ * Ping the halted emulation threads, telling them to run.
+ */
+ int rc = VWRN_DBGF_ALREADY_RUNNING;
+ if (idCpu != VMCPUID_ALL)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
+ if (dbgfR3CpuIsHalted(pUVCpu))
+ {
+ rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
+ AssertRC(rc);
+ }
+ }
+ else
+ {
+ for (VMCPUID i = 0; i < pUVM->cCpus; i++)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[i];
+ if (dbgfR3CpuIsHalted(pUVCpu))
+ {
+ int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
+ AssertRC(rc2);
+ if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
+ rc = rc2;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Classifies the current instruction.
+ *
+ * @returns Type of instruction.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The current CPU.
+ * @thread EMT(pVCpu)
+ */
+static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Read the instruction.
+ */
+ size_t cbRead = 0;
+ uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do minimal parsing. No real need to involve the disassembler here.
+ */
+ uint8_t *pb = abOpcode;
+ for (;;)
+ {
+ switch (*pb++)
+ {
+ default:
+ return DBGFSTEPINSTRTYPE_OTHER;
+
+ case 0xe8: /* call rel16/32 */
+ case 0x9a: /* call farptr */
+ case 0xcc: /* int3 */
+ case 0xcd: /* int xx */
+ // case 0xce: /* into */
+ return DBGFSTEPINSTRTYPE_CALL;
+
+ case 0xc2: /* ret xx */
+ case 0xc3: /* ret */
+ case 0xca: /* retf xx */
+ case 0xcb: /* retf */
+ case 0xcf: /* iret */
+ return DBGFSTEPINSTRTYPE_RET;
+
+ case 0xff:
+ if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
+ || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
+ return DBGFSTEPINSTRTYPE_CALL;
+ return DBGFSTEPINSTRTYPE_OTHER;
+
+ case 0x0f:
+ switch (*pb++)
+ {
+ case 0x05: /* syscall */
+ case 0x34: /* sysenter */
+ return DBGFSTEPINSTRTYPE_CALL;
+ case 0x07: /* sysret */
+ case 0x35: /* sysexit */
+ return DBGFSTEPINSTRTYPE_RET;
+ }
+ break;
+
+ /* Must handle some REX prefixes. So we do all normal prefixes. */
+ case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
+ case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
+ if (!CPUMIsGuestIn64BitCode(pVCpu))
+ return DBGFSTEPINSTRTYPE_OTHER;
+ break;
+
+ case 0x2e: /* CS */
+ case 0x36: /* SS */
+ case 0x3e: /* DS */
+ case 0x26: /* ES */
+ case 0x64: /* FS */
+ case 0x65: /* GS */
+ case 0x66: /* op size */
+ case 0x67: /* addr size */
+ case 0xf0: /* lock */
+ case 0xf2: /* REPNZ */
+ case 0xf3: /* REPZ */
+ break;
+ }
+ }
+ }
+
+ return DBGFSTEPINSTRTYPE_INVALID;
+}
+
+
+/**
+ * Checks if the stepping has reached a stop point.
+ *
+ * Called when raising a stepped event.
+ *
+ * @returns true if the event should be raised, false if we should take one more
+ * step first.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per CPU structure of the calling EMT.
+ * @thread EMT(pVCpu)
+ */
+static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Check valid pVCpu and that it matches the CPU one stepping.
+ */
+ if (pVCpu)
+ {
+ if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
+ {
+ /*
+ * Increase the number of steps and see if we've reached the max.
+ */
+ pVM->dbgf.s.SteppingFilter.cSteps++;
+ if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
+ {
+ /*
+ * Check PC and SP address filtering.
+ */
+ if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
+ {
+ if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
+ && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
+ return true;
+ if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
+ && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
+ < pVM->dbgf.s.SteppingFilter.cbStackPop)
+ return true;
+ }
+
+ /*
+ * Do step-over filtering separate from the step-into one.
+ */
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
+ {
+ DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
+ switch (enmType)
+ {
+ default:
+ if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
+ || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
+ break;
+ return true;
+ case DBGFSTEPINSTRTYPE_CALL:
+ if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
+ && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
+ return true;
+ pVM->dbgf.s.SteppingFilter.uCallDepth++;
+ break;
+ case DBGFSTEPINSTRTYPE_RET:
+ if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
+ {
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
+ return true;
+ /* If after return, we use the cMaxStep limit to stop the next time. */
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
+ pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
+ }
+ else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
+ pVM->dbgf.s.SteppingFilter.uCallDepth--;
+ break;
+ }
+ return false;
+ }
+ /*
+ * Filtered step-into.
+ */
+ else if ( pVM->dbgf.s.SteppingFilter.fFlags
+ & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
+ {
+ DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
+ switch (enmType)
+ {
+ default:
+ break;
+ case DBGFSTEPINSTRTYPE_CALL:
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
+ return true;
+ break;
+ case DBGFSTEPINSTRTYPE_RET:
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
+ return true;
+ /* If after return, we use the cMaxStep limit to stop the next time. */
+ if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
+ pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
+ break;
+ }
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Step Into.
+ *
+ * A single step event is generated from this command.
+ * The current implementation is not reliable, so don't rely on the event coming.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU to single step on.
+ */
+VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
+{
+ return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
+}
+
+
+/**
+ * Full fledged step.
+ *
+ * This extended stepping API allows for doing multiple steps before raising an
+ * event, helping implementing step over, step out and other more advanced
+ * features.
+ *
+ * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED
+ * or DBGFEVENT_STEPPED_HYPER event. However the stepping may be interrupted
+ * by other events, which will abort the stepping.
+ *
+ * The stop on pop area feature is for safeguarding step out.
+ *
+ * Please note though, that it will always use stepping and never breakpoints.
+ * While this allows for a much greater flexibility it can at times be rather
+ * slow.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU to single step on.
+ * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
+ * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
+ * always be specified.
+ * @param pStopPcAddr Address to stop executing at. Completely ignored
+ * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
+ * @param pStopPopAddr Stack address that SP must be lower than when
+ * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
+ * @param cbStopPop The range starting at @a pStopPopAddr which is
+ * considered to be within the same thread stack. Note
+ * that the API allows @a pStopPopAddr and @a cbStopPop
+ * to form an area that wraps around and it will
+ * consider the part starting at 0 as included.
+ * @param cMaxSteps The maximum number of steps to take. This is to
+ * prevent stepping for ever, so passing UINT32_MAX is
+ * not recommended.
+ *
+ * @remarks The two address arguments must be guest context virtual addresses,
+ * or HMA. The code doesn't make much of a point out of HMA, though.
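+ *
+ * A bounded step-over on vCPU 0 might be requested like this (illustrative
+ * sketch; assumes the vCPU is already halted in the debugger):
+ * @code
+ *     int rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER, NULL, NULL, 0, 10000);
+ *     // ... then wait for the DBGFEVENT_STEPPED event via DBGFR3EventWait ...
+ * @endcode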
+ */
+VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
+ PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
+{
+ /*
+ * Check state.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
+ AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
+ if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
+ {
+ AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
+ AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
+ }
+ AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
+ if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
+ {
+ AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
+ AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
+ AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
+ }
+
+ AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
+ PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
+ if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
+ { /* likely */ }
+ else
+ return VERR_SEM_OUT_OF_TURN;
+ Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
+
+ /*
+ * Send the emulation thread a single-step command.
+ */
+ if (fFlags == DBGF_STEP_F_INTO)
+ pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
+ else
+ pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
+ pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
+ if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
+ pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
+ else
+ pVM->dbgf.s.SteppingFilter.AddrPc = 0;
+ if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
+ {
+ pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
+ pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
+ }
+ else
+ {
+ pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
+ pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
+ }
+
+ pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
+ pVM->dbgf.s.SteppingFilter.cSteps = 0;
+ pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
+
+ Assert(dbgfR3CpuIsHalted(pUVCpu));
+ return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
+}
+
+
+/**
+ * dbgfR3EventConfigEx argument packet.
+ */
+typedef struct DBGFR3EVENTCONFIGEXARGS
+{
+ PCDBGFEVENTCONFIG paConfigs;
+ size_t cConfigs;
+ int rc;
+} DBGFR3EVENTCONFIGEXARGS;
+/** Pointer to a dbgfR3EventConfigEx argument packet. */
+typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ if (pVCpu->idCpu == 0)
+ {
+ PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
+ DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
+ size_t cConfigs = pArgs->cConfigs;
+
+ /*
+ * Apply the changes.
+ */
+ unsigned cChanges = 0;
+ for (uint32_t i = 0; i < cConfigs; i++)
+ {
+ DBGFEVENTTYPE enmType = paConfigs[i].enmType;
+ AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
+ if (paConfigs[i].fEnabled)
+ cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
+ else
+ cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
+ }
+
+ /*
+ * Inform HM about changes.
+ */
+ if (cChanges > 0)
+ {
+ if (HMIsEnabled(pVM))
+ {
+ HMR3NotifyDebugEventChanged(pVM);
+ HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ }
+ else if (VM_IS_NEM_ENABLED(pVM))
+ {
+ NEMR3NotifyDebugEventChanged(pVM);
+ NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ }
+ }
+ }
+ else if (HMIsEnabled(pVM))
+ HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ else if (VM_IS_NEM_ENABLED(pVM))
+ NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Configures (enables/disables) multiple selectable debug events.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param paConfigs The event to configure and their new state.
+ * @param cConfigs Number of entries in @a paConfigs.
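+ *
+ * Enabling a single selectable event might look like this (illustrative
+ * sketch; the event type is merely an example):
+ * @code
+ *     DBGFEVENTCONFIG EvtCfg = { DBGFEVENT_TRIPLE_FAULT, true };
+ *     int rc = DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
+ * @endcode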
+ */
+VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
+{
+ /*
+ * Validate input.
+ */
+ size_t i = cConfigs;
+ while (i-- > 0)
+ {
+ AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
+ AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
+ }
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
+ * can sync their data and execution with the new debug state.
+ */
+ DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
+ dbgfR3EventConfigEx, &Args);
+ if (RT_SUCCESS(rc))
+ rc = Args.rc;
+ return rc;
+}
+
+
+/**
+ * Enables or disables a selectable debug event.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param enmEvent The selectable debug event.
+ * @param fEnabled The new state.
+ */
+VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
+{
+ /*
+ * Convert to an array call.
+ */
+ DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
+ return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
+}
+
+
+/**
+ * Checks if the given selectable event is enabled.
+ *
+ * @returns true if enabled, false if not or invalid input.
+ * @param pUVM The user mode VM handle.
+ * @param enmEvent The selectable debug event.
+ * @sa DBGFR3EventQuery
+ */
+VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
+ && enmEvent < DBGFEVENT_END, false);
+ Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
+ || enmEvent == DBGFEVENT_BREAKPOINT
+ || enmEvent == DBGFEVENT_BREAKPOINT_IO
+ || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
+
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+
+ /*
+ * Check the event status.
+ */
+ return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
+}
+
+
+/**
+ * Queries the status of a set of events.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param paConfigs The events to query and where to return the state.
+ * @param cConfigs The number of elements in @a paConfigs.
+ * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
+ */
+VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ for (size_t i = 0; i < cConfigs; i++)
+ {
+ DBGFEVENTTYPE enmType = paConfigs[i].enmType;
+ AssertReturn( enmType >= DBGFEVENT_HALT_DONE
+ && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
+ Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
+ || enmType == DBGFEVENT_BREAKPOINT
+ || enmType == DBGFEVENT_BREAKPOINT_IO
+ || enmType == DBGFEVENT_BREAKPOINT_MMIO);
+ paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * dbgfR3InterruptConfigEx argument packet.
+ */
+typedef struct DBGFR3INTERRUPTCONFIGEXARGS
+{
+ PCDBGFINTERRUPTCONFIG paConfigs;
+ size_t cConfigs;
+ int rc;
+} DBGFR3INTERRUPTCONFIGEXARGS;
+/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
+typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS,
+ * Worker for DBGFR3InterruptConfigEx.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ if (pVCpu->idCpu == 0)
+ {
+ PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
+ PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
+ size_t cConfigs = pArgs->cConfigs;
+
+ /*
+ * Apply the changes.
+ */
+ bool fChanged = false;
+ bool fThis;
+ for (uint32_t i = 0; i < cConfigs; i++)
+ {
+ /*
+ * Hardware interrupts.
+ */
+ if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
+ {
+ fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
+ if (fThis)
+ {
+ Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
+ pVM->dbgf.s.cHardIntBreakpoints++;
+ }
+ }
+ else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
+ {
+ fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
+ if (fThis)
+ {
+ Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
+ pVM->dbgf.s.cHardIntBreakpoints--;
+ }
+ }
+
+ /*
+ * Software interrupts.
+ */
+ if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
+ {
+ fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
+ if (fThis)
+ {
+ Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
+ pVM->dbgf.s.cSoftIntBreakpoints++;
+ }
+ }
+ else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
+ {
+ fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
+ if (fThis)
+ {
+ Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
+ pVM->dbgf.s.cSoftIntBreakpoints--;
+ }
+ }
+ }
+
+ /*
+ * Update the event bitmap entries.
+ */
+ if (pVM->dbgf.s.cHardIntBreakpoints > 0)
+ fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
+ else
+ fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
+
+ if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
+ fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
+ else
+ fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
+
+ /*
+ * Inform HM about changes.
+ */
+ if (fChanged)
+ {
+ if (HMIsEnabled(pVM))
+ {
+ HMR3NotifyDebugEventChanged(pVM);
+ HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ }
+ else if (VM_IS_NEM_ENABLED(pVM))
+ {
+ NEMR3NotifyDebugEventChanged(pVM);
+ NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ }
+ }
+ }
+ else if (HMIsEnabled(pVM))
+ HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+ else if (VM_IS_NEM_ENABLED(pVM))
+ NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Changes the interrupt interception setup for a set of interrupts.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param paConfigs The interrupts to configure and their new interception states.
+ * @param cConfigs The number of elements in @a paConfigs.
+ * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
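+ *
+ * Intercepting hardware interrupt 0x0e and software interrupt 0x80 in one
+ * call might look like this (illustrative sketch; vectors are examples):
+ * @code
+ *     DBGFINTERRUPTCONFIG aCfgs[] =
+ *     {
+ *         { 0x0e, DBGFINTERRUPTSTATE_ENABLED,    DBGFINTERRUPTSTATE_DONT_TOUCH },
+ *         { 0x80, DBGFINTERRUPTSTATE_DONT_TOUCH, DBGFINTERRUPTSTATE_ENABLED    },
+ *     };
+ *     int rc = DBGFR3InterruptConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
+ * @endcode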
+ */
+VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
+{
+ /*
+ * Validate input.
+ */
+ size_t i = cConfigs;
+ while (i-- > 0)
+ {
+ AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
+ AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
+ }
+
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
+ * can sync their data and execution with the new debug state.
+ */
+ DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
+ dbgfR3InterruptConfigEx, &Args);
+ if (RT_SUCCESS(rc))
+ rc = Args.rc;
+ return rc;
+}
+
+
+/**
+ * Configures interception of a hardware interrupt.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param iInterrupt The interrupt number.
+ * @param fEnabled Whether interception is enabled or not.
+ * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
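+ *
+ * For instance, to intercept hardware interrupt vector 0x0d (illustrative
+ * sketch; the vector is merely an example):
+ * @code
+ *     int rc = DBGFR3InterruptHardwareConfig(pUVM, 0x0d, true);
+ * @endcode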
+ */
+VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
+{
+ /*
+ * Convert to DBGFR3InterruptConfigEx call.
+ */
+ DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
+ return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
+}
+
+
+/**
+ * Configures interception of a software interrupt.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param iInterrupt The interrupt number.
+ * @param fEnabled Whether interception is enabled or not.
+ * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
+ */
+VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
+{
+ /*
+ * Convert to DBGFR3InterruptConfigEx call.
+ */
+ DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
+ return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
+}
+
+
+/**
+ * Checks whether interception is enabled for a hardware interrupt.
+ *
+ * @returns true if enabled, false if not or invalid input.
+ * @param pUVM The user mode VM handle.
+ * @param iInterrupt The interrupt number.
+ * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
+ * DBGF_IS_SOFTWARE_INT_ENABLED
+ */
+VMMR3DECL(bool) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+
+ /*
+ * Check it.
+ */
+ return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
+}
+
+
+/**
+ * Checks whether interception is enabled for a software interrupt.
+ *
+ * @returns true if enabled, false if not or invalid input.
+ * @param pUVM The user mode VM handle.
+ * @param iInterrupt The interrupt number.
+ * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
+ * DBGF_IS_HARDWARE_INT_ENABLED,
+ */
+VMMR3DECL(bool) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+
+ /*
+ * Check it.
+ */
+ return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
+}
+
+
+
+/**
+ * Call this to single step programmatically.
+ *
+ * You must pass down the return code to the EM loop! That's
+ * where the actual single stepping takes place (at least in the
+ * current implementation).
+ *
+ * @returns VINF_EM_DBG_STEP
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ *
+ * @thread VCpu EMT
+ * @internal
+ */
+VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ pVCpu->dbgf.s.fSingleSteppingRaw = true;
+ return VINF_EM_DBG_STEP;
+}
+
+
+/**
+ * Inject an NMI into a running VM (only VCPU 0!)
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param idCpu The ID of the CPU to inject the NMI on.
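+ *
+ * Illustrative call (per the note above, only vCPU 0 makes sense today):
+ * @code
+ *     int rc = DBGFR3InjectNMI(pUVM, 0);
+ * @endcode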
+ */
+VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /** @todo Implement generic NMI injection. */
+ /** @todo NEM: NMI injection */
+ if (!HMIsEnabled(pVM))
+ return VERR_NOT_SUP_BY_NEM;
+
+ VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFAddr.cpp b/src/VBox/VMM/VMMR3/DBGFAddr.cpp
new file mode 100644
index 00000000..faf6b3cd
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFAddr.cpp
@@ -0,0 +1,498 @@
+/* $Id: DBGFAddr.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Mixed Address Methods.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+
+
+/**
+ * Common worker for DBGFR3AddrFromSelOff and DBGFR3AddrFromSelInfoOff.
+ */
+static int dbgfR3AddrFromSelInfoOffWorker(PDBGFADDRESS pAddress, PCDBGFSELINFO pSelInfo, RTUINTPTR off)
+{
+ if (pSelInfo->fFlags & (DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
+ return pSelInfo->fFlags & DBGFSELINFO_FLAGS_NOT_PRESENT
+ ? VERR_SELECTOR_NOT_PRESENT
+ : VERR_INVALID_SELECTOR;
+
+ /** @todo This all goes voodoo in long mode. */
+ /* check limit. */
+ if (DBGFSelInfoIsExpandDown(pSelInfo))
+ {
+ if ( !pSelInfo->u.Raw.Gen.u1Granularity
+ && off > UINT32_C(0xffff))
+ return VERR_OUT_OF_SELECTOR_BOUNDS;
+ if (off <= pSelInfo->cbLimit)
+ return VERR_OUT_OF_SELECTOR_BOUNDS;
+ }
+ else if (off > pSelInfo->cbLimit)
+ return VERR_OUT_OF_SELECTOR_BOUNDS;
+
+ pAddress->FlatPtr = pSelInfo->GCPtrBase + off;
+
+ /** @todo fix all these selector tests! */
+ if ( !pSelInfo->GCPtrBase
+ && pSelInfo->u.Raw.Gen.u1Granularity
+ && pSelInfo->u.Raw.Gen.u1DefBig)
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FLAT;
+ else if (pSelInfo->cbLimit <= UINT32_C(0xffff))
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FAR16;
+ else if (pSelInfo->cbLimit <= UINT32_C(0xffffffff))
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FAR32;
+ else
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FAR64;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates a mixed address from a Sel:off pair.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU ID.
+ * @param pAddress Where to store the mixed address.
+ * @param Sel The selector part.
+ * @param off The offset part.
+ */
+VMMR3DECL(int) DBGFR3AddrFromSelOff(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, RTSEL Sel, RTUINTPTR off)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
+
+ pAddress->Sel = Sel;
+ pAddress->off = off;
+ if (Sel != DBGF_SEL_FLAT)
+ {
+ DBGFSELINFO SelInfo;
+ int rc = DBGFR3SelQueryInfo(pUVM, idCpu, Sel, DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = dbgfR3AddrFromSelInfoOffWorker(pAddress, &SelInfo, off);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ else
+ {
+ pAddress->FlatPtr = off;
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FLAT;
+ }
+ pAddress->fFlags |= DBGFADDRESS_FLAGS_VALID;
+
+ return VINF_SUCCESS;
+}
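+
+#if 0 /* Illustrative usage sketch, not part of the original change: how a
+       debugger front-end might resolve a Sel:off pair into a flat address.
+       The selector, offset and helper name are made up for the example. */
+static int dbgfAddrFromSelOffUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS Addr;
+    /* Resolve 0x0010:0x00001000 in the context of virtual CPU 0. */
+    int rc = DBGFR3AddrFromSelOff(pUVM, 0 /*idCpu*/, &Addr, 0x0010, UINT32_C(0x00001000));
+    if (RT_SUCCESS(rc))
+        Log(("Sel:off resolved to flat address %RGv\n", Addr.FlatPtr));
+    return rc;
+}
+#endif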
+
+
+/**
+ * Creates a mixed address from selector info and an offset into the segment
+ * described by it.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to store the mixed address.
+ * @param pSelInfo The selector info.
+ * @param off The offset part.
+ */
+VMMR3DECL(int) DBGFR3AddrFromSelInfoOff(PUVM pUVM, PDBGFADDRESS pAddress, PCDBGFSELINFO pSelInfo, RTUINTPTR off)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ pAddress->Sel = pSelInfo->Sel;
+ pAddress->off = off;
+ int rc = dbgfR3AddrFromSelInfoOffWorker(pAddress, pSelInfo, off);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pAddress->fFlags |= DBGFADDRESS_FLAGS_VALID;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates a mixed address from a flat address.
+ *
+ * @returns pAddress.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to store the mixed address.
+ * @param FlatPtr The flat pointer.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromFlat(PUVM pUVM, PDBGFADDRESS pAddress, RTGCUINTPTR FlatPtr)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ pAddress->Sel = DBGF_SEL_FLAT;
+ pAddress->off = FlatPtr;
+ pAddress->FlatPtr = FlatPtr;
+ pAddress->fFlags = DBGFADDRESS_FLAGS_FLAT | DBGFADDRESS_FLAGS_VALID;
+ return pAddress;
+}
+
+
+/**
+ * Creates a mixed address from a guest physical address.
+ *
+ * @returns pAddress.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to store the mixed address.
+ * @param PhysAddr The guest physical address.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromPhys(PUVM pUVM, PDBGFADDRESS pAddress, RTGCPHYS PhysAddr)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ pAddress->Sel = DBGF_SEL_FLAT;
+ pAddress->off = PhysAddr;
+ pAddress->FlatPtr = PhysAddr;
+ pAddress->fFlags = DBGFADDRESS_FLAGS_PHYS | DBGFADDRESS_FLAGS_VALID;
+ return pAddress;
+}
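+
+#if 0 /* Illustrative usage sketch, not part of the original change: the
+       constructors return pAddress, so they compose directly with other
+       DBGF calls. The addresses and helper name are made up. */
+static void dbgfAddrConstructorsUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS FlatAddr, PhysAddr;
+    DBGFR3AddrFromFlat(pUVM, &FlatAddr, UINT64_C(0xffffffff81000000)); /* guest virtual */
+    DBGFR3AddrFromPhys(pUVM, &PhysAddr, UINT64_C(0x00100000));         /* guest physical */
+    Assert(DBGFADDRESS_IS_VALID(&FlatAddr) && DBGFADDRESS_IS_VALID(&PhysAddr));
+}
+#endif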
+
+
+/**
+ * Creates a mixed address from a flat host ring-0 address.
+ *
+ * @returns pAddress
+ * @param pAddress Where to store the mixed address.
+ * @param R0Ptr The host ring-0 address.
+ */
+VMMR3_INT_DECL(PDBGFADDRESS) DBGFR3AddrFromHostR0(PDBGFADDRESS pAddress, RTR0UINTPTR R0Ptr)
+{
+ pAddress->FlatPtr = R0Ptr;
+ pAddress->off = R0Ptr;
+ pAddress->fFlags = DBGFADDRESS_FLAGS_RING0 | DBGFADDRESS_FLAGS_VALID;
+ pAddress->Sel = DBGF_SEL_FLAT;
+ return pAddress;
+}
+
+
+/**
+ * Checks if the specified address is valid (checks the structure pointer too).
+ *
+ * @returns true if valid.
+ * @returns false if invalid.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address to validate.
+ */
+VMMR3DECL(bool) DBGFR3AddrIsValid(PUVM pUVM, PCDBGFADDRESS pAddress)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ if (!RT_VALID_PTR(pAddress))
+ return false;
+ if (!DBGFADDRESS_IS_VALID(pAddress))
+ return false;
+ /* more? */
+ return true;
+}
+
+
+/**
+ * Called on the EMT for the VCpu.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pAddress The address.
+ * @param pGCPhys Where to return the physical address.
+ */
+static DECLCALLBACK(int) dbgfR3AddrToPhysOnVCpu(PVMCPU pVCpu, PCDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ /* This is just a wrapper because we cannot pass FlatPtr thru VMR3ReqCall directly. */
+ PGMPTWALK Walk;
+ int const rc = PGMGstGetPage(pVCpu, pAddress->FlatPtr, &Walk);
+ *pGCPhys = Walk.GCPhys;
+ return rc;
+}
+
+
+/**
+ * Converts an address to a guest physical address.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_PARAMETER if the address is invalid.
+ * @retval VERR_INVALID_STATE if the VM is being terminated or if the virtual
+ * CPU handle is invalid.
+ * @retval VERR_NOT_SUPPORTED if the type of address cannot be converted.
+ * @retval VERR_PAGE_NOT_PRESENT
+ * @retval VERR_PAGE_TABLE_NOT_PRESENT
+ * @retval VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
+ * @retval VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to convert virtual
+ * addresses.
+ * @param pAddress The address.
+ * @param pGCPhys Where to return the physical address.
+ */
+VMMR3DECL(int) DBGFR3AddrToPhys(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
+{
+ /*
+ * Parameter validation.
+ */
+ AssertPtr(pGCPhys);
+ *pGCPhys = NIL_RTGCPHYS;
+ AssertPtr(pAddress);
+ AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
+
+ /*
+ * Convert by address type.
+ */
+ int rc;
+ if (pAddress->fFlags & DBGFADDRESS_FLAGS_PHYS)
+ {
+ *pGCPhys = pAddress->FlatPtr;
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ if (VMCPU_IS_EMT(pVCpu))
+ rc = dbgfR3AddrToPhysOnVCpu(pVCpu, pAddress, pGCPhys);
+ else
+ rc = VMR3ReqPriorityCallWaitU(pUVM, pVCpu->idCpu,
+ (PFNRT)dbgfR3AddrToPhysOnVCpu, 3, pVCpu, pAddress, pGCPhys);
+ }
+ return rc;
+}
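+
+#if 0 /* Illustrative usage sketch, not part of the original change:
+       translating a flat guest virtual address to a guest physical one via
+       the guest's current paging mode. Values and helper name are made up. */
+static int dbgfAddrToPhysUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS Addr;
+    DBGFR3AddrFromFlat(pUVM, &Addr, UINT64_C(0xffffffff81000000));
+
+    RTGCPHYS GCPhys;
+    int rc = DBGFR3AddrToPhys(pUVM, 0 /*idCpu*/, &Addr, &GCPhys);
+    if (RT_SUCCESS(rc))
+        Log(("Maps to %RGp\n", GCPhys));
+    return rc; /* e.g. VERR_PAGE_NOT_PRESENT if the page isn't mapped. */
+}
+#endif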
+
+
+/**
+ * Converts an address to a host physical address.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_PARAMETER if the address is invalid.
+ * @retval VERR_INVALID_STATE if the VM is being terminated or if the virtual
+ * CPU handle is invalid.
+ * @retval VERR_NOT_SUPPORTED if the type of address cannot be converted.
+ * @retval VERR_PAGE_NOT_PRESENT
+ * @retval VERR_PAGE_TABLE_NOT_PRESENT
+ * @retval VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
+ * @retval VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to convert virtual
+ * addresses.
+ * @param pAddress The address.
+ * @param pHCPhys Where to return the physical address.
+ */
+VMMR3DECL(int) DBGFR3AddrToHostPhys(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTHCPHYS pHCPhys)
+{
+ /*
+ * Parameter validation.
+ */
+ AssertPtr(pHCPhys);
+ *pHCPhys = NIL_RTHCPHYS;
+ AssertPtr(pAddress);
+ AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
+
+ /*
+ * Convert it.
+ */
+ RTGCPHYS GCPhys;
+ int rc = DBGFR3AddrToPhys(pUVM, idCpu, pAddress, &GCPhys);
+ if (RT_SUCCESS(rc))
+ rc = PGMPhysGCPhys2HCPhys(pVM, pAddress->FlatPtr, pHCPhys);
+ return rc;
+}
+
+
+/**
+ * Called on the EMT for the VCpu.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context.
+ * @param pAddress The address.
+ * @param fReadOnly Whether returning a read-only page is fine or not.
+ * @param ppvR3Ptr Where to return the address.
+ */
+static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly,
+ void **ppvR3Ptr)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ Assert(idCpu == VMMGetCpuId(pVM));
+
+ /*
+ * This is a tad ugly, but it gets the job done.
+ */
+ int rc;
+ PGMPAGEMAPLOCK Lock;
+ if (pAddress->fFlags & DBGFADDRESS_FLAGS_PHYS)
+ {
+ if (fReadOnly)
+ rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, pAddress->FlatPtr, (void const **)ppvR3Ptr, &Lock);
+ else
+ rc = PGMPhysGCPhys2CCPtr(pVM, pAddress->FlatPtr, ppvR3Ptr, &Lock);
+ }
+ else
+ {
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ if (fReadOnly)
+ rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pAddress->FlatPtr, (void const **)ppvR3Ptr, &Lock);
+ else
+ rc = PGMPhysGCPtr2CCPtr(pVCpu, pAddress->FlatPtr, ppvR3Ptr, &Lock);
+ }
+ if (RT_SUCCESS(rc))
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ return rc;
+}
+
+
+
+
+/**
+ * Converts an address to a volatile host virtual address.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_PARAMETER if the address is invalid.
+ * @retval VERR_INVALID_STATE if the VM is being terminated or if the virtual
+ * CPU handle is invalid.
+ * @retval VERR_NOT_SUPPORTED if the type of address cannot be converted.
+ * @retval VERR_PAGE_NOT_PRESENT
+ * @retval VERR_PAGE_TABLE_NOT_PRESENT
+ * @retval VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
+ * @retval VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to convert virtual
+ * addresses.
+ * @param pAddress The address.
+ * @param fReadOnly Whether returning a read-only page is fine or not.
+ * If set to false, the page may have to be made
+ * writable before we return.
+ * @param ppvR3Ptr Where to return the address.
+ */
+VMMR3DECL(int) DBGFR3AddrToVolatileR3Ptr(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly, void **ppvR3Ptr)
+{
+ /*
+ * Parameter validation.
+ */
+ AssertPtr(ppvR3Ptr);
+ *ppvR3Ptr = NULL;
+ AssertPtr(pAddress);
+ AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
+
+ /*
+ * Convert it.
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3AddrToVolatileR3PtrOnVCpu, 5,
+ pUVM, idCpu, pAddress, fReadOnly, ppvR3Ptr);
+}
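+
+#if 0 /* Illustrative usage sketch, not part of the original change: mapping
+       a guest physical address to a host pointer. As the name says, the
+       pointer is volatile - the page mapping lock is already released on
+       return - so it is only good for transient, best-effort debugger
+       accesses. The address and helper name are made up. */
+static int dbgfAddrToVolatileR3PtrUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS Addr;
+    DBGFR3AddrFromPhys(pUVM, &Addr, UINT64_C(0x1000));
+
+    void *pvPage;
+    int rc = DBGFR3AddrToVolatileR3Ptr(pUVM, 0 /*idCpu*/, &Addr, true /*fReadOnly*/, &pvPage);
+    if (RT_SUCCESS(rc))
+        Log(("First byte: %#x\n", *(uint8_t *)pvPage));
+    return rc;
+}
+#endif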
+
+
+/**
+ * Adds an offset to an address.
+ *
+ * @returns pAddress.
+ *
+ * @param pAddress The address.
+ * @param uAddend How much to add.
+ *
+ * @remarks No address space or segment limit checks are performed.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrAdd(PDBGFADDRESS pAddress, RTGCUINTPTR uAddend)
+{
+ /*
+ * Parameter validation.
+ */
+ AssertPtrReturn(pAddress, NULL);
+ AssertReturn(DBGFADDRESS_IS_VALID(pAddress), NULL);
+
+ /*
+ * Add the stuff.
+ */
+ pAddress->off += uAddend;
+ pAddress->FlatPtr += uAddend;
+
+ return pAddress;
+}
+
+
+/**
+ * Subtracts an offset from an address.
+ *
+ * @returns pAddress.
+ *
+ * @param pAddress The address.
+ * @param uSubtrahend How much to subtract.
+ *
+ * @remarks No address space or segment limit checks are performed.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrSub(PDBGFADDRESS pAddress, RTGCUINTPTR uSubtrahend)
+{
+ /*
+ * Parameter validation.
+ */
+ AssertPtrReturn(pAddress, NULL);
+ AssertReturn(DBGFADDRESS_IS_VALID(pAddress), NULL);
+
+ /*
+ * Subtract the stuff.
+ */
+ pAddress->off -= uSubtrahend;
+ pAddress->FlatPtr -= uSubtrahend;
+
+ return pAddress;
+}
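+
+#if 0 /* Illustrative usage sketch, not part of the original change:
+       advancing an address page by page with DBGFR3AddrAdd. Note that no
+       segment limit checking is performed. Helper name is made up. */
+static void dbgfAddrAddUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS Addr;
+    DBGFR3AddrFromFlat(pUVM, &Addr, 0);
+    for (unsigned iPage = 0; iPage < 16; iPage++)
+    {
+        /* ... inspect the page at Addr here ... */
+        DBGFR3AddrAdd(&Addr, 0x1000 /* assuming 4 KiB guest pages */);
+    }
+}
+#endif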
+
diff --git a/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp b/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
new file mode 100644
index 00000000..6b409436
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
@@ -0,0 +1,1367 @@
+/* $Id: DBGFAddrSpace.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Address Space Management.
+ */
+
+/*
+ * Copyright (C) 2008-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_addr_space DBGFAddrSpace - Address Space Management
+ *
+ * What's an address space? It's mainly a convenient way of stuffing
+ * module segments and ad-hoc symbols together. It will also help out
+ * when the debugger gets extended to deal with user processes later.
+ *
+ * There are two standard address spaces that will always be present:
+ * - The physical address space.
+ * - The global virtual address space.
+ *
+ * Additional address spaces will be added and removed at runtime for
+ * guest processes. The global virtual address space will be used to
+ * track the kernel parts of the OS, or at least the bits of the kernel
+ * that are part of all address spaces (Mac OS X and 4G/4G patched Linux).
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/mm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/env.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/param.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Address space database node.
+ */
+typedef struct DBGFASDBNODE
+{
+ /** The node core for DBGF::AsHandleTree, the key is the address space handle. */
+ AVLPVNODECORE HandleCore;
+ /** The node core for DBGF::AsPidTree, the key is the process id. */
+ AVLU32NODECORE PidCore;
+ /** The node core for DBGF::AsNameSpace, the string is the address space name. */
+ RTSTRSPACECORE NameCore;
+
+} DBGFASDBNODE;
+/** Pointer to an address space database node. */
+typedef DBGFASDBNODE *PDBGFASDBNODE;
+
+
+/**
+ * For dbgfR3AsLoadImageOpenData and dbgfR3AsLoadMapOpenData.
+ */
+typedef struct DBGFR3ASLOADOPENDATA
+{
+ const char *pszModName;
+ RTGCUINTPTR uSubtrahend;
+ uint32_t fFlags;
+ RTDBGMOD hMod;
+} DBGFR3ASLOADOPENDATA;
+
+#if 0 /* unused */
+/**
+ * Callback for dbgfR3AsSearchPath and dbgfR3AsSearchEnvPath.
+ *
+ * @returns VBox status code. If success, then the search is completed.
+ * @param pszFilename The file name under evaluation.
+ * @param pvUser The user argument.
+ */
+typedef int FNDBGFR3ASSEARCHOPEN(const char *pszFilename, void *pvUser);
+/** Pointer to a FNDBGFR3ASSEARCHOPEN. */
+typedef FNDBGFR3ASSEARCHOPEN *PFNDBGFR3ASSEARCHOPEN;
+#endif
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Locks the address space database for writing. */
+#define DBGF_AS_DB_LOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestWrite((pUVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the address space database after writing. */
+#define DBGF_AS_DB_UNLOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseWrite((pUVM)->dbgf.s.hAsDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Locks the address space database for reading. */
+#define DBGF_AS_DB_LOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestRead((pUVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the address space database after reading. */
+#define DBGF_AS_DB_UNLOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseRead((pUVM)->dbgf.s.hAsDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+
+
+/**
+ * Initializes the address space parts of DBGF.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3AsInit(PUVM pUVM)
+{
+ Assert(pUVM->pVM);
+
+ /*
+ * Create the semaphore.
+ */
+ int rc = RTSemRWCreate(&pUVM->dbgf.s.hAsDbLock);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create the debugging config instance and set it up, defaulting to
+ * deferred loading in order to keep things fast.
+ */
+ rc = RTDbgCfgCreate(&pUVM->dbgf.s.hDbgCfg, "VBOXDBG_", true /*fNativePaths*/);
+ AssertRCReturn(rc, rc);
+ rc = RTDbgCfgChangeUInt(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_FLAGS, RTDBGCFGOP_PREPEND,
+ RTDBGCFG_FLAGS_DEFERRED);
+ AssertRCReturn(rc, rc);
+
+ static struct
+ {
+ RTDBGCFGPROP enmProp;
+ const char *pszEnvName;
+ const char *pszCfgName;
+ } const s_aProps[] =
+ {
+ { RTDBGCFGPROP_FLAGS, "VBOXDBG_FLAGS", "Flags" },
+ { RTDBGCFGPROP_PATH, "VBOXDBG_PATH", "Path" },
+ { RTDBGCFGPROP_SUFFIXES, "VBOXDBG_SUFFIXES", "Suffixes" },
+ { RTDBGCFGPROP_SRC_PATH, "VBOXDBG_SRC_PATH", "SrcPath" },
+ };
+ PCFGMNODE pCfgDbgf = CFGMR3GetChild(CFGMR3GetRootU(pUVM), "/DBGF");
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aProps); i++)
+ {
+ char szEnvValue[8192];
+ rc = RTEnvGetEx(RTENV_DEFAULT, s_aProps[i].pszEnvName, szEnvValue, sizeof(szEnvValue), NULL);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, s_aProps[i].enmProp, RTDBGCFGOP_PREPEND, szEnvValue);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: %s=%s -> %Rrc", s_aProps[i].pszEnvName, szEnvValue, rc);
+ }
+ else if (rc != VERR_ENV_VAR_NOT_FOUND)
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: Error querying env.var. %s: %Rrc", s_aProps[i].pszEnvName, rc);
+
+ char *pszCfgValue;
+ rc = CFGMR3QueryStringAllocDef(pCfgDbgf, s_aProps[i].pszCfgName, &pszCfgValue, NULL);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: Querying /DBGF/%s -> %Rrc", s_aProps[i].pszCfgName, rc);
+ if (pszCfgValue)
+ {
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, s_aProps[i].enmProp, RTDBGCFGOP_PREPEND, pszCfgValue);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: /DBGF/%s=%s -> %Rrc", s_aProps[i].pszCfgName, pszCfgValue, rc);
+ MMR3HeapFree(pszCfgValue);
+ }
+ }
+
+ /*
+ * Prepend the NoArch and VBoxDbgSyms directories to the path.
+ */
+ char szPath[RTPATH_MAX];
+ rc = RTPathAppPrivateNoArch(szPath, sizeof(szPath));
+ AssertRCReturn(rc, rc);
+#ifdef RT_OS_DARWIN
+ rc = RTPathAppend(szPath, sizeof(szPath), "../Resources/VBoxDbgSyms/");
+#else
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_PATH, RTDBGCFGOP_PREPEND, szPath);
+ AssertRCReturn(rc, rc);
+
+ rc = RTPathAppend(szPath, sizeof(szPath), "VBoxDbgSyms/");
+#endif
+ AssertRCReturn(rc, rc);
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_PATH, RTDBGCFGOP_PREPEND, szPath);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create the standard address spaces.
+ */
+ RTDBGAS hDbgAs;
+ rc = RTDbgAsCreate(&hDbgAs, 0, RTGCPTR_MAX, "Global");
+ AssertRCReturn(rc, rc);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
+ AssertRCReturn(rc, rc);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_GLOBAL)] = hDbgAs;
+
+ RTDbgAsRetain(hDbgAs);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_KERNEL)] = hDbgAs;
+
+ rc = RTDbgAsCreate(&hDbgAs, 0, RTGCPHYS_MAX, "Physical");
+ AssertRCReturn(rc, rc);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
+ AssertRCReturn(rc, rc);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_PHYS)] = hDbgAs;
+
+ rc = RTDbgAsCreate(&hDbgAs, 0, RTRCPTR_MAX, "HyperRawMode");
+ AssertRCReturn(rc, rc);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
+ AssertRCReturn(rc, rc);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)] = hDbgAs;
+ RTDbgAsRetain(hDbgAs);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC_AND_GC_GLOBAL)] = hDbgAs;
+
+ rc = RTDbgAsCreate(&hDbgAs, 0, RTR0PTR_MAX, "HyperRing0");
+ AssertRCReturn(rc, rc);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
+ AssertRCReturn(rc, rc);
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_R0)] = hDbgAs;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Callback used by dbgfR3AsTerm / RTAvlPVDestroy to release an address space.
+ *
+ * @returns 0.
+ * @param pNode The address space database node.
+ * @param pvIgnore NULL.
+ */
+static DECLCALLBACK(int) dbgfR3AsTermDestroyNode(PAVLPVNODECORE pNode, void *pvIgnore)
+{
+ PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)pNode;
+ RTDbgAsRelease((RTDBGAS)pDbNode->HandleCore.Key);
+ pDbNode->HandleCore.Key = NIL_RTDBGAS;
+ /* Don't bother freeing it here as MM will free it soon and MM is much
+ faster at it when doing it wholesale instead of piecemeal. */
+ NOREF(pvIgnore);
+ return 0;
+}
+
+
+/**
+ * Terminates the address space parts of DBGF.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+void dbgfR3AsTerm(PUVM pUVM)
+{
+ /*
+ * Destroy the semaphore.
+ */
+ int rc = RTSemRWDestroy(pUVM->dbgf.s.hAsDbLock);
+ AssertRC(rc);
+ pUVM->dbgf.s.hAsDbLock = NIL_RTSEMRW;
+
+ /*
+ * Release all the address spaces.
+ */
+ RTAvlPVDestroy(&pUVM->dbgf.s.AsHandleTree, dbgfR3AsTermDestroyNode, NULL);
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.ahAsAliases); i++)
+ {
+ RTDbgAsRelease(pUVM->dbgf.s.ahAsAliases[i]);
+ pUVM->dbgf.s.ahAsAliases[i] = NIL_RTDBGAS;
+ }
+
+ /*
+ * Release the reference to the debugging config.
+ */
+ rc = RTDbgCfgRelease(pUVM->dbgf.s.hDbgCfg);
+ AssertRC(rc);
+}
+
+
+/**
+ * Relocates the RC address space.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param offDelta The relocation delta.
+ */
+void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta)
+{
+ /*
+ * We will relocate the raw-mode context modules by offDelta if they have
+ * been injected into the DBGF_AS_RC map.
+ */
+ if ( pUVM->dbgf.s.afAsAliasPopuplated[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)]
+ && offDelta != 0)
+ {
+ RTDBGAS hAs = pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)];
+
+ /* Take a snapshot of the modules as we might have overlapping
+ addresses between the previous and new mapping. */
+ RTDbgAsLockExcl(hAs);
+ uint32_t cModules = RTDbgAsModuleCount(hAs);
+ if (cModules > 0 && cModules < _4K)
+ {
+ struct DBGFASRELOCENTRY
+ {
+ RTDBGMOD hDbgMod;
+ RTRCPTR uOldAddr;
+ } *paEntries = (struct DBGFASRELOCENTRY *)RTMemTmpAllocZ(sizeof(paEntries[0]) * cModules);
+ if (paEntries)
+ {
+ /* Snapshot. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ paEntries[i].hDbgMod = RTDbgAsModuleByIndex(hAs, i);
+ AssertLogRelMsg(paEntries[i].hDbgMod != NIL_RTDBGMOD, ("iModule=%#x\n", i));
+
+ RTDBGASMAPINFO aMappings[1] = { { 0, 0 } };
+ uint32_t cMappings = 1;
+ int rc = RTDbgAsModuleQueryMapByIndex(hAs, i, &aMappings[0], &cMappings, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc) && cMappings == 1 && aMappings[0].iSeg == NIL_RTDBGSEGIDX)
+ paEntries[i].uOldAddr = (RTRCPTR)aMappings[0].Address;
+ else
+ AssertLogRelMsgFailed(("iModule=%#x rc=%Rrc cMappings=%#x.\n", i, rc, cMappings));
+ }
+
+ /* Unlink them. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ int rc = RTDbgAsModuleUnlink(hAs, paEntries[i].hDbgMod);
+ AssertLogRelMsg(RT_SUCCESS(rc), ("iModule=%#x rc=%Rrc hDbgMod=%p\n", i, rc, paEntries[i].hDbgMod));
+ }
+
+ /* Link them at the new locations. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ RTRCPTR uNewAddr = paEntries[i].uOldAddr + offDelta;
+ int rc = RTDbgAsModuleLink(hAs, paEntries[i].hDbgMod, uNewAddr,
+ RTDBGASLINK_FLAGS_REPLACE);
+ AssertLogRelMsg(RT_SUCCESS(rc),
+ ("iModule=%#x rc=%Rrc hDbgMod=%p %RRv -> %RRv\n", i, rc, paEntries[i].hDbgMod,
+ paEntries[i].uOldAddr, uNewAddr));
+ RTDbgModRelease(paEntries[i].hDbgMod);
+ }
+
+ RTMemTmpFree(paEntries);
+ }
+ else
+ AssertLogRelMsgFailed(("No memory for %#x modules.\n", cModules));
+ }
+ else
+ AssertLogRelMsgFailed(("cModules=%#x\n", cModules));
+ RTDbgAsUnlockExcl(hAs);
+ }
+}
+
+
+/**
+ * Gets the IPRT debugging configuration handle (no refs retained).
+ *
+ * @returns Config handle or NIL_RTDBGCFG.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(RTDBGCFG) DBGFR3AsGetConfig(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGCFG);
+ return pUVM->dbgf.s.hDbgCfg;
+}
+
+
+/**
+ * Adds the address space to the database.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle. The reference of the caller
+ * will NOT be consumed.
+ * @param ProcId The process id or NIL_RTPROCESS.
+ */
+VMMR3DECL(int) DBGFR3AsAdd(PUVM pUVM, RTDBGAS hDbgAs, RTPROCESS ProcId)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ const char *pszName = RTDbgAsName(hDbgAs);
+ if (!pszName)
+ return VERR_INVALID_HANDLE;
+ uint32_t cRefs = RTDbgAsRetain(hDbgAs);
+ if (cRefs == UINT32_MAX)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Allocate a tracking node.
+ */
+ int rc = VERR_NO_MEMORY;
+ PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_AS, sizeof(*pDbNode));
+ if (pDbNode)
+ {
+ pDbNode->HandleCore.Key = hDbgAs;
+ pDbNode->PidCore.Key = ProcId;
+ pDbNode->NameCore.pszString = pszName;
+ pDbNode->NameCore.cchString = strlen(pszName);
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+ if (RTStrSpaceInsert(&pUVM->dbgf.s.AsNameSpace, &pDbNode->NameCore))
+ {
+ if (RTAvlPVInsert(&pUVM->dbgf.s.AsHandleTree, &pDbNode->HandleCore))
+ {
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+ return VINF_SUCCESS;
+ }
+
+ /* bail out */
+ RTStrSpaceRemove(&pUVM->dbgf.s.AsNameSpace, pszName);
+ }
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+ MMR3HeapFree(pDbNode);
+ }
+ RTDbgAsRelease(hDbgAs);
+ return rc;
+}
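+
+#if 0 /* Illustrative usage sketch, not part of the original change: creating
+       a guest process address space and registering it with the database.
+       The name, process id and helper name are made up. */
+static int dbgfAsAddUsageExample(PUVM pUVM)
+{
+    RTDBGAS hAs;
+    int rc = RTDbgAsCreate(&hAs, 0, RTGCPTR_MAX, "init");
+    if (RT_SUCCESS(rc))
+    {
+        rc = DBGFR3AsAdd(pUVM, hAs, 1 /*ProcId*/);
+        RTDbgAsRelease(hAs); /* The database took its own reference; drop ours. */
+    }
+    return rc;
+}
+#endif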
+
+
+/**
+ * Delete an address space from the database.
+ *
+ * The address space must not be in use as any of the standard aliases.
+ *
+ * @returns VBox status code.
+ * @retval VERR_SHARING_VIOLATION if in use as an alias.
+ * @retval VERR_NOT_FOUND if not found in the address space database.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle. Aliases are not allowed.
+ */
+VMMR3DECL(int) DBGFR3AsDelete(PUVM pUVM, RTDBGAS hDbgAs)
+{
+ /*
+ * Input validation. Retain the address space so it can be released outside
+ * the lock as well as validated.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ if (hDbgAs == NIL_RTDBGAS)
+ return VINF_SUCCESS;
+ uint32_t cRefs = RTDbgAsRetain(hDbgAs);
+ if (cRefs == UINT32_MAX)
+ return VERR_INVALID_HANDLE;
+ RTDbgAsRelease(hDbgAs);
+
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+
+ /*
+ * You cannot delete any of the aliases.
+ */
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.ahAsAliases); i++)
+ if (pUVM->dbgf.s.ahAsAliases[i] == hDbgAs)
+ {
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+ return VERR_SHARING_VIOLATION;
+ }
+
+ /*
+ * Ok, try remove it from the database.
+ */
+ PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)RTAvlPVRemove(&pUVM->dbgf.s.AsHandleTree, hDbgAs);
+ if (!pDbNode)
+ {
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+ return VERR_NOT_FOUND;
+ }
+ RTStrSpaceRemove(&pUVM->dbgf.s.AsNameSpace, pDbNode->NameCore.pszString);
+ if (pDbNode->PidCore.Key != NIL_RTPROCESS)
+ RTAvlU32Remove(&pUVM->dbgf.s.AsPidTree, pDbNode->PidCore.Key);
+
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+
+ /*
+ * Free the resources.
+ */
+ RTDbgAsRelease(hDbgAs);
+ MMR3HeapFree(pDbNode);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Changes an alias to point to a new address space.
+ *
+ * Not all the aliases can be changed, currently it's only DBGF_AS_GLOBAL
+ * and DBGF_AS_KERNEL.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The alias to change.
+ * @param hAliasFor The address space hAlias should be an alias for. This
+ * can be an alias. The caller's reference to this address
+ * space will NOT be consumed.
+ */
+VMMR3DECL(int) DBGFR3AsSetAlias(PUVM pUVM, RTDBGAS hAlias, RTDBGAS hAliasFor)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertMsgReturn(DBGF_AS_IS_ALIAS(hAlias), ("%p\n", hAlias), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!DBGF_AS_IS_FIXED_ALIAS(hAlias), ("%p\n", hAlias), VERR_INVALID_PARAMETER);
+ RTDBGAS hRealAliasFor = DBGFR3AsResolveAndRetain(pUVM, hAliasFor);
+ if (hRealAliasFor == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Make sure the handle is already in the database.
+ */
+ int rc = VERR_NOT_FOUND;
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+ if (RTAvlPVGet(&pUVM->dbgf.s.AsHandleTree, hRealAliasFor))
+ {
+ /*
+ * Update the alias table and release the current address space.
+ */
+ RTDBGAS hAsOld;
+ ASMAtomicXchgHandle(&pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(hAlias)], hRealAliasFor, &hAsOld);
+ uint32_t cRefs = RTDbgAsRelease(hAsOld);
+ Assert(cRefs > 0); Assert(cRefs != UINT32_MAX); NOREF(cRefs);
+ rc = VINF_SUCCESS;
+ }
+ else
+ RTDbgAsRelease(hRealAliasFor);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNPDMR3ENUM}
+ */
+static DECLCALLBACK(int) dbgfR3AsLazyPopulateR0Callback(PVM pVM, const char *pszFilename, const char *pszName,
+ RTUINTPTR ImageBase, size_t cbImage, PDMLDRCTX enmCtx, void *pvArg)
+{
+ NOREF(pVM); NOREF(cbImage);
+
+ /* Only ring-0 modules. */
+ if (enmCtx == PDMLDRCTX_RING_0)
+ {
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszName, RTLDRARCH_HOST, pVM->pUVM->dbgf.s.hDbgCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgAsModuleLink((RTDBGAS)pvArg, hDbgMod, ImageBase, 0 /*fFlags*/);
+ if (RT_FAILURE(rc))
+ LogRel(("DBGF: Failed to link module \"%s\" into DBGF_AS_R0 at %RTptr: %Rrc\n",
+ pszName, ImageBase, rc));
+ }
+ else
+ LogRel(("DBGF: RTDbgModCreateFromImage failed with rc=%Rrc for module \"%s\" (%s)\n",
+ rc, pszName, pszFilename));
+ }
+ return VINF_SUCCESS;
+}
+
+
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+/**
+ * @callback_method_impl{FNPDMR3ENUM}
+ */
+static DECLCALLBACK(int) dbgfR3AsLazyPopulateRCCallback(PVM pVM, const char *pszFilename, const char *pszName,
+ RTUINTPTR ImageBase, size_t cbImage, PDMLDRCTX enmCtx, void *pvArg)
+{
+ NOREF(pVM); NOREF(cbImage);
+
+ /* Only raw-mode modules. */
+ if (enmCtx == PDMLDRCTX_RAW_MODE)
+ {
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszName, RTLDRARCH_X86_32, pVM->pUVM->dbgf.s.hDbgCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgAsModuleLink((RTDBGAS)pvArg, hDbgMod, ImageBase, 0 /*fFlags*/);
+ if (RT_FAILURE(rc))
+ LogRel(("DBGF: Failed to link module \"%s\" into DBGF_AS_RC at %RTptr: %Rrc\n",
+ pszName, ImageBase, rc));
+ }
+ else
+ LogRel(("DBGF: RTDbgModCreateFromImage failed with rc=%Rrc for module \"%s\" (%s)\n",
+ rc, pszName, pszFilename));
+ }
+ return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_RAW_MODE_KEEP */
+
+
+/**
+ * Lazily populates the specified address space.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The alias.
+ */
+static void dbgfR3AsLazyPopulate(PUVM pUVM, RTDBGAS hAlias)
+{
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+ uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
+ if (!pUVM->dbgf.s.afAsAliasPopuplated[iAlias])
+ {
+ RTDBGAS hDbgAs = pUVM->dbgf.s.ahAsAliases[iAlias];
+ if (hAlias == DBGF_AS_R0 && pUVM->pVM)
+ PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateR0Callback, hDbgAs);
+#ifdef VBOX_WITH_RAW_MODE_KEEP /* needs fixing */
+ else if (hAlias == DBGF_AS_RC && pUVM->pVM && VM_IS_RAW_MODE_ENABLED(pUVM->pVM))
+ {
+ LogRel(("DBGF: Lazy init of RC address space\n"));
+ PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateRCCallback, hDbgAs);
+ }
+#endif
+ else if (hAlias == DBGF_AS_PHYS && pUVM->pVM)
+ {
+ /** @todo Lazy load pc and vga bios symbols or the EFI stuff. */
+ }
+
+ pUVM->dbgf.s.afAsAliasPopuplated[iAlias] = true;
+ }
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
+}
+
+
+/**
+ * Resolves the address space handle into a real handle if it's an alias.
+ *
+ * @returns Real address space handle. NIL_RTDBGAS if invalid handle.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The possibly address space alias.
+ *
+ * @remarks Doesn't take any locks.
+ */
+VMMR3DECL(RTDBGAS) DBGFR3AsResolve(PUVM pUVM, RTDBGAS hAlias)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ AssertCompileNS(NIL_RTDBGAS == (RTDBGAS)0);
+
+ uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
+ if (iAlias < DBGF_AS_COUNT)
+ ASMAtomicReadHandle(&pUVM->dbgf.s.ahAsAliases[iAlias], &hAlias);
+ return hAlias;
+}
+
+
+/**
+ * Resolves the address space handle into a real handle if it's an alias,
+ * and retains whatever it is.
+ *
+ * @returns Real address space handle. NIL_RTDBGAS if invalid handle.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The possibly address space alias.
+ */
+VMMR3DECL(RTDBGAS) DBGFR3AsResolveAndRetain(PUVM pUVM, RTDBGAS hAlias)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ AssertCompileNS(NIL_RTDBGAS == (RTDBGAS)0);
+
+ uint32_t cRefs;
+ uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
+ if (iAlias < DBGF_AS_COUNT)
+ {
+ if (DBGF_AS_IS_FIXED_ALIAS(hAlias))
+ {
+ /* Perform lazy address space population. */
+ if (!pUVM->dbgf.s.afAsAliasPopuplated[iAlias])
+ dbgfR3AsLazyPopulate(pUVM, hAlias);
+
+ /* Won't ever change, no need to grab the lock. */
+ hAlias = pUVM->dbgf.s.ahAsAliases[iAlias];
+ cRefs = RTDbgAsRetain(hAlias);
+ }
+ else
+ {
+ /* May change, grab the lock so we can read it safely. */
+ DBGF_AS_DB_LOCK_READ(pUVM);
+ hAlias = pUVM->dbgf.s.ahAsAliases[iAlias];
+ cRefs = RTDbgAsRetain(hAlias);
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
+ }
+ }
+ else
+ /* Not an alias, just retain it. */
+ cRefs = RTDbgAsRetain(hAlias);
+
+ return cRefs != UINT32_MAX ? hAlias : NIL_RTDBGAS;
+}
+
+
+/**
+ * Query an address space by name.
+ *
+ * @returns Retained address space handle if found, NIL_RTDBGAS if not.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pszName The name.
+ */
+VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PUVM pUVM, const char *pszName)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGAS);
+ AssertPtrReturn(pszName, NIL_RTDBGAS);
+ AssertReturn(*pszName, NIL_RTDBGAS);
+
+ /*
+ * Look it up in the string space and retain the result.
+ */
+ RTDBGAS hDbgAs = NIL_RTDBGAS;
+ DBGF_AS_DB_LOCK_READ(pUVM);
+
+ PRTSTRSPACECORE pNode = RTStrSpaceGet(&pUVM->dbgf.s.AsNameSpace, pszName);
+ if (pNode)
+ {
+ PDBGFASDBNODE pDbNode = RT_FROM_MEMBER(pNode, DBGFASDBNODE, NameCore);
+ hDbgAs = (RTDBGAS)pDbNode->HandleCore.Key;
+ uint32_t cRefs = RTDbgAsRetain(hDbgAs);
+ if (RT_UNLIKELY(cRefs == UINT32_MAX))
+ hDbgAs = NIL_RTDBGAS;
+ }
+
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
+ return hDbgAs;
+}
+
+
+/**
+ * Query an address space by process ID.
+ *
+ * @returns Retained address space handle if found, NIL_RTDBGAS if not.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param ProcId The process ID.
+ */
+VMMR3DECL(RTDBGAS) DBGFR3AsQueryByPid(PUVM pUVM, RTPROCESS ProcId)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGAS);
+ AssertReturn(ProcId != NIL_RTPROCESS, NIL_RTDBGAS);
+
+ /*
+ * Look it up in the PID tree and retain the result.
+ */
+ RTDBGAS hDbgAs = NIL_RTDBGAS;
+ DBGF_AS_DB_LOCK_READ(pUVM);
+
+ PAVLU32NODECORE pNode = RTAvlU32Get(&pUVM->dbgf.s.AsPidTree, ProcId);
+ if (pNode)
+ {
+ PDBGFASDBNODE pDbNode = RT_FROM_MEMBER(pNode, DBGFASDBNODE, PidCore);
+ hDbgAs = (RTDBGAS)pDbNode->HandleCore.Key;
+ uint32_t cRefs = RTDbgAsRetain(hDbgAs);
+ if (RT_UNLIKELY(cRefs == UINT32_MAX))
+ hDbgAs = NIL_RTDBGAS;
+ }
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
+
+ return hDbgAs;
+}
+
+#if 0 /* unused */
+
+/**
+ * Searches for the file in the path.
+ *
+ * The file is first tested without any path modification, then we walk the path
+ * looking in each directory.
+ *
+ * @returns VBox status code.
+ * @param pszFilename The file to search for.
+ * @param pszPath The search path.
+ * @param pfnOpen The open callback function.
+ * @param pvUser User argument for the callback.
+ */
+static int dbgfR3AsSearchPath(const char *pszFilename, const char *pszPath, PFNDBGFR3ASSEARCHOPEN pfnOpen, void *pvUser)
+{
+ char szFound[RTPATH_MAX];
+
+ /* Check the filename length. */
+ size_t const cchFilename = strlen(pszFilename);
+ if (cchFilename >= sizeof(szFound))
+ return VERR_FILENAME_TOO_LONG;
+ const char *pszName = RTPathFilename(pszFilename);
+ if (!pszName)
+ return VERR_IS_A_DIRECTORY;
+ size_t const cchName = strlen(pszName);
+
+ /*
+ * Try default location first.
+ */
+ memcpy(szFound, pszFilename, cchFilename + 1);
+ int rc = pfnOpen(szFound, pvUser);
+ if (RT_SUCCESS(rc))
+ return rc;
+
+ /*
+ * Walk the search path.
+ */
+ const char *psz = pszPath;
+ while (*psz)
+ {
+ /* Skip leading blanks - no directories with leading spaces, thank you. */
+ while (RT_C_IS_BLANK(*psz))
+ psz++;
+
+ /* Find the end of this element. */
+ const char *pszNext;
+ const char *pszEnd = strchr(psz, ';');
+ if (!pszEnd)
+ pszEnd = pszNext = strchr(psz, '\0');
+ else
+ pszNext = pszEnd + 1;
+ if (pszEnd != psz)
+ {
+ size_t const cch = pszEnd - psz;
+ if (cch + 1 + cchName < sizeof(szFound))
+ {
+ /** @todo RTPathCompose, RTPathComposeN(). This code isn't right
+ * for 'E:' on DOS systems. It may also create unwanted double slashes. */
+ memcpy(szFound, psz, cch);
+ szFound[cch] = '/';
+ memcpy(szFound + cch + 1, pszName, cchName + 1);
+ int rc2 = pfnOpen(szFound, pvUser);
+ if (RT_SUCCESS(rc2))
+ return rc2;
+ if ( rc2 != rc
+ && ( rc == VERR_FILE_NOT_FOUND
+ || rc == VERR_OPEN_FAILED))
+ rc = rc2;
+ }
+ }
+
+ /* advance */
+ psz = pszNext;
+ }
+
+ /*
+ * Walk the path once again, this time do a depth search.
+ */
+ /** @todo do a depth search using the specified path. */
+
+ /* failed */
+ return rc;
+}
+
+
+/**
+ * Same as dbgfR3AsSearchEnv, except that the path is taken from the environment.
+ *
+ * If the environment variable doesn't exist, the current directory is searched
+ * instead.
+ *
+ * @returns VBox status code.
+ * @param pszFilename The filename.
+ * @param pszEnvVar The environment variable name.
+ * @param pfnOpen The open callback function.
+ * @param pvUser User argument for the callback.
+ */
+static int dbgfR3AsSearchEnvPath(const char *pszFilename, const char *pszEnvVar, PFNDBGFR3ASSEARCHOPEN pfnOpen, void *pvUser)
+{
+ int rc;
+ char *pszPath = RTEnvDupEx(RTENV_DEFAULT, pszEnvVar);
+ if (pszPath)
+ {
+ rc = dbgfR3AsSearchPath(pszFilename, pszPath, pfnOpen, pvUser);
+ RTStrFree(pszPath);
+ }
+ else
+ rc = dbgfR3AsSearchPath(pszFilename, ".", pfnOpen, pvUser);
+ return rc;
+}
+
+
+/**
+ * Same as dbgfR3AsSearchEnv, except that the path is taken from the DBGF config
+ * (CFGM).
+ *
+ * Nothing is done if the CFGM variable isn't set.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszFilename The filename.
+ * @param pszCfgValue The name of the config variable (under /DBGF/).
+ * @param pfnOpen The open callback function.
+ * @param pvUser User argument for the callback.
+ */
+static int dbgfR3AsSearchCfgPath(PUVM pUVM, const char *pszFilename, const char *pszCfgValue,
+ PFNDBGFR3ASSEARCHOPEN pfnOpen, void *pvUser)
+{
+ char *pszPath;
+ int rc = CFGMR3QueryStringAllocDef(CFGMR3GetChild(CFGMR3GetRootU(pUVM), "/DBGF"), pszCfgValue, &pszPath, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (!pszPath)
+ return VERR_FILE_NOT_FOUND;
+ rc = dbgfR3AsSearchPath(pszFilename, pszPath, pfnOpen, pvUser);
+ MMR3HeapFree(pszPath);
+ return rc;
+}
+
+#endif /* unused */
+
+
+/**
+ * Load symbols from an executable module into the specified address space.
+ *
+ * If a module exists at the specified address it will be replaced by this
+ * call, otherwise a new module is created.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space.
+ * @param pszFilename The filename of the executable module.
+ * @param pszModName The module name. If NULL, the file name base
+ * is used (without the extension).
+ * @param enmArch The desired architecture, use RTLDRARCH_WHATEVER if
+ * it's not relevant or known.
+ * @param pModAddress The load address of the module.
+ * @param iModSeg The segment to load, pass NIL_RTDBGSEGIDX to load
+ * the whole image.
+ * @param fFlags For DBGFR3AsLinkModule, see RTDBGASLINK_FLAGS_*.
+ */
+VMMR3DECL(int) DBGFR3AsLoadImage(PUVM pUVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName, RTLDRARCH enmArch,
+ PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, uint32_t fFlags)
+{
+ /*
+ * Validate input
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
+ AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & ~RTDBGASLINK_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszModName, enmArch, pUVM->dbgf.s.hDbgCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGFR3AsLinkModule(pUVM, hRealAS, hDbgMod, pModAddress, iModSeg, fFlags & RTDBGASLINK_FLAGS_VALID_MASK);
+ if (RT_FAILURE(rc))
+ RTDbgModRelease(hDbgMod);
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
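+
+#if 0 /* Illustrative usage sketch, not part of the original change: loading
+       symbols for a guest kernel image into the kernel address space alias.
+       The file path, load address and helper name are made up. */
+static int dbgfAsLoadImageUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS ModAddr;
+    DBGFR3AddrFromFlat(pUVM, &ModAddr, UINT64_C(0xfffff80000000000));
+    return DBGFR3AsLoadImage(pUVM, DBGF_AS_KERNEL, "/tmp/ntoskrnl.exe", NULL /*pszModName*/,
+                             RTLDRARCH_WHATEVER, &ModAddr, NIL_RTDBGSEGIDX, 0 /*fFlags*/);
+}
+#endif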
+
+
+/**
+ * Load symbols from a map file into a module at the specified address space.
+ *
+ * If a module exists at the specified address it will be replaced by this
+ * call, otherwise a new module is created.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space.
+ * @param pszFilename The map file.
+ * @param pszModName The module name. If NULL, the file name base
+ * is used (without the extension).
+ * @param pModAddress The load address of the module.
+ * @param iModSeg The segment to load, pass NIL_RTDBGSEGIDX to load
+ * the whole image.
+ * @param uSubtrahend Value to subtract from the symbols in the map
+ * file. This is useful for the Linux System.map and
+ * /proc/kallsyms.
+ * @param fFlags Flags reserved for future extensions, must be 0.
+ */
+VMMR3DECL(int) DBGFR3AsLoadMap(PUVM pUVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName,
+ PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, RTGCUINTPTR uSubtrahend, uint32_t fFlags)
+{
+ /*
+ * Validate input
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
+ AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromMap(&hDbgMod, pszFilename, pszModName, uSubtrahend, pUVM->dbgf.s.hDbgCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGFR3AsLinkModule(pUVM, hRealAS, hDbgMod, pModAddress, iModSeg, 0);
+ if (RT_FAILURE(rc))
+ RTDbgModRelease(hDbgMod);
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
+
+
+/**
+ * Wrapper around RTDbgAsModuleLink, RTDbgAsModuleLinkSeg and DBGFR3AsResolve.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param hMod The module handle.
+ * @param pModAddress The link address.
+ * @param iModSeg The segment to link, NIL_RTDBGSEGIDX for the entire image.
+ * @param fFlags Flags to pass to the link functions, see RTDBGASLINK_FLAGS_*.
+ */
+VMMR3DECL(int) DBGFR3AsLinkModule(PUVM pUVM, RTDBGAS hDbgAs, RTDBGMOD hMod, PCDBGFADDRESS pModAddress,
+ RTDBGSEGIDX iModSeg, uint32_t fFlags)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the job.
+ */
+ int rc;
+ if (iModSeg == NIL_RTDBGSEGIDX)
+ rc = RTDbgAsModuleLink(hRealAS, hMod, pModAddress->FlatPtr, fFlags);
+ else
+ rc = RTDbgAsModuleLinkSeg(hRealAS, hMod, iModSeg, pModAddress->FlatPtr, fFlags);
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
+
+
+/**
+ * Wrapper around RTDbgAsModuleByName and RTDbgAsModuleUnlink.
+ *
+ * Unlinks all mappings matching the given module name.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pszModName The name of the module to unlink.
+ */
+VMMR3DECL(int) DBGFR3AsUnlinkModuleByName(PUVM pUVM, RTDBGAS hDbgAs, const char *pszModName)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the job.
+ */
+ RTDBGMOD hMod;
+ int rc = RTDbgAsModuleByName(hRealAS, pszModName, 0, &hMod);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ rc = RTDbgAsModuleUnlink(hRealAS, hMod);
+ RTDbgModRelease(hMod);
+ if (RT_FAILURE(rc))
+ break;
+ rc = RTDbgAsModuleByName(hRealAS, pszModName, 0, &hMod);
+ if (RT_FAILURE_NP(rc))
+ {
+ if (rc == VERR_NOT_FOUND)
+ rc = VINF_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
+
+
+/**
+ * Adds the module name to the symbol name.
+ *
+ * @param pSymbol The symbol info (in/out).
+ * @param hMod The module handle.
+ */
+static void dbgfR3AsSymbolJoinNames(PRTDBGSYMBOL pSymbol, RTDBGMOD hMod)
+{
+ /* Figure the lengths, adjust them if the result is too long. */
+ const char *pszModName = RTDbgModName(hMod);
+ size_t cchModName = strlen(pszModName);
+ size_t cchSymbol = strlen(pSymbol->szName);
+ if (cchModName + 1 + cchSymbol >= sizeof(pSymbol->szName))
+ {
+ if (cchModName >= sizeof(pSymbol->szName) / 4)
+ cchModName = sizeof(pSymbol->szName) / 4;
+ if (cchModName + 1 + cchSymbol >= sizeof(pSymbol->szName))
+ cchSymbol = sizeof(pSymbol->szName) - cchModName - 2;
+ Assert(cchModName + 1 + cchSymbol < sizeof(pSymbol->szName));
+ }
+
+ /* Do the moving and copying. */
+ memmove(&pSymbol->szName[cchModName + 1], &pSymbol->szName[0], cchSymbol + 1);
+ memcpy(&pSymbol->szName[0], pszModName, cchModName);
+ pSymbol->szName[cchModName] = '!';
+}
+
+
+/**
+ * Query a symbol by address.
+ *
+ * The returned symbol is the one we consider closest to the specified address.
+ *
+ * @returns VBox status code. See RTDbgAsSymbolByAddr.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pAddress The address to lookup.
+ * @param fFlags One of the RTDBGSYMADDR_FLAGS_XXX flags.
+ * @param poffDisp Where to return the distance between the returned
+ * symbol and pAddress. Optional.
+ * @param pSymbol Where to return the symbol information. The returned
+ * symbol name will be prefixed by the module name as
+ * far as space allows.
+ * @param phMod Where to return the module handle. Optional.
+ */
+VMMR3DECL(int) DBGFR3AsSymbolByAddr(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress, uint32_t fFlags,
+ PRTGCINTPTR poffDisp, PRTDBGSYMBOL pSymbol, PRTDBGMOD phMod)
+{
+ /*
+ * Implement the special address space aliases the lazy way.
+ */
+ if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
+ {
+ int rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_RC, pAddress, fFlags, poffDisp, pSymbol, phMod);
+ if (RT_FAILURE(rc))
+ rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_GLOBAL, pAddress, fFlags, poffDisp, pSymbol, phMod);
+ return rc;
+ }
+
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(poffDisp, VERR_INVALID_POINTER);
+ AssertPtrReturn(pSymbol, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
+ if (poffDisp)
+ *poffDisp = 0;
+ if (phMod)
+ *phMod = NIL_RTDBGMOD;
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the lookup.
+ */
+ RTDBGMOD hMod;
+ int rc = RTDbgAsSymbolByAddr(hRealAS, pAddress->FlatPtr, fFlags, poffDisp, pSymbol, &hMod);
+ if (RT_SUCCESS(rc))
+ {
+ dbgfR3AsSymbolJoinNames(pSymbol, hMod);
+ if (!phMod)
+ RTDbgModRelease(hMod);
+ else
+ *phMod = hMod;
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
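+
+#if 0 /* Illustrative usage sketch, not part of the original change:
+       nearest-symbol lookup for an address, printing "module!symbol+disp".
+       The flag, address and helper name are assumptions for the example. */
+static void dbgfAsSymbolByAddrUsageExample(PUVM pUVM)
+{
+    DBGFADDRESS Addr;
+    DBGFR3AddrFromFlat(pUVM, &Addr, UINT64_C(0xffffffff81000100));
+
+    RTGCINTPTR offDisp = 0;
+    RTDBGSYMBOL SymInfo;
+    int rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_KERNEL, &Addr, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
+                                  &offDisp, &SymInfo, NULL /*phMod*/);
+    if (RT_SUCCESS(rc))
+        Log(("%s+%RI64\n", SymInfo.szName, (int64_t)offDisp)); /* szName is already "module!symbol". */
+}
+#endif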
+
+
+/**
+ * Convenience function that combines RTDbgSymbolDup and DBGFR3AsSymbolByAddr.
+ *
+ * @returns Pointer to the symbol on success. This must be free using
+ * RTDbgSymbolFree(). NULL is returned if not found or any error
+ * occurs.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs See DBGFR3AsSymbolByAddr.
+ * @param pAddress See DBGFR3AsSymbolByAddr.
+ * @param fFlags See DBGFR3AsSymbolByAddr.
+ * @param poffDisp See DBGFR3AsSymbolByAddr.
+ * @param phMod See DBGFR3AsSymbolByAddr.
+ */
+VMMR3DECL(PRTDBGSYMBOL) DBGFR3AsSymbolByAddrA(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress, uint32_t fFlags,
+ PRTGCINTPTR poffDisp, PRTDBGMOD phMod)
+{
+ RTDBGSYMBOL SymInfo;
+ int rc = DBGFR3AsSymbolByAddr(pUVM, hDbgAs, pAddress, fFlags, poffDisp, &SymInfo, phMod);
+ if (RT_SUCCESS(rc))
+ return RTDbgSymbolDup(&SymInfo);
+ return NULL;
+}
+
+
+/**
+ * Query a symbol by name.
+ *
+ * The symbol can be prefixed by a module name pattern to scope the search. The
+ * pattern is a simple string pattern with '*' and '?' as wild chars. See
+ * RTStrSimplePatternMatch().
+ *
+ * @returns VBox status code. See RTDbgAsSymbolByName.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pszSymbol The symbol to search for, possibly prefixed by a
+ * module pattern.
+ * @param pSymbol Where to return the symbol information.
+ * The returned symbol name will be prefixed by
+ * the module name as far as space allows.
+ * @param phMod Where to return the module handle. Optional.
+ */
+VMMR3DECL(int) DBGFR3AsSymbolByName(PUVM pUVM, RTDBGAS hDbgAs, const char *pszSymbol,
+ PRTDBGSYMBOL pSymbol, PRTDBGMOD phMod)
+{
+ /*
+ * Implement the special address space aliases the lazy way.
+ */
+ if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
+ {
+ int rc = DBGFR3AsSymbolByName(pUVM, DBGF_AS_RC, pszSymbol, pSymbol, phMod);
+ if (RT_FAILURE(rc))
+ rc = DBGFR3AsSymbolByName(pUVM, DBGF_AS_GLOBAL, pszSymbol, pSymbol, phMod);
+ return rc;
+ }
+
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pSymbol, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
+ if (phMod)
+ *phMod = NIL_RTDBGMOD;
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+
+ /*
+ * Do the lookup.
+ */
+ RTDBGMOD hMod;
+ int rc = RTDbgAsSymbolByName(hRealAS, pszSymbol, pSymbol, &hMod);
+ if (RT_SUCCESS(rc))
+ {
+ dbgfR3AsSymbolJoinNames(pSymbol, hMod);
+ if (!phMod)
+ RTDbgModRelease(hMod);
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
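+
+#if 0 /* Illustrative usage sketch, not part of the original change: symbol
+       lookup by name, scoped with a module pattern prefix. The symbol and
+       helper name are made up. */
+static void dbgfAsSymbolByNameUsageExample(PUVM pUVM)
+{
+    RTDBGSYMBOL SymInfo;
+    int rc = DBGFR3AsSymbolByName(pUVM, DBGF_AS_KERNEL, "nt!KeBugCheckEx", &SymInfo, NULL /*phMod*/);
+    if (RT_SUCCESS(rc))
+        Log(("Found at %RGv\n", (RTGCUINTPTR)SymInfo.Value));
+}
+#endif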
+
+
+VMMR3DECL(int) DBGFR3AsLineByAddr(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress,
+ PRTGCINTPTR poffDisp, PRTDBGLINE pLine, PRTDBGMOD phMod)
+{
+ /*
+ * Implement the special address space aliases the lazy way.
+ */
+ if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
+ {
+ int rc = DBGFR3AsLineByAddr(pUVM, DBGF_AS_RC, pAddress, poffDisp, pLine, phMod);
+ if (RT_FAILURE(rc))
+ rc = DBGFR3AsLineByAddr(pUVM, DBGF_AS_GLOBAL, pAddress, poffDisp, pLine, phMod);
+ return rc;
+ }
+
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(poffDisp, VERR_INVALID_POINTER);
+ AssertPtrReturn(pLine, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
+ if (poffDisp)
+ *poffDisp = 0;
+ if (phMod)
+ *phMod = NIL_RTDBGMOD;
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the lookup.
+ */
+ int rc = RTDbgAsLineByAddr(hRealAS, pAddress->FlatPtr, poffDisp, pLine, phMod);
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
+
+
+VMMR3DECL(PRTDBGLINE) DBGFR3AsLineByAddrA(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress,
+ PRTGCINTPTR poffDisp, PRTDBGMOD phMod)
+{
+ RTDBGLINE Line;
+ int rc = DBGFR3AsLineByAddr(pUVM, hDbgAs, pAddress, poffDisp, &Line, phMod);
+ if (RT_SUCCESS(rc))
+ return RTDbgLineDup(&Line);
+ return NULL;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp b/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
new file mode 100644
index 00000000..165c2781
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
@@ -0,0 +1,675 @@
+/* $Id: DBGFCoreWrite.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Guest Core Dump.
+ */
+
+/*
+ * Copyright (C) 2010-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_dbgf_vmcore VMCore Format
+ *
+ * The VirtualBox VMCore Format:
+ * [ ELF 64 Header] -- Only 1
+ *
+ * [ PT_NOTE ] -- Only 1
+ * - Offset into CoreDescriptor followed by list of Notes (Note Hdr + data) of VBox CPUs.
+ * - (Any Additional custom Note sections).
+ *
+ * [ PT_LOAD ] -- One for each contiguous memory chunk
+ * - Memory offset (physical).
+ * - File offset.
+ *
+ * CoreDescriptor
+ * - Magic, VBox version.
+ * - Number of CPUs.
+ *
+ * Per-CPU register dump
+ * - CPU 1 Note Hdr + Data.
+ * - CPU 2 Note Hdr + Data.
+ * ...
+ * (Additional custom notes Hdr+data)
+ * - VBox 1 Note Hdr + Data.
+ * - VBox 2 Note Hdr + Data.
+ * ...
+ * Memory dump
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <iprt/param.h>
+#include <iprt/file.h>
+#include <iprt/mem.h>
+#include <iprt/formats/elf64.h>
+
+#include "DBGFInternal.h"
+
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/dbgfcorefmt.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/version.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define DBGFLOG_NAME "DBGFCoreWrite"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static const int g_NoteAlign = 8;
+static const int g_cbNoteName = 16;
+
+/* The size of these strings (incl. NULL terminator) must align to 8 bytes (g_NoteAlign) and -not- 4 bytes. */
+static const char *g_pcszCoreVBoxCore = "VBCORE";
+static const char *g_pcszCoreVBoxCpu = "VBCPU";
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Guest core writer data.
+ *
+ * Used to pass parameters from DBGFR3CoreWrite to dbgfR3CoreWriteRendezvous().
+ */
+typedef struct DBGFCOREDATA
+{
+ /** The name of the file to write the file to. */
+ const char *pszFilename;
+ /** Whether to replace (/overwrite) any existing file. */
+ bool fReplaceFile;
+} DBGFCOREDATA;
+/** Pointer to the guest core writer data. */
+typedef DBGFCOREDATA *PDBGFCOREDATA;
+
+
+
+/**
+ * ELF function to write 64-bit ELF header.
+ *
+ * @param hFile The file to write to.
+ * @param cProgHdrs Number of program headers.
+ * @param cSecHdrs Number of section headers.
+ *
+ * @return IPRT status code.
+ */
+static int Elf64WriteElfHdr(RTFILE hFile, uint16_t cProgHdrs, uint16_t cSecHdrs)
+{
+ Elf64_Ehdr ElfHdr;
+ RT_ZERO(ElfHdr);
+ ElfHdr.e_ident[EI_MAG0] = ELFMAG0;
+ ElfHdr.e_ident[EI_MAG1] = ELFMAG1;
+ ElfHdr.e_ident[EI_MAG2] = ELFMAG2;
+ ElfHdr.e_ident[EI_MAG3] = ELFMAG3;
+ ElfHdr.e_ident[EI_DATA] = ELFDATA2LSB;
+ ElfHdr.e_type = ET_CORE;
+ ElfHdr.e_version = EV_CURRENT;
+ ElfHdr.e_ident[EI_CLASS] = ELFCLASS64;
+ /* 32-bit builds will produce cores with e_machine EM_386. */
+#ifdef RT_ARCH_AMD64
+ ElfHdr.e_machine = EM_X86_64;
+#else
+ ElfHdr.e_machine = EM_386;
+#endif
+ ElfHdr.e_phnum = cProgHdrs;
+ ElfHdr.e_shnum = cSecHdrs;
+ ElfHdr.e_ehsize = sizeof(ElfHdr);
+ ElfHdr.e_phoff = sizeof(ElfHdr);
+ ElfHdr.e_phentsize = sizeof(Elf64_Phdr);
+ ElfHdr.e_shentsize = sizeof(Elf64_Shdr);
+
+ return RTFileWrite(hFile, &ElfHdr, sizeof(ElfHdr), NULL /* all */);
+}
+
+
+/**
+ * ELF function to write 64-bit program header.
+ *
+ * @param hFile The file to write to.
+ * @param Type Type of program header (PT_*).
+ * @param fFlags Flags (access permissions, PF_*).
+ * @param offFileData File offset of contents.
+ * @param cbFileData Size of contents in the file.
+ * @param cbMemData Size of contents in memory.
+ * @param Phys Physical address, pass zero if not applicable.
+ *
+ * @return IPRT status code.
+ */
+static int Elf64WriteProgHdr(RTFILE hFile, uint32_t Type, uint32_t fFlags, uint64_t offFileData, uint64_t cbFileData,
+ uint64_t cbMemData, RTGCPHYS Phys)
+{
+ Elf64_Phdr ProgHdr;
+ RT_ZERO(ProgHdr);
+ ProgHdr.p_type = Type;
+ ProgHdr.p_flags = fFlags;
+ ProgHdr.p_offset = offFileData;
+ ProgHdr.p_filesz = cbFileData;
+ ProgHdr.p_memsz = cbMemData;
+ ProgHdr.p_paddr = Phys;
+
+ return RTFileWrite(hFile, &ProgHdr, sizeof(ProgHdr), NULL /* all */);
+}
+
+
+/**
+ * Returns the size of the NOTE section given the name and size of the data.
+ *
+ * @param pszName Name of the note section.
+ * @param cbData Size of the data portion of the note section.
+ *
+ * @return The size of the NOTE section as rounded to the file alignment.
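+ *
+ * @remarks Worked example with made-up sizes: the name "VBCORE" is 7 bytes
+ *          incl. the terminator and pads to 8, while a 20 byte data portion
+ *          pads to 24, giving sizeof(Elf64_Nhdr) + 8 + 24 bytes in total.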
+ */
+static uint64_t Elf64NoteSectionSize(const char *pszName, uint64_t cbData)
+{
+ uint64_t cbNote = sizeof(Elf64_Nhdr);
+
+ size_t cbName = strlen(pszName) + 1;
+ size_t cbNameAlign = RT_ALIGN_Z(cbName, g_NoteAlign);
+
+ cbNote += cbNameAlign;
+ cbNote += RT_ALIGN_64(cbData, g_NoteAlign);
+ return cbNote;
+}
+
+
+/**
+ * Elf function to write 64-bit note header.
+ *
+ * @param hFile The file to write to.
+ * @param Type Type of this section.
+ * @param pszName Name of this section.
+ * @param pvData Opaque pointer to the data. Must not be NULL.
+ * @param cbData Size of the data.
+ *
+ * @returns IPRT status code.
+ */
+static int Elf64WriteNoteHdr(RTFILE hFile, uint16_t Type, const char *pszName, const void *pvData, uint64_t cbData)
+{
+ AssertReturn(pvData, VERR_INVALID_POINTER);
+ AssertReturn(cbData > 0, VERR_NO_DATA);
+
+ char szNoteName[g_cbNoteName];
+ RT_ZERO(szNoteName);
+ RTStrCopy(szNoteName, sizeof(szNoteName), pszName);
+
+ size_t cbName = strlen(szNoteName) + 1;
+ size_t cbNameAlign = RT_ALIGN_Z(cbName, g_NoteAlign);
+ uint64_t cbDataAlign = RT_ALIGN_64(cbData, g_NoteAlign);
+
+ /*
+ * Yell loudly and bail if we are going to be writing a core file that is not compatible with
+ * both Solaris and the 64-bit ELF spec, which dictates 8-byte alignment. See @bugref{5211#c3}.
+ */
+ if (cbNameAlign - cbName > 3)
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr pszName=%s cbName=%u cbNameAlign=%u, cbName aligns to 4 not 8-bytes!\n",
+ pszName, cbName, cbNameAlign));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (cbDataAlign - cbData > 3)
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr pszName=%s cbData=%u cbDataAlign=%u, cbData aligns to 4 not 8-bytes!\n",
+ pszName, cbData, cbDataAlign));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ static const char s_achPad[7] = { 0, 0, 0, 0, 0, 0, 0 };
+ AssertCompile(sizeof(s_achPad) >= g_NoteAlign - 1);
+
+ Elf64_Nhdr ElfNoteHdr;
+ RT_ZERO(ElfNoteHdr);
+ ElfNoteHdr.n_namesz = (Elf64_Word)cbName - 1; /* Again, a discrepancy between ELF-64 and Solaris,
+ we will follow ELF-64, see @bugref{5211#c3}. */
+ ElfNoteHdr.n_type = Type;
+ ElfNoteHdr.n_descsz = (Elf64_Word)cbDataAlign;
+
+ /*
+ * Write note header.
+ */
+ int rc = RTFileWrite(hFile, &ElfNoteHdr, sizeof(ElfNoteHdr), NULL /* all */);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write note name.
+ */
+ rc = RTFileWrite(hFile, szNoteName, cbName, NULL /* all */);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write note name padding if required.
+ */
+ if (cbNameAlign > cbName)
+ rc = RTFileWrite(hFile, s_achPad, cbNameAlign - cbName, NULL);
+
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write note data.
+ */
+ rc = RTFileWrite(hFile, pvData, cbData, NULL /* all */);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write note data padding if required.
+ */
+ if (cbDataAlign > cbData)
+ rc = RTFileWrite(hFile, s_achPad, cbDataAlign - cbData, NULL /* all */);
+ }
+ }
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ LogRel((DBGFLOG_NAME ": RTFileWrite failed. rc=%Rrc pszName=%s cbName=%u cbNameAlign=%u cbData=%u cbDataAlign=%u\n",
+ rc, pszName, cbName, cbNameAlign, cbData, cbDataAlign));
+
+ return rc;
+}
+
+
+/**
+ * Count the number of memory ranges that go into the core file.
+ *
+ * We cannot do a page-by-page dump of the entire guest memory as there will be
+ * way too many program header entries. Also we don't want to dump MMIO regions
+ * which means we cannot have a 1:1 mapping between core file offset and memory
+ * offset. Instead we dump the memory in ranges. A memory range is a contiguous
+ * memory area suitable for dumping to a core file.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @return Number of memory ranges
+ */
+static uint32_t dbgfR3GetRamRangeCount(PVM pVM)
+{
+ return PGMR3PhysGetRamRangeCount(pVM);
+}
+
+
+/**
+ * Gets the guest-CPU context suitable for dumping into the core file.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pDbgfCpu Where to dump the guest-CPU data.
+ */
+static void dbgfR3GetCoreCpu(PVMCPU pVCpu, PDBGFCORECPU pDbgfCpu)
+{
+#define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
+ do { \
+ (a_dbgfsel).uBase = (a_cpumselreg).u64Base; \
+ (a_dbgfsel).uLimit = (a_cpumselreg).u32Limit; \
+ (a_dbgfsel).uAttr = (a_cpumselreg).Attr.u; \
+ (a_dbgfsel).uSel = (a_cpumselreg).Sel; \
+ } while (0)
+
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ pDbgfCpu->rax = pCtx->rax;
+ pDbgfCpu->rbx = pCtx->rbx;
+ pDbgfCpu->rcx = pCtx->rcx;
+ pDbgfCpu->rdx = pCtx->rdx;
+ pDbgfCpu->rsi = pCtx->rsi;
+ pDbgfCpu->rdi = pCtx->rdi;
+ pDbgfCpu->r8 = pCtx->r8;
+ pDbgfCpu->r9 = pCtx->r9;
+ pDbgfCpu->r10 = pCtx->r10;
+ pDbgfCpu->r11 = pCtx->r11;
+ pDbgfCpu->r12 = pCtx->r12;
+ pDbgfCpu->r13 = pCtx->r13;
+ pDbgfCpu->r14 = pCtx->r14;
+ pDbgfCpu->r15 = pCtx->r15;
+ pDbgfCpu->rip = pCtx->rip;
+ pDbgfCpu->rsp = pCtx->rsp;
+ pDbgfCpu->rbp = pCtx->rbp;
+ pDbgfCpu->rflags = pCtx->rflags.u;
+ DBGFCOPYSEL(pDbgfCpu->cs, pCtx->cs);
+ DBGFCOPYSEL(pDbgfCpu->ds, pCtx->ds);
+ DBGFCOPYSEL(pDbgfCpu->es, pCtx->es);
+ DBGFCOPYSEL(pDbgfCpu->fs, pCtx->fs);
+ DBGFCOPYSEL(pDbgfCpu->gs, pCtx->gs);
+ DBGFCOPYSEL(pDbgfCpu->ss, pCtx->ss);
+ pDbgfCpu->cr0 = pCtx->cr0;
+ pDbgfCpu->cr2 = pCtx->cr2;
+ pDbgfCpu->cr3 = pCtx->cr3;
+ pDbgfCpu->cr4 = pCtx->cr4;
+ AssertCompile(RT_ELEMENTS(pDbgfCpu->dr) == RT_ELEMENTS(pCtx->dr));
+ for (unsigned i = 0; i < RT_ELEMENTS(pDbgfCpu->dr); i++)
+ pDbgfCpu->dr[i] = pCtx->dr[i];
+ pDbgfCpu->gdtr.uAddr = pCtx->gdtr.pGdt;
+ pDbgfCpu->gdtr.cb = pCtx->gdtr.cbGdt;
+ pDbgfCpu->idtr.uAddr = pCtx->idtr.pIdt;
+ pDbgfCpu->idtr.cb = pCtx->idtr.cbIdt;
+ DBGFCOPYSEL(pDbgfCpu->ldtr, pCtx->ldtr);
+ DBGFCOPYSEL(pDbgfCpu->tr, pCtx->tr);
+ pDbgfCpu->sysenter.cs = pCtx->SysEnter.cs;
+ pDbgfCpu->sysenter.eip = pCtx->SysEnter.eip;
+ pDbgfCpu->sysenter.esp = pCtx->SysEnter.esp;
+ pDbgfCpu->msrEFER = pCtx->msrEFER;
+ pDbgfCpu->msrSTAR = pCtx->msrSTAR;
+ pDbgfCpu->msrPAT = pCtx->msrPAT;
+ pDbgfCpu->msrLSTAR = pCtx->msrLSTAR;
+ pDbgfCpu->msrCSTAR = pCtx->msrCSTAR;
+ pDbgfCpu->msrSFMASK = pCtx->msrSFMASK;
+ pDbgfCpu->msrKernelGSBase = pCtx->msrKERNELGSBASE;
+ pDbgfCpu->msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
+ pDbgfCpu->msrTscAux = CPUMGetGuestTscAux(pVCpu);
+ pDbgfCpu->aXcr[0] = pCtx->aXcr[0];
+ pDbgfCpu->aXcr[1] = pCtx->aXcr[1];
+ AssertCompile(sizeof(pDbgfCpu->ext) == sizeof(pCtx->XState));
+ pDbgfCpu->cbExt = pVM->cpum.ro.GuestFeatures.cbMaxExtendedState;
+ if (RT_LIKELY(pDbgfCpu->cbExt))
+ memcpy(&pDbgfCpu->ext, &pCtx->XState, pDbgfCpu->cbExt);
+
+#undef DBGFCOPYSEL
+}
+
+
+/**
+ * Worker function for dbgfR3CoreWrite() which does the writing.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ * @param hFile The file to write to. Caller closes this.
+ */
+static int dbgfR3CoreWriteWorker(PVM pVM, RTFILE hFile)
+{
+ /*
+ * Collect core information.
+ */
+ uint32_t const cu32MemRanges = dbgfR3GetRamRangeCount(pVM);
+ uint16_t const cMemRanges = cu32MemRanges < UINT16_MAX - 1 ? cu32MemRanges : UINT16_MAX - 1; /* One PT_NOTE Program header */
+ uint16_t const cProgHdrs = cMemRanges + 1;
+
+ DBGFCOREDESCRIPTOR CoreDescriptor;
+ RT_ZERO(CoreDescriptor);
+ CoreDescriptor.u32Magic = DBGFCORE_MAGIC;
+ CoreDescriptor.u32FmtVersion = DBGFCORE_FMT_VERSION;
+ CoreDescriptor.cbSelf = sizeof(CoreDescriptor);
+ CoreDescriptor.u32VBoxVersion = VBOX_FULL_VERSION;
+ CoreDescriptor.u32VBoxRevision = VMMGetSvnRev();
+ CoreDescriptor.cCpus = pVM->cCpus;
+
+ Log((DBGFLOG_NAME ": CoreDescriptor Version=%u Revision=%u\n", CoreDescriptor.u32VBoxVersion, CoreDescriptor.u32VBoxRevision));
+
+ /*
+ * Compute the file layout (see pg_dbgf_vmcore).
+ */
+ uint64_t const offElfHdr = RTFileTell(hFile);
+ uint64_t const offNoteSection = offElfHdr + sizeof(Elf64_Ehdr);
+ uint64_t const offLoadSections = offNoteSection + sizeof(Elf64_Phdr);
+ uint64_t const cbLoadSections = cMemRanges * sizeof(Elf64_Phdr);
+ uint64_t const offCoreDescriptor = offLoadSections + cbLoadSections;
+ uint64_t const cbCoreDescriptor = Elf64NoteSectionSize(g_pcszCoreVBoxCore, sizeof(CoreDescriptor));
+ uint64_t const offCpuDumps = offCoreDescriptor + cbCoreDescriptor;
+ uint64_t const cbCpuDumps = pVM->cCpus * Elf64NoteSectionSize(g_pcszCoreVBoxCpu, sizeof(DBGFCORECPU));
+ uint64_t const offMemory = offCpuDumps + cbCpuDumps;
+
+ uint64_t const offNoteSectionData = offCoreDescriptor;
+ uint64_t const cbNoteSectionData = cbCoreDescriptor + cbCpuDumps;
+
+ /*
+ * Write ELF header.
+ */
+ int rc = Elf64WriteElfHdr(hFile, cProgHdrs, 0 /* cSecHdrs */);
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteElfHdr failed. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Write PT_NOTE program header.
+ */
+ Assert(RTFileTell(hFile) == offNoteSection);
+ rc = Elf64WriteProgHdr(hFile, PT_NOTE, PF_R,
+ offNoteSectionData, /* file offset to contents */
+ cbNoteSectionData, /* size in core file */
+ cbNoteSectionData, /* size in memory */
+ 0); /* physical address */
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WritreProgHdr failed for PT_NOTE. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Write PT_LOAD program header for each memory range.
+ */
+ Assert(RTFileTell(hFile) == offLoadSections);
+ uint64_t offMemRange = offMemory;
+ for (uint16_t iRange = 0; iRange < cMemRanges; iRange++)
+ {
+ RTGCPHYS GCPhysStart;
+ RTGCPHYS GCPhysEnd;
+ bool fIsMmio;
+ rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysEnd, NULL /* pszDesc */, &fIsMmio);
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": PGMR3PhysGetRange failed for iRange(%u) rc=%Rrc\n", iRange, rc));
+ return rc;
+ }
+
+ uint64_t cbMemRange = GCPhysEnd - GCPhysStart + 1;
+ uint64_t cbFileRange = fIsMmio ? 0 : cbMemRange;
+
+ Log((DBGFLOG_NAME ": PGMR3PhysGetRange iRange=%u GCPhysStart=%#x GCPhysEnd=%#x cbMemRange=%u\n",
+ iRange, GCPhysStart, GCPhysEnd, cbMemRange));
+
+ rc = Elf64WriteProgHdr(hFile, PT_LOAD, PF_R,
+ offMemRange, /* file offset to contents */
+ cbFileRange, /* size in core file */
+ cbMemRange, /* size in memory */
+ GCPhysStart); /* physical address */
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteProgHdr failed for memory range(%u) cbFileRange=%u cbMemRange=%u rc=%Rrc\n",
+ iRange, cbFileRange, cbMemRange, rc));
+ return rc;
+ }
+
+ offMemRange += cbFileRange;
+ }
+
+ /*
+ * Write the Core descriptor note header and data.
+ */
+ Assert(RTFileTell(hFile) == offCoreDescriptor);
+ rc = Elf64WriteNoteHdr(hFile, NT_VBOXCORE, g_pcszCoreVBoxCore, &CoreDescriptor, sizeof(CoreDescriptor));
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr failed for Note '%s' rc=%Rrc\n", g_pcszCoreVBoxCore, rc));
+ return rc;
+ }
+
+ /*
+ * Write the CPU context note headers and data.
+ * We allocate the DBGFCORECPU struct rather than using the stack, as it can be pretty large due to X86XSAVEAREA.
+ */
+ Assert(RTFileTell(hFile) == offCpuDumps);
+ PDBGFCORECPU pDbgfCoreCpu = (PDBGFCORECPU)RTMemAlloc(sizeof(*pDbgfCoreCpu));
+ if (RT_UNLIKELY(!pDbgfCoreCpu))
+ {
+ LogRel((DBGFLOG_NAME ": Failed to alloc %u bytes for DBGFCORECPU\n", sizeof(*pDbgfCoreCpu)));
+ return VERR_NO_MEMORY;
+ }
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
+ dbgfR3GetCoreCpu(pVCpu, pDbgfCoreCpu);
+
+ rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr failed for vCPU[%u] rc=%Rrc\n", idCpu, rc));
+ RTMemFree(pDbgfCoreCpu);
+ return rc;
+ }
+ }
+ RTMemFree(pDbgfCoreCpu);
+ pDbgfCoreCpu = NULL;
+
+ /*
+ * Write memory ranges.
+ */
+ Assert(RTFileTell(hFile) == offMemory);
+ for (uint16_t iRange = 0; iRange < cMemRanges; iRange++)
+ {
+ RTGCPHYS GCPhysStart;
+ RTGCPHYS GCPhysEnd;
+ bool fIsMmio;
+ rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysEnd, NULL /* pszDesc */, &fIsMmio);
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": PGMR3PhysGetRange(2) failed for iRange(%u) rc=%Rrc\n", iRange, rc));
+ return rc;
+ }
+
+ if (fIsMmio)
+ continue;
+
+ /*
+ * Write this memory range page by page.
+ *
+ * The read function may fail on MMIO pages; we write these as zero
+ * pages for now (would be nice to have the VGA bits there though).
+ */
+ uint64_t cbMemRange = GCPhysEnd - GCPhysStart + 1;
+ uint64_t cPages = cbMemRange >> GUEST_PAGE_SHIFT;
+ for (uint64_t iPage = 0; iPage < cPages; iPage++)
+ {
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ rc = PGMPhysSimpleReadGCPhys(pVM, abPage, GCPhysStart + (iPage << GUEST_PAGE_SHIFT), sizeof(abPage));
+ if (RT_FAILURE(rc))
+ {
+ if (rc != VERR_PGM_PHYS_PAGE_RESERVED)
+ LogRel((DBGFLOG_NAME ": PGMPhysRead failed for iRange=%u iPage=%u. rc=%Rrc. Ignoring...\n", iRange, iPage, rc));
+ RT_ZERO(abPage);
+ }
+
+ rc = RTFileWrite(hFile, abPage, sizeof(abPage), NULL /* all */);
+ if (RT_FAILURE(rc))
+ {
+ LogRel((DBGFLOG_NAME ": RTFileWrite failed. iRange=%u iPage=%u rc=%Rrc\n", iRange, iPage, rc));
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * EMT Rendezvous worker function for DBGFR3CoreWrite().
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvData Opaque data.
+ *
+ * @return VBox status code.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3CoreWriteRendezvous(PVM pVM, PVMCPU pVCpu, void *pvData)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pVCpu, VERR_INVALID_VMCPU_HANDLE);
+ AssertReturn(pvData, VERR_INVALID_POINTER);
+
+ PDBGFCOREDATA pDbgfData = (PDBGFCOREDATA)pvData;
+
+ /*
+ * Create the core file.
+ */
+ uint32_t fFlags = (pDbgfData->fReplaceFile ? RTFILE_O_CREATE_REPLACE : RTFILE_O_CREATE)
+ | RTFILE_O_WRITE
+ | RTFILE_O_DENY_ALL
+ | (0600 << RTFILE_O_CREATE_MODE_SHIFT);
+ RTFILE hFile;
+ int rc = RTFileOpen(&hFile, pDbgfData->pszFilename, fFlags);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3CoreWriteWorker(pVM, hFile);
+ RTFileClose(hFile);
+ }
+ else
+ LogRel((DBGFLOG_NAME ": RTFileOpen failed for '%s' rc=%Rrc\n", pDbgfData->pszFilename, rc));
+ return rc;
+}
+
+
+/**
+ * Write core dump of the guest.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszFilename The name of the file to which the guest core
+ * dump should be written.
+ * @param fReplaceFile Whether to replace the file or not.
+ *
+ * @remarks The VM may need to be suspended before calling this function in
+ * order to truly stop all device threads and drivers. This function
+ * only synchronizes EMTs.
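+ *
+ *          Illustrative usage (the path name is made up):
+ * @code
+ *     int rc = DBGFR3CoreWrite(pUVM, "/tmp/guest.vmcore", true /* fReplaceFile */);
+ *     if (RT_FAILURE(rc))
+ *         LogRel(("Writing guest core failed: %Rrc\n", rc));
+ * @endcode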
+ */
+VMMR3DECL(int) DBGFR3CoreWrite(PUVM pUVM, const char *pszFilename, bool fReplaceFile)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pszFilename, VERR_INVALID_HANDLE);
+
+ /*
+ * Pass the core write request down to EMT rendezvous which makes sure
+ * other EMTs, if any, are not running. IO threads could still be running
+ * but we don't care about them.
+ */
+ DBGFCOREDATA CoreData;
+ RT_ZERO(CoreData);
+ CoreData.pszFilename = pszFilename;
+ CoreData.fReplaceFile = fReplaceFile;
+
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, dbgfR3CoreWriteRendezvous, &CoreData);
+ if (RT_SUCCESS(rc))
+ LogRel((DBGFLOG_NAME ": Successfully wrote guest core dump '%s'\n", pszFilename));
+ else
+ LogRel((DBGFLOG_NAME ": Failed to write guest core dump '%s'. rc=%Rrc\n", pszFilename, rc));
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFCpu.cpp b/src/VBox/VMM/VMMR3/DBGFCpu.cpp
new file mode 100644
index 00000000..a88efe75
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFCpu.cpp
@@ -0,0 +1,208 @@
+/* $Id: DBGFCpu.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, CPU State Accessors.
+ */
+
+/*
+ * Copyright (C) 2009-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#define VMCPU_INCL_CPUM_GST_CTX /* For CPUM_IMPORT_EXTRN_RET(). */
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/cpum.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <iprt/errcore.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <iprt/assert.h>
+
+
+/**
+ * Wrapper around CPUMGetGuestMode.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The current CPU ID.
+ * @param penmMode Where to return the mode.
+ */
+static DECLCALLBACK(int) dbgfR3CpuGetMode(PVM pVM, VMCPUID idCpu, CPUMMODE *penmMode)
+{
+ Assert(idCpu == VMMGetCpuId(pVM));
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
+ *penmMode = CPUMGetGuestMode(pVCpu);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Get the current CPU mode.
+ *
+ * @returns The CPU mode on success, CPUMMODE_INVALID on failure.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
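+ *
+ * Illustrative usage, querying the mode of vCPU 0:
+ * @code
+ *     CPUMMODE enmMode = DBGFR3CpuGetMode(pUVM, 0 /* idCpu */);
+ *     if (enmMode == CPUMMODE_LONG)
+ *         RTPrintf("vCPU0 is in long mode\n");
+ * @endcode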
+ */
+VMMR3DECL(CPUMMODE) DBGFR3CpuGetMode(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, CPUMMODE_INVALID);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, CPUMMODE_INVALID);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, CPUMMODE_INVALID);
+
+ CPUMMODE enmMode;
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuGetMode, 3, pUVM->pVM, idCpu, &enmMode);
+ if (RT_FAILURE(rc))
+ return CPUMMODE_INVALID;
+ return enmMode;
+}
+
+
+/**
+ * Wrapper around CPUMIsGuestIn64BitCode.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The current CPU ID.
+ * @param pfIn64BitCode Where to return the result.
+ */
+static DECLCALLBACK(int) dbgfR3CpuIn64BitCode(PVM pVM, VMCPUID idCpu, bool *pfIn64BitCode)
+{
+ Assert(idCpu == VMMGetCpuId(pVM));
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
+ *pfIn64BitCode = CPUMIsGuestIn64BitCode(pVCpu);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the given CPU is executing 64-bit code or not.
+ *
+ * @returns true / false accordingly.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
+ */
+VMMR3DECL(bool) DBGFR3CpuIsIn64BitCode(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, false);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, false);
+
+ bool fIn64BitCode;
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuIn64BitCode, 3, pUVM->pVM, idCpu, &fIn64BitCode);
+ if (RT_FAILURE(rc))
+ return false;
+ return fIn64BitCode;
+}
+
+
+/**
+ * Wrapper around CPUMIsGuestInV86Code.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The current CPU ID.
+ * @param pfInV86Code Where to return the result.
+ */
+static DECLCALLBACK(int) dbgfR3CpuInV86Code(PVM pVM, VMCPUID idCpu, bool *pfInV86Code)
+{
+ Assert(idCpu == VMMGetCpuId(pVM));
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RFLAGS);
+ *pfInV86Code = CPUMIsGuestInV86ModeEx(CPUMQueryGuestCtxPtr(pVCpu));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the given CPU is executing V8086 code or not.
+ *
+ * @returns true / false accordingly.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
+ */
+VMMR3DECL(bool) DBGFR3CpuIsInV86Code(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, false);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, false);
+
+ bool fInV86Code;
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuInV86Code, 3, pUVM->pVM, idCpu, &fInV86Code);
+ if (RT_FAILURE(rc))
+ return false;
+ return fInV86Code;
+}
+
+
+/**
+ * Get the number of CPUs (or threads if you insist).
+ *
+ * @returns The number of CPUs
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMCPUID) DBGFR3CpuGetCount(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, 1);
+ return pUVM->cCpus;
+}
+
+
+/**
+ * Returns the state of the given CPU as a human readable string.
+ *
+ * @returns Pointer to the human readable CPU state string.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
+ */
+VMMR3DECL(const char *) DBGFR3CpuGetState(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, NULL);
+
+ PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
+ VMCPUSTATE enmCpuState = (VMCPUSTATE)ASMAtomicReadU32((volatile uint32_t *)&pVCpu->enmState);
+
+ switch (enmCpuState)
+ {
+ case VMCPUSTATE_INVALID: return "<INVALID>";
+ case VMCPUSTATE_STOPPED: return "Stopped";
+ case VMCPUSTATE_STARTED: return "Started";
+ case VMCPUSTATE_STARTED_HM: return "Started (HM)";
+ case VMCPUSTATE_STARTED_EXEC: return "Started (Exec)";
+ case VMCPUSTATE_STARTED_EXEC_NEM: return "Started (Exec NEM)";
+ case VMCPUSTATE_STARTED_EXEC_NEM_WAIT: return "Started (Exec NEM Wait)";
+ case VMCPUSTATE_STARTED_EXEC_NEM_CANCELED: return "Started (Exec NEM Canceled)";
+ case VMCPUSTATE_STARTED_HALTED: return "Started (Halted)";
+ case VMCPUSTATE_END: return "END";
+ default: break;
+ }
+
+ AssertMsgFailedReturn(("Unknown CPU state %u\n", enmCpuState), "<UNKNOWN>");
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFDisas.cpp b/src/VBox/VMM/VMMR3/DBGFDisas.cpp
new file mode 100644
index 00000000..96ad6ab3
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFDisas.cpp
@@ -0,0 +1,798 @@
+/* $Id: DBGFDisas.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Disassembler.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include "DBGFInternal.h"
+#include <VBox/dis.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <iprt/alloca.h>
+#include <iprt/ctype.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Structure used when disassembling instructions in DBGF.
+ * This is used so the reader function can get the stuff it needs.
+ */
+typedef struct
+{
+ /** The core structure. */
+ DISCPUSTATE Cpu;
+ /** The cross context VM structure. */
+ PVM pVM;
+ /** The cross context virtual CPU structure. */
+ PVMCPU pVCpu;
+ /** The address space for resolving symbols. */
+ RTDBGAS hDbgAs;
+ /** Pointer to the first byte in the segment. */
+ RTGCUINTPTR GCPtrSegBase;
+ /** Pointer to the byte after the end of the segment. (might have wrapped!) */
+ RTGCUINTPTR GCPtrSegEnd;
+ /** The size of the segment minus 1. */
+ RTGCUINTPTR cbSegLimit;
+ /** The guest paging mode. */
+ PGMMODE enmMode;
+ /** Pointer to the current page - R3 Ptr. */
+ void const *pvPageR3;
+ /** Pointer to the current page - GC Ptr. */
+ RTGCPTR GCPtrPage;
+ /** Pointer to the next instruction (relative to GCPtrSegBase). */
+ RTGCUINTPTR GCPtrNext;
+ /** The lock information that PGMPhysReleasePageMappingLock needs. */
+ PGMPAGEMAPLOCK PageMapLock;
+ /** Whether the PageMapLock is valid or not. */
+ bool fLocked;
+ /** 64 bits mode or not. */
+ bool f64Bits;
+} DBGFDISASSTATE, *PDBGFDISASSTATE;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNDISREADBYTES dbgfR3DisasInstrRead;
+
+
+
+/**
+ * Calls the disassembler with the proper reader functions and such for disassembling the first instruction.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pSelInfo The selector info.
+ * @param enmMode The guest paging mode.
+ * @param fFlags DBGF_DISAS_FLAGS_XXX.
+ * @param GCPtr The GC pointer (selector offset).
+ * @param pState The disas CPU state.
+ */
+static int dbgfR3DisasInstrFirst(PVM pVM, PVMCPU pVCpu, PDBGFSELINFO pSelInfo, PGMMODE enmMode,
+ RTGCPTR GCPtr, uint32_t fFlags, PDBGFDISASSTATE pState)
+{
+ pState->GCPtrSegBase = pSelInfo->GCPtrBase;
+ pState->GCPtrSegEnd = pSelInfo->cbLimit + 1 + (RTGCUINTPTR)pSelInfo->GCPtrBase;
+ pState->cbSegLimit = pSelInfo->cbLimit;
+ pState->enmMode = enmMode;
+ pState->GCPtrPage = 0;
+ pState->pvPageR3 = NULL;
+ pState->hDbgAs = DBGF_AS_GLOBAL;
+ pState->pVM = pVM;
+ pState->pVCpu = pVCpu;
+ pState->fLocked = false;
+ pState->f64Bits = enmMode >= PGMMODE_AMD64 && pSelInfo->u.Raw.Gen.u1Long;
+
+ DISCPUMODE enmCpuMode;
+ switch (fFlags & DBGF_DISAS_FLAGS_MODE_MASK)
+ {
+ default:
+ AssertFailed();
+ RT_FALL_THRU();
+ case DBGF_DISAS_FLAGS_DEFAULT_MODE:
+ enmCpuMode = pState->f64Bits
+ ? DISCPUMODE_64BIT
+ : pSelInfo->u.Raw.Gen.u1DefBig
+ ? DISCPUMODE_32BIT
+ : DISCPUMODE_16BIT;
+ break;
+ case DBGF_DISAS_FLAGS_16BIT_MODE:
+ case DBGF_DISAS_FLAGS_16BIT_REAL_MODE:
+ enmCpuMode = DISCPUMODE_16BIT;
+ break;
+ case DBGF_DISAS_FLAGS_32BIT_MODE:
+ enmCpuMode = DISCPUMODE_32BIT;
+ break;
+ case DBGF_DISAS_FLAGS_64BIT_MODE:
+ enmCpuMode = DISCPUMODE_64BIT;
+ break;
+ }
+
+ uint32_t cbInstr;
+ int rc = DISInstrWithReader(GCPtr,
+ enmCpuMode,
+ dbgfR3DisasInstrRead,
+ &pState->Cpu,
+ &pState->Cpu,
+ &cbInstr);
+ if (RT_SUCCESS(rc))
+ {
+ pState->GCPtrNext = GCPtr + cbInstr;
+ return VINF_SUCCESS;
+ }
+
+ /* cleanup */
+ if (pState->fLocked)
+ {
+ PGMPhysReleasePageMappingLock(pVM, &pState->PageMapLock);
+ pState->fLocked = false;
+ }
+ return rc;
+}
+
+
+#if 0
+/**
+ * Calls the disassembler for disassembling the next instruction.
+ *
+ * @returns VBox status code.
+ * @param pState The disas CPU state.
+ */
+static int dbgfR3DisasInstrNext(PDBGFDISASSTATE pState)
+{
+ uint32_t cbInstr;
+ int rc = DISInstr(&pState->Cpu, (void *)pState->GCPtrNext, 0, &cbInstr, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ pState->GCPtrNext += cbInstr;
+ return VINF_SUCCESS;
+ }
+ return rc;
+}
+#endif
+
+
+/**
+ * Done with the disassembler state, free associated resources.
+ *
+ * @param pState The disas CPU state.
+ */
+static void dbgfR3DisasInstrDone(PDBGFDISASSTATE pState)
+{
+ if (pState->fLocked)
+ {
+ PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
+ pState->fLocked = false;
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNDISREADBYTES}
+ *
+ * @remarks The source is relative to the base address indicated by
+ * DBGFDISASSTATE::GCPtrSegBase.
+ */
+static DECLCALLBACK(int) dbgfR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
+{
+ PDBGFDISASSTATE pState = (PDBGFDISASSTATE)pDis;
+ for (;;)
+ {
+ RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
+
+ /*
+ * Need to update the page translation?
+ */
+ if ( !pState->pvPageR3
+ || (GCPtr >> GUEST_PAGE_SHIFT) != (pState->GCPtrPage >> GUEST_PAGE_SHIFT))
+ {
+ int rc = VINF_SUCCESS;
+
+ /* translate the address */
+ pState->GCPtrPage = GCPtr & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
+ if (pState->fLocked)
+ PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
+ if (pState->enmMode <= PGMMODE_PROTECTED)
+ rc = PGMPhysGCPhys2CCPtrReadOnly(pState->pVM, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
+ else
+ rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
+ if (RT_SUCCESS(rc))
+ pState->fLocked = true;
+ else
+ {
+ pState->fLocked = false;
+ pState->pvPageR3 = NULL;
+ return rc;
+ }
+ }
+
+ /*
+ * Check the segment limit.
+ */
+ if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
+ return VERR_OUT_OF_SELECTOR_BOUNDS;
+
+ /*
+ * Calc how much we can read, maxing out the read.
+ */
+ uint32_t cb = GUEST_PAGE_SIZE - (GCPtr & GUEST_PAGE_OFFSET_MASK);
+ if (!pState->f64Bits)
+ {
+ RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
+ if (cb > cbSeg && cbSeg)
+ cb = cbSeg;
+ }
+ if (cb > cbMaxRead)
+ cb = cbMaxRead;
+
+ /*
+ * Read and advance.
+ */
+ memcpy(&pDis->abInstr[offInstr], (char *)pState->pvPageR3 + (GCPtr & GUEST_PAGE_OFFSET_MASK), cb);
+ offInstr += (uint8_t)cb;
+ if (cb >= cbMinRead)
+ {
+ pDis->cbCachedInstr = offInstr;
+ return VINF_SUCCESS;
+ }
+ cbMaxRead -= (uint8_t)cb;
+ cbMinRead -= (uint8_t)cb;
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNDISGETSYMBOL}
+ */
+static DECLCALLBACK(int) dbgfR3DisasGetSymbol(PCDISCPUSTATE pDis, uint32_t u32Sel, RTUINTPTR uAddress,
+ char *pszBuf, size_t cchBuf, RTINTPTR *poff, void *pvUser)
+{
+ PDBGFDISASSTATE pState = (PDBGFDISASSTATE)pDis;
+ PCDBGFSELINFO pSelInfo = (PCDBGFSELINFO)pvUser;
+
+ /*
+ * Address conversion
+ */
+ DBGFADDRESS Addr;
+ int rc;
+ /* Start with CS. */
+ if ( DIS_FMT_SEL_IS_REG(u32Sel)
+ ? DIS_FMT_SEL_GET_REG(u32Sel) == DISSELREG_CS
+ : pSelInfo->Sel == DIS_FMT_SEL_GET_VALUE(u32Sel))
+ rc = DBGFR3AddrFromSelInfoOff(pState->pVM->pUVM, &Addr, pSelInfo, uAddress);
+ /* In long mode everything but FS and GS is easy. */
+ else if ( pState->Cpu.uCpuMode == DISCPUMODE_64BIT
+ && DIS_FMT_SEL_IS_REG(u32Sel)
+ && DIS_FMT_SEL_GET_REG(u32Sel) != DISSELREG_GS
+ && DIS_FMT_SEL_GET_REG(u32Sel) != DISSELREG_FS)
+ {
+ DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
+ rc = VINF_SUCCESS;
+ }
+ /* Here's a quick hack to catch patch manager SS relative access. */
+ else if ( DIS_FMT_SEL_IS_REG(u32Sel)
+ && DIS_FMT_SEL_GET_REG(u32Sel) == DISSELREG_SS
+ && pSelInfo->GCPtrBase == 0
+ && pSelInfo->cbLimit >= UINT32_MAX)
+ {
+ DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ /** @todo implement a generic solution here. */
+ rc = VERR_SYMBOL_NOT_FOUND;
+ }
+
+ /*
+ * If we got an address, try resolve it into a symbol.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ RTDBGSYMBOL Sym;
+ RTGCINTPTR off;
+ rc = DBGFR3AsSymbolByAddr(pState->pVM->pUVM, pState->hDbgAs, &Addr,
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ &off, &Sym, NULL /*phMod*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Return the symbol and offset.
+ */
+ size_t cchName = strlen(Sym.szName);
+ if (cchName >= cchBuf)
+ cchName = cchBuf - 1;
+ memcpy(pszBuf, Sym.szName, cchName);
+ pszBuf[cchName] = '\0';
+
+ *poff = off;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Disassembles the one instruction according to the specified flags and
+ * address, internal worker executing on the EMT of the specified virtual CPU.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param Sel The code selector. This is used to determine the 16/32-bit-ness
+ * and the calculation of the actual instruction address.
+ * @param pGCPtr Pointer to the variable holding the code address
+ * relative to the base of Sel.
+ * @param fFlags Flags controlling where to start and how to format.
+ * A combination of the DBGF_DISAS_FLAGS_* \#defines.
+ * @param pszOutput Output buffer.
+ * @param cbOutput Size of the output buffer.
+ * @param pcbInstr Where to return the size of the instruction.
+ * @param pDisState Where to store the disassembler state into.
+ */
+static DECLCALLBACK(int)
+dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint32_t fFlags,
+ char *pszOutput, uint32_t cbOutput, uint32_t *pcbInstr, PDBGFDISSTATE pDisState)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ RTGCPTR GCPtr = *pGCPtr;
+ int rc;
+
+ /*
+ * Get the Sel and GCPtr if fFlags requests that.
+ */
+ PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ PCCPUMSELREG pSRegCS = NULL;
+ if (fFlags & DBGF_DISAS_FLAGS_CURRENT_GUEST)
+ {
+ Sel = pCtx->cs.Sel;
+ pSRegCS = &pCtx->cs;
+ GCPtr = pCtx->rip;
+ }
+ /*
+ * Check if the selector matches the guest CS, use the hidden
+ * registers from that if they are valid. Saves time and effort.
+ */
+ else
+ {
+ if (pCtx->cs.Sel == Sel && Sel != DBGF_SEL_FLAT)
+ pSRegCS = &pCtx->cs;
+ else
+ pCtx = NULL;
+ }
+
+ /*
+ * Read the selector info - assume no stale selectors and nasty stuff like that.
+ *
+ * Note! We CANNOT load invalid hidden selector registers since that would
+ * mean that log/debug statements or the debugger would influence the
+ * guest state and make things behave differently.
+ */
+ DBGFSELINFO SelInfo;
+ const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ bool fRealModeAddress = false;
+
+ if ( pSRegCS
+ && CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
+ {
+ SelInfo.Sel = Sel;
+ SelInfo.SelGate = 0;
+ SelInfo.GCPtrBase = pSRegCS->u64Base;
+ SelInfo.cbLimit = pSRegCS->u32Limit;
+ SelInfo.fFlags = PGMMODE_IS_LONG_MODE(enmMode)
+ ? DBGFSELINFO_FLAGS_LONG_MODE
+ : enmMode != PGMMODE_REAL && !pCtx->eflags.Bits.u1VM
+ ? DBGFSELINFO_FLAGS_PROT_MODE
+ : DBGFSELINFO_FLAGS_REAL_MODE;
+
+ SelInfo.u.Raw.au32[0] = 0;
+ SelInfo.u.Raw.au32[1] = 0;
+ SelInfo.u.Raw.Gen.u16LimitLow = 0xffff;
+ SelInfo.u.Raw.Gen.u4LimitHigh = 0xf;
+ SelInfo.u.Raw.Gen.u1Present = pSRegCS->Attr.n.u1Present;
+ SelInfo.u.Raw.Gen.u1Granularity = pSRegCS->Attr.n.u1Granularity;
+ SelInfo.u.Raw.Gen.u1DefBig = pSRegCS->Attr.n.u1DefBig;
+ SelInfo.u.Raw.Gen.u1Long = pSRegCS->Attr.n.u1Long;
+ SelInfo.u.Raw.Gen.u1DescType = pSRegCS->Attr.n.u1DescType;
+ SelInfo.u.Raw.Gen.u4Type = pSRegCS->Attr.n.u4Type;
+ fRealModeAddress = !!(SelInfo.fFlags & DBGFSELINFO_FLAGS_REAL_MODE);
+ }
+ else if (Sel == DBGF_SEL_FLAT)
+ {
+ SelInfo.Sel = Sel;
+ SelInfo.SelGate = 0;
+ SelInfo.GCPtrBase = 0;
+ SelInfo.cbLimit = ~(RTGCUINTPTR)0;
+ SelInfo.fFlags = PGMMODE_IS_LONG_MODE(enmMode)
+ ? DBGFSELINFO_FLAGS_LONG_MODE
+ : enmMode != PGMMODE_REAL
+ ? DBGFSELINFO_FLAGS_PROT_MODE
+ : DBGFSELINFO_FLAGS_REAL_MODE;
+ SelInfo.u.Raw.au32[0] = 0;
+ SelInfo.u.Raw.au32[1] = 0;
+ SelInfo.u.Raw.Gen.u16LimitLow = 0xffff;
+ SelInfo.u.Raw.Gen.u4LimitHigh = 0xf;
+
+ pSRegCS = &CPUMQueryGuestCtxPtr(pVCpu)->cs;
+ if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
+ {
+ /* Assume the current CS defines the execution mode. */
+ SelInfo.u.Raw.Gen.u1Present = pSRegCS->Attr.n.u1Present;
+ SelInfo.u.Raw.Gen.u1Granularity = pSRegCS->Attr.n.u1Granularity;
+ SelInfo.u.Raw.Gen.u1DefBig = pSRegCS->Attr.n.u1DefBig;
+ SelInfo.u.Raw.Gen.u1Long = pSRegCS->Attr.n.u1Long;
+ SelInfo.u.Raw.Gen.u1DescType = pSRegCS->Attr.n.u1DescType;
+ SelInfo.u.Raw.Gen.u4Type = pSRegCS->Attr.n.u4Type;
+ }
+ else
+ {
+ pSRegCS = NULL;
+ SelInfo.u.Raw.Gen.u1Present = 1;
+ SelInfo.u.Raw.Gen.u1Granularity = 1;
+ SelInfo.u.Raw.Gen.u1DefBig = 1;
+ SelInfo.u.Raw.Gen.u1DescType = 1;
+ SelInfo.u.Raw.Gen.u4Type = X86_SEL_TYPE_EO;
+ }
+ }
+ else if ( (pCtx && pCtx->eflags.Bits.u1VM)
+ || enmMode == PGMMODE_REAL
+ || (fFlags & DBGF_DISAS_FLAGS_MODE_MASK) == DBGF_DISAS_FLAGS_16BIT_REAL_MODE)
+ { /* V86 mode or real mode - real mode addressing */
+ SelInfo.Sel = Sel;
+ SelInfo.SelGate = 0;
+ SelInfo.GCPtrBase = Sel * 16;
+ SelInfo.cbLimit = ~(RTGCUINTPTR)0;
+ SelInfo.fFlags = DBGFSELINFO_FLAGS_REAL_MODE;
+ SelInfo.u.Raw.au32[0] = 0;
+ SelInfo.u.Raw.au32[1] = 0;
+ SelInfo.u.Raw.Gen.u16LimitLow = 0xffff;
+ SelInfo.u.Raw.Gen.u4LimitHigh = 0xf;
+ SelInfo.u.Raw.Gen.u1Present = 1;
+ SelInfo.u.Raw.Gen.u1Granularity = 1;
+ SelInfo.u.Raw.Gen.u1DefBig = 0; /* 16 bits */
+ SelInfo.u.Raw.Gen.u1DescType = 1;
+ SelInfo.u.Raw.Gen.u4Type = X86_SEL_TYPE_EO;
+ fRealModeAddress = true;
+ }
+ else
+ {
+ rc = SELMR3GetSelectorInfo(pVCpu, Sel, &SelInfo);
+ if (RT_FAILURE(rc))
+ {
+ RTStrPrintf(pszOutput, cbOutput, "Sel=%04x -> %Rrc\n", Sel, rc);
+ return rc;
+ }
+ }
+
+ /*
+ * Disassemble it.
+ */
+ DBGFDISASSTATE State;
+ rc = dbgfR3DisasInstrFirst(pVM, pVCpu, &SelInfo, enmMode, GCPtr, fFlags, &State);
+ if (RT_FAILURE(rc))
+ {
+ if (State.Cpu.cbCachedInstr)
+ RTStrPrintf(pszOutput, cbOutput, "Disas -> %Rrc; %.*Rhxs\n", rc, (size_t)State.Cpu.cbCachedInstr, State.Cpu.abInstr);
+ else
+ RTStrPrintf(pszOutput, cbOutput, "Disas -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Format it.
+ */
+ char szBuf[512];
+ DISFormatYasmEx(&State.Cpu, szBuf, sizeof(szBuf),
+ DIS_FMT_FLAGS_RELATIVE_BRANCH,
+ fFlags & DBGF_DISAS_FLAGS_NO_SYMBOLS ? NULL : dbgfR3DisasGetSymbol,
+ &SelInfo);
+
+ /*
+ * Print it to the user specified buffer.
+ */
+ size_t cch;
+ if (fFlags & DBGF_DISAS_FLAGS_NO_BYTES)
+ {
+ if (fFlags & DBGF_DISAS_FLAGS_NO_ADDRESS)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%s", szBuf);
+ else if (fRealModeAddress)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %s", Sel, (unsigned)GCPtr, szBuf);
+ else if (Sel == DBGF_SEL_FLAT)
+ {
+ if (enmMode >= PGMMODE_AMD64)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%RGv %s", GCPtr, szBuf);
+ else
+ cch = RTStrPrintf(pszOutput, cbOutput, "%08RX32 %s", (uint32_t)GCPtr, szBuf);
+ }
+ else
+ {
+ if (enmMode >= PGMMODE_AMD64)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %s", Sel, GCPtr, szBuf);
+ else
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %s", Sel, (uint32_t)GCPtr, szBuf);
+ }
+ }
+ else
+ {
+ uint32_t cbInstr = State.Cpu.cbInstr;
+ uint8_t const *pabInstr = State.Cpu.abInstr;
+ if (fFlags & DBGF_DISAS_FLAGS_NO_ADDRESS)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%.*Rhxs%*s %s",
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ else if (fRealModeAddress)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %.*Rhxs%*s %s",
+ Sel, (unsigned)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ else if (Sel == DBGF_SEL_FLAT)
+ {
+ if (enmMode >= PGMMODE_AMD64)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%RGv %.*Rhxs%*s %s",
+ GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ else
+ cch = RTStrPrintf(pszOutput, cbOutput, "%08RX32 %.*Rhxs%*s %s",
+ (uint32_t)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ }
+ else
+ {
+ if (enmMode >= PGMMODE_AMD64)
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %.*Rhxs%*s %s",
+ Sel, GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ else
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %.*Rhxs%*s %s",
+ Sel, (uint32_t)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
+ }
+ }
+
+ if (pcbInstr)
+ *pcbInstr = State.Cpu.cbInstr;
+
+ if (pDisState)
+ {
+ pDisState->pCurInstr = State.Cpu.pCurInstr;
+ pDisState->cbInstr = State.Cpu.cbInstr;
+ pDisState->Param1 = State.Cpu.Param1;
+ pDisState->Param2 = State.Cpu.Param2;
+ pDisState->Param3 = State.Cpu.Param3;
+ pDisState->Param4 = State.Cpu.Param4;
+ }
+
+ dbgfR3DisasInstrDone(&State);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Disassembles the one instruction according to the specified flags and address
+ * returning part of the disassembler state.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of virtual CPU.
+ * @param pAddr The code address.
+ * @param fFlags Flags controlling where to start and how to format.
+ * A combination of the DBGF_DISAS_FLAGS_* \#defines.
+ * @param pszOutput Output buffer. This will always be properly
+ * terminated if @a cbOutput is greater than zero.
+ * @param cbOutput Size of the output buffer.
+ * @param pDisState The disassembler state to fill in.
+ *
+ * @remarks May have to switch to the EMT of the virtual CPU in order to do
+ * address conversion.
+ */
+DECLHIDDEN(int) dbgfR3DisasInstrStateEx(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddr, uint32_t fFlags,
+ char *pszOutput, uint32_t cbOutput, PDBGFDISSTATE pDisState)
+{
+ AssertReturn(cbOutput > 0, VERR_INVALID_PARAMETER);
+ *pszOutput = '\0';
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(!(fFlags & ~DBGF_DISAS_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn((fFlags & DBGF_DISAS_FLAGS_MODE_MASK) <= DBGF_DISAS_FLAGS_64BIT_MODE, VERR_INVALID_PARAMETER);
+
+ /*
+ * Optimize the common case where we're called on the EMT of idCpu since
+ * we're using this all the time when logging.
+ */
+ int rc;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if ( pVCpu
+ && pVCpu->idCpu == idCpu)
+ rc = dbgfR3DisasInstrExOnVCpu(pVM, pVCpu, pAddr->Sel, &pAddr->off, fFlags, pszOutput, cbOutput, NULL, pDisState);
+ else
+ rc = VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3DisasInstrExOnVCpu, 9,
+ pVM, VMMGetCpuById(pVM, idCpu), pAddr->Sel, &pAddr->off, fFlags, pszOutput, cbOutput, NULL, pDisState);
+ return rc;
+}
+
+/**
+ * Disassembles the one instruction according to the specified flags and address.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of virtual CPU.
+ * @param Sel The code selector. This is used to determine the 16/32-bit-ness
+ * and the calculation of the actual instruction address.
+ * @param GCPtr The code address relative to the base of Sel.
+ * @param fFlags Flags controlling where to start and how to format.
+ * A combination of the DBGF_DISAS_FLAGS_* \#defines.
+ * @param pszOutput Output buffer. This will always be properly
+ * terminated if @a cbOutput is greater than zero.
+ * @param cbOutput Size of the output buffer.
+ * @param pcbInstr Where to return the size of the instruction.
+ *
+ * @remarks May have to switch to the EMT of the virtual CPU in order to do
+ * address conversion.
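+ *
+ * Illustrative usage, disassembling at the current guest CS:RIP of vCPU 0:
+ * @code
+ *     char szInstr[256];
+ *     int rc = DBGFR3DisasInstrEx(pUVM, 0 /* idCpu */, 0 /* Sel */, 0 /* GCPtr */,
+ *                                 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ *                                 szInstr, sizeof(szInstr), NULL /* pcbInstr */);
+ *     if (RT_SUCCESS(rc))
+ *         RTPrintf("%s\n", szInstr);
+ * @endcode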
+ */
+VMMR3DECL(int) DBGFR3DisasInstrEx(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, RTGCPTR GCPtr, uint32_t fFlags,
+ char *pszOutput, uint32_t cbOutput, uint32_t *pcbInstr)
+{
+ AssertReturn(cbOutput > 0, VERR_INVALID_PARAMETER);
+ *pszOutput = '\0';
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(!(fFlags & ~DBGF_DISAS_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn((fFlags & DBGF_DISAS_FLAGS_MODE_MASK) <= DBGF_DISAS_FLAGS_64BIT_MODE, VERR_INVALID_PARAMETER);
+
+ /*
+ * Optimize the common case where we're called on the EMT of idCpu since
+ * we're using this all the time when logging.
+ */
+ int rc;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if ( pVCpu
+ && pVCpu->idCpu == idCpu)
+ rc = dbgfR3DisasInstrExOnVCpu(pVM, pVCpu, Sel, &GCPtr, fFlags, pszOutput, cbOutput, pcbInstr, NULL);
+ else
+ rc = VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3DisasInstrExOnVCpu, 9,
+ pVM, VMMGetCpuById(pVM, idCpu), Sel, &GCPtr, fFlags, pszOutput, cbOutput, pcbInstr, NULL);
+ return rc;
+}
+
+
+/**
+ * Disassembles the current guest context instruction.
+ * All registers and data will be displayed. Addresses will be resolved to symbols where possible.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pszOutput Output buffer. This will always be properly
+ * terminated if @a cbOutput is greater than zero.
+ * @param cbOutput Size of the output buffer.
+ * @thread EMT(pVCpu)
+ */
+VMMR3_INT_DECL(int) DBGFR3DisasInstrCurrent(PVMCPU pVCpu, char *pszOutput, uint32_t cbOutput)
+{
+ AssertReturn(cbOutput > 0, VERR_INVALID_PARAMETER);
+ *pszOutput = '\0';
+ Assert(VMCPU_IS_EMT(pVCpu));
+
+ RTGCPTR GCPtr = 0;
+ return dbgfR3DisasInstrExOnVCpu(pVCpu->pVMR3, pVCpu, 0, &GCPtr,
+ DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE
+ | DBGF_DISAS_FLAGS_ANNOTATE_PATCHED,
+ pszOutput, cbOutput, NULL, NULL);
+}
+
+
+/**
+ * Disassembles the current guest context instruction and writes it to the log.
+ * All registers and data will be displayed. Addresses will be resolved to symbols where possible.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pszPrefix Short prefix string to the disassembly string. (optional)
+ * @thread EMT(pVCpu)
+ */
+VMMR3DECL(int) DBGFR3DisasInstrCurrentLogInternal(PVMCPU pVCpu, const char *pszPrefix)
+{
+ char szBuf[256];
+ szBuf[0] = '\0';
+ int rc = DBGFR3DisasInstrCurrent(pVCpu, &szBuf[0], sizeof(szBuf));
+ if (RT_FAILURE(rc))
+ RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrCurrentLog failed with rc=%Rrc\n", rc);
+ if (pszPrefix && *pszPrefix)
+ {
+ if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+ RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ else
+ RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+ }
+ else
+ RTLogPrintf("%s\n", szBuf);
+ return rc;
+}
+
+
+
+/**
+ * Disassembles the specified guest context instruction and writes it to the log.
+ * Addresses will be resolved to symbols where possible.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling
+ * EMT.
+ * @param Sel The code selector. This is used to determine the
+ * 16/32-bit-ness and the calculation of the actual
+ * instruction address.
+ * @param GCPtr The code address relative to the base of Sel.
+ * @param pszPrefix Short prefix string to the disassembly string.
+ * (optional)
+ * @thread EMT(pVCpu)
+ */
+VMMR3DECL(int) DBGFR3DisasInstrLogInternal(PVMCPU pVCpu, RTSEL Sel, RTGCPTR GCPtr, const char *pszPrefix)
+{
+ Assert(VMCPU_IS_EMT(pVCpu));
+
+ char szBuf[256];
+ RTGCPTR GCPtrTmp = GCPtr;
+ int rc = dbgfR3DisasInstrExOnVCpu(pVCpu->pVMR3, pVCpu, Sel, &GCPtrTmp, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ &szBuf[0], sizeof(szBuf), NULL, NULL);
+ if (RT_FAILURE(rc))
+ RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrLog(, %RTsel, %RGv) failed with rc=%Rrc\n", Sel, GCPtr, rc);
+ if (pszPrefix && *pszPrefix)
+ {
+ if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+ RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ else
+ RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+ }
+ else
+ RTLogPrintf("%s\n", szBuf);
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFInfo.cpp b/src/VBox/VMM/VMMR3/DBGFInfo.cpp
new file mode 100644
index 00000000..fca87568
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFInfo.cpp
@@ -0,0 +1,1474 @@
+/* $Id: DBGFInfo.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Info.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF_INFO
+#include <VBox/vmm/dbgf.h>
+
+#include <VBox/vmm/mm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/getopt.h>
+#include <iprt/param.h>
+#include <iprt/semaphore.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(void) dbgfR3InfoLog_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...);
+static DECLCALLBACK(void) dbgfR3InfoLog_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args);
+static DECLCALLBACK(void) dbgfR3InfoLogRel_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...);
+static DECLCALLBACK(void) dbgfR3InfoLogRel_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args);
+static DECLCALLBACK(void) dbgfR3InfoStdErr_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...);
+static DECLCALLBACK(void) dbgfR3InfoStdErr_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args);
+static DECLCALLBACK(void) dbgfR3InfoHelp(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Logger output. */
+static const DBGFINFOHLP g_dbgfR3InfoLogHlp =
+{
+ dbgfR3InfoLog_Printf,
+ dbgfR3InfoLog_PrintfV,
+ DBGFR3InfoGenericGetOptError,
+};
+
+/** Release logger output. */
+static const DBGFINFOHLP g_dbgfR3InfoLogRelHlp =
+{
+ dbgfR3InfoLogRel_Printf,
+ dbgfR3InfoLogRel_PrintfV,
+ DBGFR3InfoGenericGetOptError
+};
+
+/** Standard error output. */
+static const DBGFINFOHLP g_dbgfR3InfoStdErrHlp =
+{
+ dbgfR3InfoStdErr_Printf,
+ dbgfR3InfoStdErr_PrintfV,
+ DBGFR3InfoGenericGetOptError
+};
+
+
+/**
+ * Initialize the info handlers.
+ *
+ * This is called first during the DBGF init process and thus does the shared
+ * critsect init.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3InfoInit(PUVM pUVM)
+{
+ /*
+ * Make sure we haven't already been initialized in the lazy manner.
+ */
+ if (RTCritSectRwIsInitialized(&pUVM->dbgf.s.CritSect))
+ return VINF_SUCCESS;
+
+ /*
+ * Initialize the crit sect.
+ */
+ int rc = RTCritSectRwInit(&pUVM->dbgf.s.CritSect);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the 'info help' item.
+ */
+ rc = DBGFR3InfoRegisterInternal(pUVM->pVM, "help", "List of info items.", dbgfR3InfoHelp);
+ AssertRCReturn(rc, rc);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminate the info handlers.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3InfoTerm(PUVM pUVM)
+{
+ /*
+ * Delete the crit sect.
+ */
+ int rc = RTCritSectRwDelete(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ return rc;
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnGetOptError}
+ */
+VMMR3DECL(void) DBGFR3InfoGenericGetOptError(PCDBGFINFOHLP pHlp, int rc, PRTGETOPTUNION pValueUnion, PRTGETOPTSTATE pState)
+{
+ RT_NOREF(pState);
+ char szMsg[1024];
+ RTGetOptFormatError(szMsg, sizeof(szMsg), rc, pValueUnion);
+ pHlp->pfnPrintf(pHlp, "syntax error: %s\n", szMsg);
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintf, Logger output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoLog_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ NOREF(pHlp);
+ va_list args;
+ va_start(args, pszFormat);
+ RTLogPrintfV(pszFormat, args);
+ va_end(args);
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintfV, Logger output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoLog_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
+{
+ NOREF(pHlp);
+ RTLogPrintfV(pszFormat, args);
+}
+
+
+/**
+ * Gets the logger info helper.
+ * The returned info helper will unconditionally write all output to the log.
+ *
+ * @returns Pointer to the logger info helper.
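+ *
+ * Illustrative usage:
+ * @code
+ *     PCDBGFINFOHLP pHlp = DBGFR3InfoLogHlp();
+ *     pHlp->pfnPrintf(pHlp, "some info output\n");
+ * @endcode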
+ */
+VMMR3DECL(PCDBGFINFOHLP) DBGFR3InfoLogHlp(void)
+{
+ return &g_dbgfR3InfoLogHlp;
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintf, Release logger output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoLogRel_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ NOREF(pHlp);
+ va_list args;
+ va_start(args, pszFormat);
+ RTLogRelPrintfV(pszFormat, args);
+ va_end(args);
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintfV, Release logger output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoLogRel_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
+{
+ NOREF(pHlp);
+ RTLogRelPrintfV(pszFormat, args);
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintf, Standard error output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoStdErr_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ NOREF(pHlp);
+ va_list args;
+ va_start(args, pszFormat);
+ RTStrmPrintfV(g_pStdErr, pszFormat, args);
+ va_end(args);
+}
+
+
+/**
+ * @interface_method_impl{DBGFINFOHLP,pfnPrintfV, Standard error output.}
+ */
+static DECLCALLBACK(void) dbgfR3InfoStdErr_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
+{
+ NOREF(pHlp);
+ RTStrmPrintfV(g_pStdErr, pszFormat, args);
+}
+
+
+/**
+ * Gets the release logger info helper.
+ * The returned info helper will unconditionally write all output to the release log.
+ *
+ * @returns Pointer to the release logger info helper.
+ */
+VMMR3DECL(PCDBGFINFOHLP) DBGFR3InfoLogRelHlp(void)
+{
+ return &g_dbgfR3InfoLogRelHlp;
+}
+
+
+/**
+ * Handle registration worker.
+ *
+ * This allocates the structure, initializes the common fields and inserts it into the list.
+ * Upon successful return we're inside the crit sect and the caller must leave it.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param fFlags The flags.
+ * @param ppInfo Where to store the pointer to the created info entry.
+ */
+static int dbgfR3InfoRegister(PUVM pUVM, const char *pszName, const char *pszDesc, uint32_t fFlags, PDBGFINFO *ppInfo)
+{
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+ AssertReturn(*pszName, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertMsgReturn(!(fFlags & ~(DBGFINFO_FLAGS_RUN_ON_EMT | DBGFINFO_FLAGS_ALL_EMTS)),
+ ("fFlags=%#x\n", fFlags), VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and initialize.
+ */
+ int rc;
+ size_t cchName = strlen(pszName) + 1;
+ PDBGFINFO pInfo = (PDBGFINFO)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_INFO, RT_UOFFSETOF_DYN(DBGFINFO, szName[cchName]));
+ if (pInfo)
+ {
+ pInfo->enmType = DBGFINFOTYPE_INVALID;
+ pInfo->fFlags = fFlags;
+ pInfo->pszDesc = pszDesc;
+ pInfo->cchName = cchName - 1;
+ memcpy(pInfo->szName, pszName, cchName);
+
+ /* lazy init */
+ rc = VINF_SUCCESS;
+ if (!RTCritSectRwIsInitialized(&pUVM->dbgf.s.CritSect))
+ rc = dbgfR3InfoInit(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Insert in alphabetical order.
+ */
+ rc = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ PDBGFINFO pPrev = NULL;
+ PDBGFINFO pCur;
+ for (pCur = pUVM->dbgf.s.pInfoFirst; pCur; pPrev = pCur, pCur = pCur->pNext)
+ if (strcmp(pszName, pCur->szName) < 0)
+ break;
+ pInfo->pNext = pCur;
+ if (pPrev)
+ pPrev->pNext = pInfo;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo;
+
+ *ppInfo = pInfo;
+ return VINF_SUCCESS;
+ }
+ MMR3HeapFree(pInfo);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pDevIns The device instance owning the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDevice(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLERDEV pfnHandler, PPDMDEVINS pDevIns)
+{
+ LogFlow(("DBGFR3InfoRegisterDevice: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDevIns=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pDevIns));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_DEV;
+ pInfo->u.Dev.pfnHandler = pfnHandler;
+ pInfo->u.Dev.pDevIns = pDevIns;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pDrvIns The driver instance owning the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDriver(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDRV pfnHandler, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("DBGFR3InfoRegisterDriver: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDrvIns=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pDrvIns));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_DRV;
+ pInfo->u.Drv.pfnHandler = pfnHandler;
+ pInfo->u.Drv.pDrvIns = pDrvIns;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by an internal component.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterInternal(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERINT pfnHandler)
+{
+ return DBGFR3InfoRegisterInternalEx(pVM, pszName, pszDesc, pfnHandler, 0);
+}
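+
+
+/*
+ * Usage sketch (hypothetical component and names): an internal handler has
+ * the PFNDBGFHANDLERINT shape used by the dispatcher in this file, taking
+ * the cross context VM structure, the output helpers and an optional
+ * argument string:
+ *
+ *      static DECLCALLBACK(void) xyzR3InfoState(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+ *      {
+ *          NOREF(pszArgs);
+ *          pHlp->pfnPrintf(pHlp, "xyz: state of VM %p\n", pVM);
+ *      }
+ *
+ *      int rc = DBGFR3InfoRegisterInternal(pVM, "xyz", "Dumps the xyz state.", xyzR3InfoState);
+ */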
+
+
+/**
+ * Register an info handler owned by an internal component.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param fFlags Flags, see the DBGFINFO_FLAGS_*.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterInternalEx(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLERINT pfnHandler, uint32_t fFlags)
+{
+ LogFlow(("DBGFR3InfoRegisterInternalEx: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p fFlags=%x\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, fFlags));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, fFlags, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_INT;
+ pInfo->u.Int.pfnHandler = pfnHandler;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by an external component.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pvUser User argument to be passed to the handler.
+ */
+VMMR3DECL(int) DBGFR3InfoRegisterExternal(PUVM pUVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLEREXT pfnHandler, void *pvUser)
+{
+ LogFlow(("DBGFR3InfoRegisterExternal: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pvUser=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pvUser));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_EXT;
+ pInfo->u.Ext.pfnHandler = pfnHandler;
+ pInfo->u.Ext.pvUser = pvUser;
+ RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
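+
+
+/*
+ * Usage sketch (MYSTATE is an assumed caller-side type): external handlers
+ * get the user argument supplied at registration time instead of a VM
+ * pointer:
+ *
+ *      static DECLCALLBACK(void) myExtInfoStats(void *pvUser, PCDBGFINFOHLP pHlp, const char *pszArgs)
+ *      {
+ *          MYSTATE *pState = (MYSTATE *)pvUser;
+ *          NOREF(pszArgs);
+ *          pHlp->pfnPrintf(pHlp, "cHits=%u\n", pState->cHits);
+ *      }
+ *
+ *      rc = DBGFR3InfoRegisterExternal(pUVM, "mystats", "My statistics.", myExtInfoStats, pState);
+ */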
+
+
+/**
+ * Register an info handler owned by a device, argv style.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pDevIns The device instance owning the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDeviceArgv(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVDEV pfnHandler, PPDMDEVINS pDevIns)
+{
+ LogFlow(("DBGFR3InfoRegisterDeviceArgv: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDevIns=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pDevIns));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_DEV_ARGV;
+ pInfo->u.DevArgv.pfnHandler = pfnHandler;
+ pInfo->u.DevArgv.pDevIns = pDevIns;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
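+
+
+/*
+ * Usage sketch (hypothetical device info item): argv style handlers receive
+ * the argument string already split RTGetOpt style, as done by dbgfR3Info
+ * further down in this file:
+ *
+ *      static DECLCALLBACK(void) devR3InfoRegs(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgv)
+ *      {
+ *          for (int i = 0; i < cArgs; i++)
+ *              pHlp->pfnPrintf(pHlp, "arg[%d]=%s\n", i, papszArgv[i]);
+ *      }
+ *
+ *      rc = DBGFR3InfoRegisterDeviceArgv(pVM, "myregs", "Dumps device registers.", devR3InfoRegs, pDevIns);
+ */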
+
+
+/**
+ * Register an info handler owned by a driver, argv style.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pDrvIns The driver instance owning the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDriverArgv(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVDRV pfnHandler, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("DBGFR3InfoRegisterDriverArgv: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDrvIns=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pDrvIns));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_DRV_ARGV;
+ pInfo->u.DrvArgv.pfnHandler = pfnHandler;
+ pInfo->u.DrvArgv.pDrvIns = pDrvIns;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by a USB device, argv style.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pUsbIns The USB device instance owning the info.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterUsbArgv(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVUSB pfnHandler, PPDMUSBINS pUsbIns)
+{
+ LogFlow(("DBGFR3InfoRegisterDriverArgv: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pUsbIns=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pUsbIns));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pUsbIns, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_USB_ARGV;
+ pInfo->u.UsbArgv.pfnHandler = pfnHandler;
+ pInfo->u.UsbArgv.pUsbIns = pUsbIns;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by an internal component, argv style.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param fFlags Flags, see the DBGFINFO_FLAGS_*.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterInternalArgv(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVINT pfnHandler, uint32_t fFlags)
+{
+ LogFlow(("DBGFR3InfoRegisterInternalArgv: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p fFlags=%x\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, fFlags));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, fFlags, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_INT_ARGV;
+ pInfo->u.IntArgv.pfnHandler = pfnHandler;
+ RTCritSectRwLeaveExcl(&pVM->pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Register an info handler owned by an external component, argv style.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info.
+ * @param pszDesc The description of the info and any arguments the handler may take.
+ * @param pfnHandler The handler function to be called to display the info.
+ * @param pvUser User argument to be passed to the handler.
+ */
+VMMR3DECL(int) DBGFR3InfoRegisterExternalArgv(PUVM pUVM, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVEXT pfnHandler, void *pvUser)
+{
+ LogFlow(("DBGFR3InfoRegisterExternalArgv: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pvUser=%p\n",
+ pszName, pszName, pszDesc, pszDesc, pfnHandler, pvUser));
+
+ /*
+ * Validate the specific stuff.
+ */
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Register
+ */
+ PDBGFINFO pInfo;
+ int rc = dbgfR3InfoRegister(pUVM, pszName, pszDesc, 0, &pInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->enmType = DBGFINFOTYPE_EXT_ARGV;
+ pInfo->u.ExtArgv.pfnHandler = pfnHandler;
+ pInfo->u.ExtArgv.pvUser = pvUser;
+ RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Deregister one(/all) info handler(s) owned by a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pszName The identifier of the info. If NULL all owned by the device.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName)
+{
+ LogFlow(("DBGFR3InfoDeregisterDevice: pDevIns=%p pszName=%p:{%s}\n", pDevIns, pszName, pszName));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Enumerate the info handlers and free the requested entries.
+ */
+ int rc = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect); AssertRC(rc);
+ rc = VERR_FILE_NOT_FOUND;
+ PDBGFINFO pPrev = NULL;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
+ if (pszName)
+ {
+ /*
+ * Free a specific one.
+ */
+ for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
+ if ( ( (pInfo->enmType == DBGFINFOTYPE_DEV && pInfo->u.Dev.pDevIns == pDevIns)
+ || (pInfo->enmType == DBGFINFOTYPE_DEV_ARGV && pInfo->u.DevArgv.pDevIns == pDevIns))
+ && pInfo->cchName == cchName
+ && memcmp(pInfo->szName, pszName, cchName) == 0)
+ {
+ if (pPrev)
+ pPrev->pNext = pInfo->pNext;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ MMR3HeapFree(pInfo);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Free all owned by the device.
+ */
+ while (pInfo != NULL)
+ if ( (pInfo->enmType == DBGFINFOTYPE_DEV && pInfo->u.Dev.pDevIns == pDevIns)
+ || (pInfo->enmType == DBGFINFOTYPE_DEV_ARGV && pInfo->u.DevArgv.pDevIns == pDevIns))
+ {
+ PDBGFINFO volatile pFree = pInfo;
+ pInfo = pInfo->pNext;
+ if (pPrev)
+ pPrev->pNext = pInfo;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo;
+ MMR3HeapFree(pFree);
+ }
+ else
+ {
+ pPrev = pInfo;
+ pInfo = pInfo->pNext;
+ }
+ rc = VINF_SUCCESS;
+ }
+ int rc2 = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+ AssertRC(rc);
+ LogFlow(("DBGFR3InfoDeregisterDevice: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Deregister one(/all) info handler(s) owned by a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param pszName The identifier of the info. If NULL all owned by the driver.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName)
+{
+ LogFlow(("DBGFR3InfoDeregisterDriver: pDrvIns=%p pszName=%p:{%s}\n", pDrvIns, pszName, pszName));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Enumerate the info handlers and free the requested entries.
+ */
+ int rc = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect); AssertRC(rc);
+ rc = VERR_FILE_NOT_FOUND;
+ PDBGFINFO pPrev = NULL;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
+ if (pszName)
+ {
+ /*
+ * Free a specific one.
+ */
+ for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
+ if ( ( (pInfo->enmType == DBGFINFOTYPE_DRV && pInfo->u.Drv.pDrvIns == pDrvIns)
+ || (pInfo->enmType == DBGFINFOTYPE_DRV_ARGV && pInfo->u.DrvArgv.pDrvIns == pDrvIns))
+ && pInfo->cchName == cchName
+ && memcmp(pInfo->szName, pszName, cchName) == 0)
+ {
+ if (pPrev)
+ pPrev->pNext = pInfo->pNext;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ MMR3HeapFree(pInfo);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Free all owned by the driver.
+ */
+ while (pInfo != NULL)
+ if ( (pInfo->enmType == DBGFINFOTYPE_DRV && pInfo->u.Drv.pDrvIns == pDrvIns)
+ || (pInfo->enmType == DBGFINFOTYPE_DRV_ARGV && pInfo->u.DrvArgv.pDrvIns == pDrvIns))
+ {
+ PDBGFINFO volatile pFree = pInfo;
+ pInfo = pInfo->pNext;
+ if (pPrev)
+ pPrev->pNext = pInfo;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo;
+ MMR3HeapFree(pFree);
+ }
+ else
+ {
+ pPrev = pInfo;
+ pInfo = pInfo->pNext;
+ }
+ rc = VINF_SUCCESS;
+ }
+ int rc2 = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+ AssertRC(rc);
+ LogFlow(("DBGFR3InfoDeregisterDriver: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Deregister one(/all) info handler(s) owned by a USB device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns USB device instance.
+ * @param pszName The identifier of the info. If NULL all owned by the USB device.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterUsb(PVM pVM, PPDMUSBINS pUsbIns, const char *pszName)
+{
+ LogFlow(("DBGFR3InfoDeregisterUsb: pUsbIns=%p pszName=%p:{%s}\n", pUsbIns, pszName, pszName));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pUsbIns, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Enumerate the info handlers and free the requested entries.
+ */
+ int rc = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect); AssertRC(rc);
+ rc = VERR_FILE_NOT_FOUND;
+ PDBGFINFO pPrev = NULL;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
+ if (pszName)
+ {
+ /*
+ * Free a specific one.
+ */
+ for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
+ if ( pInfo->enmType == DBGFINFOTYPE_USB_ARGV
+ && pInfo->u.UsbArgv.pUsbIns == pUsbIns
+ && pInfo->cchName == cchName
+ && memcmp(pInfo->szName, pszName, cchName) == 0)
+ {
+ if (pPrev)
+ pPrev->pNext = pInfo->pNext;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ MMR3HeapFree(pInfo);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Free all owned by the USB device.
+ */
+ while (pInfo != NULL)
+ if ( pInfo->enmType == DBGFINFOTYPE_USB_ARGV
+ && pInfo->u.UsbArgv.pUsbIns == pUsbIns)
+ {
+ PDBGFINFO volatile pFree = pInfo;
+ pInfo = pInfo->pNext;
+ if (pPrev)
+ pPrev->pNext = pInfo;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo;
+ MMR3HeapFree(pFree);
+ }
+ else
+ {
+ pPrev = pInfo;
+ pInfo = pInfo->pNext;
+ }
+ rc = VINF_SUCCESS;
+ }
+ int rc2 = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+ AssertRC(rc);
+ LogFlow(("DBGFR3InfoDeregisterDriver: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Internal deregistration helper.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info.
+ * @param enmType1 The first info owner type (old style).
+ * @param enmType2 The second info owner type (argv).
+ */
+static int dbgfR3InfoDeregister(PUVM pUVM, const char *pszName, DBGFINFOTYPE enmType1, DBGFINFOTYPE enmType2)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+
+ /*
+ * Find the info handler.
+ */
+ size_t cchName = strlen(pszName);
+ int rc = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ rc = VERR_FILE_NOT_FOUND;
+ PDBGFINFO pPrev = NULL;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
+ for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
+ if ( pInfo->cchName == cchName
+ && memcmp(pInfo->szName, pszName, cchName) == 0
+ && (pInfo->enmType == enmType1 || pInfo->enmType == enmType2))
+ {
+ if (pPrev)
+ pPrev->pNext = pInfo->pNext;
+ else
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ MMR3HeapFree(pInfo);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ int rc2 = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+ AssertRC(rc);
+ LogFlow(("dbgfR3InfoDeregister: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Deregister an info handler owned by an internal component.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The identifier of the info. Must not be NULL.
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterInternal(PVM pVM, const char *pszName)
+{
+ LogFlow(("DBGFR3InfoDeregisterInternal: pszName=%p:{%s}\n", pszName, pszName));
+ return dbgfR3InfoDeregister(pVM->pUVM, pszName, DBGFINFOTYPE_INT, DBGFINFOTYPE_INT_ARGV);
+}
+
+
+/**
+ * Deregister an info handler owned by an external component.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info. Must not be NULL.
+ */
+VMMR3DECL(int) DBGFR3InfoDeregisterExternal(PUVM pUVM, const char *pszName)
+{
+ LogFlow(("DBGFR3InfoDeregisterExternal: pszName=%p:{%s}\n", pszName, pszName));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ return dbgfR3InfoDeregister(pUVM, pszName, DBGFINFOTYPE_EXT, DBGFINFOTYPE_EXT_ARGV);
+}
+
+
+/**
+ * Worker for DBGFR3InfoEx.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu Which CPU to run EMT bound handlers on. NIL_VMCPUID or
+ * a valid CPU ID.
+ * @param pszName What to dump.
+ * @param pszArgs Arguments, optional.
+ * @param pHlp Output helper, optional.
+ */
+static DECLCALLBACK(int) dbgfR3Info(PUVM pUVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszArgs, VERR_INVALID_POINTER);
+ if (pHlp)
+ {
+ AssertPtrReturn(pHlp, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pHlp->pfnPrintf, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pHlp->pfnPrintfV, VERR_INVALID_PARAMETER);
+ }
+ else
+ pHlp = &g_dbgfR3InfoLogHlp;
+ Assert(idCpu == NIL_VMCPUID || idCpu < pUVM->cCpus); /* if not nil, we're on that EMT already. */
+
+ /*
+ * Find the info handler.
+ */
+ size_t cchName = strlen(pszName);
+ int rc = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
+ for (; pInfo; pInfo = pInfo->pNext)
+ if ( pInfo->cchName == cchName
+ && !memcmp(pInfo->szName, pszName, cchName))
+ break;
+ if (pInfo)
+ {
+ /*
+ * Found it.
+ */
+ VMCPUID idDstCpu = NIL_VMCPUID;
+ if ((pInfo->fFlags & (DBGFINFO_FLAGS_RUN_ON_EMT | DBGFINFO_FLAGS_ALL_EMTS)) && idCpu == NIL_VMCPUID)
+ idDstCpu = pInfo->fFlags & DBGFINFO_FLAGS_ALL_EMTS ? VMCPUID_ALL : VMCPUID_ANY;
+
+ rc = VINF_SUCCESS;
+ switch (pInfo->enmType)
+ {
+ case DBGFINFOTYPE_DEV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Dev.pfnHandler, 3,
+ pInfo->u.Dev.pDevIns, pHlp, pszArgs);
+ else
+ pInfo->u.Dev.pfnHandler(pInfo->u.Dev.pDevIns, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_DRV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Drv.pfnHandler, 3,
+ pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
+ else
+ pInfo->u.Drv.pfnHandler(pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_INT:
+ if (RT_VALID_PTR(pUVM->pVM))
+ {
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Int.pfnHandler, 3,
+ pUVM->pVM, pHlp, pszArgs);
+ else
+ pInfo->u.Int.pfnHandler(pUVM->pVM, pHlp, pszArgs);
+ }
+ else
+ rc = VERR_INVALID_VM_HANDLE;
+ break;
+
+ case DBGFINFOTYPE_EXT:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Ext.pfnHandler, 3,
+ pInfo->u.Ext.pvUser, pHlp, pszArgs);
+ else
+ pInfo->u.Ext.pfnHandler(pInfo->u.Ext.pvUser, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_DEV_ARGV:
+ case DBGFINFOTYPE_DRV_ARGV:
+ case DBGFINFOTYPE_USB_ARGV:
+ case DBGFINFOTYPE_INT_ARGV:
+ case DBGFINFOTYPE_EXT_ARGV:
+ {
+ char **papszArgv;
+ int cArgs;
+ rc = RTGetOptArgvFromString(&papszArgv, &cArgs, pszArgs ? pszArgs : "", RTGETOPTARGV_CNV_QUOTE_BOURNE_SH, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ switch (pInfo->enmType)
+ {
+ case DBGFINFOTYPE_DEV_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.DevArgv.pfnHandler, 4,
+ pInfo->u.DevArgv.pDevIns, pHlp, cArgs, papszArgv);
+ else
+ pInfo->u.DevArgv.pfnHandler(pInfo->u.DevArgv.pDevIns, pHlp, cArgs, papszArgv);
+ break;
+
+ case DBGFINFOTYPE_DRV_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.DrvArgv.pfnHandler, 4,
+ pInfo->u.DrvArgv.pDrvIns, pHlp, cArgs, papszArgv);
+ else
+ pInfo->u.DrvArgv.pfnHandler(pInfo->u.DrvArgv.pDrvIns, pHlp, cArgs, papszArgv);
+ break;
+
+ case DBGFINFOTYPE_USB_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.UsbArgv.pfnHandler, 4,
+ pInfo->u.UsbArgv.pUsbIns, pHlp, cArgs, papszArgv);
+ else
+ pInfo->u.UsbArgv.pfnHandler(pInfo->u.UsbArgv.pUsbIns, pHlp, cArgs, papszArgv);
+ break;
+
+ case DBGFINFOTYPE_INT_ARGV:
+ if (RT_VALID_PTR(pUVM->pVM))
+ {
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.IntArgv.pfnHandler, 4,
+ pUVM->pVM, pHlp, cArgs, papszArgv);
+ else
+ pInfo->u.IntArgv.pfnHandler(pUVM->pVM, pHlp, cArgs, papszArgv);
+ }
+ else
+ rc = VERR_INVALID_VM_HANDLE;
+ break;
+
+ case DBGFINFOTYPE_EXT_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.ExtArgv.pfnHandler, 4,
+ pInfo->u.ExtArgv.pvUser, pHlp, cArgs, papszArgv);
+ else
+ pInfo->u.ExtArgv.pfnHandler(pInfo->u.ExtArgv.pvUser, pHlp, cArgs, papszArgv);
+ break;
+
+ default:
+ AssertFailedBreakStmt(rc = VERR_INTERNAL_ERROR);
+ }
+
+ RTGetOptArgvFree(papszArgv);
+ }
+ break;
+ }
+
+ default:
+ AssertMsgFailedReturn(("Invalid info type enmType=%d\n", pInfo->enmType), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+
+ int rc2 = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+ }
+ else
+ {
+ rc = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ rc = VERR_FILE_NOT_FOUND;
+ }
+ return rc;
+}
+
+
+/**
+ * Display a piece of info writing to the supplied handler.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName The identifier of the info to display.
+ * @param pszArgs Arguments to the info handler.
+ * @param pHlp The output helper functions. If NULL the logger will be used.
+ */
+VMMR3DECL(int) DBGFR3Info(PUVM pUVM, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+{
+ return DBGFR3InfoEx(pUVM, NIL_VMCPUID, pszName, pszArgs, pHlp);
+}
+
+
+/**
+ * Display a piece of info writing to the supplied handler.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU to execute the request on. Pass NIL_VMCPUID
+ * to not involve any EMT unless necessary.
+ * @param pszName The identifier of the info to display.
+ * @param pszArgs Arguments to the info handler.
+ * @param pHlp The output helper functions. If NULL the logger will be used.
+ */
+VMMR3DECL(int) DBGFR3InfoEx(PUVM pUVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Some input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn( idCpu != VMCPUID_ANY_QUEUE
+ && idCpu != VMCPUID_ALL
+ && idCpu != VMCPUID_ALL_REVERSE, VERR_INVALID_PARAMETER);
+
+ /*
+ * Run on any specific EMT?
+ */
+ if (idCpu == NIL_VMCPUID)
+ return dbgfR3Info(pUVM, NIL_VMCPUID, pszName, pszArgs, pHlp);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu,
+ (PFNRT)dbgfR3Info, 5, pUVM, idCpu, pszName, pszArgs, pHlp);
+}
+
+
+/**
+ * Wrapper for DBGFR3Info that outputs to the release log.
+ *
+ * @returns See DBGFR3Info.
+ * @param pUVM The user mode VM handle.
+ * @param pszName See DBGFR3Info.
+ * @param pszArgs See DBGFR3Info.
+ */
+VMMR3DECL(int) DBGFR3InfoLogRel(PUVM pUVM, const char *pszName, const char *pszArgs)
+{
+ return DBGFR3InfoEx(pUVM, NIL_VMCPUID, pszName, pszArgs, &g_dbgfR3InfoLogRelHlp);
+}
+
+
+/**
+ * Wrapper for DBGFR3Info that outputs to standard error.
+ *
+ * @returns See DBGFR3Info.
+ * @param pUVM The user mode VM handle.
+ * @param pszName See DBGFR3Info.
+ * @param pszArgs See DBGFR3Info.
+ */
+VMMR3DECL(int) DBGFR3InfoStdErr(PUVM pUVM, const char *pszName, const char *pszArgs)
+{
+ return DBGFR3InfoEx(pUVM, NIL_VMCPUID, pszName, pszArgs, &g_dbgfR3InfoStdErrHlp);
+}
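+
+
+/*
+ * Usage sketch: dumping the 'help' item registered by dbgfR3InfoInit to the
+ * debug log and to standard error, and forcing an item onto EMT 0 (the
+ * 'cpumguest' name and its 'verbose' argument are assumed to be registered
+ * and understood elsewhere):
+ *
+ *      DBGFR3Info(pUVM, "help", NULL, DBGFR3InfoLogHlp());
+ *      DBGFR3InfoStdErr(pUVM, "help", NULL);
+ *      DBGFR3InfoEx(pUVM, 0, "cpumguest", "verbose", NULL);
+ */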
+
+
+/**
+ * Display several info items.
+ *
+ * This is intended for use by the fatal error dump only.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszIncludePat Simple string pattern of info items to include.
+ * @param pszExcludePat Simple string pattern of info items to exclude.
+ * @param pszSepFmt Item separator format string. The item name will be
+ * given as parameter.
+ * @param pHlp The output helper functions. If NULL the logger
+ * will be used.
+ *
+ * @thread EMT
+ */
+VMMR3_INT_DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *pszExcludePat, const char *pszSepFmt,
+ PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Validate input.
+ */
+ PUVM pUVM = pVM->pUVM;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pszIncludePat, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszExcludePat, VERR_INVALID_POINTER);
+ if (pHlp)
+ {
+ AssertPtrReturn(pHlp->pfnPrintf, VERR_INVALID_POINTER);
+ AssertPtrReturn(pHlp->pfnPrintfV, VERR_INVALID_POINTER);
+ }
+ else
+ pHlp = &g_dbgfR3InfoLogHlp;
+
+ size_t const cchIncludePat = strlen(pszIncludePat);
+ size_t const cchExcludePat = strlen(pszExcludePat);
+ const char *pszArgs = "";
+
+ /*
+ * Enumerate the info handlers and call the ones matching.
+ * Note! We won't leave the critical section here...
+ */
+ char *apszArgs[2] = { NULL, NULL };
+ int rc = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+ rc = VWRN_NOT_FOUND;
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ {
+ if ( RTStrSimplePatternMultiMatch(pszIncludePat, cchIncludePat, pInfo->szName, pInfo->cchName, NULL)
+ && !RTStrSimplePatternMultiMatch(pszExcludePat, cchExcludePat, pInfo->szName, pInfo->cchName, NULL))
+ {
+ pHlp->pfnPrintf(pHlp, pszSepFmt, pInfo->szName);
+
+ VMCPUID idDstCpu = NIL_VMCPUID;
+ if (pInfo->fFlags & (DBGFINFO_FLAGS_RUN_ON_EMT | DBGFINFO_FLAGS_ALL_EMTS))
+ idDstCpu = pInfo->fFlags & DBGFINFO_FLAGS_ALL_EMTS ? VMCPUID_ALL : VMCPUID_ANY;
+
+ rc = VINF_SUCCESS;
+ switch (pInfo->enmType)
+ {
+ case DBGFINFOTYPE_DEV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallVoidWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Dev.pfnHandler, 3,
+ pInfo->u.Dev.pDevIns, pHlp, pszArgs);
+ else
+ pInfo->u.Dev.pfnHandler(pInfo->u.Dev.pDevIns, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_DRV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallVoidWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Drv.pfnHandler, 3,
+ pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
+ else
+ pInfo->u.Drv.pfnHandler(pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_INT:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallVoidWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Int.pfnHandler, 3,
+ pVM, pHlp, pszArgs);
+ else
+ pInfo->u.Int.pfnHandler(pVM, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_EXT:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallVoidWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.Ext.pfnHandler, 3,
+ pInfo->u.Ext.pvUser, pHlp, pszArgs);
+ else
+ pInfo->u.Ext.pfnHandler(pInfo->u.Ext.pvUser, pHlp, pszArgs);
+ break;
+
+ case DBGFINFOTYPE_DEV_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.DevArgv.pfnHandler, 4,
+ pInfo->u.DevArgv.pDevIns, pHlp, 0, &apszArgs[0]);
+ else
+ pInfo->u.DevArgv.pfnHandler(pInfo->u.DevArgv.pDevIns, pHlp, 0, &apszArgs[0]);
+ break;
+
+ case DBGFINFOTYPE_DRV_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.DrvArgv.pfnHandler, 4,
+ pInfo->u.DrvArgv.pDrvIns, pHlp, 0, &apszArgs[0]);
+ else
+ pInfo->u.DrvArgv.pfnHandler(pInfo->u.DrvArgv.pDrvIns, pHlp, 0, &apszArgs[0]);
+ break;
+
+ case DBGFINFOTYPE_USB_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.UsbArgv.pfnHandler, 4,
+ pInfo->u.UsbArgv.pUsbIns, pHlp, 0, &apszArgs[0]);
+ else
+ pInfo->u.UsbArgv.pfnHandler(pInfo->u.UsbArgv.pUsbIns, pHlp, 0, &apszArgs[0]);
+ break;
+
+ case DBGFINFOTYPE_INT_ARGV:
+ if (RT_VALID_PTR(pUVM->pVM))
+ {
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.IntArgv.pfnHandler, 4,
+ pUVM->pVM, pHlp, 0, &apszArgs[0]);
+ else
+ pInfo->u.IntArgv.pfnHandler(pUVM->pVM, pHlp, 0, &apszArgs[0]);
+ }
+ else
+ rc = VERR_INVALID_VM_HANDLE;
+ break;
+
+ case DBGFINFOTYPE_EXT_ARGV:
+ if (idDstCpu != NIL_VMCPUID)
+ rc = VMR3ReqPriorityCallWaitU(pUVM, idDstCpu, (PFNRT)pInfo->u.ExtArgv.pfnHandler, 4,
+ pInfo->u.ExtArgv.pvUser, pHlp, 0, &apszArgs[0]);
+ else
+ pInfo->u.ExtArgv.pfnHandler(pInfo->u.ExtArgv.pvUser, pHlp, 0, &apszArgs[0]);
+ break;
+
+ default:
+ AssertMsgFailedReturn(("Invalid info type enmType=%d\n", pInfo->enmType), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ }
+ }
+ int rc2 = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+
+ return rc;
+}
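+
+
+/*
+ * Usage sketch (illustrative patterns, assuming '*' matches every item name):
+ * on the EMT, dump all registered items except 'help' to the log, separating
+ * the items with a banner line:
+ *
+ *      DBGFR3InfoMulti(pVM, "*", "help", "\n!!! {%s} !!!\n", NULL);
+ */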
+
+
+/**
+ * Enumerate all the registered info handlers.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pfnCallback Pointer to callback function.
+ * @param pvUser User argument to pass to the callback.
+ */
+VMMR3DECL(int) DBGFR3InfoEnum(PUVM pUVM, PFNDBGFINFOENUM pfnCallback, void *pvUser)
+{
+ LogFlow(("DBGFR3InfoLog: pfnCallback=%p pvUser=%p\n", pfnCallback, pvUser));
+
+ /*
+ * Validate input.
+ */
+ if (!pfnCallback)
+ {
+ AssertMsgFailed(("!pfnCallback\n"));
+ return VERR_INVALID_PARAMETER;
+ }
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Enter and enumerate.
+ */
+ int rc = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+
+ rc = VINF_SUCCESS;
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; RT_SUCCESS(rc) && pInfo; pInfo = pInfo->pNext)
+ rc = pfnCallback(pUVM, pInfo->szName, pInfo->pszDesc, pvUser);
+
+ /*
+ * Leave and exit.
+ */
+ int rc2 = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc2);
+
+ LogFlow(("DBGFR3InfoLog: returns %Rrc\n", rc));
+ return rc;
+}
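+
+
+/*
+ * Usage sketch (assumed callback name): the callback returns a status code
+ * and a failure status stops the enumeration, as coded above:
+ *
+ *      static DECLCALLBACK(int) listOneInfo(PUVM pUVM, const char *pszName, const char *pszDesc, void *pvUser)
+ *      {
+ *          NOREF(pUVM); NOREF(pvUser);
+ *          RTPrintf("%-16s %s\n", pszName, pszDesc);
+ *          return VINF_SUCCESS;
+ *      }
+ *
+ *      rc = DBGFR3InfoEnum(pUVM, listOneInfo, NULL);
+ */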
+
+
+/**
+ * Info handler, internal version.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp Callback functions for doing output.
+ * @param pszArgs Argument string. Optional and specific to the handler.
+ */
+static DECLCALLBACK(void) dbgfR3InfoHelp(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ LogFlow(("dbgfR3InfoHelp: pszArgs=%s\n", pszArgs));
+
+ /*
+ * Enter and enumerate.
+ */
+ PUVM pUVM = pVM->pUVM;
+ int rc = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+
+ if (pszArgs && *pszArgs)
+ {
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ {
+ const char *psz = strstr(pszArgs, pInfo->szName);
+ if ( psz
+ && ( psz == pszArgs
+ || RT_C_IS_SPACE(psz[-1]))
+ && ( !psz[pInfo->cchName]
+ || RT_C_IS_SPACE(psz[pInfo->cchName])))
+ pHlp->pfnPrintf(pHlp, "%-16s %s\n",
+ pInfo->szName, pInfo->pszDesc);
+ }
+ }
+ else
+ {
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ pHlp->pfnPrintf(pHlp, "%-16s %s\n",
+ pInfo->szName, pInfo->pszDesc);
+ }
+
+ /*
+ * Leave and exit.
+ */
+ rc = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect);
+ AssertRC(rc);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFLog.cpp b/src/VBox/VMM/VMMR3/DBGFLog.cpp
new file mode 100644
index 00000000..a864af45
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFLog.cpp
@@ -0,0 +1,197 @@
+/* $Id: DBGFLog.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Log Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/vmapi.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/assert.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+
+
+/**
+ * Checks for logger prefixes and selects the right logger.
+ *
+ * @returns Target logger.
+ * @param ppsz Pointer to the string pointer.
+ */
+static PRTLOGGER dbgfR3LogResolvedLogger(const char **ppsz)
+{
+ PRTLOGGER pLogger;
+ const char *psz = *ppsz;
+ if (!strncmp(psz, RT_STR_TUPLE("release:")))
+ {
+ *ppsz += sizeof("release:") - 1;
+ pLogger = RTLogRelGetDefaultInstance();
+ }
+ else
+ {
+ if (!strncmp(psz, RT_STR_TUPLE("debug:")))
+ *ppsz += sizeof("debug:") - 1;
+ pLogger = RTLogDefaultInstance();
+ }
+ return pLogger;
+}
+
+
+/**
+ * EMT worker for DBGFR3LogModifyGroups.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszGroupSettings The group settings string. (VBOX_LOG)
+ */
+static DECLCALLBACK(int) dbgfR3LogModifyGroups(PUVM pUVM, const char *pszGroupSettings)
+{
+ PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszGroupSettings);
+ if (!pLogger)
+ return VINF_SUCCESS;
+
+ int rc = RTLogGroupSettings(pLogger, pszGroupSettings);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
+ return rc;
+}
+
+
+/**
+ * Changes the logger group settings.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszGroupSettings The group settings string. (VBOX_LOG)
+ * By prefixing the string with \"release:\" the
+ * changes will be applied to the release log
+ * instead of the debug log. The prefix \"debug:\"
+ * is also recognized.
+ */
+VMMR3DECL(int) DBGFR3LogModifyGroups(PUVM pUVM, const char *pszGroupSettings)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszGroupSettings, VERR_INVALID_POINTER);
+
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyGroups, 2, pUVM, pszGroupSettings);
+}
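+
+
+/*
+ * Usage sketch (illustrative VBOX_LOG style group strings): the 'release:'
+ * prefix selects the release logger, everything else goes to the debug
+ * logger:
+ *
+ *      DBGFR3LogModifyGroups(pUVM, "+dbgf.e.l.f");
+ *      DBGFR3LogModifyGroups(pUVM, "release:+dev_vga.e.l");
+ */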
+
+
+/**
+ * EMT worker for DBGFR3LogModifyFlags.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszFlagSettings The flag settings string. (VBOX_LOG_FLAGS)
+ */
+static DECLCALLBACK(int) dbgfR3LogModifyFlags(PUVM pUVM, const char *pszFlagSettings)
+{
+ PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszFlagSettings);
+ if (!pLogger)
+ return VINF_SUCCESS;
+
+ int rc = RTLogFlags(pLogger, pszFlagSettings);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
+ return rc;
+}
+
+
+/**
+ * Changes the logger flag settings.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszFlagSettings The flag settings string. (VBOX_LOG_FLAGS)
+ * By prefixing the string with \"release:\" the
+ * changes will be applied to the release log
+ * instead of the debug log. The prefix \"debug:\"
+ * is also recognized.
+ */
+VMMR3DECL(int) DBGFR3LogModifyFlags(PUVM pUVM, const char *pszFlagSettings)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszFlagSettings, VERR_INVALID_POINTER);
+
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyFlags, 2, pUVM, pszFlagSettings);
+}
+
+
+/**
+ * EMT worker for DBGFR3LogModifyDestinations.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDestSettings The destination settings string. (VBOX_LOG_DEST)
+ */
+static DECLCALLBACK(int) dbgfR3LogModifyDestinations(PUVM pUVM, const char *pszDestSettings)
+{
+ PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszDestSettings);
+ if (!pLogger)
+ return VINF_SUCCESS;
+
+ int rc = RTLogDestinations(pLogger, pszDestSettings);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
+ return rc;
+}
+
+
+/**
+ * Changes the logger destination settings.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDestSettings The destination settings string. (VBOX_LOG_DEST)
+ * By prefixing the string with \"release:\" the
+ * changes will be applied to the release log
+ * instead of the debug log. The prefix \"debug:\"
+ * is also recognized.
+ */
+VMMR3DECL(int) DBGFR3LogModifyDestinations(PUVM pUVM, const char *pszDestSettings)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszDestSettings, VERR_INVALID_POINTER);
+
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyDestinations, 2, pUVM, pszDestSettings);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFMem.cpp b/src/VBox/VMM/VMMR3/DBGFMem.cpp
new file mode 100644
index 00000000..9acdbecd
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFMem.cpp
@@ -0,0 +1,652 @@
+/* $Id: DBGFMem.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Memory Methods.
+ */
+
+/*
+ * Copyright (C) 2007-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/hm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/vmm/mm.h>
+
+
+
+/**
+ * Scan guest memory for an exact byte string.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to search in.
+ * @param pAddress Where to start scanning.
+ * @param pcbRange The number of bytes to scan. Passed as a pointer because
+ * it may be 64-bit.
+ * @param puAlign The alignment restriction imposed on the search result.
+ * Passed as a pointer for the same reason.
+ * @param pabNeedle What to search for - exact search.
+ * @param cbNeedle Size of the search byte string.
+ * @param pHitAddress Where to put the address of the first hit.
+ */
+static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
+ RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ Assert(idCpu == VMMGetCpuId(pVM));
+
+ /*
+ * Validate the input we use, PGM does the rest.
+ */
+ RTGCUINTPTR cbRange = *pcbRange;
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
+ return VERR_INVALID_POINTER;
+ if (!RT_VALID_PTR(pHitAddress))
+ return VERR_INVALID_POINTER;
+
+ /*
+ * Select DBGF worker by addressing mode.
+ */
+ int rc;
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ if ( enmMode == PGMMODE_REAL
+ || enmMode == PGMMODE_PROTECTED
+ || DBGFADDRESS_IS_PHYS(pAddress)
+ )
+ {
+ RTGCPHYS GCPhysAlign = *puAlign;
+ if (GCPhysAlign != *puAlign)
+ return VERR_OUT_OF_RANGE;
+ RTGCPHYS PhysHit;
+ rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
+ if (RT_SUCCESS(rc))
+ DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
+ }
+ else
+ {
+#if GC_ARCH_BITS > 32
+ if ( ( pAddress->FlatPtr >= _4G
+ || pAddress->FlatPtr + cbRange > _4G)
+ && enmMode != PGMMODE_AMD64
+ && enmMode != PGMMODE_AMD64_NX)
+ return VERR_DBGF_MEM_NOT_FOUND;
+#endif
+ RTGCUINTPTR GCPtrHit;
+ rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
+ if (RT_SUCCESS(rc))
+ DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Scan guest memory for an exact byte string.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS and *pHitAddress on success.
+ * @retval VERR_DBGF_MEM_NOT_FOUND if not found.
+ * @retval VERR_INVALID_POINTER if any of the pointer arguments are invalid.
+ * @retval VERR_INVALID_PARAMETER if any other arguments are invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to search in.
+ * @param pAddress Where to start scanning.
+ * @param cbRange The number of bytes to scan.
+ * @param uAlign The alignment restriction imposed on the result.
+ * Usually set to 1.
+ * @param pvNeedle What to search for - exact search.
+ * @param cbNeedle Size of the search byte string.
+ * @param pHitAddress Where to put the address of the first hit.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
+ const void *pvNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
+ pUVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);
+}
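+
+
+/*
+ * Usage sketch (illustrative values): scanning the first 16 MB of guest
+ * physical memory for a two byte signature with no alignment restriction,
+ * in the context of VCPU 0:
+ *
+ *      static const uint8_t s_abSig[] = { 0x55, 0xaa };
+ *      DBGFADDRESS Addr, Hit;
+ *      DBGFR3AddrFromPhys(pUVM, &Addr, 0);
+ *      int rc = DBGFR3MemScan(pUVM, 0, &Addr, 16 * _1M, 1, s_abSig, sizeof(s_abSig), &Hit);
+ */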
+
+
+/**
+ * Read guest memory.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU context to read memory from.
+ * @param pAddress Where to start reading.
+ * @param pvBuf Where to store the data we've read.
+ * @param cbRead The number of bytes to read.
+ */
+static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ Assert(idCpu == VMMGetCpuId(pVM));
+
+ /*
+ * Validate the input we use, PGM does the rest.
+ */
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
+ return VERR_INVALID_POINTER;
+ if (!RT_VALID_PTR(pvBuf))
+ return VERR_INVALID_POINTER;
+
+ /*
+ * Select PGM worker by addressing mode.
+ */
+ int rc;
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ if ( enmMode == PGMMODE_REAL
+ || enmMode == PGMMODE_PROTECTED
+ || DBGFADDRESS_IS_PHYS(pAddress) )
+ rc = PGMPhysSimpleReadGCPhys(pVM, pvBuf, pAddress->FlatPtr, cbRead);
+ else
+ {
+#if GC_ARCH_BITS > 32
+ if ( ( pAddress->FlatPtr >= _4G
+ || pAddress->FlatPtr + cbRead > _4G)
+ && enmMode != PGMMODE_AMD64
+ && enmMode != PGMMODE_AMD64_NX)
+ return VERR_PAGE_TABLE_NOT_PRESENT;
+#endif
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, pvBuf, pAddress->FlatPtr, cbRead);
+ }
+ return rc;
+}
+
+
+/**
+ * Read guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pvBuf Where to store the data we've read.
+ * @param cbRead The number of bytes to read.
+ */
+VMMR3DECL(int) DBGFR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+
+ if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
+ {
+ AssertCompile(sizeof(RTHCUINTPTR) <= sizeof(pAddress->FlatPtr));
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ return VMMR3ReadR0Stack(pUVM->pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
+ }
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pUVM, idCpu, pAddress, pvBuf, cbRead);
+}
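+
+
+/*
+ * Usage sketch (illustrative address): reading 16 bytes at a guest flat
+ * virtual address in the context of VCPU 0:
+ *
+ *      uint8_t abBuf[16];
+ *      DBGFADDRESS Addr;
+ *      DBGFR3AddrFromFlat(pUVM, &Addr, 0x00100000);
+ *      int rc = DBGFR3MemRead(pUVM, 0, &Addr, abBuf, sizeof(abBuf));
+ */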
+
+
+/**
+ * Read a zero terminated string from guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pszBuf Where to store the string.
+ * @param cchBuf The size of the buffer.
+ */
+static DECLCALLBACK(int) dbgfR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
+{
+ /*
+ * Validate the input we use, PGM does the rest.
+ */
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
+ return VERR_INVALID_POINTER;
+ if (!RT_VALID_PTR(pszBuf))
+ return VERR_INVALID_POINTER;
+
+ /*
+ * Let dbgfR3MemRead do the job.
+ */
+ int rc = dbgfR3MemRead(pUVM, idCpu, pAddress, pszBuf, cchBuf);
+
+ /*
+ * Make sure the result is terminated and that overflow is signaled.
+ * This may look a bit reckless with the rc, but it should be fine.
+ */
+ if (!RTStrEnd(pszBuf, cchBuf))
+ {
+ pszBuf[cchBuf - 1] = '\0';
+ rc = VINF_BUFFER_OVERFLOW;
+ }
+ /*
+ * Handle partial reads (not perfect).
+ */
+ else if (RT_FAILURE(rc))
+ {
+ if (pszBuf[0])
+ rc = VINF_SUCCESS;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Read a zero terminated string from guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pszBuf Where to store the string.
+ * @param cchBuf The size of the buffer.
+ */
+VMMR3DECL(int) DBGFR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
+{
+ /*
+ * Validate and zero output.
+ */
+ if (!RT_VALID_PTR(pszBuf))
+ return VERR_INVALID_POINTER;
+ if (cchBuf <= 0)
+ return VERR_INVALID_PARAMETER;
+ memset(pszBuf, 0, cchBuf);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /*
+ * Pass it on to the EMT.
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pUVM, idCpu, pAddress, pszBuf, cchBuf);
+}
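+
+
+/*
+ * Usage sketch (GCPtrName is an illustrative guest address): reading a zero
+ * terminated guest string; VINF_BUFFER_OVERFLOW indicates the buffer was too
+ * small and the result was truncated:
+ *
+ *      char szName[64];
+ *      DBGFADDRESS Addr;
+ *      DBGFR3AddrFromFlat(pUVM, &Addr, GCPtrName);
+ *      int rc = DBGFR3MemReadString(pUVM, 0, &Addr, szName, sizeof(szName));
+ */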
+
+
+/**
+ * Writes guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the target CPU context (for the address).
+ * @param pAddress Where to start writing.
+ * @param pvBuf The data to write.
+ * @param cbWrite The number of bytes to write.
+ */
+static DECLCALLBACK(int) dbgfR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
+{
+ /*
+ * Validate the input we use, PGM does the rest.
+ */
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
+ return VERR_INVALID_POINTER;
+ if (!RT_VALID_PTR(pvBuf))
+ return VERR_INVALID_POINTER;
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Select PGM function by addressing mode.
+ */
+ int rc;
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ if ( enmMode == PGMMODE_REAL
+ || enmMode == PGMMODE_PROTECTED
+ || DBGFADDRESS_IS_PHYS(pAddress) )
+ rc = PGMPhysSimpleWriteGCPhys(pVM, pAddress->FlatPtr, pvBuf, cbWrite);
+ else
+ {
+#if GC_ARCH_BITS > 32
+ if ( ( pAddress->FlatPtr >= _4G
+ || pAddress->FlatPtr + cbWrite > _4G)
+ && enmMode != PGMMODE_AMD64
+ && enmMode != PGMMODE_AMD64_NX)
+ return VERR_PAGE_TABLE_NOT_PRESENT;
+#endif
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pAddress->FlatPtr, pvBuf, cbWrite);
+ }
+ return rc;
+}
+
+
+/**
+ * Writes guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the target CPU context (for the address).
+ * @param pAddress Where to start writing.
+ * @param pvBuf The data to write.
+ * @param cbWrite The number of bytes to write.
+ */
+VMMR3DECL(int) DBGFR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pUVM, idCpu, pAddress, pvBuf, cbWrite);
+}
+
+
+/**
+ * Worker for DBGFR3SelQueryInfo that calls into SELM.
+ */
+static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Make the query.
+ */
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ VMCPU_ASSERT_EMT(pVCpu);
+ int rc = SELMR3GetSelectorInfo(pVCpu, Sel, pSelInfo);
+
+ /*
+ * 64-bit mode HACKS for making data and stack selectors wide open when
+ * queried. This is voodoo magic.
+ */
+ if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
+ {
+ /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
+ if ( RT_SUCCESS(rc)
+ && (pSelInfo->fFlags & ( DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
+ | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
+ | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
+ == DBGFSELINFO_FLAGS_LONG_MODE
+ && pSelInfo->cbLimit != ~(RTGCPTR)0
+ && CPUMIsGuestIn64BitCode(pVCpu) )
+ {
+ pSelInfo->GCPtrBase = 0;
+ pSelInfo->cbLimit = ~(RTGCPTR)0;
+ }
+ else if ( Sel == 0
+ && CPUMIsGuestIn64BitCode(pVCpu))
+ {
+ pSelInfo->GCPtrBase = 0;
+ pSelInfo->cbLimit = ~(RTGCPTR)0;
+ pSelInfo->Sel = 0;
+ pSelInfo->SelGate = 0;
+ pSelInfo->fFlags = DBGFSELINFO_FLAGS_LONG_MODE;
+ pSelInfo->u.Raw64.Gen.u1Present = 1;
+ pSelInfo->u.Raw64.Gen.u1Long = 1;
+ pSelInfo->u.Raw64.Gen.u1DescType = 1;
+ rc = VINF_SUCCESS;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Gets information about a selector.
+ *
+ * Intended for the debugger mostly and will prefer the guest
+ * descriptor tables over the shadow ones.
+ *
+ * @returns VBox status code, the following are the common ones.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
+ * descriptor table.
+ * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
+ * is not returned if the selector itself isn't present, you have to
+ * check that for yourself (see DBGFSELINFO::fFlags).
+ * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
+ * pagetable or page backing the selector table wasn't present.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the virtual CPU context.
+ * @param Sel The selector to get info about.
+ * @param fFlags Flags, see DBGFQSEL_FLAGS_*.
+ * @param pSelInfo Where to store the information. This will always be
+ * updated.
+ *
+ * @remarks This is a wrapper around SELMR3GetSelectorInfo and
+ * SELMR3GetShadowSelectorInfo.
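+ *
+ * A hedged usage sketch, querying and validating the current CS of VCPU 0
+ * (uCs and uSs are hypothetical CS and SS selector values):
+ * @code
+ * DBGFSELINFO SelInfo;
+ * int rc = DBGFR3SelQueryInfo(pUVM, 0, uCs, DBGFSELQI_FLAGS_DT_GUEST, &SelInfo);
+ * if (RT_SUCCESS(rc))
+ *     rc = DBGFR3SelInfoValidateCS(&SelInfo, uSs); // the CPL is defined by SS
+ * @endcode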
+ */
+VMMR3DECL(int) DBGFR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);
+
+ /* Clear the return data here on this thread. */
+ memset(pSelInfo, 0, sizeof(*pSelInfo));
+
+ /*
+ * Dispatch the request to a worker running on the target CPU.
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
+}
+
+
+/**
+ * Validates a CS selector.
+ *
+ * @returns VBox status code.
+ * @param pSelInfo Pointer to the selector information for the CS selector.
+ * @param SelCPL The selector defining the CPL (SS).
+ */
+VMMDECL(int) DBGFR3SelInfoValidateCS(PCDBGFSELINFO pSelInfo, RTSEL SelCPL)
+{
+ /*
+ * Check if present.
+ */
+ if (pSelInfo->u.Raw.Gen.u1Present)
+ {
+ /*
+ * Type check.
+ */
+ if ( pSelInfo->u.Raw.Gen.u1DescType == 1
+ && (pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CODE))
+ {
+ /*
+ * Check level.
+ */
+ unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, pSelInfo->Sel & X86_SEL_RPL);
+ if ( !(pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CONF)
+ ? uLevel <= pSelInfo->u.Raw.Gen.u2Dpl
+ : uLevel >= pSelInfo->u.Raw.Gen.u2Dpl /* hope I got this right now... */
+ )
+ return VINF_SUCCESS;
+ return VERR_INVALID_RPL;
+ }
+ return VERR_NOT_CODE_SELECTOR;
+ }
+ return VERR_SELECTOR_NOT_PRESENT;
+}
+
+
+/**
+ * Converts a PGM paging mode to a set of DBGFPGDMP_XXX flags.
+ *
+ * @returns Flags. UINT32_MAX if the mode is invalid (asserted).
+ * @param enmMode The mode.
+ */
+static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
+{
+ switch (enmMode)
+ {
+ case PGMMODE_32_BIT:
+ return DBGFPGDMP_FLAGS_PSE;
+ case PGMMODE_PAE:
+ return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE;
+ case PGMMODE_PAE_NX:
+ return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
+ case PGMMODE_AMD64:
+ return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME;
+ case PGMMODE_AMD64_NX:
+ return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
+ case PGMMODE_NESTED_32BIT:
+ return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE;
+ case PGMMODE_NESTED_PAE:
+ return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
+ case PGMMODE_NESTED_AMD64:
+ return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
+ case PGMMODE_EPT:
+ return DBGFPGDMP_FLAGS_EPT;
+ case PGMMODE_NONE:
+ return 0;
+ default:
+ AssertFailedReturn(UINT32_MAX);
+ }
+}
+
+
+/**
+ * EMT worker for DBGFR3PagingDumpEx.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The current CPU ID.
+ * @param fFlags The flags, DBGFPGDMP_FLAGS_XXX. Valid.
+ * @param pcr3 The CR3 to use (unless we're getting the current
+ * state, see @a fFlags).
+ * @param pu64FirstAddr The first address.
+ * @param pu64LastAddr The last address.
+ * @param cMaxDepth The maximum depth of the dump.
+ * @param pHlp The output callbacks.
+ */
+static DECLCALLBACK(int) dbgfR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
+ uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
+ uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Implement dumping of both contexts by means of recursion.
+ */
+ if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
+ {
+ int rc1 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
+ pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
+ int rc2 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
+ pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
+ return RT_FAILURE(rc1) ? rc1 : rc2;
+ }
+
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Get the current CR3/mode if required.
+ */
+ uint64_t cr3 = *pcr3;
+ if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
+ {
+ if (PGMGetShadowMode(pVCpu) == PGMMODE_NONE)
+ {
+ pHlp->pfnPrintf(pHlp, "Shadow paging mode is 'none' (NEM)\n");
+ return VINF_SUCCESS;
+ }
+
+ if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
+ cr3 = PGMGetHyperCR3(pVCpu);
+ if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
+ fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
+ }
+ else
+ {
+ if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
+ cr3 = CPUMGetGuestCR3(pVCpu);
+ if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
+ {
+ AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE); AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
+ fFlags |= CPUMGetGuestCR4(pVCpu) & (X86_CR4_PSE | X86_CR4_PAE);
+ AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME); AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
+ fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
+ }
+ }
+ }
+ fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);
+
+ /*
+ * Call PGM to do the real work.
+ */
+ int rc;
+ if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
+ rc = PGMR3DumpHierarchyShw(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
+ else
+ rc = PGMR3DumpHierarchyGst(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
+ return rc;
+}
+
+
+/**
+ * Dump paging structures.
+ *
+ * This API can be used to dump both guest and shadow structures.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The current CPU ID.
+ * @param fFlags The flags, DBGFPGDMP_FLAGS_XXX.
+ * @param cr3 The CR3 to use (unless we're getting the current
+ * state, see @a fFlags).
+ * @param u64FirstAddr The address to start dumping at.
+ * @param u64LastAddr The address to end dumping after.
+ * @param cMaxDepth The maximum depth of the dump.
+ * @param pHlp The output callbacks. Defaults to the debug log if
+ * NULL.
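+ *
+ * A hedged usage sketch, dumping the current guest paging structures of
+ * VCPU 0 to the debug log (the flag combination is chosen to satisfy the
+ * validation below):
+ * @code
+ * int rc = DBGFR3PagingDumpEx(pUVM, 0, // idCpu
+ *                             DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_CURRENT_CR3
+ *                             | DBGFPGDMP_FLAGS_CURRENT_MODE,
+ *                             0, 0, UINT64_MAX, // cr3 (ignored), first, last
+ *                             99, NULL);        // cMaxDepth, pHlp = debug log
+ * @endcode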
+ */
+VMMDECL(int) DBGFR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
+ uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
+ AssertReturn(fFlags & (DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_GUEST), VERR_INVALID_FLAGS);
+ AssertReturn((fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE) || (fFlags & DBGFPGDMP_FLAGS_MODE_MASK), VERR_INVALID_FLAGS);
+ AssertReturn( !(fFlags & DBGFPGDMP_FLAGS_EPT)
+ || !(fFlags & (DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_NXE))
+ , VERR_INVALID_FLAGS);
+ AssertReturn(cMaxDepth, VERR_INVALID_PARAMETER);
+
+ /*
+ * Forward the request to the target CPU.
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
+ pUVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp ? pHlp : DBGFR3InfoLogHlp());
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFModule.cpp b/src/VBox/VMM/VMMR3/DBGFModule.cpp
new file mode 100644
index 00000000..e4eccc37
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFModule.cpp
@@ -0,0 +1,300 @@
+/* $Id: DBGFModule.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Module & Segment Management.
+ */
+
+/*
+ * Copyright (C) 2008-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_module DBGFModule - Module & Segment Management
+ *
+ * A module is our representation of an executable binary. Its main purpose
+ * is to provide segments that can be mapped into address spaces and thereby
+ * provide debug info for those parts of the guest code or data.
+ *
+ * This module will not deal directly with debug info; it will only serve
+ * as an interface between the debugger / symbol lookup and the debug info
+ * readers.
+ *
+ * An executable binary doesn't need to have a file; that is, we don't
+ * need the file to create a module for it. There will be interfaces for
+ * ROMs to register themselves so we can get to their symbols, and there
+ * will be interfaces for the guest OS plugins (@see pg_dbgf_os) to
+ * register kernel, drivers and other global modules.
+ */
+
+#if 0
+#include <VBox/vmm/dbgf.h>
+
+
+/** Special segment number that indicates that the offset is a relative
+ * virtual address (RVA). I.e. an offset from the start of the module. */
+#define DBGF_SEG_RVA UINT32_C(0xfffffff0)
+
+/** @defgroup grp_dbgf_dbginfo Debug Info Types
+ * @{ */
+/** Other format. */
+#define DBGF_DBGINFO_OTHER RT_BIT_32(0)
+/** Stabs. */
+#define DBGF_DBGINFO_STABS RT_BIT_32(1)
+/** Debug With Arbitrary Record Format (DWARF). */
+#define DBGF_DBGINFO_DWARF RT_BIT_32(2)
+/** Microsoft Codeview debug info. */
+#define DBGF_DBGINFO_CODEVIEW RT_BIT_32(3)
+/** Watcom debug info. */
+#define DBGF_DBGINFO_WATCOM RT_BIT_32(4)
+/** IBM High Level Language debug info. */
+#define DBGF_DBGINFO_HLL RT_BIT_32(5)
+/** Old OS/2 and Windows symbol file. */
+#define DBGF_DBGINFO_SYM RT_BIT_32(6)
+/** Map file. */
+#define DBGF_DBGINFO_MAP RT_BIT_32(7)
+/** @} */
+
+/** @defgroup grp_dbgf_exeimg Executable Image Types
+ * @{ */
+/** Some other format. */
+#define DBGF_EXEIMG_OTHER RT_BIT_32(0)
+/** Portable Executable. */
+#define DBGF_EXEIMG_PE RT_BIT_32(1)
+/** Linear eXecutable. */
+#define DBGF_EXEIMG_LX RT_BIT_32(2)
+/** Linear Executable. */
+#define DBGF_EXEIMG_LE RT_BIT_32(3)
+/** New Executable. */
+#define DBGF_EXEIMG_NE RT_BIT_32(4)
+/** DOS Executable (Mark Zbikowski). */
+#define DBGF_EXEIMG_MZ RT_BIT_32(5)
+/** COM Executable. */
+#define DBGF_EXEIMG_COM RT_BIT_32(6)
+/** a.out Executable. */
+#define DBGF_EXEIMG_AOUT RT_BIT_32(7)
+/** Executable and Linkable Format. */
+#define DBGF_EXEIMG_ELF RT_BIT_32(8)
+/** Mach-O Executable (including FAT ones). */
+#define DBGF_EXEIMG_MACHO RT_BIT_32(9)
+/** @} */
+
+/** Pointer to a module. */
+typedef struct DBGFMOD *PDBGFMOD;
+
+
+/**
+ * Virtual method table for executable image interpreters.
+ */
+typedef struct DBGFMODVTIMG
+{
+ /** Magic number (DBGFMODVTIMG_MAGIC). */
+ uint32_t u32Magic;
+ /** Mask of supported executable image types, see grp_dbgf_exeimg.
+ * Used to speed up the search for a suitable interpreter. */
+ uint32_t fSupports;
+ /** The name of the interpreter. */
+ const char *pszName;
+
+ /**
+ * Try to open the image.
+ *
+ * This combines probing and opening.
+ *
+ * @returns VBox status code. No informational returns defined.
+ *
+ * @param pMod Pointer to the module that is being opened.
+ *
+ * The DBGFMOD::pszDbgFile member will point to
+ * the filename of any debug info we're aware of
+ * on input. Also, or alternatively, it is expected
+ * that the interpreter will look for debug info in
+ * the executable image file when present and that it
+ * may ask the image interpreter for this when it's
+ * around.
+ *
+ * Upon successful return the method is expected to
+ * initialize pDbgOps and pvDbgPriv.
+ */
+ DECLCALLBACKMEMBER(int, pfnTryOpen,(PDBGFMOD pMod));
+
+ /**
+ * Close the interpreter, freeing all associated resources.
+ *
+ * The caller sets the pDbgOps and pvDbgPriv DBGFMOD members
+ * to NULL upon return.
+ *
+ * @param pMod Pointer to the module structure.
+ */
+ DECLCALLBACKMEMBER(int, pfnClose,(PDBGFMOD pMod));
+
+} DBGFMODVTIMG;
+
+/**
+ * Virtual method table for debug info interpreters.
+ */
+typedef struct DBGFMODVTDBG
+{
+ /** Magic number (DBGFMODVTDBG_MAGIC). */
+ uint32_t u32Magic;
+ /** Mask of supported debug info types, see grp_dbgf_dbginfo.
+ * Used to speed up the search for a suitable interpreter. */
+ uint32_t fSupports;
+ /** The name of the interpreter. */
+ const char *pszName;
+
+ /**
+ * Try to open the debug info.
+ *
+ * This combines probing and opening.
+ *
+ * @returns VBox status code. No informational returns defined.
+ *
+ * @param pMod Pointer to the module that is being opened.
+ *
+ * The DBGFMOD::pszDbgFile member will point to
+ * the filename of any debug info we're aware of
+ * on input. Also, or alternatively, it is expected
+ * that the interpreter will look for debug info in
+ * the executable image file when present and that it
+ * may ask the image interpreter for this when it's
+ * around.
+ *
+ * Upon successful return the method is expected to
+ * initialize pDbgOps and pvDbgPriv.
+ */
+ DECLCALLBACKMEMBER(int, pfnTryOpen,(PDBGFMOD pMod));
+
+ /**
+ * Close the interpreter, freeing all associated resources.
+ *
+ * The caller sets the pDbgOps and pvDbgPriv DBGFMOD members
+ * to NULL upon return.
+ *
+ * @param pMod Pointer to the module structure.
+ */
+ DECLCALLBACKMEMBER(int, pfnClose,(PDBGFMOD pMod));
+
+ /**
+ * Queries symbol information by symbol name.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, no informational status code.
+ * @retval VERR_DBGF_NO_SYMBOLS if there aren't any symbols.
+ * @retval VERR_SYMBOL_NOT_FOUND if no suitable symbol was found.
+ *
+ * @param pMod Pointer to the module structure.
+ * @param pszSymbol The symbol name.
+ * @param pSymbol Where to store the symbol information.
+ */
+ DECLCALLBACKMEMBER(int, pfnSymbolByName,(PDBGFMOD pMod, const char *pszSymbol, PDBGFSYMBOL pSymbol));
+
+ /**
+ * Queries symbol information by address.
+ *
+ * The returned symbol is what the debug info interpreter considers the symbol
+ * most applicable to the specified address. This usually means a symbol with an
+ * address equal or lower than the requested.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, no informational status code.
+ * @retval VERR_DBGF_NO_SYMBOLS if there aren't any symbols.
+ * @retval VERR_SYMBOL_NOT_FOUND if no suitable symbol was found.
+ *
+ * @param pMod Pointer to the module structure.
+ * @param iSeg The segment number (0-based). DBGF_SEG_RVA can be used.
+ * @param off The offset into the segment.
+ * @param poffDisp Where to store the distance between the specified address
+ * and the returned symbol. Optional.
+ * @param pSymbol Where to store the symbol information.
+ */
+ DECLCALLBACKMEMBER(int, pfnSymbolByAddr,(PDBGFMOD pMod, uint32_t iSeg, RTGCUINTPTR off, PRTGCINTPTR poffDisp, PDBGFSYMBOL pSymbol));
+
+ /**
+ * Queries line number information by address.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, no informational status code.
+ * @retval VERR_DBGF_NO_LINE_NUMBERS if there aren't any line numbers.
+ * @retval VERR_DBGF_LINE_NOT_FOUND if no suitable line number was found.
+ *
+ * @param pMod Pointer to the module structure.
+ * @param iSeg The segment number (0-based). DBGF_SEG_RVA can be used.
+ * @param off The offset into the segment.
+ * @param poffDisp Where to store the distance between the specified address
+ * and the returned line number. Optional.
+ * @param pLine Where to store the information about the closest line number.
+ */
+ DECLCALLBACKMEMBER(int, pfnLineByAddr,(PDBGFMOD pMod, uint32_t iSeg, RTGCUINTPTR off, PRTGCINTPTR poffDisp, PDBGFLINE pLine));
+
+ /**
+ * Adds a symbol to the module (optional).
+ *
+ * This method is used to implement DBGFR3SymbolAdd.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_SUPPORTED if the interpreter doesn't support this feature.
+ *
+ * @param pMod Pointer to the module structure.
+ * @param pszSymbol The symbol name.
+ * @param iSeg The segment number (0-based). DBGF_SEG_RVA can be used.
+ * @param off The offset into the segment.
+ * @param cbSymbol The area covered by the symbol. 0 is fine.
+ */
+ DECLCALLBACKMEMBER(int, pfnSymbolAdd,(PDBGFMOD pMod, const char *pszSymbol, uint32_t iSeg, RTGCUINTPTR off, RTUINT cbSymbol));
+
+ /** For catching initialization errors (DBGFMODVTDBG_MAGIC). */
+ uint32_t u32EndMagic;
+} DBGFMODVTDBG;
+
+#define DBGFMODVTDBG_MAGIC 123
+
+/**
+ * Module.
+ */
+typedef struct DBGFMOD
+{
+ /** Magic value (DBGFMOD_MAGIC). */
+ uint32_t u32Magic;
+ /** The number of address spaces this module is currently linked into.
+ * This is used to perform automatic cleanup and sharing. */
+ uint32_t cLinks;
+ /** The module name (short). */
+ const char *pszName;
+ /** The module filename. Can be NULL. */
+ const char *pszImgFile;
+ /** The debug info file (if external). Can be NULL. */
+ const char *pszDbgFile;
+
+ /** The method table for the executable image interpreter. */
+ PCDBGFMODVTIMG pImgVt;
+ /** Pointer to the private data of the executable image interpreter. */
+ void *pvImgPriv;
+
+ /** The method table for the debug info interpreter. */
+ PCDBGFMODVTDBG pDbgVt;
+ /** Pointer to the private data of the debug info interpreter. */
+ void *pvDbgPriv;
+
+} DBGFMOD;
+
+#define DBGFMOD_MAGIC 0x12345678
+
+#endif
+
diff --git a/src/VBox/VMM/VMMR3/DBGFOS.cpp b/src/VBox/VMM/VMMR3/DBGFOS.cpp
new file mode 100644
index 00000000..be5d0cf6
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFOS.cpp
@@ -0,0 +1,750 @@
+/* $Id: DBGFOS.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Guest OS Diggers.
+ */
+
+/*
+ * Copyright (C) 2008-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vmm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/param.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+#define DBGF_OS_READ_LOCK(pUVM) \
+ do { int rcLock = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+#define DBGF_OS_READ_UNLOCK(pUVM) \
+ do { int rcLock = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+
+#define DBGF_OS_WRITE_LOCK(pUVM) \
+ do { int rcLock = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+#define DBGF_OS_WRITE_UNLOCK(pUVM) \
+ do { int rcLock = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * EMT interface wrappers.
+ *
+ * The diggers expect to be called on an EMT. Since the user (debugger/Main)
+ * shouldn't be calling directly into the digger code but cannot be assumed to
+ * be on an EMT either, the interfaces handed out by DBGFR3OSQueryInterface are
+ * wrapped so that each method call is dispatched to EMT(0) via
+ * VMR3ReqPriorityCallWaitU.
+ */
+typedef struct DBGFOSEMTWRAPPER
+{
+ /** Pointer to the next list entry. */
+ struct DBGFOSEMTWRAPPER *pNext;
+ /** The interface type. */
+ DBGFOSINTERFACE enmIf;
+ /** The digger interface pointer. */
+ union
+ {
+ /** Generic void pointer. */
+ void *pv;
+ /** DBGFOSINTERFACE_DMESG.*/
+ PDBGFOSIDMESG pDmesg;
+ /** DBGFOSINTERFACE_WINNT.*/
+ PDBGFOSIWINNT pWinNt;
+ } uDigger;
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** The wrapper interface union (consult enmIf). */
+ union
+ {
+ /** DBGFOSINTERFACE_DMESG.*/
+ DBGFOSIDMESG Dmesg;
+ /** DBGFOSINTERFACE_WINNT.*/
+ DBGFOSIWINNT WinNt;
+ } uWrapper;
+} DBGFOSEMTWRAPPER;
+/** Pointer to an EMT interface wrapper. */
+typedef DBGFOSEMTWRAPPER *PDBGFOSEMTWRAPPER;
+
+
+/**
+ * Internal init routine called by DBGFR3Init().
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3OSInit(PUVM pUVM)
+{
+ RT_NOREF_PV(pUVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal cleanup routine called by DBGFR3Term(), part 1.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+void dbgfR3OSTermPart1(PUVM pUVM)
+{
+ DBGF_OS_WRITE_LOCK(pUVM);
+
+ /*
+ * Terminate the current one.
+ */
+ if (pUVM->dbgf.s.pCurOS)
+ {
+ pUVM->dbgf.s.pCurOS->pReg->pfnTerm(pUVM, VMMR3GetVTable(), pUVM->dbgf.s.pCurOS->abData);
+ pUVM->dbgf.s.pCurOS = NULL;
+ }
+
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+}
+
+
+/**
+ * Internal cleanup routine called by DBGFR3Term(), part 2.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+void dbgfR3OSTermPart2(PUVM pUVM)
+{
+ DBGF_OS_WRITE_LOCK(pUVM);
+
+ /* This shouldn't happen. */
+ AssertStmt(!pUVM->dbgf.s.pCurOS, dbgfR3OSTermPart1(pUVM));
+
+ /*
+ * Destroy all the instances.
+ */
+ while (pUVM->dbgf.s.pOSHead)
+ {
+ PDBGFOS pOS = pUVM->dbgf.s.pOSHead;
+ pUVM->dbgf.s.pOSHead = pOS->pNext;
+ if (pOS->pReg->pfnDestruct)
+ pOS->pReg->pfnDestruct(pUVM, VMMR3GetVTable(), pOS->abData);
+
+ PDBGFOSEMTWRAPPER pFree;
+ while ((pFree = pOS->pWrapperHead) != NULL)
+ {
+ pOS->pWrapperHead = pFree->pNext;
+ pFree->pNext = NULL;
+ MMR3HeapFree(pFree);
+ }
+
+ MMR3HeapFree(pOS);
+ }
+
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+}
+
+
+/**
+ * EMT worker function for DBGFR3OSRegister.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The registration structure.
+ */
+static DECLCALLBACK(int) dbgfR3OSRegister(PUVM pUVM, PDBGFOSREG pReg)
+{
+ /* more validations. */
+ DBGF_OS_READ_LOCK(pUVM);
+ PDBGFOS pOS;
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
+ if (!strcmp(pOS->pReg->szName, pReg->szName))
+ {
+ DBGF_OS_READ_UNLOCK(pUVM);
+ Log(("dbgfR3OSRegister: %s -> VERR_ALREADY_LOADED\n", pReg->szName));
+ return VERR_ALREADY_LOADED;
+ }
+ DBGF_OS_READ_UNLOCK(pUVM);
+
+ /*
+ * Allocate a new structure, call the constructor and link it into the list.
+ */
+ pOS = (PDBGFOS)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_OS, RT_UOFFSETOF_DYN(DBGFOS, abData[pReg->cbData]));
+ AssertReturn(pOS, VERR_NO_MEMORY);
+ pOS->pReg = pReg;
+
+ int rc = pOS->pReg->pfnConstruct(pUVM, VMMR3GetVTable(), pOS->abData);
+ if (RT_SUCCESS(rc))
+ {
+ DBGF_OS_WRITE_LOCK(pUVM);
+ pOS->pNext = pUVM->dbgf.s.pOSHead;
+ pUVM->dbgf.s.pOSHead = pOS;
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ }
+ else
+ {
+ if (pOS->pReg->pfnDestruct)
+ pOS->pReg->pfnDestruct(pUVM, VMMR3GetVTable(), pOS->abData);
+ MMR3HeapFree(pOS);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Registers a guest OS digger.
+ *
+ * This will create an instance of the digger and add it
+ * to the list for use in the next call to DBGFR3OSDetect().
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The registration structure.
+ * @thread Any.
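+ *
+ * A hedged registration sketch; MYOSDATA and the myOs* callbacks are
+ * hypothetical, consult DBGFOSREG for the full set of fields:
+ * @code
+ * static DBGFOSREG g_MyOsReg; // zero initialized, so fFlags is 0 as required
+ * g_MyOsReg.u32Magic     = DBGFOSREG_MAGIC;
+ * strcpy(g_MyOsReg.szName, "myos");
+ * g_MyOsReg.cbData       = sizeof(MYOSDATA);
+ * g_MyOsReg.pfnConstruct = myOsConstruct; // ...and the other callbacks
+ * g_MyOsReg.u32EndMagic  = DBGFOSREG_MAGIC;
+ * int rc = DBGFR3OSRegister(pUVM, &g_MyOsReg);
+ * @endcode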
+ */
+VMMR3DECL(int) DBGFR3OSRegister(PUVM pUVM, PCDBGFOSREG pReg)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ AssertPtrReturn(pReg, VERR_INVALID_POINTER);
+ AssertReturn(pReg->u32Magic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
+ AssertReturn(pReg->u32EndMagic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
+ AssertReturn(!pReg->fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(pReg->cbData < _2G, VERR_INVALID_PARAMETER);
+ AssertReturn(pReg->szName[0], VERR_INVALID_NAME);
+ AssertReturn(RTStrEnd(&pReg->szName[0], sizeof(pReg->szName)), VERR_INVALID_NAME);
+ AssertPtrReturn(pReg->pfnConstruct, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pReg->pfnDestruct, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnProbe, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnInit, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnRefresh, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnTerm, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnQueryVersion, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReg->pfnQueryInterface, VERR_INVALID_POINTER);
+
+ /*
+ * Pass it on to EMT(0).
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSRegister, 2, pUVM, pReg);
+}
+
+
+/**
+ * EMT worker function for DBGFR3OSDeregister.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The registration structure.
+ */
+static DECLCALLBACK(int) dbgfR3OSDeregister(PUVM pUVM, PDBGFOSREG pReg)
+{
+ /*
+ * Unlink it.
+ */
+ bool fWasCurOS = false;
+ PDBGFOS pOSPrev = NULL;
+ PDBGFOS pOS;
+ DBGF_OS_WRITE_LOCK(pUVM);
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOSPrev = pOS, pOS = pOS->pNext)
+ if (pOS->pReg == pReg)
+ {
+ if (pOSPrev)
+ pOSPrev->pNext = pOS->pNext;
+ else
+ pUVM->dbgf.s.pOSHead = pOS->pNext;
+ if (pUVM->dbgf.s.pCurOS == pOS)
+ {
+ pUVM->dbgf.s.pCurOS = NULL;
+ fWasCurOS = true;
+ }
+ break;
+ }
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ if (!pOS)
+ {
+ Log(("DBGFR3OSDeregister: %s -> VERR_NOT_FOUND\n", pReg->szName));
+ return VERR_NOT_FOUND;
+ }
+
+ /*
+ * Terminate it if it was the current OS, then invoke the
+ * destructor and clean up.
+ */
+ if (fWasCurOS)
+ pOS->pReg->pfnTerm(pUVM, VMMR3GetVTable(), pOS->abData);
+ if (pOS->pReg->pfnDestruct)
+ pOS->pReg->pfnDestruct(pUVM, VMMR3GetVTable(), pOS->abData);
+
+ PDBGFOSEMTWRAPPER pFree;
+ while ((pFree = pOS->pWrapperHead) != NULL)
+ {
+ pOS->pWrapperHead = pFree->pNext;
+ pFree->pNext = NULL;
+ MMR3HeapFree(pFree);
+ }
+
+ MMR3HeapFree(pOS);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a guest OS digger previously registered by DBGFR3OSRegister.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pReg The registration structure.
+ * @thread Any.
+ */
+VMMR3DECL(int) DBGFR3OSDeregister(PUVM pUVM, PCDBGFOSREG pReg)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pReg, VERR_INVALID_POINTER);
+ AssertReturn(pReg->u32Magic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
+ AssertReturn(pReg->u32EndMagic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
+ AssertReturn(RTStrEnd(&pReg->szName[0], sizeof(pReg->szName)), VERR_INVALID_NAME);
+
+ DBGF_OS_READ_LOCK(pUVM);
+ PDBGFOS pOS;
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
+ if (pOS->pReg == pReg)
+ break;
+ DBGF_OS_READ_UNLOCK(pUVM);
+
+ if (!pOS)
+ {
+ Log(("DBGFR3OSDeregister: %s -> VERR_NOT_FOUND\n", pReg->szName));
+ return VERR_NOT_FOUND;
+ }
+
+ /*
+ * Pass it on to EMT(0).
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDeregister, 2, pUVM, pReg);
+}
+
+
+/**
+ * EMT worker function for DBGFR3OSDetect.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if successfully detected.
+ * @retval VINF_DBGF_OS_NOT_DETCTED if we cannot figure it out.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pszName Where to store the OS name. Empty string if not detected.
+ * @param cchName Size of the buffer.
+ */
+static DECLCALLBACK(int) dbgfR3OSDetect(PUVM pUVM, char *pszName, size_t cchName)
+{
+ /*
+ * Cycle thru the detection routines.
+ */
+ DBGF_OS_WRITE_LOCK(pUVM);
+
+ PDBGFOS const pOldOS = pUVM->dbgf.s.pCurOS;
+ pUVM->dbgf.s.pCurOS = NULL;
+
+ for (PDBGFOS pNewOS = pUVM->dbgf.s.pOSHead; pNewOS; pNewOS = pNewOS->pNext)
+ if (pNewOS->pReg->pfnProbe(pUVM, VMMR3GetVTable(), pNewOS->abData))
+ {
+ int rc;
+ pUVM->dbgf.s.pCurOS = pNewOS;
+ if (pOldOS == pNewOS)
+ rc = pNewOS->pReg->pfnRefresh(pUVM, VMMR3GetVTable(), pNewOS->abData);
+ else
+ {
+ if (pOldOS)
+ pOldOS->pReg->pfnTerm(pUVM, VMMR3GetVTable(), pOldOS->abData);
+ rc = pNewOS->pReg->pfnInit(pUVM, VMMR3GetVTable(), pNewOS->abData);
+ }
+ if (pszName && cchName)
+ strncat(pszName, pNewOS->pReg->szName, cchName - 1); /* buffer was zeroed by the caller, leave room for the terminator */
+
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ return rc;
+ }
+
+ /* not found */
+ if (pOldOS)
+ pOldOS->pReg->pfnTerm(pUVM, VMMR3GetVTable(), pOldOS->abData);
+
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ return VINF_DBGF_OS_NOT_DETCTED;
+}
+
+
+/**
+ * Detects the guest OS and tries to dig out symbols and other useful info.
+ *
+ * When called a second time, the symbols are merely updated if the detected
+ * OS is the same.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if successfully detected.
+ * @retval VINF_DBGF_OS_NOT_DETCTED if we cannot figure it out.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pszName Where to store the OS name. Empty string if not detected.
+ * @param cchName Size of the buffer.
+ * @thread Any.
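+ *
+ * A hedged usage sketch:
+ * @code
+ * char szOs[64];
+ * int rc = DBGFR3OSDetect(pUVM, szOs, sizeof(szOs));
+ * if (rc == VINF_SUCCESS)
+ *     LogRel(("Detected guest OS: %s\n", szOs));
+ * @endcode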
+ */
+VMMR3DECL(int) DBGFR3OSDetect(PUVM pUVM, char *pszName, size_t cchName)
+{
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
+ if (pszName && cchName)
+ *pszName = '\0';
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Pass it on to EMT(0).
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDetect, 3, pUVM, pszName, cchName);
+}
+
+
+/**
+ * EMT worker function for DBGFR3OSQueryNameAndVersion
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName Where to store the OS name. Optional.
+ * @param cchName The size of the name buffer.
+ * @param pszVersion Where to store the version string. Optional.
+ * @param cchVersion The size of the version buffer.
+ */
+static DECLCALLBACK(int) dbgfR3OSQueryNameAndVersion(PUVM pUVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
+{
+ /*
+ * Any known OS?
+ */
+ DBGF_OS_READ_LOCK(pUVM);
+
+ if (pUVM->dbgf.s.pCurOS)
+ {
+ int rc = VINF_SUCCESS;
+ if (pszName && cchName)
+ {
+ size_t cch = strlen(pUVM->dbgf.s.pCurOS->pReg->szName);
+ if (cchName > cch)
+ memcpy(pszName, pUVM->dbgf.s.pCurOS->pReg->szName, cch + 1);
+ else
+ {
+ memcpy(pszName, pUVM->dbgf.s.pCurOS->pReg->szName, cchName - 1);
+ pszName[cchName - 1] = '\0';
+ rc = VINF_BUFFER_OVERFLOW;
+ }
+ }
+
+ if (pszVersion && cchVersion)
+ {
+ int rc2 = pUVM->dbgf.s.pCurOS->pReg->pfnQueryVersion(pUVM, VMMR3GetVTable(), pUVM->dbgf.s.pCurOS->abData,
+ pszVersion, cchVersion);
+ if (RT_FAILURE(rc2) || rc == VINF_SUCCESS)
+ rc = rc2;
+ }
+
+ DBGF_OS_READ_UNLOCK(pUVM);
+ return rc;
+ }
+
+ DBGF_OS_READ_UNLOCK(pUVM);
+ return VERR_DBGF_OS_NOT_DETCTED;
+}
+
+
+/**
+ * Queries the name and/or version string for the guest OS.
+ *
+ * It goes without saying that this querying is done using the current
+ * guest OS digger and not additions or user configuration.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName Where to store the OS name. Optional.
+ * @param cchName The size of the name buffer.
+ * @param pszVersion Where to store the version string. Optional.
+ * @param cchVersion The size of the version buffer.
+ * @thread Any.
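+ *
+ * A hedged usage sketch:
+ * @code
+ * char szName[64], szVersion[64];
+ * int rc = DBGFR3OSQueryNameAndVersion(pUVM, szName, sizeof(szName),
+ *                                      szVersion, sizeof(szVersion));
+ * if (RT_SUCCESS(rc))
+ *     LogRel(("Guest OS: %s %s\n", szName, szVersion));
+ * @endcode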
+ */
+VMMR3DECL(int) DBGFR3OSQueryNameAndVersion(PUVM pUVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszVersion, VERR_INVALID_POINTER);
+
+ /*
+ * Initialize the output up front.
+ */
+ if (pszName && cchName)
+ *pszName = '\0';
+ if (pszVersion && cchVersion)
+ *pszVersion = '\0';
+
+ /*
+ * Pass it on to EMT(0).
+ */
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/,
+ (PFNRT)dbgfR3OSQueryNameAndVersion, 5, pUVM, pszName, cchName, pszVersion, cchVersion);
+}
+
+
+/**
+ * @interface_method_impl{DBGFOSIDMESG,pfnQueryKernelLog, Generic EMT wrapper.}
+ */
+static DECLCALLBACK(int) dbgfR3OSEmtIDmesg_QueryKernelLog(PDBGFOSIDMESG pThis, PUVM pUVM, PCVMMR3VTABLE pVMM, uint32_t fFlags,
+ uint32_t cMessages, char *pszBuf, size_t cbBuf, size_t *pcbActual)
+{
+ PDBGFOSEMTWRAPPER pWrapper = RT_FROM_MEMBER(pThis, DBGFOSEMTWRAPPER, uWrapper.Dmesg);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pUVM == pWrapper->pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(!fFlags, VERR_INVALID_FLAGS);
+ AssertReturn(cMessages > 0, VERR_INVALID_PARAMETER);
+ if (cbBuf)
+ AssertPtrReturn(pszBuf, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pcbActual, VERR_INVALID_POINTER);
+
+ return VMR3ReqPriorityCallWaitU(pWrapper->pUVM, 0 /*idDstCpu*/,
+ (PFNRT)pWrapper->uDigger.pDmesg->pfnQueryKernelLog, 8,
+ pWrapper->uDigger.pDmesg, pUVM, pVMM, fFlags, cMessages, pszBuf, cbBuf, pcbActual);
+}
+
+
+/**
+ * @interface_method_impl{DBGFOSIWINNT,pfnQueryVersion, Generic EMT wrapper.}
+ */
+static DECLCALLBACK(int) dbgfR3OSEmtIWinNt_QueryVersion(PDBGFOSIWINNT pThis, PUVM pUVM, PCVMMR3VTABLE pVMM, uint32_t *puVersMajor,
+ uint32_t *puVersMinor, uint32_t *puBuildNumber, bool *pf32Bit)
+{
+ PDBGFOSEMTWRAPPER pWrapper = RT_FROM_MEMBER(pThis, DBGFOSEMTWRAPPER, uWrapper.WinNt);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pUVM == pWrapper->pUVM, VERR_INVALID_VM_HANDLE);
+
+ return VMR3ReqPriorityCallWaitU(pWrapper->pUVM, 0 /*idDstCpu*/,
+ (PFNRT)pWrapper->uDigger.pWinNt->pfnQueryVersion, 7,
+ pWrapper->uDigger.pWinNt, pUVM, pVMM, puVersMajor, puVersMinor,
+ puBuildNumber, pf32Bit);
+}
+
+
+/**
+ * @interface_method_impl{DBGFOSIWINNT,pfnQueryKernelPtrs, Generic EMT wrapper.}
+ */
+static DECLCALLBACK(int) dbgfR3OSEmtIWinNt_QueryKernelPtrs(PDBGFOSIWINNT pThis, PUVM pUVM, PCVMMR3VTABLE pVMM,
+ PRTGCUINTPTR pGCPtrKernBase, PRTGCUINTPTR pGCPtrPsLoadedModuleList)
+{
+ PDBGFOSEMTWRAPPER pWrapper = RT_FROM_MEMBER(pThis, DBGFOSEMTWRAPPER, uWrapper.WinNt);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pUVM == pWrapper->pUVM, VERR_INVALID_VM_HANDLE);
+
+ return VMR3ReqPriorityCallWaitU(pWrapper->pUVM, 0 /*idDstCpu*/,
+ (PFNRT)pWrapper->uDigger.pWinNt->pfnQueryKernelPtrs, 5,
+ pWrapper->uDigger.pWinNt, pUVM, pVMM, pGCPtrKernBase, pGCPtrPsLoadedModuleList);
+}
+
+
+/**
+ * @interface_method_impl{DBGFOSIWINNT,pfnQueryKpcrForVCpu, Generic EMT wrapper.}
+ */
+static DECLCALLBACK(int) dbgfR3OSEmtIWinNt_QueryKpcrForVCpu(struct DBGFOSIWINNT *pThis, PUVM pUVM, PCVMMR3VTABLE pVMM,
+ VMCPUID idCpu, PRTGCUINTPTR pKpcr, PRTGCUINTPTR pKpcrb)
+{
+ PDBGFOSEMTWRAPPER pWrapper = RT_FROM_MEMBER(pThis, DBGFOSEMTWRAPPER, uWrapper.WinNt);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pUVM == pWrapper->pUVM, VERR_INVALID_VM_HANDLE);
+
+ return VMR3ReqPriorityCallWaitU(pWrapper->pUVM, 0 /*idDstCpu*/,
+ (PFNRT)pWrapper->uDigger.pWinNt->pfnQueryKpcrForVCpu, 6,
+ pWrapper->uDigger.pWinNt, pUVM, pVMM, idCpu, pKpcr, pKpcrb);
+}
+
+
+/**
+ * @interface_method_impl{DBGFOSIWINNT,pfnQueryCurThrdForVCpu, Generic EMT wrapper.}
+ */
+static DECLCALLBACK(int) dbgfR3OSEmtIWinNt_QueryCurThrdForVCpu(struct DBGFOSIWINNT *pThis, PUVM pUVM, PCVMMR3VTABLE pVMM,
+ VMCPUID idCpu, PRTGCUINTPTR pCurThrd)
+{
+ PDBGFOSEMTWRAPPER pWrapper = RT_FROM_MEMBER(pThis, DBGFOSEMTWRAPPER, uWrapper.WinNt);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pUVM == pWrapper->pUVM, VERR_INVALID_VM_HANDLE);
+
+ return VMR3ReqPriorityCallWaitU(pWrapper->pUVM, 0 /*idDstCpu*/,
+ (PFNRT)pWrapper->uDigger.pWinNt->pfnQueryCurThrdForVCpu, 5,
+ pWrapper->uDigger.pWinNt, pUVM, pVMM, idCpu, pCurThrd);
+}
+
+
+/**
+ * EMT worker for DBGFR3OSQueryInterface.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param enmIf The interface identifier.
+ * @param ppvIf Where to store the interface pointer on success.
+ */
+static DECLCALLBACK(void) dbgfR3OSQueryInterface(PUVM pUVM, DBGFOSINTERFACE enmIf, void **ppvIf)
+{
+ AssertPtrReturnVoid(ppvIf);
+ *ppvIf = NULL;
+ AssertReturnVoid(enmIf > DBGFOSINTERFACE_INVALID && enmIf < DBGFOSINTERFACE_END);
+ UVM_ASSERT_VALID_EXT_RETURN_VOID(pUVM);
+
+ /*
+ * Forward the query to the current OS.
+ */
+ DBGF_OS_READ_LOCK(pUVM);
+ PDBGFOS pOS = pUVM->dbgf.s.pCurOS;
+ if (pOS)
+ {
+ void *pvDiggerIf;
+ pvDiggerIf = pOS->pReg->pfnQueryInterface(pUVM, VMMR3GetVTable(), pUVM->dbgf.s.pCurOS->abData, enmIf);
+ if (pvDiggerIf)
+ {
+ /*
+ * Do we have an EMT wrapper for this interface already?
+ *
+ * We ASSUME the interfaces are static and not dynamically allocated
+ * for each QueryInterface call.
+ */
+ PDBGFOSEMTWRAPPER pWrapper = pOS->pWrapperHead;
+ while ( pWrapper != NULL
+ && ( pWrapper->uDigger.pv != pvDiggerIf
+ && pWrapper->enmIf != enmIf) )
+ pWrapper = pWrapper->pNext;
+ if (pWrapper)
+ {
+ *ppvIf = &pWrapper->uWrapper;
+ DBGF_OS_READ_UNLOCK(pUVM);
+ return;
+ }
+ DBGF_OS_READ_UNLOCK(pUVM);
+
+ /*
+ * Create a wrapper.
+ */
+ int rc = MMR3HeapAllocExU(pUVM, MM_TAG_DBGF_OS, sizeof(*pWrapper), (void **)&pWrapper);
+ if (RT_FAILURE(rc))
+ return;
+ pWrapper->uDigger.pv = pvDiggerIf;
+ pWrapper->pUVM = pUVM;
+ pWrapper->enmIf = enmIf;
+ switch (enmIf)
+ {
+ case DBGFOSINTERFACE_DMESG:
+ pWrapper->uWrapper.Dmesg.u32Magic = DBGFOSIDMESG_MAGIC;
+ pWrapper->uWrapper.Dmesg.pfnQueryKernelLog = dbgfR3OSEmtIDmesg_QueryKernelLog;
+ pWrapper->uWrapper.Dmesg.u32EndMagic = DBGFOSIDMESG_MAGIC;
+ break;
+ case DBGFOSINTERFACE_WINNT:
+ pWrapper->uWrapper.WinNt.u32Magic = DBGFOSIWINNT_MAGIC;
+ pWrapper->uWrapper.WinNt.pfnQueryVersion = dbgfR3OSEmtIWinNt_QueryVersion;
+ pWrapper->uWrapper.WinNt.pfnQueryKernelPtrs = dbgfR3OSEmtIWinNt_QueryKernelPtrs;
+ pWrapper->uWrapper.WinNt.pfnQueryKpcrForVCpu = dbgfR3OSEmtIWinNt_QueryKpcrForVCpu;
+ pWrapper->uWrapper.WinNt.pfnQueryCurThrdForVCpu = dbgfR3OSEmtIWinNt_QueryCurThrdForVCpu;
+ pWrapper->uWrapper.WinNt.u32EndMagic = DBGFOSIWINNT_MAGIC;
+ break;
+ default:
+ AssertFailed();
+ MMR3HeapFree(pWrapper);
+ return;
+ }
+
+ DBGF_OS_WRITE_LOCK(pUVM);
+ if (pUVM->dbgf.s.pCurOS == pOS)
+ {
+ pWrapper->pNext = pOS->pWrapperHead;
+ pOS->pWrapperHead = pWrapper;
+ *ppvIf = &pWrapper->uWrapper;
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ }
+ else
+ {
+ DBGF_OS_WRITE_UNLOCK(pUVM);
+ MMR3HeapFree(pWrapper);
+ }
+ return;
+ }
+ }
+ DBGF_OS_READ_UNLOCK(pUVM);
+}
+
+
+/**
+ * Query an optional digger interface.
+ *
+ * @returns Pointer to the digger interface on success, NULL if the interface
+ * isn't available or there is no active guest OS digger.
+ * @param pUVM The user mode VM handle.
+ * @param enmIf The interface identifier.
+ * @thread Any.
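+ *
+ * A hedged usage sketch, fetching the kernel log through the dmesg interface
+ * of the detected guest OS (the buffer size is arbitrary):
+ * @code
+ * PDBGFOSIDMESG pDmesg = (PDBGFOSIDMESG)DBGFR3OSQueryInterface(pUVM, DBGFOSINTERFACE_DMESG);
+ * if (pDmesg)
+ * {
+ *     char   szBuf[8192];
+ *     size_t cbActual = 0;
+ *     int rc = pDmesg->pfnQueryKernelLog(pDmesg, pUVM, VMMR3GetVTable(), 0, // fFlags
+ *                                        32, szBuf, sizeof(szBuf), &cbActual); // cMessages
+ *     if (RT_SUCCESS(rc))
+ *         LogRel(("%s\n", szBuf));
+ * }
+ * @endcode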
+ */
+VMMR3DECL(void *) DBGFR3OSQueryInterface(PUVM pUVM, DBGFOSINTERFACE enmIf)
+{
+ AssertMsgReturn(enmIf > DBGFOSINTERFACE_INVALID && enmIf < DBGFOSINTERFACE_END, ("%d\n", enmIf), NULL);
+
+ /*
+ * Pass it on to an EMT.
+ */
+ void *pvIf = NULL;
+ VMR3ReqPriorityCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3OSQueryInterface, 3, pUVM, enmIf, &pvIf);
+ return pvIf;
+}
+
+
+
+/**
+ * Internal wrapper for calling DBGFOSREG::pfnStackUnwindAssist.
+ */
+int dbgfR3OSStackUnwindAssist(PUVM pUVM, VMCPUID idCpu, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState,
+ PCCPUMCTX pInitialCtx, RTDBGAS hAs, uint64_t *puScratch)
+{
+ int rc = VINF_SUCCESS;
+ if (pUVM->dbgf.s.pCurOS)
+ {
+ ASMCompilerBarrier();
+ DBGF_OS_READ_LOCK(pUVM);
+ PDBGFOS pOS = pUVM->dbgf.s.pCurOS;
+ if (pOS)
+ rc = pOS->pReg->pfnStackUnwindAssist(pUVM, VMMR3GetVTable(), pUVM->dbgf.s.pCurOS->abData, idCpu, pFrame,
+ pState, pInitialCtx, hAs, puScratch);
+ DBGF_OS_READ_UNLOCK(pUVM);
+ }
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp b/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp
new file mode 100644
index 00000000..174e0666
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp
@@ -0,0 +1,2776 @@
+/* $Id: DBGFR3Bp.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Breakpoint Management.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
+ *
+ * The debugger facility's breakpoint manager exists to efficiently manage
+ * large numbers of breakpoints for use cases like DTrace-style operations
+ * or execution flow tracing. Especially execution flow tracing can
+ * require thousands of breakpoints which need to be managed efficiently to not slow
+ * down guest operation too much. Before the rewrite, started at the end of 2020, DBGF could
+ * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
+ * manager is supposed to be able to handle up to one million breakpoints.
+ *
+ * @see grp_dbgf
+ *
+ *
+ * @section sec_dbgf_bp_owner Breakpoint owners
+ *
+ * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
+ * callback assigned which is called whenever a breakpoint with the owner assigned is hit.
+ * The common part of the owner is managed by a single table mapped into both ring-0
+ * and ring-3, with the handle being the index into the table. This allows resolving
+ * the handle to the internal structure efficiently. Searching for a free entry is
+ * done using a bitmap indicating free and occupied entries. For the optional
+ * ring-0 owner part there is a separate ring-0 only table for security reasons.
+ *
+ * The callback of the owner can be used to gather and log guest state information
+ * and decide whether to continue guest execution or stop and drop into the debugger.
+ * Breakpoints which don't have an owner assigned will always drop the VM right into
+ * the debugger.
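+ *
+ * A hedged sketch of the bitmap based free-entry search mentioned above, using
+ * the IPRT bit helpers (the bitmap and handle names are illustrative):
+ * @code
+ * int32_t iFree = ASMBitFirstClear(pbmBpOwnersAlloc, DBGF_BP_OWNER_COUNT_MAX);
+ * if (iFree >= 0 && !ASMAtomicBitTestAndSet(pbmBpOwnersAlloc, (uint32_t)iFree))
+ *     hBpOwner = (DBGFBPOWNER)iFree; // claimed the entry
+ * // a negative iFree means the table is full; a set bit means we lost a race and retry
+ * @endcode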
+ *
+ *
+ * @section sec_dbgf_bp_bps Breakpoints
+ *
+ * Breakpoints are referenced by an opaque handle which acts as an index into a global table
+ * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint
+ * like trigger conditions, type, owner, etc. If an owner is given, an optional opaque user argument
+ * can be supplied which is passed to the respective owner callback. For owners with ring-0 callbacks
+ * a dedicated ring-0 table is held saving possible ring-0 user arguments.
+ *
+ * To keep memory consumption under control while still supporting large numbers
+ * of breakpoints, the table is split into fixed-size chunks, and the chunk index
+ * and the index into the chunk can be derived from the handle with only a few
+ * logical operations (see the sketch below).
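+ *
+ * A hedged sketch of that derivation, assuming the 65536 entries per chunk
+ * mentioned in the notes at the end of this page (macro and table names are
+ * illustrative, not the actual DBGF internals):
+ * @code
+ * uint32_t const idChunk    = hBp >> 16;        // which chunk
+ * uint32_t const idxInChunk = hBp & UINT16_MAX; // entry within the chunk
+ * PDBGFBPINT     pBp        = &paBpChunks[idChunk].paBpBaseR3[idxInChunk];
+ * @endcode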
+ *
+ *
+ * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
+ *
+ * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
+ * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
+ * as possible. The following scheme is employed to achieve this:
+ *
+ * @verbatim
+ * 7 6 5 4 3 2 1 0
+ * +---+---+---+---+---+---+---+---+
+ * | | | | | | | | | BP address
+ * +---+---+---+---+---+---+---+---+
+ * \_____________________/ \_____/
+ * | |
+ * | +---------------+
+ * | |
+ * BP table | v
+ * +------------+ | +-----------+
+ * | hBp 0 | | X <- | 0 | xxxxx |
+ * | hBp 1 | <----------------+------------------------ | 1 | hBp 1 |
+ * | | | +--- | 2 | idxL2 |
+ * | hBp <m> | <---+ v | |...| ... |
+ * | | | +-----------+ | |...| ... |
+ * | | | | | | |...| ... |
+ * | hBp <n> | <-+ +----- | +> leaf | | | . |
+ * | | | | | | | | . |
+ * | | | | + root + | <------------+ | . |
+ * | | | | | | +-----------+
+ * | | +------- | leaf<+ | L1: 65536
+ * | . | | . |
+ * | . | | . |
+ * | . | | . |
+ * +------------+ +-----------+
+ * L2 idx BST
+ * @endverbatim
+ *
+ * -# Take the lowest 16 bits of the breakpoint address and use them as a direct index
+ *    into the L1 table. The L1 table is contiguous and consists of 4 byte entries,
+ *    resulting in 256KiB of memory used. The topmost 4 bits of an entry indicate how
+ *    to proceed and the meaning of the remaining 28 bits depends on them:
+ *      - A 0 type entry means no breakpoint is registered with the matching lowest 16 bits,
+ *        so forward the event to the guest.
+ *      - A 1 in the topmost 4 bits means that the remaining 28 bits directly denote a breakpoint
+ *        handle which can be resolved by extracting the chunk index and the index into the chunk
+ *        of the global breakpoint table. If the address matches, the breakpoint is processed
+ *        according to its configuration; otherwise the event is again forwarded to the guest.
+ *      - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
+ *        matching the lowest 16 bits and the search must continue in the L2 table, with the
+ *        remaining 28 bits acting as an index into the L2 table indicating the search root.
+ * -# The L2 table consists of multiple index based binary search trees, one for each reference
+ *    from the L1 table. The search key is the upper 6 bytes of the breakpoint address.
+ *    The tree is traversed until either a matching address is found and the breakpoint is
+ *    processed, or the event is forwarded to the guest if the search is unsuccessful.
+ *    Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory
+ *    usage; see the decoding sketch below.
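+ *
+ * A hedged sketch of the L1 entry decoding described above (the table name,
+ * the GCPtrBp breakpoint address and the constants are illustrative, not the
+ * actual DBGF internals):
+ * @code
+ * uint32_t const u32Entry = paBpLocL1[GCPtrBp & UINT16_MAX]; // low 16 bits index the L1 table
+ * uint32_t const uPayload = u32Entry & UINT32_C(0x0fffffff); // remaining 28 bits
+ * switch (u32Entry >> 28)                                    // topmost 4 bits give the entry type
+ * {
+ *     case 0: break; // no breakpoint here, forward the event to the guest
+ *     case 1: break; // uPayload is a breakpoint handle (chunk + index within chunk)
+ *     case 2: break; // uPayload is the root index into the L2 BST
+ * }
+ * @endcode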
+ *
+ * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
+ *
+ * Because only a limited number of I/O ports exists (65536), a single table with 65536 entries,
+ * each 4 bytes big, is allocated. This amounts to an additional 256KiB of memory being used as soon
+ * as an I/O breakpoint is enabled. The entries contain the breakpoint handle directly, allowing only
+ * one breakpoint per port for now, a limitation we accept to keep things relatively simple.
+ * When there is at least one I/O breakpoint active, IOM is notified and will afterwards call the DBGF
+ * API whenever the guest does an I/O port access, to decide whether a breakpoint was hit. This keeps
+ * the overhead small when no I/O port breakpoint is enabled (see the lookup sketch below).
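+ *
+ * A hedged sketch of the per-access lookup this enables (the table name is
+ * illustrative):
+ * @code
+ * uint32_t const u32Entry = paBpLocPortIo[uPort]; // uPort is 0..65535
+ * if (u32Entry != 0) // a breakpoint handle is registered for this port
+ * {
+ *     // check the access type against the breakpoint configuration and process it
+ * }
+ * @endcode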
+ *
+ * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
+ *
+ * - The assumption for this approach is that the lowest 16 bits of the breakpoint address
+ *   are hopefully the most varying ones across breakpoints, so the traversal
+ * can skip the L2 table in most of the cases. Even if the L2 table must be taken the
+ * individual trees should be quite shallow resulting in low overhead when walking it
+ * (though only real world testing can assert this assumption).
+ * - Index based tables and trees are used instead of pointers because the tables
+ * are always mapped into ring-0 and ring-3 with different base addresses.
+ * - Efficient breakpoint allocation is done by having a global bitmap indicating free
+ * and occupied breakpoint entries. Same applies for the L2 BST table.
+ * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
+ * might still access it (want to try a lockless approach first using
+ * atomic updates, have to resort to locking if that turns out to be too difficult).
+ * - Each BP entry is supposed to be 64 bytes big and each chunk should contain 65536
+ * breakpoints which results in 4MiB for each chunk plus the allocation bitmap.
+ * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
+ * and do strict bounds checking before accessing anything. The L1 and L2 table
+ * are written to from ring-3 only. Same goes for the breakpoint table with the
+ * exception being the opaque user argument for ring-0 which is stored in ring-0 only
+ * memory.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/hm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+
+#include "DBGFInline.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+RT_C_DECLS_END
+
+
+/**
+ * Initializes the breakpoint management.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
+{
+ PVM pVM = pUVM->pVM;
+
+ //pUVM->dbgf.s.paBpOwnersR3 = NULL;
+ //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
+
+ /* Init hardware breakpoint states. */
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
+ {
+ PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
+
+ AssertCompileSize(DBGFBP, sizeof(uint32_t));
+ pHwBp->hBp = NIL_DBGFBP;
+ //pHwBp->fEnabled = false;
+ }
+
+ /* Now the global breakpoint table chunks. */
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
+ {
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
+
+ //pBpChunk->pBpBaseR3 = NULL;
+ //pBpChunk->pbmAlloc = NULL;
+ //pBpChunk->cBpsFree = 0;
+ pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
+ }
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+ {
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+ //pL2Chunk->pL2BaseR3 = NULL;
+ //pL2Chunk->pbmAlloc = NULL;
+ //pL2Chunk->cFree = 0;
+ pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
+ }
+
+ //pUVM->dbgf.s.paBpLocL1R3 = NULL;
+ //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
+ pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
+ return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
+}
+
+
+/**
+ * Terminates the breakpoint management.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
+{
+ if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
+ {
+ RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
+ pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
+ }
+
+ /* Free all allocated chunk bitmaps (the chunks itself are destroyed during ring-0 VM destruction). */
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
+ {
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
+
+ if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
+ {
+ AssertPtr(pBpChunk->pbmAlloc);
+ RTMemFree((void *)pBpChunk->pbmAlloc);
+ pBpChunk->pbmAlloc = NULL;
+ pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
+ }
+ }
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+ {
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+ if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
+ {
+ AssertPtr(pL2Chunk->pbmAlloc);
+ RTMemFree((void *)pL2Chunk->pbmAlloc);
+ pL2Chunk->pbmAlloc = NULL;
+ pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
+ }
+ }
+
+ if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
+ {
+ RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
+ pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pvUser);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * The initialization will be done on EMT(0). It is possible that multiple
+ * initialization attempts are done because dbgfR3BpEnsureInit() can be called
+ * from racing non EMT threads when trying to set a breakpoint for the first time.
+ * Just fake success if the L1 is already present which means that a previous rendezvous
+ * successfully initialized the breakpoint manager.
+ */
+ PUVM pUVM = pVM->pUVM;
+ if ( pVCpu->idCpu == 0
+ && !pUVM->dbgf.s.paBpLocL1R3)
+ {
+ if (!SUPR3IsDriverless())
+ {
+ DBGFBPINITREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.paBpLocL1R3 = NULL;
+ int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
+ AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
+ pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
+ }
+ else
+ {
+ /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
+ uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
+ pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
+ AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Ensures that the breakpoint manager is fully initialized.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpEnsureInit(PUVM pUVM)
+{
+ /* If the L1 lookup table is allocated initialization succeeded before. */
+ if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
+ return VINF_SUCCESS;
+
+ /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pvUser);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * The initialization will be done on EMT(0). It is possible that multiple
+ * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
+ * from racing non EMT threads when trying to set a breakpoint for the first time.
+ * Just fake success if the L1 is already present which means that a previous rendezvous
+ * successfully initialized the breakpoint manager.
+ */
+ PUVM pUVM = pVM->pUVM;
+ if ( pVCpu->idCpu == 0
+ && !pUVM->dbgf.s.paBpLocPortIoR3)
+ {
+ if (!SUPR3IsDriverless())
+ {
+ DBGFBPINITREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.paBpLocL1R3 = NULL;
+ int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
+ AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
+ pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
+ }
+ else
+ {
+ /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
+ uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
+ pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
+ AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Ensures that the breakpoint manager is initialized to handle I/O port breakpoints.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
+{
+ /* If the L1 lookup table is allocated initialization succeeded before. */
+ if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
+ return VINF_SUCCESS;
+
+ /* Ensure that the breakpoint manager is initialized. */
+ int rc = dbgfR3BpEnsureInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pvUser);
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * The initialization will be done on EMT(0). It is possible that multiple
+ * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
+ * from racing non-EMT threads when trying to create a breakpoint owner for the first time.
+ * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
+ * successfully initialized the breakpoint owner table.
+ */
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = pVM->pUVM;
+ if ( pVCpu->idCpu == 0
+ && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
+ {
+ AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
+ pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
+ if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
+ {
+ if (!SUPR3IsDriverless())
+ {
+ DBGFBPOWNERINITREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.paBpOwnerR3 = NULL;
+ rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
+ if (RT_SUCCESS(rc))
+ {
+ pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
+ return VINF_SUCCESS;
+ }
+ AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
+ }
+ else
+ {
+ /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
+ uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
+            pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)RTMemPageAllocZ(cbBpOwnerR3);
+            if (pUVM->dbgf.s.paBpOwnersR3)
+ return VINF_SUCCESS;
+ AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+
+ RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
+ pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Ensures that the breakpoint owner table is fully initialized.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
+{
+ /* If the allocation bitmap is allocated initialization succeeded before. */
+ if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
+ return VINF_SUCCESS;
+
+ /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
+}
+
+
+/**
+ * Retains the given breakpoint owner handle for use.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
+ * @param pUVM The user mode VM handle.
+ * @param   hBpOwner    The breakpoint owner handle to retain, NIL_DBGFBPOWNER is accepted without doing anything.
+ * @param   fIo         Flag whether the owner must have the I/O handler set because it is used by an I/O breakpoint.
+ */
+DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
+{
+ if (hBpOwner == NIL_DBGFBPOWNER)
+ return VINF_SUCCESS;
+
+ PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
+ if (pBpOwner)
+ {
+        AssertReturn(   (   fIo
+ && pBpOwner->pfnBpIoHitR3)
+ || ( !fIo
+ && pBpOwner->pfnBpHitR3),
+ VERR_INVALID_HANDLE);
+ ASMAtomicIncU32(&pBpOwner->cRefs);
+ return VINF_SUCCESS;
+ }
+
+ return VERR_INVALID_HANDLE;
+}
+
+
+/**
+ * Releases the given breakpoint owner handle.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
+ * @param pUVM The user mode VM handle.
+ * @param   hBpOwner    The breakpoint owner handle to release, NIL_DBGFBPOWNER is accepted without doing anything.
+ */
+DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
+{
+ if (hBpOwner == NIL_DBGFBPOWNER)
+ return VINF_SUCCESS;
+
+ PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
+ if (pBpOwner)
+ {
+ Assert(pBpOwner->cRefs > 1);
+ ASMAtomicDecU32(&pBpOwner->cRefs);
+ return VINF_SUCCESS;
+ }
+
+ return VERR_INVALID_HANDLE;
+}
+
+
+/**
+ * Returns the internal breakpoint state for the given handle.
+ *
+ * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to resolve.
+ */
+DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
+{
+ uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
+ uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
+
+ AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
+ AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
+
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
+ AssertReturn(pBpChunk->idChunk == idChunk, NULL);
+ AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
+ AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
+
+ return &pBpChunk->pBpBaseR3[idxEntry];
+}
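+
+
+/*
+ * Illustrative sketch: a DBGFBP handle is just a chunk ID and an entry index
+ * packed into a 32-bit value, so the DBGF_BP_HND_* macros used above are
+ * expected to round-trip as follows (assuming only what the code above
+ * already relies on):
+ *
+ * @code
+ *     DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, idxEntry);
+ *     Assert(DBGF_BP_HND_GET_CHUNK_ID(hBp) == idChunk);
+ *     Assert(DBGF_BP_HND_GET_ENTRY(hBp)    == idxEntry);
+ * @endcode
+ */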
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
+
+ AssertReturn( pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
+ || pBpChunk->idChunk == idChunk,
+ VERR_DBGF_BP_IPE_2);
+
+ /*
+ * The initialization will be done on EMT(0). It is possible that multiple
+ * allocation attempts are done when multiple racing non-EMT threads try to
+ * allocate a breakpoint and a new chunk needs to be allocated.
+ * Ignore the request and succeed if the chunk is allocated meaning that a
+ * previous rendezvous successfully allocated the chunk.
+ */
+ int rc = VINF_SUCCESS;
+ if ( pVCpu->idCpu == 0
+ && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
+ {
+ /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
+ AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
+ void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
+ if (RT_LIKELY(pbmAlloc))
+ {
+ if (!SUPR3IsDriverless())
+ {
+ DBGFBPCHUNKALLOCREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idChunk = idChunk;
+ Req.pChunkBaseR3 = NULL;
+ rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
+ if (RT_SUCCESS(rc))
+ pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
+ else
+ AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
+ }
+ else
+ {
+ /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
+ uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
+ pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
+ AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
+ pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
+ pBpChunk->idChunk = idChunk;
+ return VINF_SUCCESS;
+ }
+
+ RTMemFree(pbmAlloc);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Tries to allocate the given chunk which requires an EMT rendezvous.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idChunk The chunk to allocate.
+ *
+ * @thread Any thread.
+ */
+DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
+{
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
+}
+
+
+/**
+ * Tries to allocate a new breakpoint of the given type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hOwner The owner handle, NIL_DBGFBPOWNER if none assigned.
+ * @param pvUser Opaque user data passed in the owner callback.
+ * @param enmType Breakpoint type to allocate.
+ * @param   fFlags              Flags associated with the allocated breakpoint.
+ * @param   iHitTrigger         The hit count at which the breakpoint starts triggering.
+ *                              Use 0 (or 1) if it should trigger at once.
+ * @param   iHitDisable         The hit count which disables the breakpoint.
+ *                              Use ~(uint64_t)0 if it should never be disabled.
+ * @param phBp Where to return the opaque breakpoint handle on success.
+ * @param ppBp Where to return the pointer to the internal breakpoint state on success.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
+ uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
+ PDBGFBPINT *ppBp)
+{
+ bool fIo = enmType == DBGFBPTYPE_PORT_IO
+ || enmType == DBGFBPTYPE_MMIO;
+ int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Search for a chunk having a free entry, allocating new chunks
+ * if the encountered ones are full.
+ *
+ * This can be called from multiple threads at the same time so special care
+ * has to be taken to not require any locking here.
+ */
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
+ {
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
+
+ uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
+ if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
+ {
+ rc = dbgfR3BpChunkAlloc(pUVM, i);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
+ break;
+ }
+
+ idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
+ Assert(idChunk == i);
+ }
+
+ /** @todo Optimize with some hinting if this turns out to be too slow. */
+ for (;;)
+ {
+ uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
+ if (cBpsFree)
+ {
+ /*
+ * Scan the associated bitmap for a free entry, if none can be found another thread
+ * raced us and we go to the next chunk.
+ */
+ int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
+ if (iClr != -1)
+ {
+ /*
+ * Try to allocate, we could get raced here as well. In that case
+ * we try again.
+ */
+ if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
+ {
+ /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
+ ASMAtomicDecU32(&pBpChunk->cBpsFree);
+
+ PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
+ pBp->Pub.cHits = 0;
+ pBp->Pub.iHitTrigger = iHitTrigger;
+ pBp->Pub.iHitDisable = iHitDisable;
+ pBp->Pub.hOwner = hOwner;
+ pBp->Pub.u16Type = DBGF_BP_PUB_MAKE_TYPE(enmType);
+ pBp->Pub.fFlags = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
+ pBp->pvUserR3 = pvUser;
+
+ /** @todo Owner handling (reference and call ring-0 if it has an ring-0 callback). */
+
+ *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
+ *ppBp = pBp;
+ return VINF_SUCCESS;
+ }
+ /* else Retry with another spot. */
+ }
+ else /* no free entry in bitmap, go to the next chunk */
+ break;
+ }
+ else /* !cBpsFree, go to the next chunk */
+ break;
+ }
+ }
+
+ rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
+ return VERR_DBGF_NO_MORE_BP_SLOTS;
+}
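+
+
+/*
+ * The allocation loop above relies on a small lock-free claim pattern instead
+ * of a lock; sketched in isolation (illustrative only, pbmAlloc/cEntries stand
+ * in for the chunk's bitmap and size) it reads:
+ *
+ * @code
+ *     for (;;)
+ *     {
+ *         int32_t iClr = ASMBitFirstClear(pbmAlloc, cEntries); // Find a candidate entry.
+ *         if (iClr == -1)
+ *             break;                                           // Chunk is full, try the next one.
+ *         if (!ASMAtomicBitTestAndSet(pbmAlloc, iClr))
+ *             return iClr;                                     // Bit was clear, the entry is ours.
+ *         // Otherwise another thread claimed the bit first, rescan the bitmap.
+ *     }
+ * @endcode
+ */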
+
+
+/**
+ * Frees the given breakpoint handle.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to free.
+ * @param pBp The internal breakpoint state pointer.
+ */
+static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
+ uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
+
+ AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
+ AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);
+
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
+ AssertPtrReturnVoid(pBpChunk->pbmAlloc);
+ AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));
+
+ /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
+ int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
+ memset(pBp, 0, sizeof(*pBp));
+
+ ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
+ ASMAtomicIncU32(&pBpChunk->cBpsFree);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
+
+ AssertReturn( pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
+ || pL2Chunk->idChunk == idChunk,
+ VERR_DBGF_BP_IPE_2);
+
+ /*
+ * The initialization will be done on EMT(0). It is possible that multiple
+ * allocation attempts are done when multiple racing non EMT threads try to
+ * allocate a breakpoint and a new chunk needs to be allocated.
+ * Ignore the request and succeed if the chunk is allocated meaning that a
+ * previous rendezvous successfully allocated the chunk.
+ */
+ int rc = VINF_SUCCESS;
+ if ( pVCpu->idCpu == 0
+ && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
+ {
+ /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
+ AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
+ void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
+ if (RT_LIKELY(pbmAlloc))
+ {
+ if (!SUPR3IsDriverless())
+ {
+ DBGFBPL2TBLCHUNKALLOCREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idChunk = idChunk;
+ Req.pChunkBaseR3 = NULL;
+ rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
+ if (RT_SUCCESS(rc))
+ pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
+ else
+ AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
+ }
+ else
+ {
+ /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
+ uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
+ pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
+ AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
+ pL2Chunk->cFree = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
+ pL2Chunk->idChunk = idChunk;
+ return VINF_SUCCESS;
+ }
+
+ RTMemFree(pbmAlloc);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idChunk The chunk to allocate.
+ *
+ * @thread Any thread.
+ */
+DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
+{
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
+}
+
+
+/**
+ * Tries to allocate a new L2 table entry from the available chunks.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pidxL2Tbl Where to return the L2 table entry index on success.
+ * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
+{
+ /*
+ * Search for a chunk having a free entry, allocating new chunks
+ * if the encountered ones are full.
+ *
+ * This can be called from multiple threads at the same time so special care
+ * has to be taken to not require any locking here.
+ */
+ for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+ {
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+ uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
+ if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
+ {
+ int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
+ break;
+ }
+
+ idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
+ Assert(idChunk == i);
+ }
+
+ /** @todo Optimize with some hinting if this turns out to be too slow. */
+ for (;;)
+ {
+ uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
+ if (cFree)
+ {
+ /*
+ * Scan the associated bitmap for a free entry, if none can be found another thread
+ * raced us and we go to the next chunk.
+ */
+ int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
+ if (iClr != -1)
+ {
+ /*
+ * Try to allocate, we could get raced here as well. In that case
+ * we try again.
+ */
+ if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
+ {
+ /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
+ ASMAtomicDecU32(&pL2Chunk->cFree);
+
+ PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
+
+ *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
+ *ppL2TblEntry = pL2Entry;
+ return VINF_SUCCESS;
+ }
+ /* else Retry with another spot. */
+ }
+ else /* no free entry in bitmap, go to the next chunk */
+ break;
+ }
+ else /* !cFree, go to the next chunk */
+ break;
+ }
+ }
+
+ return VERR_DBGF_NO_MORE_BP_SLOTS;
+}
+
+
+/**
+ * Frees the given L2 table entry.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idxL2Tbl The L2 table index to free.
+ * @param pL2TblEntry The L2 table entry pointer to free.
+ */
+static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
+{
+ uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
+ uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
+
+ AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
+ AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
+
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
+ AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
+ AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
+
+ memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
+
+ ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
+ ASMAtomicIncU32(&pL2Chunk->cFree);
+}
+
+
+/**
+ * Sets the enabled flag of the given breakpoint to the given value.
+ *
+ * @param   pBp         The breakpoint to set the enabled state for.
+ * @param fEnabled Enabled status.
+ */
+DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
+{
+ if (fEnabled)
+ pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
+ else
+ pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
+}
+
+
+/**
+ * Assigns a hardware breakpoint state to the given register breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross-context VM structure pointer.
+ * @param hBp The breakpoint handle to assign.
+ * @param pBp The internal breakpoint state.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
+
+ for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
+ {
+ PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
+
+ AssertCompileSize(DBGFBP, sizeof(uint32_t));
+ if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
+ {
+ pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
+ pHwBp->fType = pBp->Pub.u.Reg.fType;
+ pHwBp->cb = pBp->Pub.u.Reg.cb;
+ pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);
+
+ pBp->Pub.u.Reg.iReg = i;
+ return VINF_SUCCESS;
+ }
+ }
+
+ return VERR_DBGF_NO_MORE_BP_SLOTS;
+}
+
+
+/**
+ * Removes the assigned hardware breakpoint state from the given register breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross-context VM structure pointer.
+ * @param hBp The breakpoint handle to remove.
+ * @param pBp The internal breakpoint state.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
+
+ PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
+ AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
+ AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
+
+ pHwBp->GCPtr = 0;
+ pHwBp->fType = 0;
+ pHwBp->cb = 0;
+ ASMCompilerBarrier();
+
+ ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Returns the pointer to the L2 table entry from the given index.
+ *
+ * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
+ * @param pUVM The user mode VM handle.
+ * @param idxL2 The L2 table index to resolve.
+ *
+ * @note    The content of the resolved L2 table entry is not validated!
+ */
+DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
+{
+ uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
+ uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
+
+ AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
+ AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
+
+ PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
+ AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
+ AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
+
+ return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
+}
+
+
+/**
+ * Creates a binary search tree with the given root and leaf nodes.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL1 The index into the L1 table where the created tree should be linked into.
+ * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
+ * @param   hBpRoot     The DBGF handle to assign to the root node.
+ * @param   GCPtrRoot   The root node's GC pointer to use as a key.
+ * @param   hBpLeaf     The DBGF handle to assign to the leaf node.
+ * @param   GCPtrLeaf   The leaf node's GC pointer to use as a key.
+ */
+static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
+ DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
+ DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
+{
+ AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
+ Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
+
+ /* Allocate two nodes. */
+ uint32_t idxL2Root = 0;
+ PDBGFBPL2ENTRY pL2Root = NULL;
+ int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t idxL2Leaf = 0;
+ PDBGFBPL2ENTRY pL2Leaf = NULL;
+ rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
+ if (RT_SUCCESS(rc))
+ {
+ dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
+ if (GCPtrLeaf < GCPtrRoot)
+ dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
+ else
+ dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
+
+ uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
+ if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
+ return VINF_SUCCESS;
+
+ /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
+ dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
+ rc = VINF_TRY_AGAIN;
+ }
+
+ dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
+ }
+
+ return rc;
+}
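+
+
+/*
+ * Illustrative note: an L1 slot thus moves through three states.  For two int3
+ * breakpoints whose addresses map to the same L1 index, the transitions look
+ * roughly like this (sketch; hBp1/hBp2 are hypothetical handles and the exact
+ * bit layout lives in the L1 entry macros):
+ *
+ * @code
+ *     // Initially:        paBpLocL1R3[idxL1] == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL
+ *     // After first add:  paBpLocL1R3[idxL1] == DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp1)
+ *     // After second add: paBpLocL1R3[idxL1] == DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root),
+ *     //                   with hBp1 and hBp2 carried by the new search tree's nodes.
+ * @endcode
+ */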
+
+
+/**
+ * Inserts the given breakpoint handle into an existing binary search tree.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL2Root The index of the tree root in the L2 table.
+ * @param hBp The node DBGF handle to insert.
+ * @param   GCPtr       The node's GC pointer to use as a key.
+ */
+static int dbgfR3BpInt3L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
+{
+ GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
+
+ /* Allocate a new node first. */
+ uint32_t idxL2Nd = 0;
+ PDBGFBPL2ENTRY pL2Nd = NULL;
+ int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
+ if (RT_SUCCESS(rc))
+ {
+ /* Walk the tree and find the correct node to insert to. */
+ PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
+ while (RT_LIKELY(pL2Entry))
+ {
+ /* Make a copy of the entry. */
+ DBGFBPL2ENTRY L2Entry;
+ L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
+ L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
+
+ RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
+ AssertBreak(GCPtr != GCPtrL2Entry);
+
+ /* Not found, get to the next level. */
+ uint32_t idxL2Next = GCPtr < GCPtrL2Entry
+ ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
+ : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
+ if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
+ {
+                /* Insert the new node here, linking the parent to the freshly allocated entry. */
+                dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
+                if (GCPtr < GCPtrL2Entry)
+                    dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Nd, 0 /*iDepth*/);
+                else
+                    dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Nd, 0 /*iDepth*/);
+ return VINF_SUCCESS;
+ }
+
+ pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
+ }
+
+ dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
+ rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
+ * possibly creating a new tree.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL1 The index into the L1 table the breakpoint uses.
+ * @param hBp The breakpoint handle which is to be added.
+ * @param GCPtr The GC pointer the breakpoint is keyed with.
+ */
+static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
+{
+ int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
+
+ uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
+ uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
+ if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
+ {
+ /* Create a new search tree, gather the necessary information first. */
+ DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
+ PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
+ AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Int3.GCPtr);
+ }
+ else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
+        rc = dbgfR3BpInt3L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
+
+ int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
+ return rc;
+}
+
+
+/**
+ * Gets the leftmost entry starting from the given tree node index.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL2Start The start index to walk from.
+ * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
+ * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
+ * @param   pidxL2NdLeftParent  Where to store the L2 table index of the leftmost entry's parent.
+ * @param   ppL2NdLeftParent    Where to store the pointer to the leftmost L2 table entry's parent.
+ */
+static int dbgfR3BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
+ uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
+ uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
+{
+ uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
+ PDBGFBPL2ENTRY pL2NdParent = NULL;
+
+ for (;;)
+ {
+ PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
+ AssertPtr(pL2Entry);
+
+ uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
+        if (idxL2Left == DBGF_BP_L2_ENTRY_IDX_END)
+ {
+ *pidxL2Leftmost = idxL2Start;
+ *ppL2NdLeftmost = pL2Entry;
+ *pidxL2NdLeftParent = idxL2Parent;
+ *ppL2NdLeftParent = pL2NdParent;
+ break;
+ }
+
+ idxL2Parent = idxL2Start;
+ idxL2Start = idxL2Left;
+ pL2NdParent = pL2Entry;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Removes the given node rearranging the tree.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
+ * @param idxL2Root The L2 table index where the tree root is located.
+ * @param idxL2Nd The node index to remove.
+ * @param pL2Nd The L2 table entry to remove.
+ * @param idxL2NdParent The parents index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
+ * @param pL2NdParent The parents L2 table entry, can be NULL if the root is about to be removed.
+ * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
+ */
+static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
+ uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
+ uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
+ bool fLeftChild)
+{
+ /*
+ * If there are only two nodes remaining the tree will get destroyed and the
+ * L1 entry will be converted to the direct handle type.
+ */
+ uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
+ uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
+
+ Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
+ uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
+ if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
+ idxL2ParentNew = idxL2Left;
+ else
+ {
+        /* Find the leftmost entry of the right subtree and move it to the removed node's location in the tree. */
+ PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
+ PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
+ uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
+ uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
+        int rc = dbgfR3BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost, &pL2NdLeftmost,
+ &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
+ AssertRCReturn(rc, rc);
+
+ if (pL2NdLeftmostParent)
+ {
+ /* Rearrange the leftmost entries parents pointer. */
+ dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
+ dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
+ }
+
+ dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
+
+        /* Update the removed node's parent to point to the new node. */
+ idxL2ParentNew = idxL2Leftmost;
+ }
+
+ if (pL2NdParent)
+ {
+        /* Assign the new L2 index to the proper parent's left or right pointer. */
+ if (fLeftChild)
+ dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
+ else
+ dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
+ }
+ else
+ {
+ /* The root node is removed, set the new root in the L1 table. */
+ Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
+ idxL2Root = idxL2ParentNew;
+        ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2ParentNew));
+ }
+
+ /* Free the node. */
+ dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
+
+ /*
+ * Check whether the old/new root is the only node remaining and convert the L1
+ * table entry to a direct breakpoint handle one in that case.
+ */
+ pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
+ AssertPtr(pL2Nd);
+ if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
+ && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
+ {
+ DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
+ dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
+ ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
+ * pointed to by the given L2 root index.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idxL1 The index into the L1 table pointing to the binary search tree.
+ * @param idxL2Root The L2 table index where the tree root is located.
+ * @param hBp The breakpoint handle which is to be removed.
+ * @param GCPtr The GC pointer the breakpoint is keyed with.
+ */
+static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
+{
+ GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
+
+ int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
+
+ uint32_t idxL2Cur = idxL2Root;
+ uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
+ bool fLeftChild = false;
+ PDBGFBPL2ENTRY pL2EntryParent = NULL;
+ for (;;)
+ {
+ PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
+ AssertPtr(pL2Entry);
+
+        /* Check whether this node is to be removed. */
+ RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
+ if (GCPtrL2Entry == GCPtr)
+ {
+ Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
+
+ rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
+ break;
+ }
+
+ pL2EntryParent = pL2Entry;
+ idxL2Parent = idxL2Cur;
+
+        if (GCPtr < GCPtrL2Entry)
+ {
+ fLeftChild = true;
+ idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
+ }
+ else
+ {
+ fLeftChild = false;
+ idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
+ }
+
+ AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
+ }
+
+ int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
+
+ return rc;
+}
+
+
+/**
+ * Adds the given int3 breakpoint to the appropriate lookup tables.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to add.
+ * @param pBp The internal breakpoint state.
+ */
+static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
+
+    /* Assume contention until an attempt either succeeds or fails with a real error. */
+    int      rc    = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
+    uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
+
+    for (uint8_t cTries = 16; cTries > 0; cTries--)
+    {
+        uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
+        if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
+        {
+            /*
+             * No breakpoint assigned so far for this entry, create an entry containing
+             * the direct breakpoint handle and try to exchange it atomically.
+             */
+            u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
+            if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
+            {
+                rc = VINF_SUCCESS;
+                break;
+            }
+            /* else: Another thread raced us, re-read the entry and retry. */
+        }
+        else
+        {
+            int rc2 = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Int3.GCPtr);
+            if (rc2 != VINF_TRY_AGAIN) /* Retry only while we keep losing races. */
+            {
+                rc = rc2;
+                break;
+            }
+        }
+    }
+
+ return rc;
+}
+
+
+/**
+ * Adds the given port I/O breakpoint to the appropriate lookup tables.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to add.
+ * @param pBp The internal breakpoint state.
+ */
+static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
+
+ uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
+ uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
+ for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
+ {
+ bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
+ if (!fXchg)
+ {
+ /* Something raced us, so roll back the other registrations. */
+            while (idxPort > pBp->Pub.u.PortIo.uPort)
+            {
+                idxPort--;
+                fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
+                Assert(fXchg); RT_NOREF(fXchg);
+            }
+
+ return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
+ }
+ }
+
+ return VINF_SUCCESS;
+}
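+
+
+/*
+ * Illustrative note: port I/O breakpoints have no second lookup level; the same
+ * direct handle entry is simply replicated into every slot of the range.  For a
+ * hypothetical breakpoint covering the COM1 ports this would mean:
+ *
+ * @code
+ *     // uPort = 0x3f8, cPorts = 8:
+ *     //   paBpLocPortIoR3[0x3f8] .. paBpLocPortIoR3[0x3ff] all hold
+ *     //   DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp).
+ * @endcode
+ */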
+
+
+/**
+ * Gets a breakpoint given by address.
+ *
+ * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
+ * @param pUVM The user mode VM handle.
+ * @param enmType The breakpoint type.
+ * @param GCPtr The breakpoint address.
+ * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
+ */
+static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
+{
+ DBGFBP hBp = NIL_DBGFBP;
+
+ switch (enmType)
+ {
+ case DBGFBPTYPE_REG:
+ {
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
+ {
+ PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
+
+ AssertCompileSize(DBGFBP, sizeof(uint32_t));
+ DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
+ if ( pHwBp->GCPtr == GCPtr
+ && hBpTmp != NIL_DBGFBP)
+ {
+ hBp = hBpTmp;
+ break;
+ }
+ }
+ break;
+ }
+
+ case DBGFBPTYPE_INT3:
+ {
+ const uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
+ const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);
+
+ if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
+ {
+ uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
+ if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
+ hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
+ else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
+ {
+ RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
+ PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));
+
+ for (;;)
+ {
+ AssertPtr(pL2Nd);
+
+ RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
+ if (GCPtrKey == GCPtrL2Entry)
+ {
+ hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
+ break;
+ }
+
+ /* Not found, get to the next level. */
+ uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
+ ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
+ : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
+ /* Address not found if the entry denotes the end. */
+ if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
+ break;
+
+ pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", enmType));
+ break;
+ }
+
+ if ( hBp != NIL_DBGFBP
+ && ppBp)
+ *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ return hBp;
+}
+
+
+/**
+ * Get a port I/O breakpoint given by the range.
+ *
+ * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
+ * @param pUVM The user mode VM handle.
+ * @param uPort First port in the range.
+ * @param cPorts Number of ports in the range.
+ * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
+ */
+static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
+{
+ DBGFBP hBp = NIL_DBGFBP;
+
+ for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
+ {
+ const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
+ if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
+ {
+ hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
+ break;
+ }
+ }
+
+ if ( hBp != NIL_DBGFBP
+ && ppBp)
+ *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ return hBp;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
+
+ int rc = VINF_SUCCESS;
+ if (pVCpu->idCpu == 0)
+ {
+ uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
+ uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
+ AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
+
+ uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
+ if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
+ {
+ /* Single breakpoint, just exchange atomically with the null value. */
+ if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
+ {
+ /*
+ * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
+ * and remove the node from the created binary search tree.
+ *
+ * This works because after the entry was converted to an L2 index it can only be converted back
+ * to a direct handle by removing one or more nodes which always goes through the fast mutex
+ * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
+ * so there is serialization here and the node can be removed safely without having to worry about
+ * concurrent tree modifications.
+ */
+ u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
+ AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
+
+ rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
+ hBp, pBp->Pub.u.Int3.GCPtr);
+ }
+ }
+ else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
+ rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
+ hBp, pBp->Pub.u.Int3.GCPtr);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Removes the given int3 breakpoint from all lookup tables.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to remove.
+ * @param pBp The internal breakpoint state.
+ */
+static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
+
+ /*
+ * This has to be done by an EMT rendezvous in order to not have an EMT traversing
+ * any L2 trees while the breakpoint is being removed.
+ */
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
+
+ VMCPU_ASSERT_EMT(pVCpu);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
+
+ int rc = VINF_SUCCESS;
+ if (pVCpu->idCpu == 0)
+ {
+ /*
+ * Remove the whole range, there shouldn't be any other breakpoint configured for this range as this is not
+ * allowed right now.
+ */
+ uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
+ for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
+ {
+ uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
+ AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
+
+ uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
+ AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
+
+ bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
+ Assert(fXchg); RT_NOREF(fXchg);
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Removes the given port I/O breakpoint from all lookup tables.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to remove.
+ * @param pBp The internal breakpoint state.
+ */
+static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
+
+ /*
+ * This has to be done by an EMT rendezvous in order to not have an EMT accessing
+ * the breakpoint while it is being removed.
+ */
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pvUser);
+
+ /*
+ * CPU 0 updates the enabled hardware breakpoint counts.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ pVM->dbgf.s.cEnabledHwBreakpoints = 0;
+ pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
+
+ for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
+ {
+ if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
+ {
+ pVM->dbgf.s.cEnabledHwBreakpoints += 1;
+ pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
+ }
+ }
+ }
+
+ return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
+}
+
+
+/**
+ * Arms the given breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to arm.
+ * @param pBp The internal breakpoint state pointer for the handle.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ int rc;
+ PVM pVM = pUVM->pVM;
+
+ Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
+ switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
+ {
+ case DBGFBPTYPE_REG:
+ {
+ Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
+ PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
+ Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
+
+ dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
+ ASMAtomicWriteBool(&pBpHw->fEnabled, true);
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
+ if (RT_FAILURE(rc))
+ {
+ ASMAtomicWriteBool(&pBpHw->fEnabled, false);
+ dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
+ }
+ break;
+ }
+ case DBGFBPTYPE_INT3:
+ {
+ dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
+
+ /** @todo When we enable the first int3 breakpoint we should do this in an EMT rendezvous
+ * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
+ * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
+ /*
+ * Save current byte and write the int3 instruction byte.
+ */
+ rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Int3.bOrg, pBp->Pub.u.Int3.PhysAddr, sizeof(pBp->Pub.u.Int3.bOrg));
+ if (RT_SUCCESS(rc))
+ {
+ static const uint8_t s_bInt3 = 0xcc;
+ rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &s_bInt3, sizeof(s_bInt3));
+ if (RT_SUCCESS(rc))
+ {
+ ASMAtomicIncU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
+ Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
+
+ break;
+ }
+ case DBGFBPTYPE_PORT_IO:
+ {
+ dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
+ ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
+ IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ case DBGFBPTYPE_MMIO:
+ rc = VERR_NOT_IMPLEMENTED;
+ break;
+ default:
+ AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
+ VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Disarms the given breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The breakpoint handle to disarm.
+ * @param pBp The internal breakpoint state pointer for the handle.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
+{
+ int rc;
+ PVM pVM = pUVM->pVM;
+
+ Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
+ switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
+ {
+ case DBGFBPTYPE_REG:
+ {
+ Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
+ PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
+ Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
+
+ dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
+ ASMAtomicWriteBool(&pBpHw->fEnabled, false);
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
+ if (RT_FAILURE(rc))
+ {
+ ASMAtomicWriteBool(&pBpHw->fEnabled, true);
+ dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
+ }
+ break;
+ }
+ case DBGFBPTYPE_INT3:
+ {
+ /*
+ * Check that the current byte is the int3 instruction, and restore the original one.
+ * We currently ignore invalid bytes.
+ */
+ uint8_t bCurrent = 0;
+ rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Int3.PhysAddr, sizeof(bCurrent));
+ if ( RT_SUCCESS(rc)
+ && bCurrent == 0xcc)
+ {
+ rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &pBp->Pub.u.Int3.bOrg, sizeof(pBp->Pub.u.Int3.bOrg));
+ if (RT_SUCCESS(rc))
+ {
+ ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
+ dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
+ Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
+ }
+ }
+ break;
+ }
+ case DBGFBPTYPE_PORT_IO:
+ {
+ dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
+ uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
+ if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
+ IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
+ rc = VINF_SUCCESS;
+ break;
+ }
+ case DBGFBPTYPE_MMIO:
+ rc = VERR_NOT_IMPLEMENTED;
+ break;
+ default:
+ AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
+ VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+
+ return rc;
+}
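+
+
+/*
+ * Illustrative note: for int3 breakpoints, arming and disarming boil down to a
+ * guest-physical byte patch, essentially the following pair (sketch only, bOrg
+ * and PhysAddr standing in for the saved byte and breakpoint address):
+ *
+ * @code
+ *     PGMPhysSimpleReadGCPhys(pVM, &bOrg, PhysAddr, sizeof(bOrg));        // Save the original byte.
+ *     PGMPhysSimpleWriteGCPhys(pVM, &s_bInt3, PhysAddr, sizeof(s_bInt3)); // Plant the 0xcc opcode.
+ * @endcode
+ *
+ * Disarming restores bOrg the same way after verifying that 0xcc is still present.
+ */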
+
+
+/**
+ * Worker for DBGFR3BpHit(), differentiating on the breakpoint type.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The vCPU the breakpoint event happened on.
+ * @param hBp The breakpoint handle.
+ * @param pBp The breakpoint data.
+ * @param pBpOwner The breakpoint owner data.
+ *
+ * @thread EMT
+ */
+static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
+{
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+ switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
+ {
+ case DBGFBPTYPE_REG:
+ case DBGFBPTYPE_INT3:
+ {
+ if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
+ rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ uint8_t abInstr[DBGF_BP_INSN_MAX];
+ RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
+ AssertRC(rc);
+ if (RT_SUCCESS(rc))
+ {
+ /* Replace the int3 with the original instruction byte. */
+ abInstr[0] = pBp->Pub.u.Int3.bOrg;
+ rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
+ if ( rcStrict == VINF_SUCCESS
+ && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
+ {
+ VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
+ DBGF_BP_F_HIT_EXEC_AFTER);
+ if (rcStrict2 == VINF_SUCCESS)
+ return VBOXSTRICTRC_VAL(rcStrict);
+ if (rcStrict2 != VINF_DBGF_BP_HALT)
+ return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
+ }
+ else
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+ }
+ break;
+ }
+ case DBGFBPTYPE_PORT_IO:
+ case DBGFBPTYPE_MMIO:
+ {
+ pVCpu->dbgf.s.fBpIoActive = false;
+ rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
+ pVCpu->dbgf.s.fBpIoBefore
+ ? DBGF_BP_F_HIT_EXEC_BEFORE
+ : DBGF_BP_F_HIT_EXEC_AFTER,
+ pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
+ pVCpu->dbgf.s.uBpIoValue);
+
+ break;
+ }
+ default:
+ AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
+ VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+
+ return rcStrict;
+}
+
+
+/**
+ * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
+ *
+ * @returns VBox status code.
+ * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
+ * @param pUVM The user mode VM handle.
+ * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
+ * @param   pfnBpIoHit  The R3 callback which is called when an I/O breakpoint with the owner handle is hit.
+ * @param phBpOwner Where to store the owner handle on success.
+ *
+ * @thread Any thread but might defer work to EMT on the first call.
+ */
+VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
+
+ int rc = dbgfR3BpOwnerEnsureInit(pUVM);
+    AssertRCReturn(rc, rc);
+
+ /* Try to find a free entry in the owner table. */
+ for (;;)
+ {
+ /* Scan the associated bitmap for a free entry. */
+ int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
+ if (iClr != -1)
+ {
+ /*
+ * Try to allocate, we could get raced here as well. In that case
+ * we try again.
+ */
+ if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
+ {
+ PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
+ pBpOwner->cRefs = 1;
+ pBpOwner->pfnBpHitR3 = pfnBpHit;
+ pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
+
+ *phBpOwner = (DBGFBPOWNER)iClr;
+ return VINF_SUCCESS;
+ }
+ /* else Retry with another spot. */
+ }
+ else /* no free entry in bitmap, out of entries. */
+ {
+ rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
+ break;
+ }
+ }
+
+ return rc;
+}
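+
+
+/*
+ * Usage sketch (illustrative; myBpHit is a hypothetical client callback whose
+ * signature is approximated from the pfnBpHitR3 invocation in dbgfR3BpHit):
+ *
+ * @code
+ *     static DECLCALLBACK(VBOXSTRICTRC) myBpHit(PVM pVM, VMCPUID idCpu, void *pvUser,
+ *                                               DBGFBP hBp, PCDBGFBPPUB pBpPub, uint16_t fFlags)
+ *     {
+ *         RT_NOREF(pVM, idCpu, pvUser, hBp, pBpPub, fFlags);
+ *         return VINF_DBGF_BP_HALT; // Halt the VM and enter the debugger.
+ *     }
+ *
+ *     DBGFBPOWNER hBpOwner = NIL_DBGFBPOWNER;
+ *     int rc = DBGFR3BpOwnerCreate(pUVM, myBpHit, NULL /*pfnBpIoHit*/, &hBpOwner);
+ * @endcode
+ */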
+
+
+/**
+ * Destroys the owner identified by the given handle.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
+ * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
+ * @param pUVM The user mode VM handle.
+ * @param hBpOwner The breakpoint owner handle to destroy.
+ */
+VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
+
+ int rc = dbgfR3BpOwnerEnsureInit(pUVM);
+    AssertRCReturn(rc, rc);
+
+ PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
+ if (RT_LIKELY(pBpOwner))
+ {
+ if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
+ {
+            pBpOwner->pfnBpHitR3   = NULL;
+            pBpOwner->pfnBpIoHitR3 = NULL;
+ ASMAtomicDecU32(&pBpOwner->cRefs);
+ ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
+ }
+ else
+ rc = VERR_DBGF_OWNER_BUSY;
+ }
+ else
+ rc = VERR_INVALID_HANDLE;
+
+ return rc;
+}
+
+
+/**
+ * Sets a breakpoint (int 3 based).
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idSrcCpu The ID of the virtual CPU used for the
+ * breakpoint address resolution.
+ * @param pAddress The address of the breakpoint.
+ * @param   iHitTrigger The hit count at which the breakpoint starts triggering.
+ *                      Use 0 (or 1) if it should trigger at once.
+ * @param   iHitDisable The hit count which disables the breakpoint.
+ *                      Use ~(uint64_t)0 if it should never be disabled.
+ * @param phBp Where to store the breakpoint handle on success.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
+ uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
+ DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
+}
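+
+
+/*
+ * Usage sketch (illustrative; GCPtrTarget is a hypothetical flat guest address):
+ *
+ * @code
+ *     DBGFADDRESS Addr;
+ *     DBGFR3AddrFromFlat(pUVM, &Addr, GCPtrTarget);
+ *
+ *     DBGFBP hBp = NIL_DBGFBP;
+ *     int rc = DBGFR3BpSetInt3(pUVM, 0 /*idSrcCpu*/, &Addr,
+ *                              0 /*iHitTrigger*/, UINT64_MAX /*iHitDisable*/, &hBp);
+ * @endcode
+ */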
+
+
+/**
+ * Sets a breakpoint (int 3 based) - extended version.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
+ * @param pvUser Opaque user data to pass in the owner callback.
+ * @param idSrcCpu The ID of the virtual CPU used for the
+ * breakpoint address resolution.
+ * @param pAddress The address of the breakpoint.
+ * @param fFlags Combination of DBGF_BP_F_XXX.
+ * @param   iHitTrigger The hit count at which the breakpoint starts triggering.
+ *                      Use 0 (or 1) if it should trigger at once.
+ * @param   iHitDisable The hit count which disables the breakpoint.
+ *                      Use ~(uint64_t)0 if it should never be disabled.
+ * @param phBp Where to store the breakpoint handle on success.
+ *
+ * @thread Any thread.
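+ *
+ * A sketch attaching an owner callback and arming at once (hBpOwner, pMyCtx and
+ * Addr are hypothetical caller state, the owner coming from DBGFR3BpOwnerCreate):
+ * @code
+ *      DBGFBP hBp = NIL_DBGFBP;
+ *      // pvUser=pMyCtx, idSrcCpu=0, iHitTrigger=0, iHitDisable=UINT64_MAX:
+ *      int rc = DBGFR3BpSetInt3Ex(pUVM, hBpOwner, pMyCtx, 0, &Addr,
+ *                                 DBGF_BP_F_ENABLED, 0, UINT64_MAX, &hBp);
+ *      // rc == VINF_DBGF_BP_ALREADY_EXIST means an identical breakpoint
+ *      // already existed and hBp now refers to it.
+ * @endcode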
+ */
+VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
+ VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
+ uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phBp, VERR_INVALID_POINTER);
+
+ int rc = dbgfR3BpEnsureInit(pUVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Translate & save the breakpoint address into a guest-physical address.
+ */
+ RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
+ rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * The physical address from DBGFR3AddrToPhys() is the start of the page;
+ * we need the exact byte offset into the page when writing to it in dbgfR3BpInt3Arm().
+ */
+ GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
+
+ PDBGFBPINT pBp = NULL;
+ DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_INT3, pAddress->FlatPtr, &pBp);
+ if ( hBp != NIL_DBGFBP
+ && pBp->Pub.u.Int3.PhysAddr == GCPhysBpAddr)
+ {
+ rc = VINF_SUCCESS;
+ if ( !DBGF_BP_PUB_IS_ENABLED(&pBp->Pub)
+ && (fFlags & DBGF_BP_F_ENABLED))
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VINF_DBGF_BP_ALREADY_EXIST;
+ if (phBp)
+ *phBp = hBp;
+ }
+ return rc;
+ }
+
+ rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_INT3, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
+ if (RT_SUCCESS(rc))
+ {
+ pBp->Pub.u.Int3.PhysAddr = GCPhysBpAddr;
+ pBp->Pub.u.Int3.GCPtr = pAddress->FlatPtr;
+
+ /* Add the breakpoint to the lookup tables. */
+ rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ /* Enable the breakpoint if requested. */
+ if (fFlags & DBGF_BP_F_ENABLED)
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ *phBp = hBp;
+ return VINF_SUCCESS;
+ }
+
+ int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
+ }
+
+ dbgfR3BpFree(pUVM, hBp, pBp);
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Sets a register breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address of the breakpoint.
+ * @param iHitTrigger The hit count at which the breakpoint starts triggering.
+ * Use 0 (or 1) if it's gonna trigger at once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param fType The access type (one of the X86_DR7_RW_* defines).
+ * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
+ * Must be 1 if fType is X86_DR7_RW_EO.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
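+ *
+ * A minimal sketch (hypothetical guest address) placing a 4 byte write
+ * watchpoint that triggers at once and never auto-disables:
+ * @code
+ *      DBGFADDRESS Addr;
+ *      DBGFR3AddrFromFlat(pUVM, &Addr, 0x80004000); // hypothetical flat address
+ *      DBGFBP hBp = NIL_DBGFBP;
+ *      int rc = DBGFR3BpSetReg(pUVM, &Addr, 0, UINT64_MAX, X86_DR7_RW_WO, 4, &hBp);
+ * @endcode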
+ */
+VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
+ uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
+{
+ return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
+ DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
+}
+
+
+/**
+ * Sets a register breakpoint - extended version.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
+ * @param pvUser Opaque user data to pass in the owner callback.
+ * @param pAddress The address of the breakpoint.
+ * @param fFlags Combination of DBGF_BP_F_XXX.
+ * @param iHitTrigger The hit count at which the breakpoint starts triggering.
+ * Use 0 (or 1) if it's gonna trigger at once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param fType The access type (one of the X86_DR7_RW_* defines).
+ * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
+ * Must be 1 if fType is X86_DR7_RW_EO.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
+ PCDBGFADDRESS pAddress, uint16_t fFlags,
+ uint64_t iHitTrigger, uint64_t iHitDisable,
+ uint8_t fType, uint8_t cb, PDBGFBP phBp)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
+ AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phBp, VERR_INVALID_POINTER);
+ switch (fType)
+ {
+ case X86_DR7_RW_EO:
+ AssertMsgReturn(cb == 1, ("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
+ break;
+ case X86_DR7_RW_IO:
+ case X86_DR7_RW_RW:
+ case X86_DR7_RW_WO:
+ break;
+ default:
+ AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
+ }
+
+ int rc = dbgfR3BpEnsureInit(pUVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Check if we've already got a matching breakpoint for that address.
+ */
+ PDBGFBPINT pBp = NULL;
+ DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
+ if ( hBp != NIL_DBGFBP
+ && pBp->Pub.u.Reg.cb == cb
+ && pBp->Pub.u.Reg.fType == fType)
+ {
+ rc = VINF_SUCCESS;
+ if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub) && (fFlags & DBGF_BP_F_ENABLED))
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ /* else: We don't disable it when DBGF_BP_F_ENABLED isn't given. */
+ if (RT_SUCCESS(rc))
+ {
+ rc = VINF_DBGF_BP_ALREADY_EXIST;
+ if (phBp)
+ *phBp = hBp;
+ }
+ return rc;
+ }
+
+ /*
+ * Allocate new breakpoint.
+ */
+ rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
+ if (RT_SUCCESS(rc))
+ {
+ pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
+ pBp->Pub.u.Reg.fType = fType;
+ pBp->Pub.u.Reg.cb = cb;
+ pBp->Pub.u.Reg.iReg = UINT8_MAX;
+ ASMCompilerBarrier();
+
+ /* Assign the proper hardware breakpoint. */
+ rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ /* Arm the breakpoint. */
+ if (fFlags & DBGF_BP_F_ENABLED)
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ if (phBp)
+ *phBp = hBp;
+ return VINF_SUCCESS;
+ }
+
+ int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
+ AssertRC(rc2); RT_NOREF(rc2);
+ }
+
+ dbgfR3BpFree(pUVM, hBp, pBp);
+ }
+
+ return rc;
+}
+
+
+/**
+ * This is only kept for now so as not to disturb the debugger implementation at
+ * this point; recompiler breakpoints are no longer supported (IEM has some API,
+ * but it isn't implemented and should probably be merged with the DBGF breakpoints).
+ */
+VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
+ uint64_t iHitDisable, PDBGFBP phBp)
+{
+ RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Sets an I/O port breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param uPort The first I/O port.
+ * @param cPorts The number of I/O ports to cover.
+ * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
+ * @param iHitTrigger The hit count at which the breakpoint starts
+ * triggering. Use 0 (or 1) if it's gonna trigger at
+ * once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
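+ *
+ * A sketch breaking on byte-wide accesses to the COM1 data port (the port is an
+ * arbitrary example and the DBGFBPIOACCESS_READ_BYTE/WRITE_BYTE flag names are
+ * an assumption about the DBGFBPIOACCESS_XXX set):
+ * @code
+ *      DBGFBP hBp = NIL_DBGFBP;
+ *      int rc = DBGFR3BpSetPortIo(pUVM, 0x3f8, 1,
+ *                                 DBGFBPIOACCESS_READ_BYTE | DBGFBPIOACCESS_WRITE_BYTE,
+ *                                 0, UINT64_MAX, &hBp);
+ * @endcode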
+ */
+VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
+ uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
+ DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
+}
+
+
+/**
+ * Sets an I/O port breakpoint - extended version.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
+ * @param pvUser Opaque user data to pass in the owner callback.
+ * @param uPort The first I/O port.
+ * @param cPorts The number of I/O ports to cover.
+ * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
+ * @param fFlags Combination of DBGF_BP_F_XXX.
+ * @param iHitTrigger The hit count at which the breakpoint starts
+ * triggering. Use 0 (or 1) if it's gonna trigger at
+ * once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
+ RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
+ uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
+ AssertReturn(fAccess, VERR_INVALID_FLAGS);
+ AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
+ AssertReturn(fFlags, VERR_INVALID_FLAGS);
+ AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phBp, VERR_INVALID_POINTER);
+ AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
+ AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);
+
+ int rc = dbgfR3BpPortIoEnsureInit(pUVM);
+ AssertRCReturn(rc, rc);
+
+ PDBGFBPINT pBp = NULL;
+ DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
+ if ( hBp != NIL_DBGFBP
+ && pBp->Pub.u.PortIo.uPort == uPort
+ && pBp->Pub.u.PortIo.cPorts == cPorts
+ && pBp->Pub.u.PortIo.fAccess == fAccess)
+ {
+ rc = VINF_SUCCESS;
+ if ( !DBGF_BP_PUB_IS_ENABLED(&pBp->Pub)
+ && (fFlags & DBGF_BP_F_ENABLED)) /* only arm when requested, matching the int3/reg variants */
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VINF_DBGF_BP_ALREADY_EXIST;
+ if (phBp)
+ *phBp = hBp;
+ }
+ return rc;
+ }
+
+ rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
+ if (RT_SUCCESS(rc))
+ {
+ pBp->Pub.u.PortIo.uPort = uPort;
+ pBp->Pub.u.PortIo.cPorts = cPorts;
+ pBp->Pub.u.PortIo.fAccess = fAccess;
+
+ /* Add the breakpoint to the lookup tables. */
+ rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ /* Enable the breakpoint if requested. */
+ if (fFlags & DBGF_BP_F_ENABLED)
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ if (RT_SUCCESS(rc))
+ {
+ *phBp = hBp;
+ return VINF_SUCCESS;
+ }
+
+ int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
+ }
+
+ dbgfR3BpFree(pUVM, hBp, pBp);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Sets a memory mapped I/O breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param GCPhys The first MMIO address.
+ * @param cb The size of the MMIO range to break on.
+ * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
+ * @param iHitTrigger The hit count at which the breakpoint starts
+ * triggering. Use 0 (or 1) if it's gonna trigger at
+ * once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
+ uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
+ DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
+}
+
+
+/**
+ * Sets a memory mapped I/O breakpoint - extended version.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
+ * @param pvUser Opaque user data to pass in the owner callback.
+ * @param GCPhys The first MMIO address.
+ * @param cb The size of the MMIO range to break on.
+ * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
+ * @param fFlags Combination of DBGF_BP_F_XXX.
+ * @param iHitTrigger The hit count at which the breakpoint starts
+ * triggering. Use 0 (or 1) if it's gonna trigger at
+ * once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param phBp Where to store the breakpoint handle.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
+ RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
+ uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
+ AssertReturn(fAccess, VERR_INVALID_FLAGS);
+ AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
+ AssertReturn(fFlags, VERR_INVALID_FLAGS);
+ AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phBp, VERR_INVALID_POINTER);
+ AssertReturn(cb, VERR_OUT_OF_RANGE);
+ AssertReturn(GCPhys + cb > GCPhys, VERR_OUT_OF_RANGE); /* no wrap-around */
+
+ int rc = dbgfR3BpEnsureInit(pUVM);
+ AssertRCReturn(rc, rc);
+
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Clears a breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The handle of the breakpoint which should be removed (cleared).
+ *
+ * @thread Any thread.
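+ *
+ * Usage sketch; the handle is dead once the call returns:
+ * @code
+ *      int rc = DBGFR3BpClear(pUVM, hBp);
+ *      hBp = NIL_DBGFBP; // must not be reused afterwards
+ * @endcode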
+ */
+VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
+
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
+
+ /* Disarm the breakpoint when it is enabled. */
+ if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
+ {
+ int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
+ AssertRC(rc);
+ }
+
+ switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
+ {
+ case DBGFBPTYPE_REG:
+ {
+ int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
+ AssertRC(rc);
+ break;
+ }
+ case DBGFBPTYPE_INT3:
+ {
+ int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
+ AssertRC(rc);
+ break;
+ }
+ case DBGFBPTYPE_PORT_IO:
+ {
+ int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
+ AssertRC(rc);
+ break;
+ }
+ default:
+ break;
+ }
+
+ dbgfR3BpFree(pUVM, hBp, pBp);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enables a breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The handle of the breakpoint which should be enabled.
+ *
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
+
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
+
+ int rc;
+ if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
+ rc = dbgfR3BpArm(pUVM, hBp, pBp);
+ else
+ rc = VINF_DBGF_BP_ALREADY_ENABLED;
+
+ return rc;
+}
+
+
+/**
+ * Disables a breakpoint.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hBp The handle of the breakpoint which should be disabled.
+ *
+ * @thread Any thread.
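+ *
+ * Together with DBGFR3BpEnable this temporarily mutes a breakpoint without
+ * losing its configuration (sketch):
+ * @code
+ *      int rc = DBGFR3BpDisable(pUVM, hBp); // VINF_DBGF_BP_ALREADY_DISABLED if it was off
+ *      // ... run the guest without hitting it ...
+ *      rc = DBGFR3BpEnable(pUVM, hBp);      // re-arm it
+ * @endcode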
+ */
+VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
+{
+ /*
+ * Validate the input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
+
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+ AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
+
+ int rc;
+ if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
+ rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
+ else
+ rc = VINF_DBGF_BP_ALREADY_DISABLED;
+
+ return rc;
+}
+
+
+/**
+ * Enumerate the breakpoints.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pfnCallback The callback function.
+ * @param pvUser The user argument to pass to the callback.
+ *
+ * @thread Any thread.
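+ *
+ * A counting callback sketch; the parameter shape is inferred from the
+ * invocation below (the formal type is PFNDBGFBPENUM, and PCDBGFBPPUB is
+ * assumed for the read-only public data):
+ * @code
+ *      static DECLCALLBACK(int) myBpCounter(PUVM pUVM, void *pvUser, DBGFBP hBp, PCDBGFBPPUB pBpPub)
+ *      {
+ *          RT_NOREF(pUVM, hBp, pBpPub);
+ *          *(uint32_t *)pvUser += 1;
+ *          return VINF_SUCCESS; // VINF_CALLBACK_RETURN stops the enumeration early
+ *      }
+ *      ...
+ *      uint32_t cBps = 0;
+ *      int rc = DBGFR3BpEnum(pUVM, myBpCounter, &cBps);
+ * @endcode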
+ */
+VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
+ {
+ PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
+
+ if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
+ break; /* Stop here, as the first unallocated chunk means none of the later chunks are allocated either. */
+
+ if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
+ {
+ /* Scan the bitmap for allocated entries. */
+ int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
+ if (iAlloc != -1)
+ {
+ do
+ {
+ DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
+
+ /* Make a copy of the breakpoint's public data to have a consistent view. */
+ DBGFBPPUB BpPub;
+ BpPub.cHits = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
+ BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
+ BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
+ BpPub.hOwner = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
+ BpPub.u16Type = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
+ BpPub.fFlags = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
+ memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */
+
+ /* Check if a removal raced us. */
+ if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
+ {
+ int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
+ if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
+ return rc;
+ }
+
+ iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
+ } while (iAlloc != -1);
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The vCPU the breakpoint event happened on.
+ *
+ * @thread EMT
+ */
+VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
+{
+ /* Send it straight into the debugger? */
+ if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
+ {
+ DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
+ pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
+
+ PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
+ AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
+
+ /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
+ if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
+ {
+ PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
+ if (pBpOwner)
+ {
+ VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
+ if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
+ {
+ pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
+ return VINF_SUCCESS;
+ }
+ if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
+ return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
+ /* else: Halt in the debugger. */
+ }
+ }
+ }
+
+ return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3BugCheck.cpp b/src/VBox/VMM/VMMR3/DBGFR3BugCheck.cpp
new file mode 100644
index 00000000..c78a3101
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3BugCheck.cpp
@@ -0,0 +1,930 @@
+/* $Id: DBGFR3BugCheck.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, NT Bug Checks.
+ */
+
+/*
+ * Copyright (C) 2018-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/tm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNDBGFHANDLERINT dbgfR3BugCheckInfo;
+
+
+/**
+ * Initializes the bug check state and registers the info callback.
+ *
+ * No termination function needed.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ */
+int dbgfR3BugCheckInit(PVM pVM)
+{
+ pVM->dbgf.s.BugCheck.idCpu = NIL_VMCPUID;
+ pVM->dbgf.s.BugCheck.enmEvent = DBGFEVENT_END;
+
+ return DBGFR3InfoRegisterInternal(pVM, "bugcheck",
+ "Show bugcheck info. Can specify bug check code and parameters to lookup info.",
+ dbgfR3BugCheckInfo);
+}
+
+
+/**
+ * Names a few common NT status codes for DBGFR3FormatBugCheck.
+ */
+static const char *dbgfR3GetNtStatusName(uint32_t uNtStatus)
+{
+ switch (uNtStatus)
+ {
+ case 0x80000001: return " - STATUS_GUARD_PAGE_VIOLATION";
+ case 0x80000002: return " - STATUS_DATATYPE_MISALIGNMENT";
+ case 0x80000003: return " - STATUS_BREAKPOINT";
+ case 0x80000004: return " - STATUS_SINGLE_STEP";
+ case 0xc0000008: return " - STATUS_INVALID_HANDLE";
+ case 0xc0000005: return " - STATUS_ACCESS_VIOLATION";
+ case 0xc0000027: return " - STATUS_UNWIND";
+ case 0xc0000028: return " - STATUS_BAD_STACK";
+ case 0xc0000029: return " - STATUS_INVALID_UNWIND_TARGET";
+ default: return "";
+ }
+}
+
+
+/**
+ * Formats a symbol for DBGFR3FormatBugCheck.
+ */
+static const char *dbgfR3FormatSymbol(PUVM pUVM, char *pszSymbol, size_t cchSymbol, const char *pszPrefix, uint64_t uFlatAddr)
+{
+ DBGFADDRESS Addr;
+ RTGCINTPTR offDisp = 0;
+ PRTDBGSYMBOL pSym = DBGFR3AsSymbolByAddrA(pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pUVM, &Addr, uFlatAddr),
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ &offDisp, NULL /*phMod*/);
+ if (pSym)
+ {
+ if (!offDisp)
+ RTStrPrintf(pszSymbol, cchSymbol, "%s%s", pszPrefix, pSym->szName);
+ else if (offDisp > 0)
+ RTStrPrintf(pszSymbol, cchSymbol, "%s%s + %#RX64", pszPrefix, pSym->szName, (uint64_t)offDisp);
+ else
+ RTStrPrintf(pszSymbol, cchSymbol, "%s%s - %#RX64", pszPrefix, pSym->szName, (uint64_t)-offDisp);
+ RTDbgSymbolFree(pSym);
+ }
+ else
+ *pszSymbol = '\0';
+ return pszSymbol;
+}
+
+
+/**
+ * Formats a Windows bug check (BSOD).
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VINF_BUFFER_OVERFLOW if there is more data than the buffer can handle.
+ *
+ * @param pUVM The usermode VM handle.
+ * @param pszDetails The output buffer.
+ * @param cbDetails The size of the output buffer.
+ * @param uBugCheck The bug check code.
+ * @param uP1 Bug check parameter 1.
+ * @param uP2 Bug check parameter 2.
+ * @param uP3 Bug check parameter 3.
+ * @param uP4 Bug check parameter 4.
+ */
+VMMR3DECL(int) DBGFR3FormatBugCheck(PUVM pUVM, char *pszDetails, size_t cbDetails,
+ uint64_t uBugCheck, uint64_t uP1, uint64_t uP2, uint64_t uP3, uint64_t uP4)
+{
+ /*
+ * Start with bug check line typically seen in windbg.
+ */
+ size_t cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "BugCheck %RX64 {%RX64, %RX64, %RX64, %RX64}\n", uBugCheck, uP1, uP2, uP3, uP4);
+ if (cchUsed >= cbDetails)
+ return VINF_BUFFER_OVERFLOW;
+ pszDetails += cchUsed;
+ cbDetails -= cchUsed;
+
+ /*
+ * Try to name the bug check and format the parameters if we can/care.
+ */
+ char szSym[512];
+ switch (uBugCheck)
+ {
+ case 0x00000001: cchUsed = RTStrPrintf(pszDetails, cbDetails, "APC_INDEX_MISMATCH\n"); break;
+ case 0x00000002: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DEVICE_QUEUE_NOT_BUSY\n"); break;
+ case 0x00000003: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_AFFINITY_SET\n"); break;
+ case 0x00000004: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_DATA_ACCESS_TRAP\n"); break;
+ case 0x00000005: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_PROCESS_ATTACH_ATTEMPT\n"); break;
+ case 0x00000006: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_PROCESS_DETACH_ATTEMPT\n"); break;
+ case 0x00000007: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_SOFTWARE_INTERRUPT\n"); break;
+ case 0x00000008: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IRQL_NOT_DISPATCH_LEVEL\n"); break;
+ case 0x00000009: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IRQL_NOT_GREATER_OR_EQUAL\n"); break;
+ case 0x0000000a:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "IRQL_NOT_LESS_OR_EQUAL\n"
+ "P1: %016RX64 - memory referenced\n"
+ "P2: %016RX64 - IRQL\n"
+ "P3: %016RX64 - bitfield\n"
+ " b0: %u - %s operation\n"
+ " b3: %u - %sexecute operation\n"
+ "P4: %016RX64 - EIP/RIP%s\n",
+ uP1, uP2, uP3,
+ RT_BOOL(uP3 & RT_BIT_64(0)), uP3 & RT_BIT_64(0) ? "write" : "read",
+ RT_BOOL(uP3 & RT_BIT_64(3)), uP3 & RT_BIT_64(3) ? "not-" : "",
+ uP4, dbgfR3FormatSymbol(pUVM, szSym, sizeof(szSym), ": ", uP4));
+ break;
+ case 0x0000000b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_EXCEPTION_HANDLING_SUPPORT\n"); break;
+ case 0x0000000c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MAXIMUM_WAIT_OBJECTS_EXCEEDED\n"); break;
+ case 0x0000000d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MUTEX_LEVEL_NUMBER_VIOLATION\n"); break;
+ case 0x0000000e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_USER_MODE_CONTEXT\n"); break;
+ case 0x0000000f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SPIN_LOCK_ALREADY_OWNED\n"); break;
+ case 0x00000010: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SPIN_LOCK_NOT_OWNED\n"); break;
+ case 0x00000011: cchUsed = RTStrPrintf(pszDetails, cbDetails, "THREAD_NOT_MUTEX_OWNER\n"); break;
+ case 0x00000012: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TRAP_CAUSE_UNKNOWN\n"); break;
+ case 0x00000013: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EMPTY_THREAD_REAPER_LIST\n"); break;
+ case 0x00000014: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CREATE_DELETE_LOCK_NOT_LOCKED\n"); break;
+ case 0x00000015: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LAST_CHANCE_CALLED_FROM_KMODE\n"); break;
+ case 0x00000016: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CID_HANDLE_CREATION\n"); break;
+ case 0x00000017: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CID_HANDLE_DELETION\n"); break;
+ case 0x00000018: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REFERENCE_BY_POINTER\n"); break;
+ case 0x00000019: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BAD_POOL_HEADER\n"); break;
+ case 0x0000001a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MEMORY_MANAGEMENT\n"); break;
+ case 0x0000001b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PFN_SHARE_COUNT\n"); break;
+ case 0x0000001c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PFN_REFERENCE_COUNT\n"); break;
+ case 0x0000001d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_SPIN_LOCK_AVAILABLE\n"); break;
+ case 0x0000001e:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "KMODE_EXCEPTION_NOT_HANDLED\n"
+ "P1: %016RX64 - exception code%s\n"
+ "P2: %016RX64 - EIP/RIP%s\n"
+ "P3: %016RX64 - Xcpt param #0\n"
+ "P4: %016RX64 - Xcpt param #1\n",
+ uP1, dbgfR3GetNtStatusName((uint32_t)uP1),
+ uP2, dbgfR3FormatSymbol(pUVM, szSym, sizeof(szSym), ": ", uP2),
+ uP3, uP4);
+ break;
+ case 0x0000001f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SHARED_RESOURCE_CONV_ERROR\n"); break;
+ case 0x00000020: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_APC_PENDING_DURING_EXIT\n"); break;
+ case 0x00000021: cchUsed = RTStrPrintf(pszDetails, cbDetails, "QUOTA_UNDERFLOW\n"); break;
+ case 0x00000022: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FILE_SYSTEM\n"); break;
+ case 0x00000023: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FAT_FILE_SYSTEM\n"); break;
+ case 0x00000024: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NTFS_FILE_SYSTEM\n"); break;
+ case 0x00000025: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NPFS_FILE_SYSTEM\n"); break;
+ case 0x00000026: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CDFS_FILE_SYSTEM\n"); break;
+ case 0x00000027: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RDR_FILE_SYSTEM\n"); break;
+ case 0x00000028: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CORRUPT_ACCESS_TOKEN\n"); break;
+ case 0x00000029: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURITY_SYSTEM\n"); break;
+ case 0x0000002a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INCONSISTENT_IRP\n"); break;
+ case 0x0000002b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PANIC_STACK_SWITCH\n"); break;
+ case 0x0000002c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PORT_DRIVER_INTERNAL\n"); break;
+ case 0x0000002d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SCSI_DISK_DRIVER_INTERNAL\n"); break;
+ case 0x0000002e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DATA_BUS_ERROR\n"); break;
+ case 0x0000002f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INSTRUCTION_BUS_ERROR\n"); break;
+ case 0x00000030: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SET_OF_INVALID_CONTEXT\n"); break;
+ case 0x00000031: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PHASE0_INITIALIZATION_FAILED\n"); break;
+ case 0x00000032: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PHASE1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000033: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UNEXPECTED_INITIALIZATION_CALL\n"); break;
+ case 0x00000034: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CACHE_MANAGER\n"); break;
+ case 0x00000035: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_MORE_IRP_STACK_LOCATIONS\n"); break;
+ case 0x00000036: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DEVICE_REFERENCE_COUNT_NOT_ZERO\n"); break;
+ case 0x00000037: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FLOPPY_INTERNAL_ERROR\n"); break;
+ case 0x00000038: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SERIAL_DRIVER_INTERNAL\n"); break;
+ case 0x00000039: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_EXIT_OWNED_MUTEX\n"); break;
+ case 0x0000003a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_UNWIND_PREVIOUS_USER\n"); break;
+ case 0x0000003b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_SERVICE_EXCEPTION\n"); break;
+ case 0x0000003c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INTERRUPT_UNWIND_ATTEMPTED\n"); break;
+ case 0x0000003d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INTERRUPT_EXCEPTION_NOT_HANDLED\n"); break;
+ case 0x0000003e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED\n"); break;
+ case 0x0000003f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_MORE_SYSTEM_PTES\n"); break;
+ case 0x00000040: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TARGET_MDL_TOO_SMALL\n"); break;
+ case 0x00000041: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MUST_SUCCEED_POOL_EMPTY\n"); break;
+ case 0x00000042: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ATDISK_DRIVER_INTERNAL\n"); break;
+ case 0x00000043: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_SUCH_PARTITION\n"); break;
+ case 0x00000044: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MULTIPLE_IRP_COMPLETE_REQUESTS\n"); break;
+ case 0x00000045: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INSUFFICIENT_SYSTEM_MAP_REGS\n"); break;
+ case 0x00000046: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DEREF_UNKNOWN_LOGON_SESSION\n"); break;
+ case 0x00000047: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REF_UNKNOWN_LOGON_SESSION\n"); break;
+ case 0x00000048: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CANCEL_STATE_IN_COMPLETED_IRP\n"); break;
+ case 0x00000049: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PAGE_FAULT_WITH_INTERRUPTS_OFF\n"); break;
+ case 0x0000004a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IRQL_GT_ZERO_AT_SYSTEM_SERVICE\n"); break;
+ case 0x0000004b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "STREAMS_INTERNAL_ERROR\n"); break;
+ case 0x0000004c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FATAL_UNHANDLED_HARD_ERROR\n"); break;
+ case 0x0000004d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_PAGES_AVAILABLE\n"); break;
+ case 0x0000004e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PFN_LIST_CORRUPT\n"); break;
+ case 0x0000004f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NDIS_INTERNAL_ERROR\n"); break;
+ case 0x00000050: /* PAGE_FAULT_IN_NONPAGED_AREA */
+ case 0x10000050: /* PAGE_FAULT_IN_NONPAGED_AREA_M */
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "PAGE_FAULT_IN_NONPAGED_AREA%s\n"
+ "P1: %016RX64 - memory referenced\n"
+ "P2: %016RX64 - IRQL\n"
+ "P3: %016RX64 - %s\n"
+ "P4: %016RX64 - reserved\n",
+ uBugCheck & 0x10000000 ? "_M" : "", uP1, uP2, uP3, uP3 & RT_BIT_64(0) ? "write" : "read", uP4);
+ break;
+ case 0x00000051: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REGISTRY_ERROR\n"); break;
+ case 0x00000052: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MAILSLOT_FILE_SYSTEM\n"); break;
+ case 0x00000053: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NO_BOOT_DEVICE\n"); break;
+ case 0x00000054: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LM_SERVER_INTERNAL_ERROR\n"); break;
+ case 0x00000055: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DATA_COHERENCY_EXCEPTION\n"); break;
+ case 0x00000056: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INSTRUCTION_COHERENCY_EXCEPTION\n"); break;
+ case 0x00000057: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XNS_INTERNAL_ERROR\n"); break;
+ case 0x00000058: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VOLMGRX_INTERNAL_ERROR\n"); break;
+ case 0x00000059: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PINBALL_FILE_SYSTEM\n"); break;
+ case 0x0000005a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRITICAL_SERVICE_FAILED\n"); break;
+ case 0x0000005b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SET_ENV_VAR_FAILED\n"); break;
+ case 0x0000005c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HAL_INITIALIZATION_FAILED\n"); break;
+ case 0x0000005d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UNSUPPORTED_PROCESSOR\n"); break;
+ case 0x0000005e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "OBJECT_INITIALIZATION_FAILED\n"); break;
+ case 0x0000005f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURITY_INITIALIZATION_FAILED\n"); break;
+ case 0x00000060: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PROCESS_INITIALIZATION_FAILED\n"); break;
+ case 0x00000061: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HAL1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000062: cchUsed = RTStrPrintf(pszDetails, cbDetails, "OBJECT1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000063: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURITY1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000064: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYMBOLIC_INITIALIZATION_FAILED\n"); break;
+ case 0x00000065: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MEMORY1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000066: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CACHE_INITIALIZATION_FAILED\n"); break;
+ case 0x00000067: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CONFIG_INITIALIZATION_FAILED\n"); break;
+ case 0x00000068: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FILE_INITIALIZATION_FAILED\n"); break;
+ case 0x00000069: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IO1_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LPC_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PROCESS1_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REFMON_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SESSION1_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTPROC_INITIALIZATION_FAILED\n"); break;
+ case 0x0000006f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VSL_INITIALIZATION_FAILED\n"); break;
+ case 0x00000070: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SOFT_RESTART_FATAL_ERROR\n"); break;
+ case 0x00000072: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ASSIGN_DRIVE_LETTERS_FAILED\n"); break;
+ case 0x00000073: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CONFIG_LIST_FAILED\n"); break;
+ case 0x00000074: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BAD_SYSTEM_CONFIG_INFO\n"); break;
+ case 0x00000075: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CANNOT_WRITE_CONFIGURATION\n"); break;
+ case 0x00000076: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PROCESS_HAS_LOCKED_PAGES\n"); break;
+ case 0x00000077: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_STACK_INPAGE_ERROR\n"); break;
+ case 0x00000078: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PHASE0_EXCEPTION\n"); break;
+ case 0x00000079: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MISMATCHED_HAL\n"); break;
+ case 0x0000007a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_DATA_INPAGE_ERROR\n"); break;
+ case 0x0000007b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INACCESSIBLE_BOOT_DEVICE\n"); break;
+ case 0x0000007c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_NDIS_DRIVER\n"); break;
+ case 0x0000007d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INSTALL_MORE_MEMORY\n"); break;
+ case 0x0000007e: /* SYSTEM_THREAD_EXCEPTION_NOT_HANDLED */
+ case 0x1000007e: /* SYSTEM_THREAD_EXCEPTION_NOT_HANDLED_M */
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "SYSTEM_THREAD_EXCEPTION_NOT_HANDLED%s\n"
+ "P1: %016RX64 - exception code%s\n"
+ "P2: %016RX64 - EIP/RIP%s\n"
+ "P3: %016RX64 - Xcpt address\n"
+ "P4: %016RX64 - Context address\n",
+ uBugCheck & 0x10000000 ? "_M" : "", uP1, dbgfR3GetNtStatusName((uint32_t)uP1),
+ uP2, dbgfR3FormatSymbol(pUVM, szSym, sizeof(szSym), ": ", uP2),
+ uP3, uP4);
+ break;
+ case 0x0000007f: /* UNEXPECTED_KERNEL_MODE_TRAP */
+ case 0x1000007f: /* UNEXPECTED_KERNEL_MODE_TRAP_M */
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "UNEXPECTED_KERNEL_MODE_TRAP%s\n"
+ "P1: %016RX64 - x86 trap number\n"
+ "P2: %016RX64 - reserved/errorcode?\n"
+ "P3: %016RX64 - reserved\n"
+ "P4: %016RX64 - reserved\n",
+ uBugCheck & 0x10000000 ? "_M" : "", uP1, uP2, uP3, uP4);
+ break;
+ case 0x00000080: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NMI_HARDWARE_FAILURE\n"); break;
+ case 0x00000081: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SPIN_LOCK_INIT_FAILURE\n"); break;
+ case 0x00000082: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DFS_FILE_SYSTEM\n"); break;
+ case 0x00000083: cchUsed = RTStrPrintf(pszDetails, cbDetails, "OFS_FILE_SYSTEM\n"); break;
+ case 0x00000084: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RECOM_DRIVER\n"); break;
+ case 0x00000085: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SETUP_FAILURE\n"); break;
+ case 0x00000086: cchUsed = RTStrPrintf(pszDetails, cbDetails, "AUDIT_FAILURE\n"); break;
+ case 0x0000008b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MBR_CHECKSUM_MISMATCH\n"); break;
+ case 0x0000008e: /* KERNEL_MODE_EXCEPTION_NOT_HANDLED */
+ case 0x1000008e: /* KERNEL_MODE_EXCEPTION_NOT_HANDLED_M */
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "KERNEL_MODE_EXCEPTION_NOT_HANDLED%s\n"
+ "P1: %016RX64 - exception code%s\n"
+ "P2: %016RX64 - EIP/RIP%s\n"
+ "P3: %016RX64 - Trap frame address\n"
+ "P4: %016RX64 - reserved\n",
+ uBugCheck & 0x10000000 ? "_M" : "", uP1, dbgfR3GetNtStatusName((uint32_t)uP1),
+ uP2, dbgfR3FormatSymbol(pUVM, szSym, sizeof(szSym), ": ", uP2),
+ uP3, uP4);
+ break;
+ case 0x0000008f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PP0_INITIALIZATION_FAILED\n"); break;
+ case 0x00000090: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PP1_INITIALIZATION_FAILED\n"); break;
+ case 0x00000091: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_INIT_OR_RIT_FAILURE\n"); break;
+ case 0x00000092: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UP_DRIVER_ON_MP_SYSTEM\n"); break;
+ case 0x00000093: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_KERNEL_HANDLE\n"); break;
+ case 0x00000094: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_STACK_LOCKED_AT_EXIT\n"); break;
+ case 0x00000095: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PNP_INTERNAL_ERROR\n"); break;
+ case 0x00000096: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_WORK_QUEUE_ITEM\n"); break;
+ case 0x00000097: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOUND_IMAGE_UNSUPPORTED\n"); break;
+ case 0x00000098: cchUsed = RTStrPrintf(pszDetails, cbDetails, "END_OF_NT_EVALUATION_PERIOD\n"); break;
+ case 0x00000099: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_REGION_OR_SEGMENT\n"); break;
+ case 0x0000009a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_LICENSE_VIOLATION\n"); break;
+ case 0x0000009b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UDFS_FILE_SYSTEM\n"); break;
+ case 0x0000009c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MACHINE_CHECK_EXCEPTION\n"); break;
+ case 0x0000009e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "USER_MODE_HEALTH_MONITOR\n"); break;
+ case 0x0000009f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_POWER_STATE_FAILURE\n"); break;
+ case 0x000000a0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INTERNAL_POWER_ERROR\n"); break;
+ case 0x000000a1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PCI_BUS_DRIVER_INTERNAL\n"); break;
+ case 0x000000a2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MEMORY_IMAGE_CORRUPT\n"); break;
+ case 0x000000a3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ACPI_DRIVER_INTERNAL\n"); break;
+ case 0x000000a4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CNSS_FILE_SYSTEM_FILTER\n"); break;
+ case 0x000000a5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ACPI_BIOS_ERROR\n"); break;
+ case 0x000000a6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FP_EMULATION_ERROR\n"); break;
+ case 0x000000a7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BAD_EXHANDLE\n"); break;
+ case 0x000000a8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTING_IN_SAFEMODE_MINIMAL\n"); break;
+ case 0x000000a9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTING_IN_SAFEMODE_NETWORK\n"); break;
+ case 0x000000aa: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTING_IN_SAFEMODE_DSREPAIR\n"); break;
+ case 0x000000ab: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SESSION_HAS_VALID_POOL_ON_EXIT\n"); break;
+ case 0x000000ac: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HAL_MEMORY_ALLOCATION\n"); break;
+ case 0x000000b1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BGI_DETECTED_VIOLATION\n"); break;
+ case 0x000000b4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_DRIVER_INIT_FAILURE\n"); break;
+ case 0x000000b5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTLOG_LOADED\n"); break;
+ case 0x000000b6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTLOG_NOT_LOADED\n"); break;
+ case 0x000000b7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BOOTLOG_ENABLED\n"); break;
+ case 0x000000b8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ATTEMPTED_SWITCH_FROM_DPC\n"); break;
+ case 0x000000b9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CHIPSET_DETECTED_ERROR\n"); break;
+ case 0x000000ba: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SESSION_HAS_VALID_VIEWS_ON_EXIT\n"); break;
+ case 0x000000bb: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NETWORK_BOOT_INITIALIZATION_FAILED\n"); break;
+ case 0x000000bc: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NETWORK_BOOT_DUPLICATE_ADDRESS\n"); break;
+ case 0x000000bd: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_HIBERNATED_STATE\n"); break;
+ case 0x000000be: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ATTEMPTED_WRITE_TO_READONLY_MEMORY\n"); break;
+ case 0x000000bf: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MUTEX_ALREADY_OWNED\n"); break;
+ case 0x000000c0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PCI_CONFIG_SPACE_ACCESS_FAILURE\n"); break;
+ case 0x000000c1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION\n"); break;
+
+ case 0x000000c2:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "BAD_POOL_CALLER\n"
+ "P1: %016RX64 - ", uP1);
+ if (cchUsed >= cbDetails)
+ return VINF_BUFFER_OVERFLOW;
+ cbDetails -= cchUsed;
+ pszDetails += cchUsed;
+ switch (uP1)
+ {
+ case 1:
+ case 2:
+ case 4:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Pool header corrupted!\n"
+ "P2: %016RX64 - Pool header address\n"
+ "P3: %016RX64 - Pool header contents\n"
+ "P4: %016RX64 - reserved\n", uP2, uP3, uP4);
+ break;
+ case 6:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Double free w/o tag!\n"
+ "P2: %016RX64 - reserved\n"
+ "P3: %016RX64 - Pool header address\n"
+ "P4: %016RX64 - Pool header contents\n", uP2, uP3, uP4);
+ break;
+ case 7:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Double free w/ tag!\n"
+ "P2: %016RX64 - tag %c%c%c%c\n"
+ "P3: %016RX64 - Pool header contents\n"
+ "P4: %016RX64 - Free address\n",
+ uP2,
+ RT_C_IS_PRINT(RT_BYTE1(uP2)) ? RT_BYTE1(uP2) : '.',
+ RT_C_IS_PRINT(RT_BYTE2(uP2)) ? RT_BYTE2(uP2) : '.',
+ RT_C_IS_PRINT(RT_BYTE3(uP2)) ? RT_BYTE3(uP2) : '.',
+ RT_C_IS_PRINT(RT_BYTE4(uP2)) ? RT_BYTE4(uP2) : '.',
+ uP3, uP4);
+ break;
+ case 8:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Wrong IRQL for allocation!\n"
+ "P2: %016RX64 - IRQL\n"
+ "P3: %016RX64 - Pool type\n"
+ "P4: %016RX64 - Allocation size\n",
+ uP2, uP3, uP4);
+ break;
+ case 9:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Wrong IRQL for free!\n"
+ "P2: %016RX64 - IRQL\n"
+ "P3: %016RX64 - Pool type\n"
+ "P4: %016RX64 - Pool address\n",
+ uP2, uP3, uP4);
+ break;
+ /** @todo fill in more BAD_POOL_CALLER types here as needed.*/
+ default:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "Unknown pool violation type\n"
+ "P2: %016RX64 - type specific\n"
+ "P3: %016RX64 - type specific\n"
+ "P4: %016RX64 - type specific\n",
+ uP2, uP3, uP4);
+ break;
+ }
+ break;
+
+ case 0x000000c3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_IMAGE_BAD_SIGNATURE\n"); break;
+ case 0x000000c4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_VERIFIER_DETECTED_VIOLATION\n"); break;
+ case 0x000000c5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_CORRUPTED_EXPOOL\n"); break;
+ case 0x000000c6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_CAUGHT_MODIFYING_FREED_POOL\n"); break;
+ case 0x000000c7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TIMER_OR_DPC_INVALID\n"); break;
+ case 0x000000c8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IRQL_UNEXPECTED_VALUE\n"); break;
+ case 0x000000c9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_VERIFIER_IOMANAGER_VIOLATION\n"); break;
+ case 0x000000ca: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PNP_DETECTED_FATAL_ERROR\n"); break;
+ case 0x000000cb: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_LEFT_LOCKED_PAGES_IN_PROCESS\n"); break;
+ case 0x000000cc: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PAGE_FAULT_IN_FREED_SPECIAL_POOL\n"); break;
+ case 0x000000cd: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PAGE_FAULT_BEYOND_END_OF_ALLOCATION\n"); break;
+ case 0x000000ce: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_UNLOADED_WITHOUT_CANCELLING_PENDING_OPERATIONS\n"); break;
+ case 0x000000cf: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TERMINAL_SERVER_DRIVER_MADE_INCORRECT_MEMORY_REFERENCE\n"); break;
+ case 0x000000d0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_CORRUPTED_MMPOOL\n"); break;
+ case 0x000000d1:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "DRIVER_IRQL_NOT_LESS_OR_EQUAL\n"
+ "P1: %016RX64 - memory referenced\n"
+ "P2: %016RX64 - IRQL\n"
+ "P3: %016RX64 - %s\n"
+ "P4: %016RX64 - EIP/RIP%s\n",
+ uP1, uP2, uP3, uP3 & RT_BIT_64(0) ? "write" : "read",
+ uP4, dbgfR3FormatSymbol(pUVM, szSym, sizeof(szSym), ": ", uP4));
+ break;
+ case 0x000000d2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_ID_DRIVER\n"); break;
+ case 0x000000d3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_PORTION_MUST_BE_NONPAGED\n"); break;
+ case 0x000000d4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_SCAN_AT_RAISED_IRQL_CAUGHT_IMPROPER_DRIVER_UNLOAD\n"); break;
+ case 0x000000d5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_PAGE_FAULT_IN_FREED_SPECIAL_POOL\n"); break;
+ case 0x000000d6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_PAGE_FAULT_BEYOND_END_OF_ALLOCATION\n"); break;
+ case 0x100000d6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_PAGE_FAULT_BEYOND_END_OF_ALLOCATION_M\n"); break;
+ case 0x000000d7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_UNMAPPING_INVALID_VIEW\n"); break;
+ case 0x000000d8:
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "DRIVER_USED_EXCESSIVE_PTES\n"
+ "P1: %016RX64 - Driver name pointer\n"
+ "P2: %016RX64 - Number of PTEs\n"
+ "P3: %016RX64 - Free system PTEs\n"
+ "P4: %016RX64 - System PTEs\n",
+ uP1, uP2, uP3, uP4);
+ break;
+ case 0x000000d9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LOCKED_PAGES_TRACKER_CORRUPTION\n"); break;
+ case 0x000000da: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SYSTEM_PTE_MISUSE\n"); break;
+ case 0x000000db: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_CORRUPTED_SYSPTES\n"); break;
+ case 0x000000dc: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_INVALID_STACK_ACCESS\n"); break;
+ case 0x000000de: cchUsed = RTStrPrintf(pszDetails, cbDetails, "POOL_CORRUPTION_IN_FILE_AREA\n"); break;
+ case 0x000000df: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IMPERSONATING_WORKER_THREAD\n"); break;
+ case 0x000000e0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ACPI_BIOS_FATAL_ERROR\n"); break;
+ case 0x000000e1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_RETURNED_AT_BAD_IRQL\n"); break;
+ case 0x000000e2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MANUALLY_INITIATED_CRASH\n"); break;
+ case 0x000000e3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RESOURCE_NOT_OWNED\n"); break;
+ case 0x000000e4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_INVALID\n"); break;
+ case 0x000000e5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "POWER_FAILURE_SIMULATE\n"); break;
+ case 0x000000e6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_VERIFIER_DMA_VIOLATION\n"); break;
+ case 0x000000e7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_FLOATING_POINT_STATE\n"); break;
+ case 0x000000e8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_CANCEL_OF_FILE_OPEN\n"); break;
+ case 0x000000e9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ACTIVE_EX_WORKER_THREAD_TERMINATION\n"); break;
+ case 0x000000ea: cchUsed = RTStrPrintf(pszDetails, cbDetails, "THREAD_STUCK_IN_DEVICE_DRIVER\n"); break;
+ case 0x100000ea: cchUsed = RTStrPrintf(pszDetails, cbDetails, "THREAD_STUCK_IN_DEVICE_DRIVER_M\n"); break;
+ case 0x000000eb: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DIRTY_MAPPED_PAGES_CONGESTION\n"); break;
+ case 0x000000ec: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SESSION_HAS_VALID_SPECIAL_POOL_ON_EXIT\n"); break;
+ case 0x000000ed: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UNMOUNTABLE_BOOT_VOLUME\n"); break;
+ case 0x000000ef: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRITICAL_PROCESS_DIED\n"); break;
+ case 0x000000f0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "STORAGE_MINIPORT_ERROR\n"); break;
+ case 0x000000f1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SCSI_VERIFIER_DETECTED_VIOLATION\n"); break;
+ case 0x000000f2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HARDWARE_INTERRUPT_STORM\n"); break;
+ case 0x000000f3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DISORDERLY_SHUTDOWN\n"); break;
+ case 0x000000f4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRITICAL_OBJECT_TERMINATION\n"); break;
+ case 0x000000f5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FLTMGR_FILE_SYSTEM\n"); break;
+ case 0x000000f6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PCI_VERIFIER_DETECTED_VIOLATION\n"); break;
+ case 0x000000f7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_OVERRAN_STACK_BUFFER\n"); break;
+ case 0x000000f8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RAMDISK_BOOT_INITIALIZATION_FAILED\n"); break;
+ case 0x000000f9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_RETURNED_STATUS_REPARSE_FOR_VOLUME_OPEN\n"); break;
+ case 0x000000fa: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HTTP_DRIVER_CORRUPTED\n"); break;
+ case 0x000000fb: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RECURSIVE_MACHINE_CHECK\n"); break;
+ case 0x000000fc: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ATTEMPTED_EXECUTE_OF_NOEXECUTE_MEMORY\n"); break;
+ case 0x000000fd: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DIRTY_NOWRITE_PAGES_CONGESTION\n"); break;
+ case 0x000000fe: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_USB_DRIVER\n"); break;
+ case 0x000000ff: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RESERVE_QUEUE_OVERFLOW\n"); break;
+ case 0x00000100: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LOADER_BLOCK_MISMATCH\n"); break;
+ case 0x00000101: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLOCK_WATCHDOG_TIMEOUT\n"); break;
+ case 0x00000102: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DPC_WATCHDOG_TIMEOUT\n"); break;
+ case 0x00000103: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MUP_FILE_SYSTEM\n"); break;
+ case 0x00000104: cchUsed = RTStrPrintf(pszDetails, cbDetails, "AGP_INVALID_ACCESS\n"); break;
+ case 0x00000105: cchUsed = RTStrPrintf(pszDetails, cbDetails, "AGP_GART_CORRUPTION\n"); break;
+ case 0x00000106: cchUsed = RTStrPrintf(pszDetails, cbDetails, "AGP_ILLEGALLY_REPROGRAMMED\n"); break;
+ case 0x00000107: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_EXPAND_STACK_ACTIVE\n"); break;
+ case 0x00000108: cchUsed = RTStrPrintf(pszDetails, cbDetails, "THIRD_PARTY_FILE_SYSTEM_FAILURE\n"); break;
+ case 0x00000109: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRITICAL_STRUCTURE_CORRUPTION\n"); break;
+ case 0x0000010a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "APP_TAGGING_INITIALIZATION_FAILED\n"); break;
+ case 0x0000010b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DFSC_FILE_SYSTEM\n"); break;
+ case 0x0000010c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FSRTL_EXTRA_CREATE_PARAMETER_VIOLATION\n"); break;
+ case 0x0000010d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WDF_VIOLATION\n"); break;
+ case 0x0000010e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_MEMORY_MANAGEMENT_INTERNAL\n"); break;
+ case 0x00000110: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_INVALID_CRUNTIME_PARAMETER\n"); break;
+ case 0x00000111: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RECURSIVE_NMI\n"); break;
+ case 0x00000112: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MSRPC_STATE_VIOLATION\n"); break;
+ case 0x00000113: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_DXGKRNL_FATAL_ERROR\n"); break;
+ case 0x00000114: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_SHADOW_DRIVER_FATAL_ERROR\n"); break;
+ case 0x00000115: cchUsed = RTStrPrintf(pszDetails, cbDetails, "AGP_INTERNAL\n"); break;
+ case 0x00000116: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_TDR_FAILURE\n"); break;
+ case 0x00000117: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_TDR_TIMEOUT_DETECTED\n"); break;
+ case 0x00000118: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NTHV_GUEST_ERROR\n"); break;
+ case 0x00000119: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_SCHEDULER_INTERNAL_ERROR\n"); break;
+ case 0x0000011a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EM_INITIALIZATION_ERROR\n"); break;
+ case 0x0000011b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_RETURNED_HOLDING_CANCEL_LOCK\n"); break;
+ case 0x0000011c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ATTEMPTED_WRITE_TO_CM_PROTECTED_STORAGE\n"); break;
+ case 0x0000011d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EVENT_TRACING_FATAL_ERROR\n"); break;
+ case 0x0000011e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TOO_MANY_RECURSIVE_FAULTS\n"); break;
+ case 0x0000011f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_DRIVER_HANDLE\n"); break;
+ case 0x00000120: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BITLOCKER_FATAL_ERROR\n"); break;
+ case 0x00000121: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_VIOLATION\n"); break;
+ case 0x00000122: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WHEA_INTERNAL_ERROR\n"); break;
+ case 0x00000123: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRYPTO_SELF_TEST_FAILURE\n"); break;
+ case 0x00000124: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WHEA_UNCORRECTABLE_ERROR\n"); break;
+ case 0x00000125: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NMR_INVALID_STATE\n"); break;
+ case 0x00000126: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NETIO_INVALID_POOL_CALLER\n"); break;
+ case 0x00000127: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PAGE_NOT_ZERO\n"); break;
+ case 0x00000128: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_RETURNED_WITH_BAD_IO_PRIORITY\n"); break;
+ case 0x00000129: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_RETURNED_WITH_BAD_PAGING_IO_PRIORITY\n"); break;
+ case 0x0000012a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MUI_NO_VALID_SYSTEM_LANGUAGE\n"); break;
+ case 0x0000012b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FAULTY_HARDWARE_CORRUPTED_PAGE\n"); break;
+ case 0x0000012c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EXFAT_FILE_SYSTEM\n"); break;
+ case 0x0000012d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VOLSNAP_OVERLAPPED_TABLE_ACCESS\n"); break;
+ case 0x0000012e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_MDL_RANGE\n"); break;
+ case 0x0000012f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VHD_BOOT_INITIALIZATION_FAILED\n"); break;
+ case 0x00000130: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DYNAMIC_ADD_PROCESSOR_MISMATCH\n"); break;
+ case 0x00000131: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_EXTENDED_PROCESSOR_STATE\n"); break;
+ case 0x00000132: cchUsed = RTStrPrintf(pszDetails, cbDetails, "RESOURCE_OWNER_POINTER_INVALID\n"); break;
+ case 0x00000133: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DPC_WATCHDOG_VIOLATION\n"); break;
+ case 0x00000134: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVE_EXTENDER\n"); break;
+ case 0x00000135: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REGISTRY_FILTER_DRIVER_EXCEPTION\n"); break;
+ case 0x00000136: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VHD_BOOT_HOST_VOLUME_NOT_ENOUGH_SPACE\n"); break;
+ case 0x00000137: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_HANDLE_MANAGER\n"); break;
+ case 0x00000138: cchUsed = RTStrPrintf(pszDetails, cbDetails, "GPIO_CONTROLLER_DRIVER_ERROR\n"); break;
+
+ case 0x00000139:
+ {
+ const char *pszCheck;
+ switch (uP1)
+ {
+ case 0x00: pszCheck = "Stack buffer overrun (/GS)"; break;
+ case 0x01: pszCheck = "Illegal virtual function table use (VTGuard)"; break;
+ case 0x02: pszCheck = "Stack buffer overrun (via cookie)"; break;
+ case 0x03: pszCheck = "Correupt LIST_ENTRY"; break;
+ case 0x04: pszCheck = "Out of bounds stack pointer"; break;
+ case 0x05: pszCheck = "Invalid parameter (fatal)"; break;
+ case 0x06: pszCheck = "Uninitialized stack cookie (by loader prior to Win8)"; break;
+ case 0x07: pszCheck = "Fatal program exit request"; break;
+ case 0x08: pszCheck = "Compiler bounds check violation"; break;
+ case 0x09: pszCheck = "Direct RtlQueryRegistryValues w/o typechecking on untrusted hive"; break;
+ /* https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/bug-check---bug-check-0x139-kernel-security-check-failure
+ and !analyze -show differs on the following: */
+ case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
+ case 0x0f: pszCheck = "Memory safety violation [?]"; break;
+ case 0x10: pszCheck = "Invalid indirect call (indirect call guard) [?]"; break;
+ case 0x11: pszCheck = "Invalid memory write (write guard) [?]"; break;
+ case 0x12: pszCheck = "Invalid target context for fiber switch [?]"; break;
+ /** @todo there are lots more... */
+ default: pszCheck = "Todo/Unknown"; break;
+ }
+ cchUsed = RTStrPrintf(pszDetails, cbDetails,
+ "KERNEL_SECURITY_CHECK_FAILURE\n"
+ "P1: %016RX64 - %s!\n"
+ "P2: %016RX64 - Trap frame address\n"
+ "P3: %016RX64 - Exception record\n"
+ "P4: %016RX64 - reserved\n", uP1, pszCheck, uP2, uP3, uP4);
+ break;
+ }
+
+ case 0x0000013a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_MODE_HEAP_CORRUPTION\n"); break;
+ case 0x0000013b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PASSIVE_INTERRUPT_ERROR\n"); break;
+ case 0x0000013c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_IO_BOOST_STATE\n"); break;
+ case 0x0000013d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRITICAL_INITIALIZATION_FAILURE\n"); break;
+ case 0x0000013e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ERRATA_WORKAROUND_UNSUCCESSFUL\n"); break;
+ case 0x00000140: cchUsed = RTStrPrintf(pszDetails, cbDetails, "STORAGE_DEVICE_ABNORMALITY_DETECTED\n"); break;
+ case 0x00000141: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_ENGINE_TIMEOUT_DETECTED\n"); break;
+ case 0x00000142: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_TDR_APPLICATION_BLOCKED\n"); break;
+ case 0x00000143: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PROCESSOR_DRIVER_INTERNAL\n"); break;
+ case 0x00000144: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_USB3_DRIVER\n"); break;
+ case 0x00000145: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURE_BOOT_VIOLATION\n"); break;
+ case 0x00000146: cchUsed = RTStrPrintf(pszDetails, cbDetails, "NDIS_NET_BUFFER_LIST_INFO_ILLEGALLY_TRANSFERRED\n"); break;
+ case 0x00000147: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ABNORMAL_RESET_DETECTED\n"); break;
+ case 0x00000148: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IO_OBJECT_INVALID\n"); break;
+ case 0x00000149: cchUsed = RTStrPrintf(pszDetails, cbDetails, "REFS_FILE_SYSTEM\n"); break;
+ case 0x0000014a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_WMI_INTERNAL\n"); break;
+ case 0x0000014b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SOC_SUBSYSTEM_FAILURE\n"); break;
+ case 0x0000014c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FATAL_ABNORMAL_RESET_ERROR\n"); break;
+ case 0x0000014d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EXCEPTION_SCOPE_INVALID\n"); break;
+ case 0x0000014e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SOC_CRITICAL_DEVICE_REMOVED\n"); break;
+ case 0x0000014f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PDC_WATCHDOG_TIMEOUT\n"); break;
+ case 0x00000150: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TCPIP_AOAC_NIC_ACTIVE_REFERENCE_LEAK\n"); break;
+ case 0x00000151: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UNSUPPORTED_INSTRUCTION_MODE\n"); break;
+ case 0x00000152: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_PUSH_LOCK_FLAGS\n"); break;
+ case 0x00000153: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_LOCK_ENTRY_LEAKED_ON_THREAD_TERMINATION\n"); break;
+ case 0x00000154: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UNEXPECTED_STORE_EXCEPTION\n"); break;
+ case 0x00000155: cchUsed = RTStrPrintf(pszDetails, cbDetails, "OS_DATA_TAMPERING\n"); break;
+ case 0x00000156: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WINSOCK_DETECTED_HUNG_CLOSESOCKET_LIVEDUMP\n"); break;
+ case 0x00000157: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_THREAD_PRIORITY_FLOOR_VIOLATION\n"); break;
+ case 0x00000158: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ILLEGAL_IOMMU_PAGE_FAULT\n"); break;
+ case 0x00000159: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HAL_ILLEGAL_IOMMU_PAGE_FAULT\n"); break;
+ case 0x0000015a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SDBUS_INTERNAL_ERROR\n"); break;
+ case 0x0000015b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_RETURNED_WITH_SYSTEM_PAGE_PRIORITY_ACTIVE\n"); break;
+ case 0x0000015c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PDC_WATCHDOG_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x0000015d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SOC_SUBSYSTEM_FAILURE_LIVEDUMP\n"); break;
+ case 0x0000015e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_NDIS_DRIVER_LIVE_DUMP\n"); break;
+ case 0x0000015f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CONNECTED_STANDBY_WATCHDOG_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000160: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_ATOMIC_CHECK_FAILURE\n"); break;
+ case 0x00000161: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LIVE_SYSTEM_DUMP\n"); break;
+ case 0x00000162: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_AUTO_BOOST_INVALID_LOCK_RELEASE\n"); break;
+ case 0x00000163: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_TEST_CONDITION\n"); break;
+ case 0x00000164: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_CRITICAL_FAILURE\n"); break;
+ case 0x00000165: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_STATUS_IO_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000166: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_RESOURCE_CALL_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000167: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_SNAPSHOT_DEVICE_INFO_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000168: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_STATE_TRANSITION_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000169: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_VOLUME_ARRIVAL_LIVEDUMP\n"); break;
+ case 0x0000016a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_VOLUME_REMOVAL_LIVEDUMP\n"); break;
+ case 0x0000016b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_CLUSTER_WATCHDOG_LIVEDUMP\n"); break;
+ case 0x0000016c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_RUNDOWN_PROTECTION_FLAGS\n"); break;
+ case 0x0000016d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_SLOT_ALLOCATOR_FLAGS\n"); break;
+ case 0x0000016e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ERESOURCE_INVALID_RELEASE\n"); break;
+ case 0x0000016f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_STATE_TRANSITION_INTERVAL_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x00000170: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSV_CLUSSVC_DISCONNECT_WATCHDOG\n"); break;
+ case 0x00000171: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CRYPTO_LIBRARY_INTERNAL_ERROR\n"); break;
+ case 0x00000173: cchUsed = RTStrPrintf(pszDetails, cbDetails, "COREMSGCALL_INTERNAL_ERROR\n"); break;
+ case 0x00000174: cchUsed = RTStrPrintf(pszDetails, cbDetails, "COREMSG_INTERNAL_ERROR\n"); break;
+ case 0x00000175: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PREVIOUS_FATAL_ABNORMAL_RESET_ERROR\n"); break;
+ case 0x00000178: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ELAM_DRIVER_DETECTED_FATAL_ERROR\n"); break;
+ case 0x00000179: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CLUSPORT_STATUS_IO_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x0000017b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PROFILER_CONFIGURATION_ILLEGAL\n"); break;
+ case 0x0000017c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PDC_LOCK_WATCHDOG_LIVEDUMP\n"); break;
+ case 0x0000017d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PDC_UNEXPECTED_REVOCATION_LIVEDUMP\n"); break;
+ case 0x00000180: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_REPLICATION_IOCONTEXT_TIMEOUT\n"); break;
+ case 0x00000181: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_STATE_TRANSITION_TIMEOUT\n"); break;
+ case 0x00000182: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_RECOVERY_IOCONTEXT_TIMEOUT\n"); break;
+ case 0x00000183: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_APP_IO_TIMEOUT\n"); break;
+ case 0x00000184: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_MANUALLY_INITIATED\n"); break;
+ case 0x00000185: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_STATE_FAILURE\n"); break;
+ case 0x00000186: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WVR_LIVEDUMP_CRITICAL_ERROR\n"); break;
+ case 0x00000187: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_DWMINIT_TIMEOUT_FALLBACK_BDD\n"); break;
+ case 0x00000188: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_CSVFS_LIVEDUMP\n"); break;
+ case 0x00000189: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BAD_OBJECT_HEADER\n"); break;
+ case 0x0000018a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SILO_CORRUPT\n"); break;
+ case 0x0000018b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURE_KERNEL_ERROR\n"); break;
+ case 0x0000018c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HYPERGUARD_VIOLATION\n"); break;
+ case 0x0000018d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SECURE_FAULT_UNHANDLED\n"); break;
+ case 0x0000018e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_PARTITION_REFERENCE_VIOLATION\n"); break;
+ case 0x00000190: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_CRITICAL_FAILURE_LIVEDUMP\n"); break;
+ case 0x00000191: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PF_DETECTED_CORRUPTION\n"); break;
+ case 0x00000192: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_AUTO_BOOST_LOCK_ACQUISITION_WITH_RAISED_IRQL\n"); break;
+ case 0x00000193: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_DXGKRNL_LIVEDUMP\n"); break;
+ case 0x00000194: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_NONRESPONSIVEPROCESS\n"); break;
+ case 0x00000195: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SMB_SERVER_LIVEDUMP\n"); break;
+ case 0x00000196: cchUsed = RTStrPrintf(pszDetails, cbDetails, "LOADER_ROLLBACK_DETECTED\n"); break;
+ case 0x00000197: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_SECURITY_FAILURE\n"); break;
+ case 0x00000198: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UFX_LIVEDUMP\n"); break;
+ case 0x00000199: cchUsed = RTStrPrintf(pszDetails, cbDetails, "KERNEL_STORAGE_SLOT_IN_USE\n"); break;
+ case 0x0000019a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_RETURNED_WHILE_ATTACHED_TO_SILO\n"); break;
+ case 0x0000019b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TTM_FATAL_ERROR\n"); break;
+ case 0x0000019c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_POWER_WATCHDOG_TIMEOUT\n"); break;
+ case 0x0000019d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CLUSTER_SVHDX_LIVEDUMP\n"); break;
+ case 0x0000019e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BUGCODE_NETADAPTER_DRIVER\n"); break;
+ case 0x0000019f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "PDC_PRIVILEGE_CHECK_LIVEDUMP\n"); break;
+ case 0x000001a0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TTM_WATCHDOG_TIMEOUT\n"); break;
+ case 0x000001a1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_CALLOUT_WATCHDOG_LIVEDUMP\n"); break;
+ case 0x000001a2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WIN32K_CALLOUT_WATCHDOG_BUGCHECK\n"); break;
+ case 0x000001a3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "CALL_HAS_NOT_RETURNED_WATCHDOG_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x000001a4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIPS_SW_HW_DIVERGENCE_LIVEDUMP\n"); break;
+ case 0x000001a5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "USB_DRIPS_BLOCKER_SURPRISE_REMOVAL_LIVEDUMP\n"); break;
+ case 0x000001c4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_VERIFIER_DETECTED_VIOLATION_LIVEDUMP\n"); break;
+ case 0x000001c5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "IO_THREADPOOL_DEADLOCK_LIVEDUMP\n"); break;
+ case 0x000001c6: cchUsed = RTStrPrintf(pszDetails, cbDetails, "FAST_ERESOURCE_PRECONDITION_VIOLATION\n"); break;
+ case 0x000001c7: cchUsed = RTStrPrintf(pszDetails, cbDetails, "STORE_DATA_STRUCTURE_CORRUPTION\n"); break;
+ case 0x000001c8: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MANUALLY_INITIATED_POWER_BUTTON_HOLD\n"); break;
+ case 0x000001c9: cchUsed = RTStrPrintf(pszDetails, cbDetails, "USER_MODE_HEALTH_MONITOR_LIVEDUMP\n"); break;
+ case 0x000001ca: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HYPERVISOR_WATCHDOG_TIMEOUT\n"); break;
+ case 0x000001cb: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_SILO_DETACH\n"); break;
+ case 0x000001cc: cchUsed = RTStrPrintf(pszDetails, cbDetails, "EXRESOURCE_TIMEOUT_LIVEDUMP\n"); break;
+ case 0x000001cd: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_CALLBACK_STACK_ADDRESS\n"); break;
+ case 0x000001ce: cchUsed = RTStrPrintf(pszDetails, cbDetails, "INVALID_KERNEL_STACK_ADDRESS\n"); break;
+ case 0x000001cf: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HARDWARE_WATCHDOG_TIMEOUT\n"); break;
+ case 0x000001d0: cchUsed = RTStrPrintf(pszDetails, cbDetails, "ACPI_FIRMWARE_WATCHDOG_TIMEOUT\n"); break;
+ case 0x000001d1: cchUsed = RTStrPrintf(pszDetails, cbDetails, "TELEMETRY_ASSERTS_LIVEDUMP\n"); break;
+ case 0x000001d2: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WORKER_THREAD_INVALID_STATE\n"); break;
+ case 0x000001d3: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WFP_INVALID_OPERATION\n"); break;
+ case 0x000001d4: cchUsed = RTStrPrintf(pszDetails, cbDetails, "UCMUCSI_LIVEDUMP\n"); break;
+ case 0x000001d5: cchUsed = RTStrPrintf(pszDetails, cbDetails, "DRIVER_PNP_WATCHDOG\n"); break;
+ case 0x00000315: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_MTBFCOMMANDTIMEOUT\n"); break;
+ case 0x00000356: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_ERACTRL_CS_TIMEOUT\n"); break;
+ case 0x00000357: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_CORRUPTED_IMAGE\n"); break;
+ case 0x00000358: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_INVERTED_FUNCTION_TABLE_OVERFLOW\n"); break;
+ case 0x00000359: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_CORRUPTED_IMAGE_BASE\n"); break;
+ case 0x00000360: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_360_SYSTEM_CRASH\n"); break;
+ case 0x00000420: cchUsed = RTStrPrintf(pszDetails, cbDetails, "XBOX_360_SYSTEM_CRASH_RESERVED\n"); break;
+ case 0x00000bfe: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BC_BLUETOOTH_VERIFIER_FAULT\n"); break;
+ case 0x00000bff: cchUsed = RTStrPrintf(pszDetails, cbDetails, "BC_BTHMINI_VERIFIER_FAULT\n"); break;
+ case 0x00008866: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_SICKAPPLICATION\n"); break;
+ case 0x0000f000: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_UNSPECIFIED\n"); break;
+ case 0x0000f002: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_BLANKSCREEN\n"); break;
+ case 0x0000f003: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_INPUT\n"); break;
+ case 0x0000f004: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_WATCHDOG\n"); break;
+ case 0x0000f005: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_STARTNOTVISIBLE\n"); break;
+ case 0x0000f006: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_NAVIGATIONMODEL\n"); break;
+ case 0x0000f007: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_OUTOFMEMORY\n"); break;
+ case 0x0000f008: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_GRAPHICS\n"); break;
+ case 0x0000f009: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_NAVSERVERTIMEOUT\n"); break;
+ case 0x0000f00a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_CHROMEPROCESSCRASH\n"); break;
+ case 0x0000f00b: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_NOTIFICATIONDISMISSAL\n"); break;
+ case 0x0000f00c: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_SPEECHDISMISSAL\n"); break;
+ case 0x0000f00d: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_CALLDISMISSAL\n"); break;
+ case 0x0000f00e: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_APPBARDISMISSAL\n"); break;
+ case 0x0000f00f: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_RILADAPTATIONCRASH\n"); break;
+ case 0x0000f010: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_APPLISTUNREACHABLE\n"); break;
+ case 0x0000f011: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_REPORTNOTIFICATIONFAILURE\n"); break;
+ case 0x0000f012: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_UNEXPECTEDSHUTDOWN\n"); break;
+ case 0x0000f013: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_RPCFAILURE\n"); break;
+ case 0x0000f014: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_AUXILIARYFULLDUMP\n"); break;
+ case 0x0000f015: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_ACCOUNTPROVSVCINITFAILURE\n"); break;
+ case 0x0000f101: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_MTBFCOMMANDHANG\n"); break;
+ case 0x0000f102: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_MTBFPASSBUGCHECK\n"); break;
+ case 0x0000f103: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_MTBFIOERROR\n"); break;
+ case 0x0000f200: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_RENDERTHREADHANG\n"); break;
+ case 0x0000f201: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_RENDERMOBILEUIOOM\n"); break;
+ case 0x0000f300: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_DEVICEUPDATEUNSPECIFIED\n"); break;
+ case 0x0000f400: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_AUDIODRIVERHANG\n"); break;
+ case 0x0000f500: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_BATTERYPULLOUT\n"); break;
+ case 0x0000f600: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_MEDIACORETESTHANG\n"); break;
+ case 0x0000f700: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_RESOURCEMANAGEMENT\n"); break;
+ case 0x0000f800: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_CAPTURESERVICE\n"); break;
+ case 0x0000f900: cchUsed = RTStrPrintf(pszDetails, cbDetails, "SAVER_WAITFORSHELLREADY\n"); break;
+ case 0x00020001: cchUsed = RTStrPrintf(pszDetails, cbDetails, "HYPERVISOR_ERROR\n"); break;
+ case 0x4000008a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "THREAD_TERMINATE_HELD_MUTEX\n"); break;
+ case 0x400000ad: cchUsed = RTStrPrintf(pszDetails, cbDetails, "VIDEO_DRIVER_DEBUG_REPORT_REQUEST\n"); break;
+ case 0xc000021a: cchUsed = RTStrPrintf(pszDetails, cbDetails, "WINLOGON_FATAL_ERROR\n"); break;
+ case 0xdeaddead: cchUsed = RTStrPrintf(pszDetails, cbDetails, "MANUALLY_INITIATED_CRASH1\n"); break;
+ default: cchUsed = 0; break;
+ }
+ if (cchUsed < cbDetails)
+ return VINF_SUCCESS;
+ return VINF_BUFFER_OVERFLOW;
+}
+
+
+/**
+ * Report a bug check.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per virtual CPU structure.
+ * @param enmEvent The kind of BSOD event this is.
+ * @param uBugCheck The bug check number.
+ * @param uP1 The bug check parameter \#1.
+ * @param uP2 The bug check parameter \#2.
+ * @param uP3 The bug check parameter \#3.
+ * @param uP4 The bug check parameter \#4.
+ */
+VMMR3DECL(VBOXSTRICTRC) DBGFR3ReportBugCheck(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent, uint64_t uBugCheck,
+ uint64_t uP1, uint64_t uP2, uint64_t uP3, uint64_t uP4)
+{
+ /*
+ * Be careful.
+ */
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_INVALID_VMCPU_HANDLE);
+ const char *pszSource;
+ switch (enmEvent)
+ {
+ case DBGFEVENT_BSOD_MSR: pszSource = "GIMHv"; break;
+ case DBGFEVENT_BSOD_EFI: pszSource = "EFI"; break;
+ case DBGFEVENT_BSOD_VMMDEV: pszSource = "VMMDev"; break;
+ default:
+ AssertMsgFailedReturn(("enmEvent=%d\n", enmEvent), VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Note it down.
+ */
+ pVM->dbgf.s.BugCheck.enmEvent = enmEvent;
+ pVM->dbgf.s.BugCheck.uBugCheck = uBugCheck;
+ pVM->dbgf.s.BugCheck.auParameters[0] = uP1;
+ pVM->dbgf.s.BugCheck.auParameters[1] = uP2;
+ pVM->dbgf.s.BugCheck.auParameters[2] = uP3;
+ pVM->dbgf.s.BugCheck.auParameters[3] = uP4;
+ pVM->dbgf.s.BugCheck.idCpu = pVCpu->idCpu;
+ pVM->dbgf.s.BugCheck.uTimestamp = TMVirtualGet(pVM);
+ pVM->dbgf.s.BugCheck.uResetNo = VMGetResetCount(pVM);
+
+ /*
+ * Log the details.
+ */
+ char szDetails[2048];
+ DBGFR3FormatBugCheck(pVM->pUVM, szDetails, sizeof(szDetails), uBugCheck, uP1, uP2, uP3, uP4);
+ LogRel(("%s: %s", pszSource, szDetails));
+
+ /*
+ * Raise debugger event.
+ */
+ VBOXSTRICTRC rc = VINF_SUCCESS;
+ if (DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
+ rc = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent, DBGFEVENTCTX_OTHER, 5 /*cArgs*/, uBugCheck, uP1, uP2, uP3, uP4);
+
+ /*
+ * Take actions.
+ */
+ /** @todo Take actions on BSOD, like notifying main or stopping the VM...
+ * For testing it makes little sense to continue after a BSOD. */
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERINT, bugcheck}
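+ *
+ * Takes an optional argument string of up to five hex numbers: the bug check
+ * code followed by parameters 1 thru 4.  A hypothetical debugger console
+ * invocation could look like "info bugcheck 139 3 0 0 0", which formats a
+ * KERNEL_SECURITY_CHECK_FAILURE without the guest having reported one.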
+ */
+static DECLCALLBACK(void) dbgfR3BugCheckInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ char szDetails[2048];
+
+ /*
+ * Any arguments for bug check formatting?
+ */
+ if (pszArgs && *pszArgs)
+ pszArgs = RTStrStripL(pszArgs);
+ if (pszArgs && *pszArgs)
+ {
+ uint64_t auData[5] = { 0, 0, 0, 0, 0 };
+ unsigned iData = 0;
+ do
+ {
+ /* Find the next hex digit */
+ char ch;
+ while ((ch = *pszArgs) != '\0' && !RT_C_IS_XDIGIT(ch))
+ pszArgs++;
+ if (ch == '\0')
+ break;
+
+ /* Extract the number. */
+ char *pszNext = (char *)pszArgs + 1;
+ RTStrToUInt64Ex(pszArgs, &pszNext, 16, &auData[iData]);
+
+ /* Advance. */
+ pszArgs = pszNext;
+ iData++;
+ } while (iData < RT_ELEMENTS(auData) && *pszArgs);
+
+ /* Format it. */
+ DBGFR3FormatBugCheck(pVM->pUVM, szDetails, sizeof(szDetails), auData[0], auData[1], auData[2], auData[3], auData[4]);
+ pHlp->pfnPrintf(pHlp, "%s", szDetails);
+ }
+ /*
+ * Format what's been reported (if any).
+ */
+ else if (pVM->dbgf.s.BugCheck.enmEvent != DBGFEVENT_END)
+ {
+ DBGFR3FormatBugCheck(pVM->pUVM, szDetails, sizeof(szDetails), pVM->dbgf.s.BugCheck.uBugCheck,
+ pVM->dbgf.s.BugCheck.auParameters[0], pVM->dbgf.s.BugCheck.auParameters[1],
+ pVM->dbgf.s.BugCheck.auParameters[2], pVM->dbgf.s.BugCheck.auParameters[3]);
+ const char *pszSource = pVM->dbgf.s.BugCheck.enmEvent == DBGFEVENT_BSOD_MSR ? "GIMHv"
+ : pVM->dbgf.s.BugCheck.enmEvent == DBGFEVENT_BSOD_EFI ? "EFI"
+ : pVM->dbgf.s.BugCheck.enmEvent == DBGFEVENT_BSOD_VMMDEV ? "VMMDev" : "<unknown>";
+ uint32_t const uFreq = TMVirtualGetFreq(pVM);
+ uint64_t const cSecs = pVM->dbgf.s.BugCheck.uTimestamp / uFreq;
+ uint32_t const cMillis = (pVM->dbgf.s.BugCheck.uTimestamp - cSecs * uFreq) * 1000 / uFreq;
+ pHlp->pfnPrintf(pHlp, "BugCheck on CPU #%u after %RU64.%03u s VM uptime, %u resets ago (src: %s)\n%s",
+ pVM->dbgf.s.BugCheck.idCpu, cSecs, cMillis, VMGetResetCount(pVM) - pVM->dbgf.s.BugCheck.uResetNo,
+ pszSource, szDetails);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "No bug check reported.\n");
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Flow.cpp b/src/VBox/VMM/VMMR3/DBGFR3Flow.cpp
new file mode 100644
index 00000000..9afe9e57
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3Flow.cpp
@@ -0,0 +1,2322 @@
+/* $Id: DBGFR3Flow.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Control Flow Graph Interface (CFG).
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_cfg DBGFR3Flow - Control Flow Graph Interface
+ *
+ * The control flow graph interface provides an API to disassemble
+ * guest code providing the result in a control flow graph.
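+ *
+ * A minimal usage sketch (assuming the public flow API declared in
+ * VBox/vmm/dbgf.h; idCpu, the start address and the flag values are
+ * illustrative, the DBGFR3FlowCreate arguments being idCpu, start address,
+ * cbDisasmMax, fFlagsFlow and fFlagsDisasm):
+ * @code
+ *     DBGFADDRESS AddrStart;
+ *     DBGFR3AddrFromFlat(pUVM, &AddrStart, GCPtrEntry);
+ *     DBGFFLOW hFlow;
+ *     int rc = DBGFR3FlowCreate(pUVM, 0, &AddrStart, 0, 0,
+ *                               DBGF_DISAS_FLAGS_DEFAULT_MODE, &hFlow);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         ... iterate the basic blocks with the flow iterator API ...
+ *         DBGFR3FlowRelease(hFlow);
+ *     }
+ * @endcode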
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/param.h>
+#include <iprt/list.h>
+#include <iprt/mem.h>
+#include <iprt/sort.h>
+#include <iprt/strcache.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+/**
+ * Internal control flow graph state.
+ */
+typedef struct DBGFFLOWINT
+{
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** Internal reference counter for basic blocks. */
+ uint32_t volatile cRefsBb;
+ /** Flags during creation. */
+ uint32_t fFlags;
+ /** List of all basic blocks. */
+ RTLISTANCHOR LstFlowBb;
+ /** List of identified branch tables. */
+ RTLISTANCHOR LstBranchTbl;
+ /** Number of basic blocks in this control flow graph. */
+ uint32_t cBbs;
+ /** Number of branch tables in this control flow graph. */
+ uint32_t cBranchTbls;
+ /** Number of call instructions in this control flow graph. */
+ uint32_t cCallInsns;
+ /** The lowest address of a basic block. */
+ DBGFADDRESS AddrLowest;
+ /** The highest address of a basic block. */
+ DBGFADDRESS AddrHighest;
+ /** String cache for disassembled instructions. */
+ RTSTRCACHE hStrCacheInstr;
+} DBGFFLOWINT;
+/** Pointer to an internal control flow graph state. */
+typedef DBGFFLOWINT *PDBGFFLOWINT;
+
+/**
+ * Instruction record
+ */
+typedef struct DBGFFLOWBBINSTR
+{
+ /** Instruction address. */
+ DBGFADDRESS AddrInstr;
+ /** Size of instruction. */
+ uint32_t cbInstr;
+ /** Disassembled instruction string. */
+ const char *pszInstr;
+} DBGFFLOWBBINSTR;
+/** Pointer to an instruction record. */
+typedef DBGFFLOWBBINSTR *PDBGFFLOWBBINSTR;
+
+
+/**
+ * A branch table identified by the graph processor.
+ */
+typedef struct DBGFFLOWBRANCHTBLINT
+{
+ /** Node for the list of branch tables. */
+ RTLISTNODE NdBranchTbl;
+ /** The owning control flow graph. */
+ PDBGFFLOWINT pFlow;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** The general register index holding the branch table base. */
+ uint8_t idxGenRegBase;
+ /** Start address of the branch table. */
+ DBGFADDRESS AddrStart;
+ /** Number of valid entries in the branch table. */
+ uint32_t cSlots;
+ /** The addresses contained in the branch table - variable in size. */
+ DBGFADDRESS aAddresses[1];
+} DBGFFLOWBRANCHTBLINT;
+/** Pointer to a branch table structure. */
+typedef DBGFFLOWBRANCHTBLINT *PDBGFFLOWBRANCHTBLINT;
+
+
+/**
+ * Internal control flow graph basic block state.
+ */
+typedef struct DBGFFLOWBBINT
+{
+ /** Node for the list of all basic blocks. */
+ RTLISTNODE NdFlowBb;
+ /** The control flow graph the basic block belongs to. */
+ PDBGFFLOWINT pFlow;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** Basic block end type. */
+ DBGFFLOWBBENDTYPE enmEndType;
+ /** Start address of this basic block. */
+ DBGFADDRESS AddrStart;
+ /** End address of this basic block. */
+ DBGFADDRESS AddrEnd;
+ /** Address of the block succeeding.
+ * This is valid for conditional jumps
+ * (the other target is referenced by AddrEnd+1) and
+ * unconditional jumps (not ret, iret, etc.) except
+ * if we can't infer the jump target (jmp *eax for example). */
+ DBGFADDRESS AddrTarget;
+ /** The indirect branch table identified for indirect branches. */
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl;
+ /** Last status error code if DBGF_FLOW_BB_F_INCOMPLETE_ERR is set. */
+ int rcError;
+ /** Error message if DBGF_FLOW_BB_F_INCOMPLETE_ERR is set. */
+ char *pszErr;
+ /** Flags for this basic block. */
+ uint32_t fFlags;
+ /** Number of instructions in this basic block. */
+ uint32_t cInstr;
+ /** Maximum number of instruction records for this basic block. */
+ uint32_t cInstrMax;
+ /** Instruction records, variable in size. */
+ DBGFFLOWBBINSTR aInstr[1];
+} DBGFFLOWBBINT;
+/** Pointer to an internal control flow graph basic block state. */
+typedef DBGFFLOWBBINT *PDBGFFLOWBBINT;
+
+
+/**
+ * Control flow graph iterator state.
+ */
+typedef struct DBGFFLOWITINT
+{
+ /** Pointer to the control flow graph (holding a reference). */
+ PDBGFFLOWINT pFlow;
+ /** Next basic block to return. */
+ uint32_t idxBbNext;
+ /** Array of basic blocks sorted by the specified order - variable in size. */
+ PDBGFFLOWBBINT apBb[1];
+} DBGFFLOWITINT;
+/** Pointer to the internal control flow graph iterator state. */
+typedef DBGFFLOWITINT *PDBGFFLOWITINT;
+
+
+/**
+ * Control flow graph branch table iterator state.
+ */
+typedef struct DBGFFLOWBRANCHTBLITINT
+{
+ /** Pointer to the control flow graph (holding a reference). */
+ PDBGFFLOWINT pFlow;
+ /** Next branch table to return. */
+ uint32_t idxTblNext;
+ /** Array of branch table pointers sorted by the specified order - variable in size. */
+ PDBGFFLOWBRANCHTBLINT apBranchTbl[1];
+} DBGFFLOWBRANCHTBLITINT;
+/** Pointer to the internal control flow graph branch table iterator state. */
+typedef DBGFFLOWBRANCHTBLITINT *PDBGFFLOWBRANCHTBLITINT;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+static uint32_t dbgfR3FlowBbReleaseInt(PDBGFFLOWBBINT pFlowBb, bool fMayDestroyFlow);
+static void dbgfR3FlowBranchTblDestroy(PDBGFFLOWBRANCHTBLINT pFlowBranchTbl);
+
+
+/**
+ * Checks whether both addresses are equal.
+ *
+ * @returns true if both addresses point to the same location, false otherwise.
+ * @param pAddr1 First address.
+ * @param pAddr2 Second address.
+ */
+static bool dbgfR3FlowAddrEqual(PDBGFADDRESS pAddr1, PDBGFADDRESS pAddr2)
+{
+ return pAddr1->Sel == pAddr2->Sel
+ && pAddr1->off == pAddr2->off;
+}
+
+
+/**
+ * Checks whether the first given address is lower than the second one.
+ *
+ * @returns true if the first address is lower than the second, false otherwise.
+ * @param pAddr1 First address.
+ * @param pAddr2 Second address.
+ */
+static bool dbgfR3FlowAddrLower(PDBGFADDRESS pAddr1, PDBGFADDRESS pAddr2)
+{
+ return pAddr1->Sel == pAddr2->Sel
+ && pAddr1->off < pAddr2->off;
+}
+
+
+/**
+ * Checks whether the given basic block and address intersect.
+ *
+ * @returns true if they intersect, false otherwise.
+ * @param pFlowBb The basic block to check.
+ * @param pAddr The address to check for.
+ */
+static bool dbgfR3FlowAddrIntersect(PDBGFFLOWBBINT pFlowBb, PDBGFADDRESS pAddr)
+{
+ return (pFlowBb->AddrStart.Sel == pAddr->Sel)
+ && (pFlowBb->AddrStart.off <= pAddr->off)
+ && (pFlowBb->AddrEnd.off >= pAddr->off);
+}
+
+
+/**
+ * Returns the distance of the two given addresses.
+ *
+ * @returns Distance of the addresses.
+ * @param pAddr1 The first address.
+ * @param pAddr2 The second address.
+ */
+static RTGCUINTPTR dbgfR3FlowAddrGetDistance(PDBGFADDRESS pAddr1, PDBGFADDRESS pAddr2)
+{
+ if (pAddr1->Sel == pAddr2->Sel)
+ {
+ if (pAddr1->off >= pAddr2->off)
+ return pAddr1->off - pAddr2->off;
+ else
+ return pAddr2->off - pAddr1->off;
+ }
+ else
+ AssertFailed();
+
+ return 0;
+}
+
+
+/**
+ * Creates a new basic block.
+ *
+ * @returns Pointer to the basic block on success or NULL if out of memory.
+ * @param pThis The control flow graph.
+ * @param pAddrStart The start of the basic block.
+ * @param fFlowBbFlags Additional flags for this basic block.
+ * @param cInstrMax Maximum number of instructions this block can hold initially.
+ */
+static PDBGFFLOWBBINT dbgfR3FlowBbCreate(PDBGFFLOWINT pThis, PDBGFADDRESS pAddrStart, uint32_t fFlowBbFlags,
+ uint32_t cInstrMax)
+{
+ PDBGFFLOWBBINT pFlowBb = (PDBGFFLOWBBINT)RTMemAllocZ(RT_UOFFSETOF_DYN(DBGFFLOWBBINT, aInstr[cInstrMax]));
+ if (RT_LIKELY(pFlowBb))
+ {
+ RTListInit(&pFlowBb->NdFlowBb);
+ pFlowBb->cRefs = 1;
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_INVALID;
+ pFlowBb->pFlow = pThis;
+ pFlowBb->fFlags = DBGF_FLOW_BB_F_EMPTY | fFlowBbFlags;
+ pFlowBb->AddrStart = *pAddrStart;
+ pFlowBb->AddrEnd = *pAddrStart;
+ pFlowBb->rcError = VINF_SUCCESS;
+ pFlowBb->pszErr = NULL;
+ pFlowBb->cInstr = 0;
+ pFlowBb->cInstrMax = cInstrMax;
+ pFlowBb->pFlowBranchTbl = NULL;
+ ASMAtomicIncU32(&pThis->cRefsBb);
+ }
+
+ return pFlowBb;
+}
+
+
+/**
+ * Creates an empty branch table with the given size.
+ *
+ * @returns Pointer to the empty branch table on success or NULL if out of memory.
+ * @param pThis The control flow graph.
+ * @param pAddrStart The start of the branch table.
+ * @param idxGenRegBase The general register index holding the base address.
+ * @param cSlots Number of slots the table has.
+ */
+static PDBGFFLOWBRANCHTBLINT
+dbgfR3FlowBranchTblCreate(PDBGFFLOWINT pThis, PDBGFADDRESS pAddrStart, uint8_t idxGenRegBase, uint32_t cSlots)
+{
+ PDBGFFLOWBRANCHTBLINT pBranchTbl = (PDBGFFLOWBRANCHTBLINT)RTMemAllocZ(RT_UOFFSETOF_DYN(DBGFFLOWBRANCHTBLINT,
+ aAddresses[cSlots]));
+ if (RT_LIKELY(pBranchTbl))
+ {
+ RTListInit(&pBranchTbl->NdBranchTbl);
+ pBranchTbl->pFlow = pThis;
+ pBranchTbl->idxGenRegBase = idxGenRegBase;
+ pBranchTbl->AddrStart = *pAddrStart;
+ pBranchTbl->cSlots = cSlots;
+ pBranchTbl->cRefs = 1;
+ }
+
+ return pBranchTbl;
+}
+
+
+/**
+ * Destroys a control flow graph.
+ *
+ * @param pThis The control flow graph to destroy.
+ */
+static void dbgfR3FlowDestroy(PDBGFFLOWINT pThis)
+{
+ /* Defer destruction if there are still basic blocks referencing us. */
+ PDBGFFLOWBBINT pFlowBb;
+ PDBGFFLOWBBINT pFlowBbNext;
+ RTListForEachSafe(&pThis->LstFlowBb, pFlowBb, pFlowBbNext, DBGFFLOWBBINT, NdFlowBb)
+ {
+ dbgfR3FlowBbReleaseInt(pFlowBb, false /*fMayDestroyFlow*/);
+ }
+
+ Assert(!pThis->cRefs);
+ if (!pThis->cRefsBb)
+ {
+ /* Destroy the branch tables. */
+ PDBGFFLOWBRANCHTBLINT pTbl = NULL;
+ PDBGFFLOWBRANCHTBLINT pTblNext = NULL;
+ RTListForEachSafe(&pThis->LstBranchTbl, pTbl, pTblNext, DBGFFLOWBRANCHTBLINT, NdBranchTbl)
+ {
+ dbgfR3FlowBranchTblDestroy(pTbl);
+ }
+
+ RTStrCacheDestroy(pThis->hStrCacheInstr);
+ RTMemFree(pThis);
+ }
+}
+
+
+/**
+ * Destroys a basic block.
+ *
+ * @param pFlowBb The basic block to destroy.
+ * @param fMayDestroyFlow Flag whether the control flow graph container
+ * should be destroyed when there is nothing referencing it.
+ */
+static void dbgfR3FlowBbDestroy(PDBGFFLOWBBINT pFlowBb, bool fMayDestroyFlow)
+{
+ PDBGFFLOWINT pThis = pFlowBb->pFlow;
+
+ RTListNodeRemove(&pFlowBb->NdFlowBb);
+ pThis->cBbs--;
+ for (uint32_t idxInstr = 0; idxInstr < pFlowBb->cInstr; idxInstr++)
+ RTStrCacheRelease(pThis->hStrCacheInstr, pFlowBb->aInstr[idxInstr].pszInstr);
+ uint32_t cRefsBb = ASMAtomicDecU32(&pThis->cRefsBb);
+ RTMemFree(pFlowBb);
+
+ if (!cRefsBb && !pThis->cRefs && fMayDestroyFlow)
+ dbgfR3FlowDestroy(pThis);
+}
+
+
+/**
+ * Destroys a given branch table.
+ *
+ * @param pFlowBranchTbl The flow branch table to destroy.
+ */
+static void dbgfR3FlowBranchTblDestroy(PDBGFFLOWBRANCHTBLINT pFlowBranchTbl)
+{
+ RTListNodeRemove(&pFlowBranchTbl->NdBranchTbl);
+ RTMemFree(pFlowBranchTbl);
+}
+
+
+/**
+ * Internal basic block release worker.
+ *
+ * @returns New reference count of the released basic block; the block is
+ * destroyed when the count reaches 0.
+ * @param pFlowBb The basic block to release.
+ * @param fMayDestroyFlow Flag whether the control flow graph container
+ * should be destroyed when there is nothing referencing it.
+ */
+static uint32_t dbgfR3FlowBbReleaseInt(PDBGFFLOWBBINT pFlowBb, bool fMayDestroyFlow)
+{
+ uint32_t cRefs = ASMAtomicDecU32(&pFlowBb->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p %d\n", cRefs, pFlowBb, pFlowBb->enmEndType));
+ if (cRefs == 0)
+ dbgfR3FlowBbDestroy(pFlowBb, fMayDestroyFlow);
+ return cRefs;
+}
+
+
+/**
+ * Links the given basic block into the control flow graph.
+ *
+ * @param pThis The control flow graph to link into.
+ * @param pFlowBb The basic block to link.
+ */
+DECLINLINE(void) dbgfR3FlowLink(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb)
+{
+ RTListAppend(&pThis->LstFlowBb, &pFlowBb->NdFlowBb);
+ pThis->cBbs++;
+}
+
+
+/**
+ * Links the given branch table into the control flow graph.
+ *
+ * @param pThis The control flow graph to link into.
+ * @param pBranchTbl The branch table to link.
+ */
+DECLINLINE(void) dbgfR3FlowBranchTblLink(PDBGFFLOWINT pThis, PDBGFFLOWBRANCHTBLINT pBranchTbl)
+{
+ RTListAppend(&pThis->LstBranchTbl, &pBranchTbl->NdBranchTbl);
+ pThis->cBranchTbls++;
+}
+
+
+/**
+ * Returns the first unpopulated basic block of the given control flow graph.
+ *
+ * @returns The first unpopulated basic block or NULL if not found.
+ * @param pThis The control flow graph.
+ */
+DECLINLINE(PDBGFFLOWBBINT) dbgfR3FlowGetUnpopulatedBb(PDBGFFLOWINT pThis)
+{
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pThis->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ if (pFlowBb->fFlags & DBGF_FLOW_BB_F_EMPTY)
+ return pFlowBb;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Returns the branch table with the given address if it exists.
+ *
+ * @returns Pointer to the branch table record or NULL if not found.
+ * @param pThis The control flow graph.
+ * @param pAddrTbl The branch table address.
+ */
+DECLINLINE(PDBGFFLOWBRANCHTBLINT) dbgfR3FlowBranchTblFindByAddr(PDBGFFLOWINT pThis, PDBGFADDRESS pAddrTbl)
+{
+ PDBGFFLOWBRANCHTBLINT pTbl;
+ RTListForEach(&pThis->LstBranchTbl, pTbl, DBGFFLOWBRANCHTBLINT, NdBranchTbl)
+ {
+ if (dbgfR3FlowAddrEqual(&pTbl->AddrStart, pAddrTbl))
+ return pTbl;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Sets the given error status for the basic block.
+ *
+ * @param pFlowBb The basic block causing the error.
+ * @param rcError The error to set.
+ * @param pszFmt Format string of the error description.
+ * @param ... Arguments for the format string.
+ */
+static void dbgfR3FlowBbSetError(PDBGFFLOWBBINT pFlowBb, int rcError, const char *pszFmt, ...)
+{
+ va_list va;
+ va_start(va, pszFmt);
+
+ Assert(!(pFlowBb->fFlags & DBGF_FLOW_BB_F_INCOMPLETE_ERR));
+ pFlowBb->fFlags |= DBGF_FLOW_BB_F_INCOMPLETE_ERR;
+ pFlowBb->fFlags &= ~DBGF_FLOW_BB_F_EMPTY;
+ pFlowBb->rcError = rcError;
+ pFlowBb->pszErr = RTStrAPrintf2V(pszFmt, va);
+ va_end(va);
+}
+
+
+/**
+ * Checks whether the given control flow graph contains a basic block
+ * with the given start address.
+ *
+ * @returns true if there is a basic block with the start address, false otherwise.
+ * @param pThis The control flow graph.
+ * @param pAddr The address to check for.
+ */
+static bool dbgfR3FlowHasBbWithStartAddr(PDBGFFLOWINT pThis, PDBGFADDRESS pAddr)
+{
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pThis->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ if (dbgfR3FlowAddrEqual(&pFlowBb->AddrStart, pAddr))
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ * Splits a given basic block into two at the given address.
+ *
+ * @returns VBox status code.
+ * @param pThis The control flow graph.
+ * @param pFlowBb The basic block to split.
+ * @param pAddr The address to split at.
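+ *
+ * @remarks Illustrative example: splitting a block { A, B, C, D } at C shrinks
+ * the original block to { A, B }, ending it with an unconditional jump to C,
+ * while a new block { C, D } inherits the old end type, target and any error state.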
+ */
+static int dbgfR3FlowBbSplit(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb, PDBGFADDRESS pAddr)
+{
+ int rc = VINF_SUCCESS;
+ uint32_t idxInstrSplit;
+
+ /* If the block is empty it will get populated later so there is nothing to split;
+ * the same applies if the start address equals the split address. */
+ if ( pFlowBb->fFlags & DBGF_FLOW_BB_F_EMPTY
+ || dbgfR3FlowAddrEqual(&pFlowBb->AddrStart, pAddr))
+ return VINF_SUCCESS;
+
+ /* Find the instruction to split at. */
+ for (idxInstrSplit = 1; idxInstrSplit < pFlowBb->cInstr; idxInstrSplit++)
+ if (dbgfR3FlowAddrEqual(&pFlowBb->aInstr[idxInstrSplit].AddrInstr, pAddr))
+ break;
+
+ Assert(idxInstrSplit > 0);
+
+ /*
+ * The given address might not be on an instruction boundary; this is not
+ * supported so far and results in an error.
+ */
+ if (idxInstrSplit < pFlowBb->cInstr)
+ {
+ /* Create new basic block. */
+ uint32_t cInstrNew = pFlowBb->cInstr - idxInstrSplit;
+ PDBGFFLOWBBINT pFlowBbNew = dbgfR3FlowBbCreate(pThis, &pFlowBb->aInstr[idxInstrSplit].AddrInstr,
+ 0 /*fFlowBbFlags*/, cInstrNew);
+ if (pFlowBbNew)
+ {
+ /* Move instructions over. */
+ pFlowBbNew->cInstr = cInstrNew;
+ pFlowBbNew->AddrEnd = pFlowBb->AddrEnd;
+ pFlowBbNew->enmEndType = pFlowBb->enmEndType;
+ pFlowBbNew->AddrTarget = pFlowBb->AddrTarget;
+ pFlowBbNew->fFlags = pFlowBb->fFlags & ~DBGF_FLOW_BB_F_ENTRY;
+ pFlowBbNew->pFlowBranchTbl = pFlowBb->pFlowBranchTbl;
+ pFlowBb->pFlowBranchTbl = NULL;
+
+ /* Move any error to the new basic block and clear them in the old basic block. */
+ pFlowBbNew->rcError = pFlowBb->rcError;
+ pFlowBbNew->pszErr = pFlowBb->pszErr;
+ pFlowBb->rcError = VINF_SUCCESS;
+ pFlowBb->pszErr = NULL;
+ pFlowBb->fFlags &= ~DBGF_FLOW_BB_F_INCOMPLETE_ERR;
+
+ memcpy(&pFlowBbNew->aInstr[0], &pFlowBb->aInstr[idxInstrSplit], cInstrNew * sizeof(DBGFFLOWBBINSTR));
+ pFlowBb->cInstr = idxInstrSplit;
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND;
+ pFlowBb->AddrEnd = pFlowBb->aInstr[idxInstrSplit-1].AddrInstr;
+ pFlowBb->AddrTarget = pFlowBbNew->AddrStart;
+ DBGFR3AddrAdd(&pFlowBb->AddrEnd, pFlowBb->aInstr[idxInstrSplit-1].cbInstr - 1);
+ RT_BZERO(&pFlowBb->aInstr[idxInstrSplit], cInstrNew * sizeof(DBGFFLOWBBINSTR));
+
+ dbgfR3FlowLink(pThis, pFlowBbNew);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ AssertFailedStmt(rc = VERR_INVALID_STATE); /** @todo Proper status code. */
+
+ return rc;
+}
+
+
+/**
+ * Makes sure there is a successor at the given address, splitting already existing
+ * basic blocks if they intersect.
+ *
+ * @returns VBox status code.
+ * @param pThis The control flow graph.
+ * @param pAddrSucc The guest address the new successor should start at.
+ * @param fNewBbFlags Flags for the new basic block.
+ * @param pBranchTbl Branch table candidate for this basic block.
+ */
+static int dbgfR3FlowBbSuccessorAdd(PDBGFFLOWINT pThis, PDBGFADDRESS pAddrSucc,
+ uint32_t fNewBbFlags, PDBGFFLOWBRANCHTBLINT pBranchTbl)
+{
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pThis->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ /*
+ * The basic block must be split if it intersects with the given address
+ * and the start address does not equal the given one.
+ */
+ if (dbgfR3FlowAddrIntersect(pFlowBb, pAddrSucc))
+ return dbgfR3FlowBbSplit(pThis, pFlowBb, pAddrSucc);
+ }
+
+ int rc = VINF_SUCCESS;
+ pFlowBb = dbgfR3FlowBbCreate(pThis, pAddrSucc, fNewBbFlags, 10);
+ if (pFlowBb)
+ {
+ pFlowBb->pFlowBranchTbl = pBranchTbl;
+ dbgfR3FlowLink(pThis, pFlowBb);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Returns whether the parameter indicates an indirect branch.
+ *
+ * @returns Flag whether this is an indirect branch.
+ * @param pDisParam The parameter from the disassembler.
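+ *
+ * @remarks Branches with an immediate or relative immediate operand (e.g.
+ * "jmp 0x1234" or a relative "jmp short") are direct; everything else, such as
+ * "jmp eax" or "jmp [rax + rbx*8]", is treated as indirect.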
+ */
+DECLINLINE(bool) dbgfR3FlowBranchTargetIsIndirect(PDISOPPARAM pDisParam)
+{
+ bool fIndirect = true;
+
+ if ( pDisParam->fUse & (DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE64)
+ || pDisParam->fUse & (DISUSE_IMMEDIATE8_REL | DISUSE_IMMEDIATE16_REL | DISUSE_IMMEDIATE32_REL | DISUSE_IMMEDIATE64_REL))
+ fIndirect = false;
+
+ return fIndirect;
+}
+
+
+/**
+ * Resolves the direct branch target address if possible from the given instruction address
+ * and instruction parameter.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param idCpu CPU id for resolving the address.
+ * @param pDisParam The parameter from the disassembler.
+ * @param pAddrInstr The instruction address.
+ * @param cbInstr Size of instruction in bytes.
+ * @param fRelJmp Flag whether this is a relative jump.
+ * @param pAddrJmpTarget Where to store the address of the jump target on success.
+ */
+static int dbgfR3FlowQueryDirectBranchTarget(PUVM pUVM, VMCPUID idCpu, PDISOPPARAM pDisParam, PDBGFADDRESS pAddrInstr,
+ uint32_t cbInstr, bool fRelJmp, PDBGFADDRESS pAddrJmpTarget)
+{
+ int rc = VINF_SUCCESS;
+
+ Assert(!dbgfR3FlowBranchTargetIsIndirect(pDisParam));
+
+ /* Relative jumps are always from the beginning of the next instruction. */
+ *pAddrJmpTarget = *pAddrInstr;
+ DBGFR3AddrAdd(pAddrJmpTarget, cbInstr);
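+ /* Example: a two byte "jmp short -4" at 0x1000 resolves to 0x1000 + 2 - 4 = 0xffe. */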
+
+ if (fRelJmp)
+ {
+ RTGCINTPTR iRel = 0;
+ if (pDisParam->fUse & DISUSE_IMMEDIATE8_REL)
+ iRel = (int8_t)pDisParam->uValue;
+ else if (pDisParam->fUse & DISUSE_IMMEDIATE16_REL)
+ iRel = (int16_t)pDisParam->uValue;
+ else if (pDisParam->fUse & DISUSE_IMMEDIATE32_REL)
+ iRel = (int32_t)pDisParam->uValue;
+ else if (pDisParam->fUse & DISUSE_IMMEDIATE64_REL)
+ iRel = (int64_t)pDisParam->uValue;
+ else
+ AssertFailedStmt(rc = VERR_NOT_SUPPORTED);
+
+ if (iRel < 0)
+ DBGFR3AddrSub(pAddrJmpTarget, -iRel);
+ else
+ DBGFR3AddrAdd(pAddrJmpTarget, iRel);
+ }
+ else
+ {
+ if (pDisParam->fUse & (DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE64))
+ {
+ if (DBGFADDRESS_IS_FLAT(pAddrInstr))
+ DBGFR3AddrFromFlat(pUVM, pAddrJmpTarget, pDisParam->uValue);
+ else
+ DBGFR3AddrFromSelOff(pUVM, idCpu, pAddrJmpTarget, pAddrInstr->Sel, pDisParam->uValue);
+ }
+ else
+ AssertFailedStmt(rc = VERR_INVALID_STATE);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Returns the CPU mode based on the given assembler flags.
+ *
+ * @returns CPU mode.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param fFlagsDisasm The flags used for disassembling.
+ */
+static CPUMMODE dbgfR3FlowGetDisasCpuMode(PUVM pUVM, VMCPUID idCpu, uint32_t fFlagsDisasm)
+{
+ CPUMMODE enmMode = CPUMMODE_INVALID;
+ uint32_t fDisasMode = fFlagsDisasm & DBGF_DISAS_FLAGS_MODE_MASK;
+ if (fDisasMode == DBGF_DISAS_FLAGS_DEFAULT_MODE)
+ enmMode = DBGFR3CpuGetMode(pUVM, idCpu);
+ else if ( fDisasMode == DBGF_DISAS_FLAGS_16BIT_MODE
+ || fDisasMode == DBGF_DISAS_FLAGS_16BIT_REAL_MODE)
+ enmMode = CPUMMODE_REAL;
+ else if (fDisasMode == DBGF_DISAS_FLAGS_32BIT_MODE)
+ enmMode = CPUMMODE_PROTECTED;
+ else if (fDisasMode == DBGF_DISAS_FLAGS_64BIT_MODE)
+ enmMode = CPUMMODE_LONG;
+ else
+ AssertFailed();
+
+ return enmMode;
+}
+
+
+/**
+ * Searches backwards in the given basic block, starting at the given instruction
+ * index, for a mov instruction with the given register as the target where the
+ * constant looks like a pointer.
+ *
+ * @returns Flag whether a candidate was found.
+ * @param pFlowBb The basic block containing the indirect branch.
+ * @param idxRegTgt The general register the mov targets.
+ * @param cbPtr The pointer size to look for.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param fFlagsDisasm The flags to use for disassembling.
+ * @param pidxInstrStart The instruction index to start searching at on input,
+ * the last instruction evaluated on output.
+ * @param pAddrDest Where to store the candidate address on success.
+ */
+static bool dbgfR3FlowSearchMovWithConstantPtrSizeBackwards(PDBGFFLOWBBINT pFlowBb, uint8_t idxRegTgt, uint32_t cbPtr,
+ PUVM pUVM, VMCPUID idCpu, uint32_t fFlagsDisasm,
+ uint32_t *pidxInstrStart, PDBGFADDRESS pAddrDest)
+{
+ bool fFound = false;
+ uint32_t idxInstrCur = *pidxInstrStart;
+ uint32_t cInstrCheck = idxInstrCur + 1;
+
+ for (;;)
+ {
+ /** @todo Avoid disassembling again. */
+ PDBGFFLOWBBINSTR pInstr = &pFlowBb->aInstr[idxInstrCur];
+ DBGFDISSTATE DisState;
+ char szOutput[_4K];
+
+ int rc = dbgfR3DisasInstrStateEx(pUVM, idCpu, &pInstr->AddrInstr, fFlagsDisasm,
+ &szOutput[0], sizeof(szOutput), &DisState);
+ if (RT_SUCCESS(rc))
+ {
+ if ( DisState.pCurInstr->uOpcode == OP_MOV
+ && (DisState.Param1.fUse & (DISUSE_REG_GEN16 | DISUSE_REG_GEN32 | DISUSE_REG_GEN64))
+ && DisState.Param1.Base.idxGenReg == idxRegTgt
+ /*&& DisState.Param1.cb == cbPtr*/
+ && DisState.Param2.cb == cbPtr
+ && (DisState.Param2.fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE64)))
+ {
+ /* Found possible candidate. */
+ fFound = true;
+ if (DBGFADDRESS_IS_FLAT(&pInstr->AddrInstr))
+ DBGFR3AddrFromFlat(pUVM, pAddrDest, DisState.Param2.uValue);
+ else
+ DBGFR3AddrFromSelOff(pUVM, idCpu, pAddrDest, pInstr->AddrInstr.Sel, DisState.Param2.uValue);
+ break;
+ }
+ }
+ else
+ break;
+
+ cInstrCheck--;
+ if (!cInstrCheck)
+ break;
+
+ idxInstrCur--;
+ }
+
+ *pidxInstrStart = idxInstrCur;
+ return fFound;
+}
+
+
+/**
+ * Verifies the given branch table candidate and adds it to the control flow graph on success.
+ *
+ * @returns VBox status code.
+ * @param pThis The flow control graph.
+ * @param pFlowBb The basic block causing the indirect branch.
+ * @param pAddrBranchTbl Address of the branch table location.
+ * @param idxGenRegBase The general register holding the base address.
+ * @param cbPtr Guest pointer size.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ *
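+ * @remarks The slot count is determined heuristically: up to 4KB of guest
+ * memory is read and entries are counted until one points further than 512KB
+ * away from the branching basic block.
+ *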
+ * @todo Handle branch tables greater than 4KB (lazy coder).
+ */
+static int dbgfR3FlowBranchTblVerifyAdd(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb, PDBGFADDRESS pAddrBranchTbl,
+ uint8_t idxGenRegBase, uint32_t cbPtr, PUVM pUVM, VMCPUID idCpu)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWBRANCHTBLINT pBranchTbl = dbgfR3FlowBranchTblFindByAddr(pThis, pAddrBranchTbl);
+
+ if (!pBranchTbl)
+ {
+ uint32_t cSlots = 0;
+ uint8_t abBuf[_4K];
+
+ rc = DBGFR3MemRead(pUVM, idCpu, pAddrBranchTbl, &abBuf[0], sizeof(abBuf));
+ if (RT_SUCCESS(rc))
+ {
+ uint8_t *pbBuf = &abBuf[0];
+ while (pbBuf < &abBuf[0] + sizeof(abBuf))
+ {
+ DBGFADDRESS AddrDest;
+ RTGCUINTPTR GCPtr = cbPtr == sizeof(uint64_t)
+ ? *(uint64_t *)pbBuf
+ : cbPtr == sizeof(uint32_t)
+ ? *(uint32_t *)pbBuf
+ : *(uint16_t *)pbBuf;
+ pbBuf += cbPtr;
+
+ if (DBGFADDRESS_IS_FLAT(pAddrBranchTbl))
+ DBGFR3AddrFromFlat(pUVM, &AddrDest, GCPtr);
+ else
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &AddrDest, pAddrBranchTbl->Sel, GCPtr);
+
+ if (dbgfR3FlowAddrGetDistance(&AddrDest, &pFlowBb->AddrEnd) > _512K)
+ break;
+
+ cSlots++;
+ }
+
+ /* If there are any slots, create and populate the table. */
+ if (cSlots)
+ {
+ pBranchTbl = dbgfR3FlowBranchTblCreate(pThis, pAddrBranchTbl, idxGenRegBase, cSlots);
+ if (pBranchTbl)
+ {
+ /* Get the addresses. */
+ for (unsigned i = 0; i < cSlots && RT_SUCCESS(rc); i++)
+ {
+ RTGCUINTPTR GCPtr = cbPtr == sizeof(uint64_t)
+ ? *(uint64_t *)&abBuf[i * cbPtr]
+ : cbPtr == sizeof(uint32_t)
+ ? *(uint32_t *)&abBuf[i * cbPtr]
+ : *(uint16_t *)&abBuf[i * cbPtr];
+
+ if (DBGFADDRESS_IS_FLAT(pAddrBranchTbl))
+ DBGFR3AddrFromFlat(pUVM, &pBranchTbl->aAddresses[i], GCPtr);
+ else
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pBranchTbl->aAddresses[i],
+ pAddrBranchTbl->Sel, GCPtr);
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &pBranchTbl->aAddresses[i], DBGF_FLOW_BB_F_BRANCH_TABLE,
+ pBranchTbl);
+ }
+ dbgfR3FlowBranchTblLink(pThis, pBranchTbl);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ }
+ }
+
+ if (pBranchTbl)
+ pFlowBb->pFlowBranchTbl = pBranchTbl;
+
+ return rc;
+}
+
+
+/**
+ * Checks whether the location for the branch target candidate contains a valid code address.
+ *
+ * @returns VBox status code.
+ * @param pThis The flow control graph.
+ * @param pFlowBb The basic block causing the indirect branch.
+ * @param pAddrBranchTgt Address of the branch target location.
+ * @param idxGenRegBase The general register holding the address of the location.
+ * @param cbPtr Guest pointer size.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param fBranchTbl Flag whether this is a possible branch table containing multiple
+ * targets.
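+ *
+ * @remarks For the single target case the pointer read from the location is
+ * only accepted when it lands within 128KB of the branching basic block; this
+ * is a plausibility heuristic, not a guarantee.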
+ */
+static int dbgfR3FlowCheckBranchTargetLocation(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb, PDBGFADDRESS pAddrBranchTgt,
+ uint8_t idxGenRegBase, uint32_t cbPtr, PUVM pUVM, VMCPUID idCpu, bool fBranchTbl)
+{
+ int rc = VINF_SUCCESS;
+
+ if (!fBranchTbl)
+ {
+ union { uint16_t u16Val; uint32_t u32Val; uint64_t u64Val; } uVal;
+ rc = DBGFR3MemRead(pUVM, idCpu, pAddrBranchTgt, &uVal, cbPtr);
+ if (RT_SUCCESS(rc))
+ {
+ DBGFADDRESS AddrTgt;
+ RTGCUINTPTR GCPtr = cbPtr == sizeof(uint64_t)
+ ? uVal.u64Val
+ : cbPtr == sizeof(uint32_t)
+ ? uVal.u32Val
+ : uVal.u16Val;
+ if (DBGFADDRESS_IS_FLAT(pAddrBranchTgt))
+ DBGFR3AddrFromFlat(pUVM, &AddrTgt, GCPtr);
+ else
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &AddrTgt, pAddrBranchTgt->Sel, GCPtr);
+
+ if (dbgfR3FlowAddrGetDistance(&AddrTgt, &pFlowBb->AddrEnd) <= _128K)
+ {
+ /* Finish the basic block. */
+ pFlowBb->AddrTarget = AddrTgt;
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &AddrTgt,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ }
+ else
+ rc = VERR_NOT_FOUND;
+ }
+ }
+ else
+ rc = dbgfR3FlowBranchTblVerifyAdd(pThis, pFlowBb, pAddrBranchTgt,
+ idxGenRegBase, cbPtr, pUVM, idCpu);
+
+ return rc;
+}
+
+
+/**
+ * Tries to resolve the indirect branch.
+ *
+ * @returns VBox status code.
+ * @param pThis The flow control graph.
+ * @param pFlowBb The basic block causing the indirect branch.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param pDisParam The parameter from the disassembler.
+ * @param fFlagsDisasm Flags for the disassembler.
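+ *
+ * @remarks The resolver scans the basic block backwards for a mov loading the
+ * branch base register with a constant that looks like a nearby pointer, e.g.
+ * (hypothetical) "mov rax, 0x401000" followed by "jmp [rax + rdx*8]" makes
+ * 0x401000 a branch table candidate.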
+ */
+static int dbgfR3FlowTryResolveIndirectBranch(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb, PUVM pUVM,
+ VMCPUID idCpu, PDISOPPARAM pDisParam, uint32_t fFlagsDisasm)
+{
+ Assert(dbgfR3FlowBranchTargetIsIndirect(pDisParam));
+
+ uint32_t cbPtr = 0;
+ CPUMMODE enmMode = dbgfR3FlowGetDisasCpuMode(pUVM, idCpu, fFlagsDisasm);
+
+ switch (enmMode)
+ {
+ case CPUMMODE_REAL:
+ cbPtr = sizeof(uint16_t);
+ break;
+ case CPUMMODE_PROTECTED:
+ cbPtr = sizeof(uint32_t);
+ break;
+ case CPUMMODE_LONG:
+ cbPtr = sizeof(uint64_t);
+ break;
+ default:
+ AssertMsgFailed(("Invalid CPU mode %u\n", enmMode));
+ }
+
+ if (pDisParam->fUse & DISUSE_BASE)
+ {
+ uint8_t idxRegBase = pDisParam->Base.idxGenReg;
+
+ /* Check that the used register size and the pointer size match. */
+ if ( ((pDisParam->fUse & DISUSE_REG_GEN16) && cbPtr == sizeof(uint16_t))
+ || ((pDisParam->fUse & DISUSE_REG_GEN32) && cbPtr == sizeof(uint32_t))
+ || ((pDisParam->fUse & DISUSE_REG_GEN64) && cbPtr == sizeof(uint64_t)))
+ {
+ /*
+ * Search all instructions backwards until a move to the used general register
+ * is detected with a constant using the pointer size.
+ */
+ uint32_t idxInstrStart = pFlowBb->cInstr - 1 - 1; /* Don't look at the branch. */
+ bool fCandidateFound = false;
+ bool fBranchTbl = RT_BOOL(pDisParam->fUse & DISUSE_INDEX);
+ DBGFADDRESS AddrBranchTgt;
+ do
+ {
+ fCandidateFound = dbgfR3FlowSearchMovWithConstantPtrSizeBackwards(pFlowBb, idxRegBase, cbPtr,
+ pUVM, idCpu, fFlagsDisasm,
+ &idxInstrStart, &AddrBranchTgt);
+ if (fCandidateFound)
+ {
+ /* Check that the address is not too far away from the instruction address. */
+ RTGCUINTPTR offPtr = dbgfR3FlowAddrGetDistance(&AddrBranchTgt, &pFlowBb->AddrEnd);
+ if (offPtr <= 20 * _1M)
+ {
+ /* Read the content at the address and check that it is near this basic block too. */
+ int rc = dbgfR3FlowCheckBranchTargetLocation(pThis, pFlowBb, &AddrBranchTgt, idxRegBase,
+ cbPtr, pUVM, idCpu, fBranchTbl);
+ if (RT_SUCCESS(rc))
+ break;
+ fCandidateFound = false;
+ }
+
+ if (idxInstrStart > 0)
+ idxInstrStart--;
+ }
+ } while (idxInstrStart > 0 && !fCandidateFound);
+ }
+ else
+ dbgfR3FlowBbSetError(pFlowBb, VERR_INVALID_STATE,
+ "The base register size and selected pointer size do not match (fUse=%#x cbPtr=%u)",
+ pDisParam->fUse, cbPtr);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks whether a basic block discovered through a branch table still uses the right table candidate.
+ *
+ * @returns VBox status code.
+ * @param pThis The flow control graph.
+ * @param pFlowBb The basic block causing the indirect branch.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param pDisParam The parameter from the disassembler.
+ * @param fFlagsDisasm Flags for the disassembler.
+ */
+static int dbgfR3FlowBbCheckBranchTblCandidate(PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb, PUVM pUVM,
+ VMCPUID idCpu, PDISOPPARAM pDisParam, uint32_t fFlagsDisasm)
+{
+ int rc = VINF_SUCCESS;
+
+ Assert(pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE && pFlowBb->pFlowBranchTbl);
+
+ uint32_t cbPtr = 0;
+ CPUMMODE enmMode = dbgfR3FlowGetDisasCpuMode(pUVM, idCpu, fFlagsDisasm);
+
+ switch (enmMode)
+ {
+ case CPUMMODE_REAL:
+ cbPtr = sizeof(uint16_t);
+ break;
+ case CPUMMODE_PROTECTED:
+ cbPtr = sizeof(uint32_t);
+ break;
+ case CPUMMODE_LONG:
+ cbPtr = sizeof(uint64_t);
+ break;
+ default:
+ AssertMsgFailed(("Invalid CPU mode %u\n", enmMode));
+ }
+
+ if (pDisParam->fUse & DISUSE_BASE)
+ {
+ uint8_t idxRegBase = pDisParam->Base.idxGenReg;
+
+ /* Check that the used register size and the pointer size match. */
+ if ( ((pDisParam->fUse & DISUSE_REG_GEN16) && cbPtr == sizeof(uint16_t))
+ || ((pDisParam->fUse & DISUSE_REG_GEN32) && cbPtr == sizeof(uint32_t))
+ || ((pDisParam->fUse & DISUSE_REG_GEN64) && cbPtr == sizeof(uint64_t)))
+ {
+ if (idxRegBase != pFlowBb->pFlowBranchTbl->idxGenRegBase)
+ {
+ /* Try to find the new branch table. */
+ pFlowBb->pFlowBranchTbl = NULL;
+ rc = dbgfR3FlowTryResolveIndirectBranch(pThis, pFlowBb, pUVM, idCpu, pDisParam, fFlagsDisasm);
+ }
+ /** @todo else check that the base register is not modified in this basic block. */
+ }
+ else
+ dbgfR3FlowBbSetError(pFlowBb, VERR_INVALID_STATE,
+ "The base register size and selected pointer size do not match (fUse=%#x cbPtr=%u)",
+ pDisParam->fUse, cbPtr);
+ }
+ else
+ dbgfR3FlowBbSetError(pFlowBb, VERR_INVALID_STATE,
+ "The instruction does not use a register");
+
+ return rc;
+}
+
+
+/**
+ * Processes and fills one basic block.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param pThis The control flow graph to populate.
+ * @param pFlowBb The basic block to fill.
+ * @param cbDisasmMax The maximum amount to disassemble.
+ * @param fFlags Combination of DBGF_DISAS_FLAGS_*.
+ */
+static int dbgfR3FlowBbProcess(PUVM pUVM, VMCPUID idCpu, PDBGFFLOWINT pThis, PDBGFFLOWBBINT pFlowBb,
+ uint32_t cbDisasmMax, uint32_t fFlags)
+{
+ int rc = VINF_SUCCESS;
+ uint32_t cbDisasmLeft = cbDisasmMax ? cbDisasmMax : UINT32_MAX;
+ DBGFADDRESS AddrDisasm = pFlowBb->AddrEnd;
+
+ Assert(pFlowBb->fFlags & DBGF_FLOW_BB_F_EMPTY);
+
+ /*
+ * Disassemble instruction by instruction until we get a conditional or
+ * unconditional jump or some sort of return.
+ */
+ while ( cbDisasmLeft
+ && RT_SUCCESS(rc))
+ {
+ DBGFDISSTATE DisState;
+ char szOutput[_4K];
+
+ /*
+ * Before disassembling we have to check whether the address belongs
+ * to another basic block, and stop here if it does.
+ */
+ if ( !(pFlowBb->fFlags & DBGF_FLOW_BB_F_EMPTY)
+ && dbgfR3FlowHasBbWithStartAddr(pThis, &AddrDisasm))
+ {
+ pFlowBb->AddrTarget = AddrDisasm;
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND;
+ break;
+ }
+
+ rc = dbgfR3DisasInstrStateEx(pUVM, idCpu, &AddrDisasm, fFlags,
+ &szOutput[0], sizeof(szOutput), &DisState);
+ if (RT_SUCCESS(rc))
+ {
+ if ( pThis->fFlags & DBGF_FLOW_CREATE_F_CALL_INSN_SEPARATE_BB
+ && DisState.pCurInstr->uOpcode == OP_CALL
+ && !(pFlowBb->fFlags & DBGF_FLOW_BB_F_EMPTY))
+ {
+ /*
+ * If the basic block is not empty, it is terminated here and a successor
+ * block containing the call instruction is added.
+ */
+ pFlowBb->AddrTarget = AddrDisasm;
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND;
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &AddrDisasm,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ if (RT_FAILURE(rc))
+ dbgfR3FlowBbSetError(pFlowBb, rc, "Adding successor blocks failed with %Rrc", rc);
+ break;
+ }
+
+ pFlowBb->fFlags &= ~DBGF_FLOW_BB_F_EMPTY;
+ cbDisasmLeft -= DisState.cbInstr;
+
+ if (pFlowBb->cInstr == pFlowBb->cInstrMax)
+ {
+ /* Reallocate. */
+ RTListNodeRemove(&pFlowBb->NdFlowBb);
+ PDBGFFLOWBBINT pFlowBbNew = (PDBGFFLOWBBINT)RTMemRealloc(pFlowBb,
+ RT_UOFFSETOF_DYN(DBGFFLOWBBINT, aInstr[pFlowBb->cInstrMax + 10]));
+ if (pFlowBbNew)
+ {
+ pFlowBbNew->cInstrMax += 10;
+ pFlowBb = pFlowBbNew;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ RTListAppend(&pThis->LstFlowBb, &pFlowBb->NdFlowBb);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ PDBGFFLOWBBINSTR pInstr = &pFlowBb->aInstr[pFlowBb->cInstr];
+
+ pInstr->AddrInstr = AddrDisasm;
+ pInstr->cbInstr = DisState.cbInstr;
+ pInstr->pszInstr = RTStrCacheEnter(pThis->hStrCacheInstr, &szOutput[0]);
+ pFlowBb->cInstr++;
+
+ pFlowBb->AddrEnd = AddrDisasm;
+ DBGFR3AddrAdd(&pFlowBb->AddrEnd, pInstr->cbInstr - 1);
+ DBGFR3AddrAdd(&AddrDisasm, pInstr->cbInstr);
+
+ /*
+ * Check control flow instructions and create new basic blocks
+ * marking the current one as complete.
+ */
+ if (DisState.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
+ {
+ uint16_t uOpc = DisState.pCurInstr->uOpcode;
+
+ if (uOpc == OP_CALL)
+ pThis->cCallInsns++;
+
+ if ( uOpc == OP_RETN || uOpc == OP_RETF || uOpc == OP_IRET
+ || uOpc == OP_SYSEXIT || uOpc == OP_SYSRET)
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_EXIT;
+ else if (uOpc == OP_JMP)
+ {
+ Assert(DisState.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW);
+
+ if (dbgfR3FlowBranchTargetIsIndirect(&DisState.Param1))
+ {
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND_INDIRECT_JMP;
+
+ if (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE)
+ {
+ Assert(pThis->fFlags & DBGF_FLOW_CREATE_F_TRY_RESOLVE_INDIRECT_BRANCHES);
+
+ /*
+ * This basic block was already discovered by parsing a jump table and
+ * there should be a candidate for the branch table. Check whether it uses the
+ * same branch table.
+ */
+ rc = dbgfR3FlowBbCheckBranchTblCandidate(pThis, pFlowBb, pUVM, idCpu,
+ &DisState.Param1, fFlags);
+ }
+ else
+ {
+ if (pThis->fFlags & DBGF_FLOW_CREATE_F_TRY_RESOLVE_INDIRECT_BRANCHES)
+ rc = dbgfR3FlowTryResolveIndirectBranch(pThis, pFlowBb, pUVM, idCpu,
+ &DisState.Param1, fFlags);
+ else
+ dbgfR3FlowBbSetError(pFlowBb, VERR_NOT_SUPPORTED,
+ "Detected indirect branch and resolving it not being enabled");
+ }
+ }
+ else
+ {
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND_JMP;
+
+ /* Create one new basic block with the jump target address. */
+ rc = dbgfR3FlowQueryDirectBranchTarget(pUVM, idCpu, &DisState.Param1, &pInstr->AddrInstr, pInstr->cbInstr,
+ RT_BOOL(DisState.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW),
+ &pFlowBb->AddrTarget);
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &pFlowBb->AddrTarget,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ }
+ }
+ else if (uOpc != OP_CALL)
+ {
+ Assert(DisState.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW);
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_COND;
+
+ /*
+ * Create two new basic blocks, one with the jump target address
+ * and one starting after the current instruction.
+ */
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &AddrDisasm,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3FlowQueryDirectBranchTarget(pUVM, idCpu, &DisState.Param1, &pInstr->AddrInstr, pInstr->cbInstr,
+ RT_BOOL(DisState.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW),
+ &pFlowBb->AddrTarget);
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &pFlowBb->AddrTarget,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ }
+ }
+ else if (pThis->fFlags & DBGF_FLOW_CREATE_F_CALL_INSN_SEPARATE_BB)
+ {
+ pFlowBb->enmEndType = DBGFFLOWBBENDTYPE_UNCOND;
+ pFlowBb->fFlags |= DBGF_FLOW_BB_F_CALL_INSN;
+
+ /* Add new basic block coming after the call instruction. */
+ rc = dbgfR3FlowBbSuccessorAdd(pThis, &AddrDisasm,
+ (pFlowBb->fFlags & DBGF_FLOW_BB_F_BRANCH_TABLE),
+ pFlowBb->pFlowBranchTbl);
+ if ( RT_SUCCESS(rc)
+ && !dbgfR3FlowBranchTargetIsIndirect(&DisState.Param1))
+ {
+ /* Resolve the branch target. */
+ rc = dbgfR3FlowQueryDirectBranchTarget(pUVM, idCpu, &DisState.Param1, &pInstr->AddrInstr, pInstr->cbInstr,
+ RT_BOOL(DisState.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW),
+ &pFlowBb->AddrTarget);
+ if (RT_SUCCESS(rc))
+ pFlowBb->fFlags |= DBGF_FLOW_BB_F_CALL_INSN_TARGET_KNOWN;
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ dbgfR3FlowBbSetError(pFlowBb, rc, "Adding successor blocks failed with %Rrc", rc);
+
+ /* Quit disassembling. */
+ if ( ( uOpc != OP_CALL
+ || (pThis->fFlags & DBGF_FLOW_CREATE_F_CALL_INSN_SEPARATE_BB))
+ || RT_FAILURE(rc))
+ break;
+ }
+ }
+ else
+ dbgfR3FlowBbSetError(pFlowBb, rc, "Increasing basic block failed with %Rrc", rc);
+ }
+ else
+ dbgfR3FlowBbSetError(pFlowBb, rc, "Disassembling the instruction failed with %Rrc", rc);
+ }
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Populate all empty basic blocks.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param pThis The control flow graph to populate.
+ * @param cbDisasmMax The maximum amount to disassemble.
+ * @param fFlags Combination of DBGF_DISAS_FLAGS_*.
+ */
+static int dbgfR3FlowPopulate(PUVM pUVM, VMCPUID idCpu, PDBGFFLOWINT pThis, uint32_t cbDisasmMax, uint32_t fFlags)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWBBINT pFlowBb = dbgfR3FlowGetUnpopulatedBb(pThis);
+
+ while (pFlowBb != NULL)
+ {
+ rc = dbgfR3FlowBbProcess(pUVM, idCpu, pThis, pFlowBb, cbDisasmMax, fFlags);
+ if (RT_FAILURE(rc))
+ break;
+
+ pFlowBb = dbgfR3FlowGetUnpopulatedBb(pThis);
+ }
+
+ return rc;
+}
+
+/**
+ * Creates a new control flow graph from the given start address.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu CPU id for disassembling.
+ * @param pAddressStart Where to start creating the control flow graph.
+ * @param cbDisasmMax Limit the amount of bytes to disassemble, 0 for no limit.
+ * @param fFlagsFlow Combination of DBGF_FLOW_CREATE_F_* to control the creation of the flow graph.
+ * @param fFlagsDisasm Combination of DBGF_DISAS_FLAGS_* controlling the style of the disassembled
+ * instructions.
+ * @param phFlow Where to store the handle to the control flow graph on success.
+ */
+VMMR3DECL(int) DBGFR3FlowCreate(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddressStart, uint32_t cbDisasmMax,
+ uint32_t fFlagsFlow, uint32_t fFlagsDisasm, PDBGFFLOW phFlow)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pAddressStart, VERR_INVALID_POINTER);
+ AssertReturn(!(fFlagsDisasm & ~DBGF_DISAS_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn((fFlagsDisasm & DBGF_DISAS_FLAGS_MODE_MASK) <= DBGF_DISAS_FLAGS_64BIT_MODE, VERR_INVALID_PARAMETER);
+
+ /* Create the control flow graph container. */
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWINT pThis = (PDBGFFLOWINT)RTMemAllocZ(sizeof(DBGFFLOWINT));
+ if (RT_LIKELY(pThis))
+ {
+ rc = RTStrCacheCreate(&pThis->hStrCacheInstr, "DBGFFLOW");
+ if (RT_SUCCESS(rc))
+ {
+ pThis->cRefs = 1;
+ pThis->cRefsBb = 0;
+ pThis->cBbs = 0;
+ pThis->cBranchTbls = 0;
+ pThis->cCallInsns = 0;
+ pThis->fFlags = fFlagsFlow;
+ RTListInit(&pThis->LstFlowBb);
+ RTListInit(&pThis->LstBranchTbl);
+ /* Create the entry basic block and start the work. */
+
+ PDBGFFLOWBBINT pFlowBb = dbgfR3FlowBbCreate(pThis, pAddressStart, DBGF_FLOW_BB_F_ENTRY, 10);
+ if (RT_LIKELY(pFlowBb))
+ {
+ dbgfR3FlowLink(pThis, pFlowBb);
+ rc = dbgfR3FlowPopulate(pUVM, idCpu, pThis, cbDisasmMax, fFlagsDisasm);
+ if (RT_SUCCESS(rc))
+ {
+ *phFlow = pThis;
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ ASMAtomicDecU32(&pThis->cRefs);
+ dbgfR3FlowDestroy(pThis);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
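+
+/*
+ * A minimal usage sketch for DBGFR3FlowCreate; not part of the sources, the
+ * variable names and the flat start address GCPtrFunc are assumptions and
+ * error handling is trimmed:
+ *
+ * @code
+ * DBGFADDRESS AddrStart;
+ * DBGFR3AddrFromFlat(pUVM, &AddrStart, GCPtrFunc);
+ *
+ * // idCpu = 0, no disassembly limit (cbDisasmMax = 0).
+ * DBGFFLOW hFlow;
+ * int rc = DBGFR3FlowCreate(pUVM, 0, &AddrStart, 0,
+ * DBGF_FLOW_CREATE_F_TRY_RESOLVE_INDIRECT_BRANCHES,
+ * DBGF_DISAS_FLAGS_DEFAULT_MODE, &hFlow);
+ * if (RT_SUCCESS(rc))
+ * {
+ * // ... inspect the graph ...
+ * DBGFR3FlowRelease(hFlow);
+ * }
+ * @endcode
+ */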
+
+
+/**
+ * Retains the control flow graph handle.
+ *
+ * @returns Current reference count.
+ * @param hFlow The control flow graph handle to retain.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowRetain(DBGFFLOW hFlow)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ return cRefs;
+}
+
+
+/**
+ * Releases the control flow graph handle.
+ *
+ * @returns Current reference count, on 0 the control flow graph will be destroyed.
+ * @param hFlow The control flow graph handle to release.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowRelease(DBGFFLOW hFlow)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ if (!pThis)
+ return 0;
+ AssertPtrReturn(pThis, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ if (cRefs == 0)
+ dbgfR3FlowDestroy(pThis);
+ return cRefs;
+}
+
+
+/**
+ * Queries the basic block denoting the entry point into the control flow graph.
+ *
+ * @returns VBox status code.
+ * @param hFlow The control flow graph handle.
+ * @param phFlowBb Where to store the basic block handle on success.
+ */
+VMMR3DECL(int) DBGFR3FlowQueryStartBb(DBGFFLOW hFlow, PDBGFFLOWBB phFlowBb)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pThis->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ if (pFlowBb->fFlags & DBGF_FLOW_BB_F_ENTRY)
+ {
+ *phFlowBb = pFlowBb;
+ return VINF_SUCCESS;
+ }
+ }
+
+ AssertFailed(); /* Should never get here. */
+ return VERR_INTERNAL_ERROR;
+}
+
+
+/**
+ * Queries a basic block in the given control flow graph which covers the given
+ * address.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if there is no basic block intersecting with the address.
+ * @param hFlow The control flow graph handle.
+ * @param pAddr The address to look for.
+ * @param phFlowBb Where to store the basic block handle on success.
+ */
+VMMR3DECL(int) DBGFR3FlowQueryBbByAddress(DBGFFLOW hFlow, PDBGFADDRESS pAddr, PDBGFFLOWBB phFlowBb)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertPtrReturn(pAddr, VERR_INVALID_POINTER);
+ AssertPtrReturn(phFlowBb, VERR_INVALID_POINTER);
+
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pThis->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ if (dbgfR3FlowAddrIntersect(pFlowBb, pAddr))
+ {
+ DBGFR3FlowBbRetain(pFlowBb);
+ *phFlowBb = pFlowBb;
+ return VINF_SUCCESS;
+ }
+ }
+
+ return VERR_NOT_FOUND;
+}
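+
+/*
+ * A short lookup sketch; hFlow and the address in Addr are assumed to exist.
+ * The returned basic block is retained and must be released again:
+ *
+ * @code
+ * DBGFFLOWBB hFlowBb;
+ * int rc = DBGFR3FlowQueryBbByAddress(hFlow, &Addr, &hFlowBb);
+ * if (RT_SUCCESS(rc))
+ * {
+ * // ... inspect the basic block ...
+ * DBGFR3FlowBbRelease(hFlowBb);
+ * }
+ * @endcode
+ */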
+
+
+/**
+ * Queries a branch table in the given control flow graph by the given address.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if there is no branch table with the given address.
+ * @param hFlow The control flow graph handle.
+ * @param pAddr The address of the branch table.
+ * @param phFlowBranchTbl Where to store the handle to branch table on success.
+ *
+ * @note Call DBGFR3FlowBranchTblRelease() when the handle is not required anymore.
+ */
+VMMR3DECL(int) DBGFR3FlowQueryBranchTblByAddress(DBGFFLOW hFlow, PDBGFADDRESS pAddr, PDBGFFLOWBRANCHTBL phFlowBranchTbl)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertPtrReturn(pAddr, VERR_INVALID_POINTER);
+ AssertPtrReturn(phFlowBranchTbl, VERR_INVALID_POINTER);
+
+ PDBGFFLOWBRANCHTBLINT pBranchTbl = dbgfR3FlowBranchTblFindByAddr(pThis, pAddr);
+ if (pBranchTbl)
+ {
+ DBGFR3FlowBranchTblRetain(pBranchTbl);
+ *phFlowBranchTbl = pBranchTbl;
+ return VINF_SUCCESS;
+ }
+
+ return VERR_NOT_FOUND;
+}
+
+
+/**
+ * Returns the number of basic blocks inside the control flow graph.
+ *
+ * @returns Number of basic blocks.
+ * @param hFlow The control flow graph handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowGetBbCount(DBGFFLOW hFlow)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, 0);
+
+ return pThis->cBbs;
+}
+
+
+/**
+ * Returns the number of branch tables inside the control flow graph.
+ *
+ * @returns Number of branch tables.
+ * @param hFlow The control flow graph handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowGetBranchTblCount(DBGFFLOW hFlow)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, 0);
+
+ return pThis->cBranchTbls;
+}
+
+
+/**
+ * Returns the number of call instructions encountered in the given
+ * control flow graph.
+ *
+ * @returns Number of call instructions.
+ * @param hFlow The control flow graph handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowGetCallInsnCount(DBGFFLOW hFlow)
+{
+ PDBGFFLOWINT pThis = hFlow;
+ AssertPtrReturn(pThis, 0);
+
+ return pThis->cCallInsns;
+}
+
+
+/**
+ * Retains the basic block handle.
+ *
+ * @returns Current reference count.
+ * @param hFlowBb The basic block handle to retain.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBbRetain(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pFlowBb->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p %d\n", cRefs, pFlowBb, pFlowBb->enmEndType));
+ return cRefs;
+}
+
+
+/**
+ * Releases the basic block handle.
+ *
+ * @returns Current reference count, on 0 the basic block will be destroyed.
+ * @param hFlowBb The basic block handle to release.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBbRelease(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ if (!pFlowBb)
+ return 0;
+
+ return dbgfR3FlowBbReleaseInt(pFlowBb, true /* fMayDestroyFlow */);
+}
+
+
+/**
+ * Returns the start address of the basic block.
+ *
+ * @returns Pointer to DBGF address containing the start address of the basic block.
+ * @param hFlowBb The basic block handle.
+ * @param pAddrStart Where to store the start address of the basic block.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBbGetStartAddress(DBGFFLOWBB hFlowBb, PDBGFADDRESS pAddrStart)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, NULL);
+ AssertPtrReturn(pAddrStart, NULL);
+
+ *pAddrStart = pFlowBb->AddrStart;
+ return pAddrStart;
+}
+
+
+/**
+ * Returns the end address of the basic block (inclusive).
+ *
+ * @returns Pointer to DBGF address containing the end address of the basic block.
+ * @param hFlowBb The basic block handle.
+ * @param pAddrEnd Where to store the end address of the basic block.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBbGetEndAddress(DBGFFLOWBB hFlowBb, PDBGFADDRESS pAddrEnd)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, NULL);
+ AssertPtrReturn(pAddrEnd, NULL);
+
+ *pAddrEnd = pFlowBb->AddrEnd;
+ return pAddrEnd;
+}
+
+
+/**
+ * Returns the address the last instruction in the basic block branches to.
+ *
+ * @returns Pointer to DBGF address containing the branch address of the basic block.
+ * @param hFlowBb The basic block handle.
+ * @param pAddrTarget Where to store the branch address of the basic block.
+ *
+ * @note This is only valid for unconditional or conditional branches, or for a basic block
+ * containing only a call instruction when DBGF_FLOW_CREATE_F_CALL_INSN_SEPARATE_BB was given
+ * during creation and the branch target could be deduced as indicated by the DBGF_FLOW_BB_F_CALL_INSN_TARGET_KNOWN
+ * flag for the basic block. This method will assert for every other basic block type.
+ * @note For indirect unconditional branches using a branch table this will return the start address
+ * of the branch table.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBbGetBranchAddress(DBGFFLOWBB hFlowBb, PDBGFADDRESS pAddrTarget)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, NULL);
+ AssertPtrReturn(pAddrTarget, NULL);
+ AssertReturn( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_JMP
+ || pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_COND
+ || pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_INDIRECT_JMP
+ || ( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND
+ && (pFlowBb->fFlags & DBGF_FLOW_BB_F_CALL_INSN_TARGET_KNOWN)),
+ NULL);
+
+ if ( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_INDIRECT_JMP
+ && pFlowBb->pFlowBranchTbl)
+ *pAddrTarget = pFlowBb->pFlowBranchTbl->AddrStart;
+ else
+ *pAddrTarget = pFlowBb->AddrTarget;
+ return pAddrTarget;
+}
+
+
+/**
+ * Returns the address of the block following this one in the instruction
+ * stream (usually end address + 1).
+ *
+ * @returns Pointer to DBGF address containing the following address of the basic block.
+ * @param hFlowBb The basic block handle.
+ * @param pAddrFollow Where to store the following address of the basic block.
+ *
+ * @note This is only valid for conditional branches and if the last instruction in the
+ * given basic block doesn't change the control flow but the blocks were split
+ * because the successor is referenced by multiple other blocks as an entry point.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBbGetFollowingAddress(DBGFFLOWBB hFlowBb, PDBGFADDRESS pAddrFollow)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, NULL);
+ AssertPtrReturn(pAddrFollow, NULL);
+ AssertReturn( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND
+ || pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_COND,
+ NULL);
+
+ *pAddrFollow = pFlowBb->AddrEnd;
+ DBGFR3AddrAdd(pAddrFollow, 1);
+ return pAddrFollow;
+}
+
+
+/**
+ * Returns the type of the last instruction in the basic block.
+ *
+ * @returns Last instruction type.
+ * @param hFlowBb The basic block handle.
+ */
+VMMR3DECL(DBGFFLOWBBENDTYPE) DBGFR3FlowBbGetType(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, DBGFFLOWBBENDTYPE_INVALID);
+
+ return pFlowBb->enmEndType;
+}
+
+
+/**
+ * Get the number of instructions contained in the basic block.
+ *
+ * @returns Number of instructions in the basic block.
+ * @param hFlowBb The basic block handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBbGetInstrCount(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, 0);
+
+ return pFlowBb->cInstr;
+}
+
+
+/**
+ * Get flags for the given basic block.
+ *
+ * @returns Combination of DBGF_FLOW_BB_F_*
+ * @param hFlowBb The basic block handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBbGetFlags(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, 0);
+
+ return pFlowBb->fFlags;
+}
+
+
+/**
+ * Queries the branch table used if the given basic block ends with an indirect branch
+ * and has a branch table referenced.
+ *
+ * @returns VBox status code.
+ * @param hFlowBb The basic block handle.
+ * @param phBranchTbl Where to store the branch table handle on success.
+ *
+ * @note Release the branch table reference with DBGFR3FlowBranchTblRelease() when not required
+ * anymore.
+ */
+VMMR3DECL(int) DBGFR3FlowBbQueryBranchTbl(DBGFFLOWBB hFlowBb, PDBGFFLOWBRANCHTBL phBranchTbl)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, VERR_INVALID_HANDLE);
+ AssertReturn(pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_INDIRECT_JMP, VERR_INVALID_STATE);
+ AssertPtrReturn(pFlowBb->pFlowBranchTbl, VERR_INVALID_STATE);
+ AssertPtrReturn(phBranchTbl, VERR_INVALID_POINTER);
+
+ DBGFR3FlowBranchTblRetain(pFlowBb->pFlowBranchTbl);
+ *phBranchTbl = pFlowBb->pFlowBranchTbl;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Returns the error status and message if the given basic block has an error.
+ *
+ * @returns VBox status code of the error for the basic block.
+ * @param hFlowBb The basic block handle.
+ * @param ppszErr Where to store the pointer to the error message - optional.
+ */
+VMMR3DECL(int) DBGFR3FlowBbQueryError(DBGFFLOWBB hFlowBb, const char **ppszErr)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, VERR_INVALID_HANDLE);
+
+ if (ppszErr)
+ *ppszErr = pFlowBb->pszErr;
+
+ return pFlowBb->rcError;
+}
+
+
+/**
+ * Queries an instruction in the basic block, returning its address, size and disassembly string.
+ *
+ * @returns VBox status code.
+ * @param hFlowBb The basic block handle.
+ * @param idxInstr The instruction to query.
+ * @param pAddrInstr Where to store the guest instruction address on success, optional.
+ * @param pcbInstr Where to store the instruction size on success, optional.
+ * @param ppszInstr Where to store the pointer to the disassembled instruction string, optional.
+ */
+VMMR3DECL(int) DBGFR3FlowBbQueryInstr(DBGFFLOWBB hFlowBb, uint32_t idxInstr, PDBGFADDRESS pAddrInstr,
+ uint32_t *pcbInstr, const char **ppszInstr)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, VERR_INVALID_POINTER);
+ AssertReturn(idxInstr < pFlowBb->cInstr, VERR_INVALID_PARAMETER);
+
+ if (pAddrInstr)
+ *pAddrInstr = pFlowBb->aInstr[idxInstr].AddrInstr;
+ if (pcbInstr)
+ *pcbInstr = pFlowBb->aInstr[idxInstr].cbInstr;
+ if (ppszInstr)
+ *ppszInstr = pFlowBb->aInstr[idxInstr].pszInstr;
+
+ return VINF_SUCCESS;
+}
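+
+/*
+ * A sketch dumping all instructions of a basic block; hFlowBb is an assumed
+ * valid handle and the returned strings remain owned by the graph:
+ *
+ * @code
+ * uint32_t cInstr = DBGFR3FlowBbGetInstrCount(hFlowBb);
+ * for (uint32_t i = 0; i < cInstr; i++)
+ * {
+ * DBGFADDRESS AddrInstr;
+ * const char *pszInstr = NULL;
+ * int rc = DBGFR3FlowBbQueryInstr(hFlowBb, i, &AddrInstr, NULL, &pszInstr);
+ * if (RT_SUCCESS(rc))
+ * RTPrintf("%RGv: %s\n", AddrInstr.FlatPtr, pszInstr);
+ * }
+ * @endcode
+ */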
+
+
+/**
+ * Queries the successors of the basic block.
+ *
+ * @returns VBox status code.
+ * @param hFlowBb The basic block handle.
+ * @param phFlowBbFollow Where to store the handle to the basic block following
+ * this one (optional).
+ * @param phFlowBbTarget Where to store the handle to the basic block being the
+ * branch target for this one (optional).
+ */
+VMMR3DECL(int) DBGFR3FlowBbQuerySuccessors(DBGFFLOWBB hFlowBb, PDBGFFLOWBB phFlowBbFollow, PDBGFFLOWBB phFlowBbTarget)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, VERR_INVALID_POINTER);
+
+ if ( phFlowBbFollow
+ && ( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND
+ || pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_COND))
+ {
+ DBGFADDRESS AddrStart = pFlowBb->AddrEnd;
+ DBGFR3AddrAdd(&AddrStart, 1);
+ int rc = DBGFR3FlowQueryBbByAddress(pFlowBb->pFlow, &AddrStart, phFlowBbFollow);
+ AssertRC(rc);
+ }
+
+ if ( phFlowBbTarget
+ && ( pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_JMP
+ || pFlowBb->enmEndType == DBGFFLOWBBENDTYPE_COND))
+ {
+ int rc = DBGFR3FlowQueryBbByAddress(pFlowBb->pFlow, &pFlowBb->AddrTarget, phFlowBbTarget);
+ AssertRC(rc);
+ }
+
+ return VINF_SUCCESS;
+}
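+
+/*
+ * A sketch walking both successors; hFlowBb is an assumed valid handle. The
+ * output handles must be initialized to NULL because an output is left
+ * untouched when the block end type has no such successor, and the returned
+ * handles are retained:
+ *
+ * @code
+ * DBGFFLOWBB hFlowBbFollow = NULL;
+ * DBGFFLOWBB hFlowBbTarget = NULL;
+ * int rc = DBGFR3FlowBbQuerySuccessors(hFlowBb, &hFlowBbFollow, &hFlowBbTarget);
+ * if (RT_SUCCESS(rc))
+ * {
+ * if (hFlowBbFollow)
+ * DBGFR3FlowBbRelease(hFlowBbFollow);
+ * if (hFlowBbTarget)
+ * DBGFR3FlowBbRelease(hFlowBbTarget);
+ * }
+ * @endcode
+ */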
+
+
+/**
+ * Returns the number of basic blocks referencing this basic block as a target.
+ *
+ * @returns Number of other basic blocks referencing this one.
+ * @param hFlowBb The basic block handle.
+ *
+ * @note If the given basic block references itself (loop, etc.) this will be counted as well.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBbGetRefBbCount(DBGFFLOWBB hFlowBb)
+{
+ PDBGFFLOWBBINT pFlowBb = hFlowBb;
+ AssertPtrReturn(pFlowBb, 0);
+
+ uint32_t cRefsBb = 0;
+ PDBGFFLOWBBINT pFlowBbCur;
+ RTListForEach(&pFlowBb->pFlow->LstFlowBb, pFlowBbCur, DBGFFLOWBBINT, NdFlowBb)
+ {
+ if (pFlowBbCur->fFlags & DBGF_FLOW_BB_F_INCOMPLETE_ERR)
+ continue;
+
+ if ( pFlowBbCur->enmEndType == DBGFFLOWBBENDTYPE_UNCOND
+ || pFlowBbCur->enmEndType == DBGFFLOWBBENDTYPE_COND)
+ {
+ /* pFlowBbCur falls through to the address right after its end. */
+ DBGFADDRESS AddrFollow = pFlowBbCur->AddrEnd;
+ DBGFR3AddrAdd(&AddrFollow, 1);
+ if (dbgfR3FlowAddrEqual(&AddrFollow, &pFlowBb->AddrStart))
+ cRefsBb++;
+ }
+
+ if ( ( pFlowBbCur->enmEndType == DBGFFLOWBBENDTYPE_UNCOND_JMP
+ || pFlowBbCur->enmEndType == DBGFFLOWBBENDTYPE_COND)
+ && dbgfR3FlowAddrEqual(&pFlowBbCur->AddrTarget, &pFlowBb->AddrStart))
+ cRefsBb++;
+ }
+ return cRefsBb;
+}
+
+
+/**
+ * Returns the basic block handles referencing the given basic block.
+ *
+ * @returns VBox status code.
+ * @retval VERR_BUFFER_OVERFLOW if the array can't hold all the basic blocks.
+ * @param hFlowBb The basic block handle.
+ * @param paFlowBbRef Pointer to the array containing the referencing basic block handles on success.
+ * @param cRef Number of entries in the given array.
+ */
+VMMR3DECL(int) DBGFR3FlowBbGetRefBb(DBGFFLOWBB hFlowBb, PDBGFFLOWBB paFlowBbRef, uint32_t cRef)
+{
+ RT_NOREF3(hFlowBb, paFlowBbRef, cRef);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Retains a reference for the given control flow graph branch table.
+ *
+ * @returns new reference count.
+ * @param hFlowBranchTbl The branch table handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBranchTblRetain(DBGFFLOWBRANCHTBL hFlowBranchTbl)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ AssertPtrReturn(pFlowBranchTbl, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pFlowBranchTbl->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pFlowBranchTbl));
+ return cRefs;
+}
+
+
+/**
+ * Releases a given branch table handle.
+ *
+ * @returns the new reference count of the given branch table, on 0 it is destroyed.
+ * @param hFlowBranchTbl The branch table handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBranchTblRelease(DBGFFLOWBRANCHTBL hFlowBranchTbl)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ if (!pFlowBranchTbl)
+ return 0;
+ AssertPtrReturn(pFlowBranchTbl, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pFlowBranchTbl->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pFlowBranchTbl));
+ if (cRefs == 0)
+ dbgfR3FlowBranchTblDestroy(pFlowBranchTbl);
+ return cRefs;
+}
+
+
+/**
+ * Return the number of slots the branch table has.
+ *
+ * @returns Number of slots in the branch table.
+ * @param hFlowBranchTbl The branch table handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowBranchTblGetSlots(DBGFFLOWBRANCHTBL hFlowBranchTbl)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ AssertPtrReturn(pFlowBranchTbl, 0);
+
+ return pFlowBranchTbl->cSlots;
+}
+
+
+/**
+ * Returns the start address of the branch table in the guest.
+ *
+ * @returns Pointer to start address of the branch table (pAddrStart).
+ * @param hFlowBranchTbl The branch table handle.
+ * @param pAddrStart Where to store the branch table address.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBranchTblGetStartAddress(DBGFFLOWBRANCHTBL hFlowBranchTbl, PDBGFADDRESS pAddrStart)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ AssertPtrReturn(pFlowBranchTbl, NULL);
+ AssertPtrReturn(pAddrStart, NULL);
+
+ *pAddrStart = pFlowBranchTbl->AddrStart;
+ return pAddrStart;
+}
+
+
+/**
+ * Returns one address in the branch table at the given slot index.
+ *
+ * @returns Pointer to the address at the given slot in the given branch table.
+ * @param hFlowBranchTbl The branch table handle.
+ * @param idxSlot The slot the address should be returned from.
+ * @param pAddrSlot Where to store the address.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowBranchTblGetAddrAtSlot(DBGFFLOWBRANCHTBL hFlowBranchTbl, uint32_t idxSlot, PDBGFADDRESS pAddrSlot)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ AssertPtrReturn(pFlowBranchTbl, NULL);
+ AssertPtrReturn(pAddrSlot, NULL);
+ AssertReturn(idxSlot < pFlowBranchTbl->cSlots, NULL);
+
+ *pAddrSlot = pFlowBranchTbl->aAddresses[idxSlot];
+ return pAddrSlot;
+}
+
+
+/**
+ * Query all addresses contained in the given branch table.
+ *
+ * @returns VBox status code.
+ * @retval VERR_BUFFER_OVERFLOW if there is not enough space in the array to hold all addresses.
+ * @param hFlowBranchTbl The branch table handle.
+ * @param paAddrs Where to store the addresses on success.
+ * @param cAddrs Number of entries the array can hold.
+ */
+VMMR3DECL(int) DBGFR3FlowBranchTblQueryAddresses(DBGFFLOWBRANCHTBL hFlowBranchTbl, PDBGFADDRESS paAddrs, uint32_t cAddrs)
+{
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl = hFlowBranchTbl;
+ AssertPtrReturn(pFlowBranchTbl, VERR_INVALID_HANDLE);
+ AssertPtrReturn(paAddrs, VERR_INVALID_POINTER);
+ AssertReturn(cAddrs > 0, VERR_INVALID_PARAMETER);
+
+ if (cAddrs < pFlowBranchTbl->cSlots)
+ return VERR_BUFFER_OVERFLOW;
+
+ memcpy(paAddrs, &pFlowBranchTbl->aAddresses[0], pFlowBranchTbl->cSlots * sizeof(DBGFADDRESS));
+ return VINF_SUCCESS;
+}
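+
+/*
+ * A sketch querying all slot addresses of a branch table; hBranchTbl is an
+ * assumed valid handle. Sizing the array from the slot count avoids
+ * VERR_BUFFER_OVERFLOW:
+ *
+ * @code
+ * uint32_t cSlots = DBGFR3FlowBranchTblGetSlots(hBranchTbl);
+ * PDBGFADDRESS paAddrs = (PDBGFADDRESS)RTMemAllocZ(cSlots * sizeof(DBGFADDRESS));
+ * if (paAddrs)
+ * {
+ * int rc = DBGFR3FlowBranchTblQueryAddresses(hBranchTbl, paAddrs, cSlots);
+ * // ... use the addresses on success ...
+ * RTMemFree(paAddrs);
+ * }
+ * @endcode
+ */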
+
+
+/**
+ * @callback_method_impl{FNRTSORTCMP}
+ */
+static DECLCALLBACK(int) dbgfR3FlowItSortCmp(void const *pvElement1, void const *pvElement2, void *pvUser)
+{
+ PDBGFFLOWITORDER penmOrder = (PDBGFFLOWITORDER)pvUser;
+ PDBGFFLOWBBINT pFlowBb1 = *(PDBGFFLOWBBINT *)pvElement1;
+ PDBGFFLOWBBINT pFlowBb2 = *(PDBGFFLOWBBINT *)pvElement2;
+
+ if (dbgfR3FlowAddrEqual(&pFlowBb1->AddrStart, &pFlowBb2->AddrStart))
+ return 0;
+
+ if (*penmOrder == DBGFFLOWITORDER_BY_ADDR_LOWEST_FIRST)
+ {
+ if (dbgfR3FlowAddrLower(&pFlowBb1->AddrStart, &pFlowBb2->AddrStart))
+ return -1;
+ else
+ return 1;
+ }
+ else
+ {
+ if (dbgfR3FlowAddrLower(&pFlowBb1->AddrStart, &pFlowBb2->AddrStart))
+ return 1;
+ else
+ return -1;
+ }
+}
+
+
+/**
+ * Creates a new iterator for the given control flow graph.
+ *
+ * @returns VBox status code.
+ * @param hFlow The control flow graph handle.
+ * @param enmOrder The order in which the basic blocks are enumerated.
+ * @param phFlowIt Where to store the handle to the iterator on success.
+ */
+VMMR3DECL(int) DBGFR3FlowItCreate(DBGFFLOW hFlow, DBGFFLOWITORDER enmOrder, PDBGFFLOWIT phFlowIt)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWINT pFlow = hFlow;
+ AssertPtrReturn(pFlow, VERR_INVALID_POINTER);
+ AssertPtrReturn(phFlowIt, VERR_INVALID_POINTER);
+ AssertReturn(enmOrder > DBGFFLOWITORDER_INVALID && enmOrder < DBGFFLOWITORDER_BREADTH_FIRST,
+ VERR_INVALID_PARAMETER);
+ AssertReturn(enmOrder < DBGFFLOWITORDER_DEPTH_FRIST, VERR_NOT_IMPLEMENTED); /** @todo */
+
+ PDBGFFLOWITINT pIt = (PDBGFFLOWITINT)RTMemAllocZ(RT_UOFFSETOF_DYN(DBGFFLOWITINT, apBb[pFlow->cBbs]));
+ if (RT_LIKELY(pIt))
+ {
+ DBGFR3FlowRetain(hFlow);
+ pIt->pFlow = pFlow;
+ pIt->idxBbNext = 0;
+ /* Fill the list and then sort. */
+ uint32_t idxBb = 0;
+ PDBGFFLOWBBINT pFlowBb;
+ RTListForEach(&pFlow->LstFlowBb, pFlowBb, DBGFFLOWBBINT, NdFlowBb)
+ {
+ DBGFR3FlowBbRetain(pFlowBb);
+ pIt->apBb[idxBb++] = pFlowBb;
+ }
+
+ /* Sort the blocks by address. */
+ RTSortShell(&pIt->apBb[0], pFlow->cBbs, sizeof(PDBGFFLOWBBINT), dbgfR3FlowItSortCmp, &enmOrder);
+
+ *phFlowIt = pIt;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Destroys a given control flow graph iterator.
+ *
+ * @param hFlowIt The control flow graph iterator handle.
+ */
+VMMR3DECL(void) DBGFR3FlowItDestroy(DBGFFLOWIT hFlowIt)
+{
+ PDBGFFLOWITINT pIt = hFlowIt;
+ AssertPtrReturnVoid(pIt);
+
+ for (unsigned i = 0; i < pIt->pFlow->cBbs; i++)
+ DBGFR3FlowBbRelease(pIt->apBb[i]);
+
+ DBGFR3FlowRelease(pIt->pFlow);
+ RTMemFree(pIt);
+}
+
+
+/**
+ * Returns the next basic block in the iterator or NULL if there is no
+ * basic block left.
+ *
+ * @returns Handle to the next basic block in the iterator or NULL if the end
+ * was reached.
+ * @param hFlowIt The iterator handle.
+ *
+ * @note If a valid handle is returned it must be released with DBGFR3FlowBbRelease()
+ * when not required anymore.
+ */
+VMMR3DECL(DBGFFLOWBB) DBGFR3FlowItNext(DBGFFLOWIT hFlowIt)
+{
+ PDBGFFLOWITINT pIt = hFlowIt;
+ AssertPtrReturn(pIt, NULL);
+
+ PDBGFFLOWBBINT pFlowBb = NULL;
+ if (pIt->idxBbNext < pIt->pFlow->cBbs)
+ {
+ pFlowBb = pIt->apBb[pIt->idxBbNext++];
+ DBGFR3FlowBbRetain(pFlowBb);
+ }
+
+ return pFlowBb;
+}
+
+
+/**
+ * Resets the given iterator to the beginning.
+ *
+ * @returns VBox status code.
+ * @param hFlowIt The iterator handle.
+ */
+VMMR3DECL(int) DBGFR3FlowItReset(DBGFFLOWIT hFlowIt)
+{
+ PDBGFFLOWITINT pIt = hFlowIt;
+ AssertPtrReturn(pIt, VERR_INVALID_HANDLE);
+
+ pIt->idxBbNext = 0;
+ return VINF_SUCCESS;
+}
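+
+/*
+ * A sketch enumerating all basic blocks of a graph in ascending address
+ * order; hFlow is an assumed valid handle and every block handle returned
+ * by the iterator must be released:
+ *
+ * @code
+ * DBGFFLOWIT hFlowIt;
+ * int rc = DBGFR3FlowItCreate(hFlow, DBGFFLOWITORDER_BY_ADDR_LOWEST_FIRST, &hFlowIt);
+ * if (RT_SUCCESS(rc))
+ * {
+ * DBGFFLOWBB hFlowBb = DBGFR3FlowItNext(hFlowIt);
+ * while (hFlowBb)
+ * {
+ * // ... process the basic block ...
+ * DBGFR3FlowBbRelease(hFlowBb);
+ * hFlowBb = DBGFR3FlowItNext(hFlowIt);
+ * }
+ * DBGFR3FlowItDestroy(hFlowIt);
+ * }
+ * @endcode
+ */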
+
+
+/**
+ * @callback_method_impl{FNRTSORTCMP}
+ */
+static DECLCALLBACK(int) dbgfR3FlowBranchTblItSortCmp(void const *pvElement1, void const *pvElement2, void *pvUser)
+{
+ PDBGFFLOWITORDER penmOrder = (PDBGFFLOWITORDER)pvUser;
+ PDBGFFLOWBRANCHTBLINT pTbl1 = *(PDBGFFLOWBRANCHTBLINT *)pvElement1;
+ PDBGFFLOWBRANCHTBLINT pTbl2 = *(PDBGFFLOWBRANCHTBLINT *)pvElement2;
+
+ if (dbgfR3FlowAddrEqual(&pTbl1->AddrStart, &pTbl2->AddrStart))
+ return 0;
+
+ if (*penmOrder == DBGFFLOWITORDER_BY_ADDR_LOWEST_FIRST)
+ {
+ if (dbgfR3FlowAddrLower(&pTbl1->AddrStart, &pTbl2->AddrStart))
+ return -1;
+ else
+ return 1;
+ }
+ else
+ {
+ if (dbgfR3FlowAddrLower(&pTbl1->AddrStart, &pTbl2->AddrStart))
+ return 1;
+ else
+ return -1;
+ }
+}
+
+
+/**
+ * Creates a new branch table iterator for the given control flow graph.
+ *
+ * @returns VBox status code.
+ * @param hFlow The control flow graph handle.
+ * @param enmOrder The order in which the branch tables are enumerated.
+ * @param phFlowBranchTblIt Where to store the handle to the iterator on success.
+ */
+VMMR3DECL(int) DBGFR3FlowBranchTblItCreate(DBGFFLOW hFlow, DBGFFLOWITORDER enmOrder,
+ PDBGFFLOWBRANCHTBLIT phFlowBranchTblIt)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWINT pFlow = hFlow;
+ AssertPtrReturn(pFlow, VERR_INVALID_POINTER);
+ AssertPtrReturn(phFlowBranchTblIt, VERR_INVALID_POINTER);
+ AssertReturn(enmOrder > DBGFFLOWITORDER_INVALID && enmOrder < DBGFFLOWITORDER_BREADTH_FIRST,
+ VERR_INVALID_PARAMETER);
+ AssertReturn(enmOrder < DBGFFLOWITORDER_DEPTH_FRIST, VERR_NOT_SUPPORTED);
+
+ PDBGFFLOWBRANCHTBLITINT pIt = (PDBGFFLOWBRANCHTBLITINT)RTMemAllocZ(RT_UOFFSETOF_DYN(DBGFFLOWBRANCHTBLITINT,
+ apBranchTbl[pFlow->cBranchTbls]));
+ if (RT_LIKELY(pIt))
+ {
+ DBGFR3FlowRetain(hFlow);
+ pIt->pFlow = pFlow;
+ pIt->idxTblNext = 0;
+ /* Fill the list and then sort. */
+ uint32_t idxTbl = 0;
+ PDBGFFLOWBRANCHTBLINT pFlowBranchTbl;
+ RTListForEach(&pFlow->LstBranchTbl, pFlowBranchTbl, DBGFFLOWBRANCHTBLINT, NdBranchTbl)
+ {
+ DBGFR3FlowBranchTblRetain(pFlowBranchTbl);
+ pIt->apBranchTbl[idxTbl++] = pFlowBranchTbl;
+ }
+
+ /* Sort the tables by address. */
+ RTSortShell(&pIt->apBranchTbl[0], pFlow->cBranchTbls, sizeof(PDBGFFLOWBRANCHTBLINT), dbgfR3FlowBranchTblItSortCmp, &enmOrder);
+
+ *phFlowBranchTblIt = pIt;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Destroys a given control flow graph branch table iterator.
+ *
+ * @param hFlowBranchTblIt The control flow graph branch table iterator handle.
+ */
+VMMR3DECL(void) DBGFR3FlowBranchTblItDestroy(DBGFFLOWBRANCHTBLIT hFlowBranchTblIt)
+{
+ PDBGFFLOWBRANCHTBLITINT pIt = hFlowBranchTblIt;
+ AssertPtrReturnVoid(pIt);
+
+ for (unsigned i = 0; i < pIt->pFlow->cBranchTbls; i++)
+ DBGFR3FlowBranchTblRelease(pIt->apBranchTbl[i]);
+
+ DBGFR3FlowRelease(pIt->pFlow);
+ RTMemFree(pIt);
+}
+
+
+/**
+ * Returns the next branch table in the iterator or NULL if there is no
+ * branch table left.
+ *
+ * @returns Handle to the next branch table in the iterator or NULL if the end
+ * was reached.
+ * @param hFlowBranchTblIt The iterator handle.
+ *
+ * @note If a valid handle is returned it must be released with DBGFR3FlowBranchTblRelease()
+ * when not required anymore.
+ */
+VMMR3DECL(DBGFFLOWBRANCHTBL) DBGFR3FlowBranchTblItNext(DBGFFLOWBRANCHTBLIT hFlowBranchTblIt)
+{
+ PDBGFFLOWBRANCHTBLITINT pIt = hFlowBranchTblIt;
+ AssertPtrReturn(pIt, NULL);
+
+ PDBGFFLOWBRANCHTBLINT pTbl = NULL;
+ if (pIt->idxTblNext < pIt->pFlow->cBranchTbls)
+ {
+ pTbl = pIt->apBranchTbl[pIt->idxTblNext++];
+ DBGFR3FlowBranchTblRetain(pTbl);
+ }
+
+ return pTbl;
+}
+
+
+/**
+ * Resets the given iterator to the beginning.
+ *
+ * @returns VBox status code.
+ * @param hFlowBranchTblIt The iterator handle.
+ */
+VMMR3DECL(int) DBGFR3FlowBranchTblItReset(DBGFFLOWBRANCHTBLIT hFlowBranchTblIt)
+{
+ PDBGFFLOWBRANCHTBLITINT pIt = hFlowBranchTblIt;
+ AssertPtrReturn(pIt, VERR_INVALID_HANDLE);
+
+ pIt->idxTblNext = 0;
+ return VINF_SUCCESS;
+}
diff --git a/src/VBox/VMM/VMMR3/DBGFR3FlowTrace.cpp b/src/VBox/VMM/VMMR3/DBGFR3FlowTrace.cpp
new file mode 100644
index 00000000..cbe8b766
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3FlowTrace.cpp
@@ -0,0 +1,1975 @@
+/* $Id: DBGFR3FlowTrace.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Guest Execution Flow Tracing.
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_flow DBGFR3FlowTrace - Flow Trace Interface
+ *
+ * @todo
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgfflowtrace.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/semaphore.h>
+#include <iprt/list.h>
+#include <iprt/time.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+/** Pointer to the internal trace module instance data. */
+typedef struct DBGFFLOWTRACEMODINT *PDBGFFLOWTRACEMODINT;
+/** Pointer to a trace module probe location. */
+typedef struct DBGFFLOWTRACEMODPROBELOC *PDBGFFLOWTRACEMODPROBELOC;
+
+/**
+ * Internal probe instance data.
+ */
+typedef struct DBGFFLOWTRACEPROBEINT
+{
+ /** External and internal references held. */
+ volatile uint32_t cRefs;
+ /** Trace modules referencing this probe. */
+ volatile uint32_t cRefsMod;
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** Description of this probe. */
+ char *pszDescr;
+ /** Overall memory consumed for this probe for each invocation. */
+ size_t cbProbe;
+ /** Number of entries for this probe. */
+ uint32_t cEntries;
+ /** Maximum number of entries the array can hold. */
+ uint32_t cEntriesMax;
+ /** Pointer to the probe entry array. */
+ PDBGFFLOWTRACEPROBEENTRY paEntries;
+} DBGFFLOWTRACEPROBEINT;
+/** Pointer to the internal probe instance data. */
+typedef DBGFFLOWTRACEPROBEINT *PDBGFFLOWTRACEPROBEINT;
+/** Pointer to a constant internal probe instance data. */
+typedef const DBGFFLOWTRACEPROBEINT *PCDBGFFLOWTRACEPROBEINT;
+
+/**
+ * Record collected for one probe hit.
+ */
+typedef struct DBGFFLOWTRACERECORDINT
+{
+ /** Data list node. */
+ RTLISTNODE NdRecord;
+ /** The probe instance the record was created for. */
+ PDBGFFLOWTRACEPROBEINT pProbe;
+ /** The common probe instance data was collected for, NULL if none. */
+ PDBGFFLOWTRACEPROBEINT pProbeCmn;
+ /** Address of the probe location. */
+ DBGFADDRESS AddrProbe;
+ /** Reference counter. */
+ volatile uint32_t cRefs;
+ /** CPU ID this data was collected on. */
+ VMCPUID idCpu;
+ /** Sequence number for this data. */
+ uint64_t u64SeqNo;
+ /** Timestamp in nanoseconds when the data was collected. */
+ uint64_t u64TsCollected;
+ /** Pointer to the values for the common probe if available. */
+ PDBGFFLOWTRACEPROBEVAL paValCmn;
+ /** The probe values collected - size defined
+ * by the number of entries in the probe. */
+ DBGFFLOWTRACEPROBEVAL aVal[1];
+} DBGFFLOWTRACERECORDINT;
+/** Pointer to one collected probe data. */
+typedef DBGFFLOWTRACERECORDINT *PDBGFFLOWTRACERECORDINT;
+
+/**
+ * Trace module state.
+ */
+typedef enum DBGFFLOWTRACEMODSTATE
+{
+ /** Invalid state. */
+ DBGFFLOWTRACEMODSTATE_INVALID = 0,
+ /** The module was created. */
+ DBGFFLOWTRACEMODSTATE_CREATED,
+ /** The module is active, no probes can be added. */
+ DBGFFLOWTRACEMODSTATE_ENABLED,
+ /** The VM is destroyed but there are still references to the module,
+ * functionality is limited (query records only). */
+ DBGFFLOWTRACEMODSTATE_VM_DESTROYED,
+ /** The trace module is destroyed. */
+ DBGFFLOWTRACEMODSTATE_DESTROYED,
+ /** 32bit hack. */
+ DBGFFLOWTRACEMODSTATE_32BIT_HACK = 0x7fffffff
+} DBGFFLOWTRACEMODSTATE;
+
+/**
+ * Internal trace module instance data.
+ */
+typedef struct DBGFFLOWTRACEMODINT
+{
+ /** References held to this trace module. */
+ volatile uint32_t cRefs;
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** CPU ID the module is for. */
+ VMCPUID idCpu;
+ /** The DBGF owner handle. */
+ DBGFBPOWNER hBpOwner;
+ /** State of the trace module. */
+ volatile DBGFFLOWTRACEMODSTATE enmState;
+ /** Next free sequence number. */
+ volatile uint64_t u64SeqNoNext;
+ /** Optional common probe describing data to collect. */
+ PDBGFFLOWTRACEPROBEINT pProbeCmn;
+ /** Flag whether to record only a limited amount of data as indicated
+ * by cHitsLeft. */
+ bool fLimit;
+ /** Number of hits left until the module is disabled automatically. */
+ volatile uint32_t cHitsLeft;
+ /** Number of records to keep before evicting the oldest one. */
+ uint32_t cRecordsMax;
+ /** Number of records collected in this module. */
+ volatile uint32_t cRecords;
+ /** Number of probes in this trace module. */
+ uint32_t cProbes;
+ /** List of probes active for this module - DBGFFLOWTRACEMODPROBELOC. */
+ RTLISTANCHOR LstProbes;
+ /** List of collected data for this module. */
+ RTLISTANCHOR LstRecords;
+ /** Semaphore protecting access to the probe and record list. */
+ RTSEMFASTMUTEX hMtx;
+} DBGFFLOWTRACEMODINT;
+/** Pointer to a const internal trace module instance data. */
+typedef const DBGFFLOWTRACEMODINT *PCDBGFFLOWTRACEMODINT;
+
+/**
+ * Trace module probe location data.
+ */
+typedef struct DBGFFLOWTRACEMODPROBELOC
+{
+ /** List node for the list of probes. */
+ RTLISTNODE NdProbes;
+ /** The owning trace module. */
+ PDBGFFLOWTRACEMODINT pTraceMod;
+ /** The probe instance. */
+ PDBGFFLOWTRACEPROBEINT pProbe;
+ /** Address of the probe location. */
+ DBGFADDRESS AddrProbe;
+ /** The DBGF breakpoint handle. */
+ DBGFBP hBp;
+ /** Flags controlling the collection behavior for the probe. */
+ uint32_t fFlags;
+} DBGFFLOWTRACEMODPROBELOC;
+
+
+/**
+ * Flow trace report state.
+ */
+typedef struct DBGFFLOWTRACEREPORTINT
+{
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** Reference count. */
+ volatile uint32_t cRefs;
+ /** Number of records. */
+ uint32_t cRecords;
+ /** Array of record handles - variable in size. */
+ PDBGFFLOWTRACERECORDINT apRec[1];
+} DBGFFLOWTRACEREPORTINT;
+/** Pointer to the internal flow trace report state. */
+typedef DBGFFLOWTRACEREPORTINT *PDBGFFLOWTRACEREPORTINT;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+/**
+ * Creates a new trace record.
+ *
+ * @returns Pointer to the created flow trace record or NULL if out of memory.
+ * @param pProbeLoc The probe location to allocate the record for.
+ * @param idCpu The CPU ID this record was created for.
+ * @param ppbBuf Where to store the pointer to the data buffer for this probe.
+ * @param ppbBufCmn Where to store the pointer to the data buffer for the common probe
+ * if available.
+ */
+static PDBGFFLOWTRACERECORDINT dbgfR3FlowTraceRecordCreate(PDBGFFLOWTRACEMODPROBELOC pProbeLoc, VMCPUID idCpu,
+ uint8_t **ppbBuf, uint8_t **ppbBufCmn)
+{
+ PDBGFFLOWTRACEMODINT pTraceMod = pProbeLoc->pTraceMod;
+ PCDBGFFLOWTRACEPROBEINT pProbe = pProbeLoc->pProbe;
+ PCDBGFFLOWTRACEPROBEINT pProbeCmn = pTraceMod->pProbeCmn;
+ size_t cbProbeBuf = pProbe->cbProbe;
+ if (pProbeCmn)
+ cbProbeBuf += pProbeCmn->cbProbe;
+
+ *ppbBuf = NULL;
+ *ppbBufCmn = NULL;
+
+ PDBGFFLOWTRACERECORDINT pRecord = (PDBGFFLOWTRACERECORDINT)MMR3HeapAllocZU(pTraceMod->pUVM, MM_TAG_DBGF_FLOWTRACE,
+ sizeof(DBGFFLOWTRACERECORDINT) + cbProbeBuf);
+ if (RT_LIKELY(pRecord))
+ {
+ DBGFR3FlowTraceProbeRetain(pProbeLoc->pProbe);
+ if (pProbeLoc->pTraceMod->pProbeCmn)
+ DBGFR3FlowTraceProbeRetain(pProbeLoc->pTraceMod->pProbeCmn);
+
+ pRecord->pProbe = pProbeLoc->pProbe;
+ pRecord->pProbeCmn = pProbeLoc->pTraceMod->pProbeCmn;
+ pRecord->AddrProbe = pProbeLoc->AddrProbe;
+ pRecord->cRefs = 1;
+ pRecord->idCpu = idCpu;
+ pRecord->u64SeqNo = ASMAtomicIncU64(&pTraceMod->u64SeqNoNext);
+ pRecord->u64TsCollected = RTTimeNanoTS();
+ pRecord->paValCmn = NULL;
+
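+ /*
+ * Rough layout of the single allocation (sketch, derived from the
+ * offsets below): the record header already contains aVal[0], the
+ * remaining values of this probe follow, then this probe's raw data
+ * buffer, then the values and raw data buffer of the common probe,
+ * if one is set.
+ */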
+ *ppbBuf = (uint8_t *)&pRecord->aVal[pProbe->cEntries];
+
+ if (pProbeCmn)
+ {
+ size_t offValCmn = pProbe->cbProbe - pProbe->cEntries * sizeof(DBGFFLOWTRACEPROBEVAL);
+ pRecord->paValCmn = (PDBGFFLOWTRACEPROBEVAL)(*ppbBuf + offValCmn);
+ *ppbBufCmn = (uint8_t *)&pRecord->paValCmn[pProbeCmn->cEntries];
+ }
+ }
+
+ return pRecord;
+}
+
+
+/**
+ * Destroys the given record.
+ *
+ * @param pRecord The record to destroy.
+ */
+static void dbgfR3FlowTraceRecordDestroy(PDBGFFLOWTRACERECORDINT pRecord)
+{
+ DBGFR3FlowTraceProbeRelease(pRecord->pProbe);
+ pRecord->pProbe = NULL;
+ MMR3HeapFree(pRecord);
+}
+
+
+/**
+ * Creates a new flow trace report which can hold the given number of records.
+ *
+ * @returns Pointer to the newly created report state or NULL if out of memory.
+ * @param pUVM The usermode VM handle.
+ * @param cRecords Number of records the report should be able to hold.
+ */
+static PDBGFFLOWTRACEREPORTINT dbgfR3FlowTraceReportCreate(PUVM pUVM, uint32_t cRecords)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = (PDBGFFLOWTRACEREPORTINT)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_FLOWTRACE,
+ RT_UOFFSETOF_DYN(DBGFFLOWTRACEREPORTINT, apRec[cRecords]));
+ if (RT_LIKELY(pReport))
+ {
+ pReport->pUVM = pUVM;
+ pReport->cRefs = 1;
+ pReport->cRecords = cRecords;
+ }
+
+ return pReport;
+}
+
+
+/**
+ * Destroys the given report releasing all references held to the contained records.
+ *
+ * @param pReport The report to destroy.
+ */
+static void dbgfR3FlowTraceReportDestroy(PDBGFFLOWTRACEREPORTINT pReport)
+{
+ for (uint32_t i = 0; i < pReport->cRecords; i++)
+ DBGFR3FlowTraceRecordRelease(pReport->apRec[i]);
+ MMR3HeapFree(pReport);
+}
+
+
+/**
+ * Queries the given register and returns the value as a guest pointer.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param idCpu VM CPU identifier.
+ * @param pszReg The register name to query.
+ * @param pGCPtr Where to store the register value on success.
+ */
+static int dbgfR3FlowTraceModProbeQueryRegAsGCPtr(PUVM pUVM, VMCPUID idCpu, const char *pszReg,
+ PRTGCPTR pGCPtr)
+{
+ DBGFREGVAL Val;
+ DBGFREGVALTYPE enmValType;
+ int rc = DBGFR3RegNmQuery(pUVM, idCpu, pszReg, &Val, &enmValType);
+ if (RT_SUCCESS(rc))
+ {
+ switch (enmValType)
+ {
+ case DBGFREGVALTYPE_U8:
+ *pGCPtr = Val.u8;
+ break;
+ case DBGFREGVALTYPE_U16:
+ *pGCPtr = Val.u16;
+ break;
+ case DBGFREGVALTYPE_U32:
+ *pGCPtr = Val.u32;
+ break;
+ case DBGFREGVALTYPE_U64:
+ *pGCPtr = Val.u64;
+ break;
+ case DBGFREGVALTYPE_U128:
+ case DBGFREGVALTYPE_R80:
+ case DBGFREGVALTYPE_DTR:
+ default:
+ rc = VERR_INVALID_PARAMETER;
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Resolves the guest address from an indirect memory probe entry.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param idCpu VM CPU identifier.
+ * @param pEntry The probe entry.
+ * @param pAddr Where to store the address on success.
+ */
+static int dbgfR3FlowTraceModProbeResolveIndirectAddr(PUVM pUVM, VMCPUID idCpu, PDBGFFLOWTRACEPROBEENTRY pEntry,
+ PDBGFADDRESS pAddr)
+{
+ Assert(pEntry->enmType == DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM);
+
+ RTGCPTR GCPtrBase = 0;
+ RTGCPTR GCPtrIndex = 0;
+ int rc = dbgfR3FlowTraceModProbeQueryRegAsGCPtr(pUVM, idCpu, pEntry->Type.IndirectMem.RegBase.pszName,
+ &GCPtrBase);
+ if ( RT_SUCCESS(rc)
+ && pEntry->Type.IndirectMem.RegIndex.pszName)
+ rc = dbgfR3FlowTraceModProbeQueryRegAsGCPtr(pUVM, idCpu, pEntry->Type.IndirectMem.RegIndex.pszName,
+ &GCPtrIndex);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCPTR GCPtr = GCPtrBase + GCPtrIndex * pEntry->Type.IndirectMem.uScale;
+ DBGFR3AddrFromFlat(pUVM, pAddr, GCPtr);
+ if (pEntry->Type.IndirectMem.iOffset > 0)
+ DBGFR3AddrAdd(pAddr, pEntry->Type.IndirectMem.iOffset);
+ else if (pEntry->Type.IndirectMem.iOffset < 0)
+ DBGFR3AddrSub(pAddr, -pEntry->Type.IndirectMem.iOffset);
+ }
+
+ return rc;
+}
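+
+/*
+ * Worked example for the computation above with made up register names and
+ * values: RegBase = "rbx" = 0x1000, RegIndex = "rcx" = 2, uScale = 8 and
+ * iOffset = -0x10 resolve to the flat address 0x1000 + 2 * 8 - 0x10 = 0x1000.
+ */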
+
+
+/**
+ * Destroys the given flow trace module freeing all allocated resources.
+ *
+ * @param pThis The flow trace module instance data.
+ */
+static void dbgfR3FlowTraceModDestroy(PDBGFFLOWTRACEMODINT pThis)
+{
+ if (ASMAtomicReadU32((volatile uint32_t *)&pThis->enmState) == DBGFFLOWTRACEMODSTATE_ENABLED)
+ {
+ int rc = DBGFR3FlowTraceModDisable(pThis);
+ AssertRC(rc);
+ }
+
+ Assert( pThis->enmState == DBGFFLOWTRACEMODSTATE_CREATED
+ || pThis->enmState == DBGFFLOWTRACEMODSTATE_VM_DESTROYED);
+
+ /* Do the cleanup under the semaphore. */
+ RTSemFastMutexRequest(pThis->hMtx);
+ if (pThis->pProbeCmn)
+ DBGFR3FlowTraceProbeRelease(pThis->pProbeCmn);
+
+ PDBGFFLOWTRACEMODPROBELOC pIt, pItNext;
+ RTListForEachSafe(&pThis->LstProbes, pIt, pItNext, DBGFFLOWTRACEMODPROBELOC, NdProbes)
+ {
+ RTListNodeRemove(&pIt->NdProbes);
+ ASMAtomicDecU32(&pIt->pProbe->cRefsMod);
+ DBGFR3FlowTraceProbeRelease(pIt->pProbe);
+ MMR3HeapFree(pIt);
+ }
+
+ PDBGFFLOWTRACERECORDINT pRecIt, pRecItNext;
+ RTListForEachSafe(&pThis->LstRecords, pRecIt, pRecItNext, DBGFFLOWTRACERECORDINT, NdRecord)
+ {
+ RTListNodeRemove(&pRecIt->NdRecord);
+ DBGFR3FlowTraceRecordRelease(pRecIt);
+ }
+
+ DBGFR3BpOwnerDestroy(pThis->pUVM, pThis->hBpOwner);
+ RTSemFastMutexRelease(pThis->hMtx);
+ RTSemFastMutexDestroy(pThis->hMtx);
+ MMR3HeapFree(pThis);
+}
+
+
+/**
+ * Checks whether the given address lies within the given address range.
+ *
+ * @returns true if the address lies within the range, false otherwise.
+ * @param pAddr The address to check for.
+ * @param pAddrStart The start address of the range.
+ * @param pAddrLast The last address of the range (inclusive).
+ */
+static bool dbgfR3FlowTraceAddrIntersect(PDBGFADDRESS pAddr, PDBGFADDRESS pAddrStart,
+ PDBGFADDRESS pAddrLast)
+{
+ return (pAddrStart->Sel == pAddr->Sel)
+ && (pAddrStart->off <= pAddr->off)
+ && (pAddrLast->off >= pAddr->off);
+}
+
+
+/**
+ * Matches a single value against a given filter value.
+ *
+ * @returns Flag whether the value matches against the single value.
+ * @param pVal The value to match.
+ * @param pValFilter The value filter to match against.
+ */
+static bool dbgfR3FlowTraceRecordMatchSingleValue(PCDBGFFLOWTRACEPROBEVAL pVal,
+ PCDBGFFLOWTRACEPROBEVAL pValFilter)
+{
+ if (pVal->pProbeEntry->enmType != pValFilter->pProbeEntry->enmType)
+ return false;
+
+ switch (pVal->pProbeEntry->enmType)
+ {
+ case DBGFFLOWTRACEPROBEENTRYTYPE_REG:
+ {
+ if (pVal->Type.Reg.enmType != pValFilter->Type.Reg.enmType)
+ return false;
+
+ if (strcmp(pVal->Type.Reg.pszName, pValFilter->Type.Reg.pszName))
+ return false;
+
+ switch (pVal->Type.Reg.enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ if (pVal->Type.Reg.Val.u8 != pValFilter->Type.Reg.Val.u8)
+ return false;
+ break;
+ case DBGFREGVALTYPE_U16:
+ if (pVal->Type.Reg.Val.u16 != pValFilter->Type.Reg.Val.u16)
+ return false;
+ break;
+ case DBGFREGVALTYPE_U32:
+ if (pVal->Type.Reg.Val.u32 != pValFilter->Type.Reg.Val.u32)
+ return false;
+ break;
+ case DBGFREGVALTYPE_U64:
+ if (pVal->Type.Reg.Val.u64 != pValFilter->Type.Reg.Val.u64)
+ return false;
+ break;
+ case DBGFREGVALTYPE_U128:
+ if (memcmp(&pVal->Type.Reg.Val.u128, &pValFilter->Type.Reg.Val.u128,
+ sizeof(RTUINT128U)))
+ return false;
+ break;
+ case DBGFREGVALTYPE_R80:
+ if (memcmp(&pVal->Type.Reg.Val.r80Ex, &pValFilter->Type.Reg.Val.r80Ex,
+ sizeof(RTFLOAT80U2)))
+ return false;
+ break;
+ case DBGFREGVALTYPE_DTR:
+ if ( pVal->Type.Reg.Val.dtr.u64Base != pValFilter->Type.Reg.Val.dtr.u64Base
+ || pVal->Type.Reg.Val.dtr.u32Limit != pValFilter->Type.Reg.Val.dtr.u32Limit)
+ return false;
+ break;
+ default:
+ AssertFailed();
+ return false;
+ }
+ break;
+ }
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CONST_MEM:
+ case DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM:
+ if ( memcmp(&pVal->Type.Mem.Addr, &pValFilter->Type.Mem.Addr,
+ sizeof(DBGFADDRESS))
+ || pVal->Type.Mem.cbBuf != pValFilter->Type.Mem.cbBuf
+ || memcmp(pVal->Type.Mem.pvBuf, pValFilter->Type.Mem.pvBuf,
+ pValFilter->Type.Mem.cbBuf))
+ return false;
+ break;
+ default:
+ AssertFailed();
+ return false;
+ }
+
+ return true;
+}
+
+
+/**
+ * Matches the given values against the filter values returning a flag whether they match.
+ *
+ * @returns Flag whether the given values match the filter.
+ * @param paVal Pointer to the array of values.
+ * @param cVals Number of values in the array.
+ * @param paValFilter Pointer to the filter values.
+ * @param cValsFilter Number of entries in the filter values.
+ */
+static bool dbgfR3FlowTraceRecordMatchValues(PCDBGFFLOWTRACEPROBEVAL paVal, uint32_t cVals,
+ PCDBGFFLOWTRACEPROBEVAL paValFilter, uint32_t cValsFilter)
+{
+    bool fMatch = true;
+
+    /*
+     * The order of the filters and values doesn't need to match, but for every
+     * filter there must be at least one matching value.  Note that fMatch must
+     * start out true so the loop is entered at all (and an empty filter set
+     * matches vacuously).
+     */
+    while (   cValsFilter-- > 0
+           && fMatch)
+ {
+ for (uint32_t i = 0; i < cVals; i++)
+ {
+ fMatch = dbgfR3FlowTraceRecordMatchSingleValue(&paVal[i], paValFilter);
+ if (fMatch)
+ break;
+ }
+ paValFilter++;
+ }
+
+ return fMatch;
+}
+
+
+/**
+ * Checks the given record against the given filter, returning whether the filter
+ * matches.
+ *
+ * @returns Flag whether the record matches the given filter.
+ * @param pRecord The record to check.
+ * @param pFilter The filter to check against.
+ */
+static bool dbgfR3FlowTraceRecordMatchSingleFilter(PDBGFFLOWTRACERECORDINT pRecord,
+ PDBGFFLOWTRACEREPORTFILTER pFilter)
+{
+ bool fMatch = false;
+
+ switch (pFilter->enmType)
+ {
+ case DBGFFLOWTRACEREPORTFILTERTYPE_SEQ_NUM:
+ {
+ if ( pRecord->u64SeqNo >= pFilter->Type.SeqNo.u64SeqNoFirst
+ && pRecord->u64SeqNo <= pFilter->Type.SeqNo.u64SeqNoLast)
+ fMatch = true;
+ break;
+ }
+ case DBGFFLOWTRACEREPORTFILTERTYPE_TIMESTAMP:
+ {
+ if ( pRecord->u64TsCollected >= pFilter->Type.Timestamp.u64TsFirst
+ && pRecord->u64TsCollected <= pFilter->Type.Timestamp.u64TsLast)
+ fMatch = true;
+ break;
+ }
+ case DBGFFLOWTRACEREPORTFILTERTYPE_ADDR:
+ {
+ if (dbgfR3FlowTraceAddrIntersect(&pRecord->AddrProbe,
+ &pFilter->Type.Addr.AddrStart,
+ &pFilter->Type.Addr.AddrLast))
+ fMatch = true;
+ break;
+ }
+ case DBGFFLOWTRACEREPORTFILTERTYPE_VMCPU_ID:
+ {
+ if ( pRecord->idCpu >= pFilter->Type.VCpuId.idCpuStart
+ && pRecord->idCpu <= pFilter->Type.VCpuId.idCpuLast)
+ fMatch = true;
+ break;
+ }
+ case DBGFFLOWTRACEREPORTFILTERTYPE_PROBE_DATA:
+ {
+ if (pFilter->Type.ProbeData.fValCmn)
+ {
+ if (pRecord->paValCmn)
+ {
+ PCDBGFFLOWTRACEPROBEINT pProbeCmn = pRecord->pProbeCmn;
+ AssertPtr(pProbeCmn);
+
+ fMatch = dbgfR3FlowTraceRecordMatchValues(pRecord->paValCmn, pProbeCmn->cEntries,
+ pFilter->Type.ProbeData.paVal,
+ pFilter->Type.ProbeData.cVals);
+ }
+ }
+ else
+ fMatch = dbgfR3FlowTraceRecordMatchValues(&pRecord->aVal[0], pRecord->pProbe->cEntries,
+ pFilter->Type.ProbeData.paVal, pFilter->Type.ProbeData.cVals);
+ break;
+ }
+ default:
+ AssertMsgFailed(("Invalid filter type %u!\n", pFilter->enmType));
+ }
+
+ return fMatch;
+}
+
+
+/**
+ * Checks the given record against the given filters.
+ *
+ * @returns Flag whether the record matches the filters.
+ * @param pRecord The record to check.
+ * @param paFilters Array of filters to check.
+ * @param cFilters Number of filters in the array.
+ * @param enmOp How the record should match against the filters.
+ */
+static bool dbgfR3FlowTraceDoesRecordMatchFilter(PDBGFFLOWTRACERECORDINT pRecord,
+ PDBGFFLOWTRACEREPORTFILTER paFilters,
+ uint32_t cFilters, DBGFFLOWTRACEREPORTFILTEROP enmOp)
+{
+ bool fMatch = false;
+
+ if (enmOp == DBGFFLOWTRACEREPORTFILTEROP_AND)
+ {
+ fMatch = true;
+ while (cFilters-- > 0)
+ {
+ if (!dbgfR3FlowTraceRecordMatchSingleFilter(pRecord, &paFilters[cFilters]))
+ {
+ fMatch = false;
+ break;
+ }
+ }
+ }
+ else if (enmOp == DBGFFLOWTRACEREPORTFILTEROP_OR)
+ {
+ while (cFilters-- > 0)
+ {
+ if (dbgfR3FlowTraceRecordMatchSingleFilter(pRecord, &paFilters[cFilters]))
+ {
+ fMatch = true;
+ break;
+ }
+ }
+ }
+ else
+ AssertMsgFailed(("Invalid filter operation %u!\n", enmOp));
+
+ return fMatch;
+}
+
+
+/**
+ * Collects all the data specified in the given probe.
+ *
+ * @returns Flag whether to enter the debugger.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The virtual CPU ID.
+ * @param pTraceMod The trace module instance.
+ * @param pAddrProbe Location of the probe, NULL if a common probe.
+ * @param pProbe The probe instance.
+ * @param pVal Pointer to the array of values to fill.
+ * @param   pbBuf           Pointer to the memory buffer holding additional data.
+ */
+static bool dbgfR3FlowTraceModProbeCollectData(PUVM pUVM, VMCPUID idCpu,
+ PDBGFFLOWTRACEMODINT pTraceMod,
+ PCDBGFADDRESS pAddrProbe,
+ PDBGFFLOWTRACEPROBEINT pProbe,
+ PDBGFFLOWTRACEPROBEVAL pVal, uint8_t *pbBuf)
+{
+ bool fDbgDefer = false;
+
+ for (uint32_t i = 0; i < pProbe->cEntries; i++)
+ {
+ int rc;
+ PDBGFFLOWTRACEPROBEENTRY pEntry = &pProbe->paEntries[i];
+
+ pVal->pProbeEntry = pEntry;
+
+ switch (pEntry->enmType)
+ {
+ case DBGFFLOWTRACEPROBEENTRYTYPE_REG:
+ rc = DBGFR3RegNmQuery(pUVM, idCpu, pEntry->Type.Reg.pszName,
+ &pVal->Type.Reg.Val, &pVal->Type.Reg.enmType);
+ AssertRC(rc);
+ pVal->Type.Reg.pszName = pEntry->Type.Reg.pszName;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM:
+ {
+ DBGFADDRESS Addr;
+ rc = dbgfR3FlowTraceModProbeResolveIndirectAddr(pUVM, idCpu, pEntry, &Addr);
+ if (RT_SUCCESS(rc))
+ {
+ pVal->Type.Mem.pvBuf = pbBuf;
+ pVal->Type.Mem.cbBuf = pEntry->Type.IndirectMem.cbMem;
+ pVal->Type.Mem.Addr = Addr;
+ rc = DBGFR3MemRead(pUVM, idCpu, &pVal->Type.Mem.Addr, pbBuf,
+ pVal->Type.Mem.cbBuf);
+ AssertRC(rc);
+ pbBuf += pVal->Type.Mem.cbBuf;
+ }
+ break;
+ }
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CONST_MEM:
+ pVal->Type.Mem.pvBuf = pbBuf;
+ pVal->Type.Mem.cbBuf = pEntry->Type.ConstMem.cbMem;
+ pVal->Type.Mem.Addr = pEntry->Type.ConstMem.AddrMem;
+ rc = DBGFR3MemRead(pUVM, idCpu, &pVal->Type.Mem.Addr, pbBuf,
+ pVal->Type.Mem.cbBuf);
+ AssertRC(rc);
+ pbBuf += pVal->Type.Mem.cbBuf;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CALLBACK:
+ rc = pEntry->Type.Callback.pfnCallback(pUVM, idCpu, pTraceMod,
+ pAddrProbe, pProbe, pEntry,
+ pEntry->Type.Callback.pvUser);
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_DEBUGGER:
+ fDbgDefer = true;
+ break;
+ default:
+ AssertFailed();
+ }
+
+ pVal++;
+ }
+
+ return fDbgDefer;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFBPHIT}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3FlowTraceModProbeFiredWorker(PVM pVM, VMCPUID idCpu, void *pvUserBp, DBGFBP hBp, PCDBGFBPPUB pBpPub, uint16_t fFlags)
+{
+ RT_NOREF(pVM, hBp, pBpPub, fFlags);
+ LogFlowFunc(("pVM=%#p idCpu=%u pvUserBp=%#p hBp=%#x pBpPub=%p\n",
+ pVM, idCpu, pvUserBp, hBp, pBpPub));
+
+ PDBGFFLOWTRACEMODPROBELOC pProbeLoc = (PDBGFFLOWTRACEMODPROBELOC)pvUserBp;
+ PDBGFFLOWTRACEPROBEINT pProbe = pProbeLoc->pProbe;
+ PDBGFFLOWTRACEMODINT pTraceMod = pProbeLoc->pTraceMod;
+ bool fDisabledModule = false;
+ bool fDbgDefer = false;
+
+ /* Check whether the trace module is still active and we are tracing the correct VCPU. */
+ if (ASMAtomicReadU32((volatile uint32_t *)&pTraceMod->enmState) != DBGFFLOWTRACEMODSTATE_ENABLED
+ || ( idCpu != pTraceMod->idCpu
+ && pTraceMod->idCpu != VMCPUID_ANY))
+ return VINF_SUCCESS;
+
+ if ( pTraceMod->fLimit
+ && ASMAtomicReadU32(&pTraceMod->cHitsLeft))
+ {
+ uint32_t cHitsLeftNew = ASMAtomicDecU32(&pTraceMod->cHitsLeft);
+ if (cHitsLeftNew > cHitsLeftNew + 1) /* Underflow => reached the limit. */
+ {
+ ASMAtomicIncU32(&pTraceMod->cHitsLeft);
+ return VINF_SUCCESS;
+ }
+
+ if (!cHitsLeftNew)
+ {
+ /* We got the last record, disable the trace module. */
+ fDisabledModule = ASMAtomicCmpXchgU32((volatile uint32_t *)&pTraceMod->enmState, DBGFFLOWTRACEMODSTATE_CREATED,
+ DBGFFLOWTRACEMODSTATE_ENABLED);
+ }
+ }
+
+ uint8_t *pbBuf = NULL;
+ uint8_t *pbBufCmn = NULL;
+ PDBGFFLOWTRACERECORDINT pRecord = dbgfR3FlowTraceRecordCreate(pProbeLoc, idCpu, &pbBuf, &pbBufCmn);
+ if (pRecord)
+ {
+ fDbgDefer = dbgfR3FlowTraceModProbeCollectData(pTraceMod->pUVM, idCpu, pTraceMod, &pProbeLoc->AddrProbe, pProbe,
+ &pRecord->aVal[0], pbBuf);
+        if (pTraceMod->pProbeCmn) /* Don't lose a halt request from the location probe. */
+            fDbgDefer |= dbgfR3FlowTraceModProbeCollectData(pTraceMod->pUVM, idCpu, pTraceMod, NULL, pTraceMod->pProbeCmn,
+                                                            pRecord->paValCmn, pbBufCmn);
+
+ RTSemFastMutexRequest(pTraceMod->hMtx);
+ uint32_t cRecordsNew = ASMAtomicIncU32(&pTraceMod->cRecords);
+ RTListAppend(&pTraceMod->LstRecords, &pRecord->NdRecord);
+ if ( (cRecordsNew > pTraceMod->cRecordsMax)
+ && pTraceMod->cRecordsMax > 0)
+ {
+ /* Get the first record and destroy it. */
+ pRecord = RTListRemoveFirst(&pTraceMod->LstRecords, DBGFFLOWTRACERECORDINT, NdRecord);
+ AssertPtr(pRecord);
+ DBGFR3FlowTraceRecordRelease(pRecord);
+ ASMAtomicDecU32(&pTraceMod->cRecords);
+ }
+ RTSemFastMutexRelease(pTraceMod->hMtx);
+ }
+
+ if (fDisabledModule)
+ {
+ int rc = DBGFR3FlowTraceModDisable(pTraceMod);
+ AssertRC(rc);
+ }
+
+ return fDbgDefer ? VINF_DBGF_BP_HALT : VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for DBGFR3FlowTraceModEnable(), doing the work in an EMT rendezvous point to
+ * ensure no probe is hit in an inconsistent state.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The VM instance data.
+ * @param pVCpu The virtual CPU we execute on.
+ * @param pvUser Opaque user data.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3FlowTraceModEnableWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF2(pVM, pVCpu);
+ PDBGFFLOWTRACEMODINT pThis = (PDBGFFLOWTRACEMODINT)pvUser;
+ PDBGFFLOWTRACEMODPROBELOC pProbeLoc = NULL;
+ int rc = VINF_SUCCESS;
+
+ pThis->enmState = DBGFFLOWTRACEMODSTATE_ENABLED;
+
+ RTListForEach(&pThis->LstProbes, pProbeLoc, DBGFFLOWTRACEMODPROBELOC, NdProbes)
+ {
+ uint16_t fBpFlags = DBGF_BP_F_ENABLED;
+
+ if (pProbeLoc->fFlags & DBGF_FLOW_TRACE_PROBE_ADD_F_BEFORE_EXEC)
+ fBpFlags |= DBGF_BP_F_HIT_EXEC_BEFORE;
+ if (pProbeLoc->fFlags & DBGF_FLOW_TRACE_PROBE_ADD_F_AFTER_EXEC)
+ fBpFlags |= DBGF_BP_F_HIT_EXEC_AFTER;
+
+ rc = DBGFR3BpSetInt3Ex(pThis->pUVM, pThis->hBpOwner, pProbeLoc,
+ 0 /*idSrcCpu*/, &pProbeLoc->AddrProbe, fBpFlags,
+ 0 /*iHitTrigger*/, ~0ULL /*iHitDisable*/, &pProbeLoc->hBp);
+ if (RT_FAILURE(rc))
+ break;
+ }
+
+ if (RT_FAILURE(rc))
+ pThis->enmState = DBGFFLOWTRACEMODSTATE_CREATED;
+
+ return rc;
+}
+
+
+/**
+ * Worker for DBGFR3FlowTraceModDisable(), doing the work in an EMT rendezvous point to
+ * ensure no probe is hit in an inconsistent state.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The VM instance data.
+ * @param pVCpu The virtual CPU we execute on.
+ * @param pvUser Opaque user data.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3FlowTraceModDisableWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF2(pVM, pVCpu);
+ PDBGFFLOWTRACEMODINT pThis = (PDBGFFLOWTRACEMODINT)pvUser;
+ PDBGFFLOWTRACEMODPROBELOC pProbeLoc = NULL;
+ int rc = VINF_SUCCESS;
+
+ pThis->enmState = DBGFFLOWTRACEMODSTATE_CREATED;
+
+ RTListForEach(&pThis->LstProbes, pProbeLoc, DBGFFLOWTRACEMODPROBELOC, NdProbes)
+ {
+ rc = DBGFR3BpClear(pThis->pUVM, pProbeLoc->hBp);
+ AssertRC(rc);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Checks whether both addresses are equal.
+ *
+ * @returns true if both addresses point to the same location, false otherwise.
+ * @param pAddr1 First address.
+ * @param pAddr2 Second address.
+ */
+static bool dbgfR3FlowTraceAddrEqual(PCDBGFADDRESS pAddr1, PCDBGFADDRESS pAddr2)
+{
+ return pAddr1->Sel == pAddr2->Sel
+ && pAddr1->off == pAddr2->off;
+}
+
+
+/**
+ * Returns the probe location pointer at the given address for the given trace module.
+ *
+ * @returns Pointer to the probe location or NULL if there is no probe at the given location.
+ * @param pThis The flow trace module instance data.
+ * @param pAddrProbe Address of the probe to check.
+ */
+static PDBGFFLOWTRACEMODPROBELOC dbgfR3TraceModGetProbeLocAtAddr(PDBGFFLOWTRACEMODINT pThis, PCDBGFADDRESS pAddrProbe)
+{
+ RTSemFastMutexRequest(pThis->hMtx);
+
+ PDBGFFLOWTRACEMODPROBELOC pIt;
+ RTListForEach(&pThis->LstProbes, pIt, DBGFFLOWTRACEMODPROBELOC, NdProbes)
+ {
+ if (dbgfR3FlowTraceAddrEqual(&pIt->AddrProbe, pAddrProbe))
+ {
+ RTSemFastMutexRelease(pThis->hMtx);
+ return pIt;
+ }
+ }
+ RTSemFastMutexRelease(pThis->hMtx);
+ return NULL;
+}
+
+
+/**
+ * Cleans up any allocated resources for each entry in the given probe for the given range.
+ *
+ * @param pProbe The probe instance.
+ * @param idxStart Start index to clean up.
+ * @param cEntries How many entries to clean up.
+ */
+static void dbgfR3ProbeEntryCleanup(PDBGFFLOWTRACEPROBEINT pProbe, uint32_t idxStart, uint32_t cEntries)
+{
+ AssertReturnVoid(pProbe->cEntriesMax >= idxStart + cEntries);
+
+ for (uint32_t i = idxStart; i < idxStart + cEntries; i++)
+ {
+ PDBGFFLOWTRACEPROBEENTRY pEntry = &pProbe->paEntries[i];
+
+ switch (pEntry->enmType)
+ {
+ case DBGFFLOWTRACEPROBEENTRYTYPE_REG:
+ if (pEntry->Type.Reg.pszName)
+ MMR3HeapFree((void *)pEntry->Type.Reg.pszName);
+ pEntry->Type.Reg.pszName = NULL;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CONST_MEM:
+ pEntry->Type.ConstMem.cbMem = 0;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM:
+ pEntry->Type.IndirectMem.uScale = 0;
+ pEntry->Type.IndirectMem.cbMem = 0;
+ if (pEntry->Type.IndirectMem.RegBase.pszName)
+ MMR3HeapFree((void *)pEntry->Type.IndirectMem.RegBase.pszName);
+ if (pEntry->Type.IndirectMem.RegIndex.pszName)
+ MMR3HeapFree((void *)pEntry->Type.IndirectMem.RegIndex.pszName);
+ pEntry->Type.IndirectMem.RegBase.pszName = NULL;
+ pEntry->Type.IndirectMem.RegIndex.pszName = NULL;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CALLBACK:
+ pEntry->Type.Callback.pfnCallback = NULL;
+ pEntry->Type.Callback.pvUser = NULL;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_DEBUGGER:
+ break;
+ default:
+ AssertFailed();
+ }
+ }
+}
+
+
+/**
+ * Destroys the given flow trace probe freeing all allocated resources.
+ *
+ * @param pProbe The flow trace probe instance data.
+ */
+static void dbgfR3FlowTraceProbeDestroy(PDBGFFLOWTRACEPROBEINT pProbe)
+{
+ dbgfR3ProbeEntryCleanup(pProbe, 0, pProbe->cEntries);
+ MMR3HeapFree(pProbe->paEntries);
+ MMR3HeapFree(pProbe);
+}
+
+
+/**
+ * Ensures that the given probe has the given amount of additional entries available,
+ * increasing the size if necessary.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NO_MEMORY if increasing the size failed due to an out of memory condition.
+ * @param   pProbe          The probe instance.
+ * @param cEntriesAdd Number of additional entries required.
+ */
+static int dbgfR3ProbeEnsureSize(PDBGFFLOWTRACEPROBEINT pProbe, uint32_t cEntriesAdd)
+{
+ uint32_t cEntriesNew = pProbe->cEntries + cEntriesAdd;
+ int rc = VINF_SUCCESS;
+
+ if (pProbe->cEntriesMax < cEntriesNew)
+ {
+ PDBGFFLOWTRACEPROBEENTRY paEntriesNew;
+ if (!pProbe->cEntriesMax)
+ paEntriesNew = (PDBGFFLOWTRACEPROBEENTRY)MMR3HeapAllocZU(pProbe->pUVM, MM_TAG_DBGF_FLOWTRACE,
+ cEntriesNew * sizeof(DBGFFLOWTRACEPROBEENTRY));
+ else
+ paEntriesNew = (PDBGFFLOWTRACEPROBEENTRY)MMR3HeapRealloc(pProbe->paEntries,
+ cEntriesNew * sizeof(DBGFFLOWTRACEPROBEENTRY));
+ if (RT_LIKELY(paEntriesNew))
+ {
+ pProbe->paEntries = paEntriesNew;
+ pProbe->cEntriesMax = cEntriesNew;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Duplicates a probe registry entry.
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param pDst Where to copy the entry to.
+ * @param pSrc What to copy.
+ */
+static int dbgfR3ProbeEntryRegDup(PUVM pUVM, PDBGFFLOWTRACEPROBEENTRYREG pDst, PCDBGFFLOWTRACEPROBEENTRYREG pSrc)
+{
+ int rc = VINF_SUCCESS;
+
+ pDst->enmType = pSrc->enmType;
+ pDst->pszName = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_FLOWTRACE, pSrc->pszName);
+ if (!pDst->pszName)
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Duplicates a given probe entry in the given destination doing a deep copy (strings are duplicated).
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param pDst Where to copy the entry to.
+ * @param pSrc What to copy.
+ */
+static int dbgfR3ProbeEntryDup(PUVM pUVM, PDBGFFLOWTRACEPROBEENTRY pDst, PCDBGFFLOWTRACEPROBEENTRY pSrc)
+{
+ int rc = VINF_SUCCESS;
+
+ pDst->enmType = pSrc->enmType;
+ pDst->pszDesc = NULL;
+ if (pSrc->pszDesc)
+ {
+ pDst->pszDesc = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_FLOWTRACE, pSrc->pszDesc);
+ if (!pDst->pszDesc)
+ rc = VERR_NO_MEMORY;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ switch (pDst->enmType)
+ {
+ case DBGFFLOWTRACEPROBEENTRYTYPE_REG:
+ rc = dbgfR3ProbeEntryRegDup(pUVM, &pDst->Type.Reg, &pSrc->Type.Reg);
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CONST_MEM:
+ pDst->Type.ConstMem.AddrMem = pSrc->Type.ConstMem.AddrMem;
+ pDst->Type.ConstMem.cbMem = pSrc->Type.ConstMem.cbMem;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM:
+ pDst->Type.IndirectMem.uScale = pSrc->Type.IndirectMem.uScale;
+ pDst->Type.IndirectMem.cbMem = pSrc->Type.IndirectMem.cbMem;
+ pDst->Type.IndirectMem.iOffset = pSrc->Type.IndirectMem.iOffset;
+ rc = dbgfR3ProbeEntryRegDup(pUVM, &pDst->Type.IndirectMem.RegBase, &pSrc->Type.IndirectMem.RegBase);
+                if (   RT_SUCCESS(rc)
+                    && pSrc->Type.IndirectMem.RegIndex.pszName) /* Check the source; the destination isn't initialized yet. */
+                {
+                    rc = dbgfR3ProbeEntryRegDup(pUVM, &pDst->Type.IndirectMem.RegIndex, &pSrc->Type.IndirectMem.RegIndex);
+                    if (RT_FAILURE(rc))
+                    {
+                        MMR3HeapFree((void *)pDst->Type.IndirectMem.RegBase.pszName);
+                        pDst->Type.IndirectMem.RegBase.pszName = NULL; /* Avoid a double free in the caller's cleanup. */
+                    }
+                }
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CALLBACK:
+ pDst->Type.Callback.pfnCallback = pSrc->Type.Callback.pfnCallback;
+ pDst->Type.Callback.pvUser = pSrc->Type.Callback.pvUser;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_DEBUGGER:
+ break;
+ default:
+ rc = VERR_INVALID_PARAMETER;
+ }
+ }
+
+ if ( RT_FAILURE(rc)
+ && pDst->pszDesc)
+ {
+ MMR3HeapFree((void *)pDst->pszDesc);
+ pDst->pszDesc = NULL;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Recalculates the size occupied by the data of this probe for each invocation.
+ *
+ * @param pProbe The probe instance.
+ */
+static void dbgfR3ProbeRecalcSize(PDBGFFLOWTRACEPROBEINT pProbe)
+{
+ size_t cbProbe = 0;
+
+ for (uint32_t i = 0; i < pProbe->cEntries; i++)
+ {
+ PDBGFFLOWTRACEPROBEENTRY pEntry = &pProbe->paEntries[i];
+
+ cbProbe += sizeof(DBGFFLOWTRACEPROBEVAL);
+
+ switch (pEntry->enmType)
+ {
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CONST_MEM:
+ cbProbe += pEntry->Type.ConstMem.cbMem;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM:
+ cbProbe += pEntry->Type.IndirectMem.cbMem;
+ break;
+ case DBGFFLOWTRACEPROBEENTRYTYPE_CALLBACK:
+ case DBGFFLOWTRACEPROBEENTRYTYPE_REG:
+ case DBGFFLOWTRACEPROBEENTRYTYPE_DEBUGGER:
+ break;
+ default:
+ AssertFailed();
+ }
+ }
+
+ pProbe->cbProbe = cbProbe;
+}
+
+
+/**
+ * Creates a new empty flow trace module.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param idCpu CPU ID the module is for, use VMCPUID_ANY for any CPU.
+ * @param   hFlowTraceProbeCommon    Optional handle of a common probe whose data is captured
+ *                                   regardless of which probe was hit.
+ * @param phFlowTraceMod Where to store the handle to the created module on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModCreate(PUVM pUVM, VMCPUID idCpu,
+ DBGFFLOWTRACEPROBE hFlowTraceProbeCommon,
+ PDBGFFLOWTRACEMOD phFlowTraceMod)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWTRACEMODINT pThis = (PDBGFFLOWTRACEMODINT)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_FLOWTRACE,
+ sizeof(DBGFFLOWTRACEMODINT));
+ if (RT_LIKELY(pThis))
+ {
+ pThis->cRefs = 1;
+ pThis->pUVM = pUVM;
+ pThis->idCpu = idCpu;
+ pThis->enmState = DBGFFLOWTRACEMODSTATE_CREATED;
+ pThis->u64SeqNoNext = 0;
+ pThis->cHitsLeft = 0;
+ pThis->cRecordsMax = 0;
+ pThis->cRecords = 0;
+ pThis->cProbes = 0;
+ RTListInit(&pThis->LstProbes);
+ RTListInit(&pThis->LstRecords);
+
+ rc = RTSemFastMutexCreate(&pThis->hMtx);
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGFR3BpOwnerCreate(pUVM, dbgfR3FlowTraceModProbeFiredWorker, NULL /*pfnBpIoHit*/, &pThis->hBpOwner);
+            if (RT_SUCCESS(rc))
+            {
+                PDBGFFLOWTRACEPROBEINT pProbe = hFlowTraceProbeCommon;
+                if (pProbe)
+                {
+                    DBGFR3FlowTraceProbeRetain(pProbe);
+                    pThis->pProbeCmn = pProbe;
+                }
+
+                *phFlowTraceMod = pThis; /* Only hand out the handle on full success. */
+            }
+            else
+                RTSemFastMutexDestroy(pThis->hMtx);
+        }
+
+ if (RT_FAILURE(rc))
+ MMR3HeapFree(pThis);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Create a new flow trace module from the given control flow graph adding the given probes
+ * at the entries, exits and branches.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param idCpu CPU ID the module is for, use VMCPUID_ANY for any CPU.
+ * @param hFlow Control flow graph handle to use.
+ * @param   hFlowTraceProbeCommon    Optional handle of a common probe whose data is captured
+ *                                   regardless of which probe was hit.
+ * @param hFlowTraceProbeEntry Probe handle to use for all entry blocks.
+ * @param hFlowTraceProbeRegular Probe handle to use for all branches.
+ * @param hFlowTraceProbeExit Probe handle to use for all exits.
+ * @param phFlowTraceMod Where to store the handle to the created module on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModCreateFromFlowGraph(PUVM pUVM, VMCPUID idCpu, DBGFFLOW hFlow,
+ DBGFFLOWTRACEPROBE hFlowTraceProbeCommon,
+ DBGFFLOWTRACEPROBE hFlowTraceProbeEntry,
+ DBGFFLOWTRACEPROBE hFlowTraceProbeRegular,
+ DBGFFLOWTRACEPROBE hFlowTraceProbeExit,
+ PDBGFFLOWTRACEMOD phFlowTraceMod)
+{
+ DBGFFLOWIT hFlowIt;
+ int rc = DBGFR3FlowItCreate(hFlow, DBGFFLOWITORDER_BY_ADDR_LOWEST_FIRST, &hFlowIt);
+ if (RT_SUCCESS(rc))
+ {
+ DBGFFLOWTRACEMOD hFlowTraceMod;
+ rc = DBGFR3FlowTraceModCreate(pUVM, idCpu, hFlowTraceProbeCommon, &hFlowTraceMod);
+ if (RT_SUCCESS(rc))
+ {
+ DBGFFLOWBB hFlowBb = DBGFR3FlowItNext(hFlowIt);
+ while (hFlowBb && RT_SUCCESS(rc))
+ {
+ uint32_t fFlags = DBGFR3FlowBbGetFlags(hFlowBb);
+
+ if (!(fFlags & (DBGF_FLOW_BB_F_EMPTY | DBGF_FLOW_BB_F_INCOMPLETE_ERR)))
+ {
+ DBGFADDRESS AddrInstr;
+
+ if (fFlags & DBGF_FLOW_BB_F_ENTRY)
+ {
+ rc = DBGFR3FlowBbQueryInstr(hFlowBb, 0, &AddrInstr, NULL, NULL);
+ AssertRC(rc);
+
+ rc = DBGFR3FlowTraceModAddProbe(hFlowTraceMod, &AddrInstr, hFlowTraceProbeEntry,
+ DBGF_FLOW_TRACE_PROBE_ADD_F_BEFORE_EXEC);
+ }
+ else
+ {
+ DBGFFLOWBBENDTYPE enmType = DBGFR3FlowBbGetType(hFlowBb);
+ uint32_t cInstr = enmType == DBGFFLOWBBENDTYPE_EXIT ? DBGFR3FlowBbGetInstrCount(hFlowBb) - 1 : 0;
+ rc = DBGFR3FlowBbQueryInstr(hFlowBb, cInstr, &AddrInstr, NULL, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ if (enmType == DBGFFLOWBBENDTYPE_EXIT)
+ rc = DBGFR3FlowTraceModAddProbe(hFlowTraceMod, &AddrInstr, hFlowTraceProbeExit,
+ DBGF_FLOW_TRACE_PROBE_ADD_F_AFTER_EXEC);
+ else
+ rc = DBGFR3FlowTraceModAddProbe(hFlowTraceMod, &AddrInstr, hFlowTraceProbeRegular,
+ DBGF_FLOW_TRACE_PROBE_ADD_F_BEFORE_EXEC);
+ }
+ }
+ }
+
+ hFlowBb = DBGFR3FlowItNext(hFlowIt);
+ }
+
+ if (RT_FAILURE(rc))
+ DBGFR3FlowTraceModRelease(hFlowTraceMod);
+ else
+ *phFlowTraceMod = hFlowTraceMod;
+ }
+
+ DBGFR3FlowItDestroy(hFlowIt);
+ }
+
+ return rc;
+}
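+
+/*
+ * Illustrative usage sketch (an assumption, not part of the original source):
+ * build a trace module from an existing control flow graph, arm it so it
+ * disables itself after 128 hits and keeps at most 64 records, then drop the
+ * reference when done.  The hFlow and probe handles are assumed to come from
+ * prior DBGFR3FlowCreate() and DBGFR3FlowTraceProbeCreate() calls; passing
+ * NULL skips the common probe.
+ *
+ * @code
+ *  DBGFFLOWTRACEMOD hTraceMod;
+ *  int rc = DBGFR3FlowTraceModCreateFromFlowGraph(pUVM, VMCPUID_ANY, hFlow,
+ *                                                 NULL, hProbeEntry, hProbeRegular,
+ *                                                 hProbeExit, &hTraceMod);
+ *  if (RT_SUCCESS(rc))
+ *  {
+ *      rc = DBGFR3FlowTraceModEnable(hTraceMod, 128, 64);
+ *      ...
+ *      DBGFR3FlowTraceModRelease(hTraceMod);
+ *  }
+ * @endcode
+ */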
+
+
+/**
+ * Retain a reference to the given flow trace module.
+ *
+ * @returns New reference count.
+ * @param hFlowTraceMod Flow trace module handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceModRetain(DBGFFLOWTRACEMOD hFlowTraceMod)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ AssertPtrReturn(pThis, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ return cRefs;
+}
+
+
+/**
+ * Release a reference of the given flow trace module.
+ *
+ * @returns New reference count; on 0 the module is destroyed and all records
+ *          it contains are deleted.
+ * @param hFlowTraceMod Flow trace module handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceModRelease(DBGFFLOWTRACEMOD hFlowTraceMod)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ if (!pThis)
+ return 0;
+ AssertPtrReturn(pThis, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ if (cRefs == 0)
+ dbgfR3FlowTraceModDestroy(pThis);
+ return cRefs;
+}
+
+
+/**
+ * Enables and arms all probes in the given flow trace module.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceMod Flow trace module handle.
+ * @param cHits Number of hits inside this module until the module is disabled
+ * automatically, 0 if not to disable automatically.
+ * @param cRecordsMax Maximum number of records to keep until the oldest is evicted.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModEnable(DBGFFLOWTRACEMOD hFlowTraceMod, uint32_t cHits, uint32_t cRecordsMax)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->cProbes > 0, VERR_INVALID_STATE);
+ AssertReturn(pThis->enmState == DBGFFLOWTRACEMODSTATE_CREATED, VERR_INVALID_STATE);
+
+ pThis->cHitsLeft = cHits;
+ pThis->cRecordsMax = cRecordsMax;
+
+ return VMMR3EmtRendezvous(pThis->pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
+ dbgfR3FlowTraceModEnableWorker, pThis);
+}
+
+
+/**
+ * Disables all probes in the given flow trace module.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceMod Flow trace module handle.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModDisable(DBGFFLOWTRACEMOD hFlowTraceMod)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->enmState == DBGFFLOWTRACEMODSTATE_ENABLED, VERR_INVALID_STATE);
+
+ return VMMR3EmtRendezvous(pThis->pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
+ dbgfR3FlowTraceModDisableWorker, pThis);
+}
+
+
+/**
+ * Returns a report containing all existing records in the given flow trace module.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceMod Flow trace module handle.
+ * @param phFlowTraceReport Where to store the flow trace report handle on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModQueryReport(DBGFFLOWTRACEMOD hFlowTraceMod,
+ PDBGFFLOWTRACEREPORT phFlowTraceReport)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertPtrReturn(phFlowTraceReport, VERR_INVALID_POINTER);
+
+ /** @todo Locking. */
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWTRACEREPORTINT pReport = dbgfR3FlowTraceReportCreate(pThis->pUVM, pThis->cRecords);
+ if (RT_LIKELY(pReport))
+ {
+ PDBGFFLOWTRACERECORDINT pIt;
+ uint32_t idx = 0;
+
+ RTSemFastMutexRequest(pThis->hMtx);
+ RTListForEach(&pThis->LstRecords, pIt, DBGFFLOWTRACERECORDINT, NdRecord)
+ {
+ DBGFR3FlowTraceRecordRetain(pIt);
+ pReport->apRec[idx++] = pIt;
+ }
+ RTSemFastMutexRelease(pThis->hMtx);
+
+ *phFlowTraceReport = pReport;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Clears all records contained in the flow trace module.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceMod Flow trace module handle.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModClear(DBGFFLOWTRACEMOD hFlowTraceMod)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+
+ RTSemFastMutexRequest(pThis->hMtx);
+ RTLISTANCHOR LstTmp;
+ RTListMove(&LstTmp, &pThis->LstRecords);
+ ASMAtomicWriteU32(&pThis->cRecords, 0);
+ RTSemFastMutexRelease(pThis->hMtx);
+
+ PDBGFFLOWTRACERECORDINT pIt, pItNext;
+ RTListForEachSafe(&LstTmp, pIt, pItNext, DBGFFLOWTRACERECORDINT, NdRecord)
+ {
+ RTListNodeRemove(&pIt->NdRecord);
+ DBGFR3FlowTraceRecordRelease(pIt);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Adds a new probe to the given flow trace module.
+ *
+ * @returns VBox status code
+ * @retval VERR_INVALID_STATE if the probe is active or was destroyed already.
+ * @retval VERR_ALREADY_EXISTS if there is already a probe at the specified location.
+ * @param hFlowTraceMod Flow trace module handle.
+ * @param pAddrProbe Guest address to insert the probe at.
+ * @param hFlowTraceProbe The handle of the probe to insert.
+ * @param fFlags Combination of DBGF_FLOW_TRACE_PROBE_ADD_F_*.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceModAddProbe(DBGFFLOWTRACEMOD hFlowTraceMod, PCDBGFADDRESS pAddrProbe,
+ DBGFFLOWTRACEPROBE hFlowTraceProbe, uint32_t fFlags)
+{
+ PDBGFFLOWTRACEMODINT pThis = hFlowTraceMod;
+ PDBGFFLOWTRACEPROBEINT pProbe = hFlowTraceProbe;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertPtrReturn(pProbe, VERR_INVALID_HANDLE);
+ AssertPtrReturn(pAddrProbe, VERR_INVALID_POINTER);
+ AssertReturn(!(fFlags & ~DBGF_FLOW_TRACE_PROBE_ADD_F_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(pThis->enmState == DBGFFLOWTRACEMODSTATE_CREATED, VERR_INVALID_STATE);
+
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWTRACEMODPROBELOC pProbeLoc = dbgfR3TraceModGetProbeLocAtAddr(pThis, pAddrProbe);
+ if (!pProbeLoc)
+ {
+ pProbeLoc = (PDBGFFLOWTRACEMODPROBELOC)MMR3HeapAllocZU(pThis->pUVM, MM_TAG_DBGF_FLOWTRACE,
+ sizeof(DBGFFLOWTRACEMODPROBELOC));
+ if (RT_LIKELY(pProbeLoc))
+ {
+ pProbeLoc->pTraceMod = pThis;
+ pProbeLoc->pProbe = pProbe;
+ pProbeLoc->AddrProbe = *pAddrProbe;
+ pProbeLoc->fFlags = fFlags;
+ ASMAtomicIncU32(&pProbe->cRefs);
+ ASMAtomicIncU32(&pProbe->cRefsMod);
+ RTSemFastMutexRequest(pThis->hMtx);
+ RTListAppend(&pThis->LstProbes, &pProbeLoc->NdProbes);
+ pThis->cProbes++;
+ RTSemFastMutexRelease(pThis->hMtx);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_ALREADY_EXISTS;
+
+ return rc;
+}
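+
+/*
+ * Illustrative sketch: placing a single probe by hand at a fixed flat guest
+ * address (the address value is made up for the example; hTraceMod and hProbe
+ * are assumed to exist already).
+ *
+ * @code
+ *  DBGFADDRESS AddrProbe;
+ *  DBGFR3AddrFromFlat(pUVM, &AddrProbe, 0xffffffff81000000);
+ *  int rc = DBGFR3FlowTraceModAddProbe(hTraceMod, &AddrProbe, hProbe,
+ *                                      DBGF_FLOW_TRACE_PROBE_ADD_F_BEFORE_EXEC);
+ * @endcode
+ */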
+
+
+/**
+ * Creates a new empty probe.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param pszDescr Description of the probe, optional.
+ * @param phFlowTraceProbe Where to store the probe handle on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceProbeCreate(PUVM pUVM, const char *pszDescr, PDBGFFLOWTRACEPROBE phFlowTraceProbe)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWTRACEPROBEINT pProbe = (PDBGFFLOWTRACEPROBEINT)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_FLOWTRACE,
+ sizeof(DBGFFLOWTRACEPROBEINT));
+ if (RT_LIKELY(pProbe))
+ {
+ pProbe->cRefs = 1;
+ pProbe->cRefsMod = 0;
+ pProbe->pUVM = pUVM;
+ pProbe->cbProbe = 0;
+ pProbe->cEntries = 0;
+ pProbe->cEntriesMax = 0;
+ pProbe->paEntries = NULL;
+ pProbe->pszDescr = NULL;
+ if (pszDescr)
+ {
+ pProbe->pszDescr = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_FLOWTRACE, pszDescr);
+ if (!pProbe->pszDescr)
+ {
+ MMR3HeapFree(pProbe);
+ rc = VERR_NO_MEMORY;
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ *phFlowTraceProbe = pProbe;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Retains a reference to the probe.
+ *
+ * @returns New reference count.
+ * @param hFlowTraceProbe Flow trace probe handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceProbeRetain(DBGFFLOWTRACEPROBE hFlowTraceProbe)
+{
+ PDBGFFLOWTRACEPROBEINT pProbe = hFlowTraceProbe;
+ AssertPtrReturn(pProbe, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pProbe->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pProbe));
+ return cRefs;
+}
+
+
+/**
+ * Release a probe reference.
+ *
+ * @returns New reference count, on 0 the probe is destroyed.
+ * @param hFlowTraceProbe Flow trace probe handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceProbeRelease(DBGFFLOWTRACEPROBE hFlowTraceProbe)
+{
+ PDBGFFLOWTRACEPROBEINT pProbe = hFlowTraceProbe;
+ if (!pProbe)
+ return 0;
+ AssertPtrReturn(pProbe, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pProbe->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pProbe));
+ if (cRefs == 0)
+ dbgfR3FlowTraceProbeDestroy(pProbe);
+ return cRefs;
+}
+
+
+/**
+ * Adds new data to log in the given probe.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_STATE if the probe is already part of a trace module and it is not
+ * possible to add new entries at this point.
+ * @param hFlowTraceProbe Flow trace probe handle.
+ * @param paEntries Pointer to the array of entry descriptors.
+ * @param cEntries Number of entries in the array.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceProbeEntriesAdd(DBGFFLOWTRACEPROBE hFlowTraceProbe,
+ PCDBGFFLOWTRACEPROBEENTRY paEntries, uint32_t cEntries)
+{
+ PDBGFFLOWTRACEPROBEINT pProbe = hFlowTraceProbe;
+ AssertPtrReturn(pProbe, VERR_INVALID_HANDLE);
+ AssertPtrReturn(paEntries, VERR_INVALID_POINTER);
+ AssertReturn(cEntries > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(!pProbe->cRefsMod, VERR_INVALID_STATE);
+
+ int rc = dbgfR3ProbeEnsureSize(pProbe, cEntries);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t idxEntry;
+
+ for (idxEntry = 0; idxEntry < cEntries && RT_SUCCESS(rc); idxEntry++)
+ {
+ PCDBGFFLOWTRACEPROBEENTRY pEntry = &paEntries[idxEntry];
+ PDBGFFLOWTRACEPROBEENTRY pProbeEntry = &pProbe->paEntries[pProbe->cEntries + idxEntry];
+
+ rc = dbgfR3ProbeEntryDup(pProbe->pUVM, pProbeEntry, pEntry);
+ }
+
+        if (RT_FAILURE(rc))
+            dbgfR3ProbeEntryCleanup(pProbe, pProbe->cEntries, idxEntry); /* All attempted entries; the failing dup cleans up after itself. */
+ else
+ {
+ pProbe->cEntries += cEntries;
+ dbgfR3ProbeRecalcSize(pProbe);
+ }
+ }
+
+ return rc;
+}
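+
+/*
+ * Illustrative sketch: a probe capturing the RAX register and 16 bytes of
+ * memory RSP points to on every hit.  The register names assume an x86-64
+ * guest; fields not set explicitly rely on the entries being zero initialized.
+ *
+ * @code
+ *  DBGFFLOWTRACEPROBE hProbe;
+ *  int rc = DBGFR3FlowTraceProbeCreate(pUVM, "rax+stack", &hProbe);
+ *  if (RT_SUCCESS(rc))
+ *  {
+ *      DBGFFLOWTRACEPROBEENTRY aEntries[2];
+ *      RT_ZERO(aEntries);
+ *      aEntries[0].enmType                          = DBGFFLOWTRACEPROBEENTRYTYPE_REG;
+ *      aEntries[0].Type.Reg.pszName                 = "rax";
+ *      aEntries[1].enmType                          = DBGFFLOWTRACEPROBEENTRYTYPE_INDIRECT_MEM;
+ *      aEntries[1].Type.IndirectMem.RegBase.pszName = "rsp";
+ *      aEntries[1].Type.IndirectMem.uScale          = 1;
+ *      aEntries[1].Type.IndirectMem.cbMem           = 16;
+ *      rc = DBGFR3FlowTraceProbeEntriesAdd(hProbe, &aEntries[0], RT_ELEMENTS(aEntries));
+ *  }
+ * @endcode
+ */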
+
+
+/**
+ * Retains a reference to the given flow trace report.
+ *
+ * @returns New reference count.
+ * @param hFlowTraceReport Flow trace report handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceReportRetain(DBGFFLOWTRACEREPORT hFlowTraceReport)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+ AssertPtrReturn(pReport, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pReport->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pReport));
+ return cRefs;
+}
+
+
+/**
+ * Releases a reference of the given flow trace report.
+ *
+ * @returns New reference count, on 0 the report is destroyed.
+ * @param hFlowTraceReport Flow trace report handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceReportRelease(DBGFFLOWTRACEREPORT hFlowTraceReport)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+ if (!pReport)
+ return 0;
+ AssertPtrReturn(pReport, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pReport->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pReport));
+ if (cRefs == 0)
+ dbgfR3FlowTraceReportDestroy(pReport);
+ return cRefs;
+}
+
+
+/**
+ * Returns the number of records in the given report.
+ *
+ * @returns Number of records.
+ * @param hFlowTraceReport Flow trace report handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceReportGetRecordCount(DBGFFLOWTRACEREPORT hFlowTraceReport)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+ AssertPtrReturn(pReport, 0);
+
+ return pReport->cRecords;
+}
+
+
+/**
+ * Queries the specified record contained in the given report.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceReport Flow trace report handle.
+ * @param idxRec The record index to query.
+ * @param phFlowTraceRec Where to store the retained handle of the record on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceReportQueryRecord(DBGFFLOWTRACEREPORT hFlowTraceReport, uint32_t idxRec, PDBGFFLOWTRACERECORD phFlowTraceRec)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+    AssertPtrReturn(pReport, VERR_INVALID_HANDLE);
+ AssertPtrReturn(phFlowTraceRec, VERR_INVALID_POINTER);
+ AssertReturn(idxRec < pReport->cRecords, VERR_INVALID_PARAMETER);
+
+ DBGFR3FlowTraceRecordRetain(pReport->apRec[idxRec]);
+ *phFlowTraceRec = pReport->apRec[idxRec];
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Filters the given flow trace report by the given criteria and returns the filtered report.
+ *
+ * @returns VBox status code.
+ * @param hFlowTraceReport Flow trace report handle.
+ * @param fFlags Combination of DBGF_FLOW_TRACE_REPORT_FILTER_F_*.
+ * @param paFilters Pointer to the array of filters.
+ * @param cFilters Number of entries in the filter array.
+ * @param enmOp How the filters are connected to each other.
+ * @param phFlowTraceReportFiltered Where to return the handle to the report containing the
+ * filtered records on success.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceReportQueryFiltered(DBGFFLOWTRACEREPORT hFlowTraceReport, uint32_t fFlags,
+ PDBGFFLOWTRACEREPORTFILTER paFilters, uint32_t cFilters,
+ DBGFFLOWTRACEREPORTFILTEROP enmOp,
+ PDBGFFLOWTRACEREPORT phFlowTraceReportFiltered)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+ AssertPtrReturn(pReport, VERR_INVALID_HANDLE);
+    AssertReturn(!(fFlags & ~DBGF_FLOW_TRACE_REPORT_FILTER_F_VALID), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(paFilters, VERR_INVALID_POINTER);
+ AssertReturn(cFilters > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(enmOp > DBGFFLOWTRACEREPORTFILTEROP_INVALID && enmOp <= DBGFFLOWTRACEREPORTFILTEROP_OR,
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phFlowTraceReportFiltered, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ PDBGFFLOWTRACEREPORTINT pReportFiltered = dbgfR3FlowTraceReportCreate(pReport->pUVM, pReport->cRecords);
+    if (RT_LIKELY(pReportFiltered))
+ {
+ uint32_t idxFiltered = 0;
+
+ for (uint32_t i = 0; i < pReport->cRecords; i++)
+ {
+ PDBGFFLOWTRACERECORDINT pCur = pReport->apRec[i];
+ bool fRecFilterMatch = dbgfR3FlowTraceDoesRecordMatchFilter(pCur, paFilters, cFilters, enmOp);
+
+ if ( ( fRecFilterMatch
+ && !(fFlags & DBGF_FLOW_TRACE_REPORT_FILTER_F_REVERSE))
+ || ( !fRecFilterMatch
+ && (fFlags & DBGF_FLOW_TRACE_REPORT_FILTER_F_REVERSE)))
+ {
+ DBGFR3FlowTraceRecordRetain(pCur);
+ pReportFiltered->apRec[idxFiltered++] = pCur;
+ }
+ }
+
+ pReportFiltered->cRecords = idxFiltered;
+ *phFlowTraceReportFiltered = pReportFiltered;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
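+
+/*
+ * Illustrative sketch: reducing a report to the records collected on vCPU 0,
+ * assuming hReport was obtained from DBGFR3FlowTraceModQueryReport().
+ *
+ * @code
+ *  DBGFFLOWTRACEREPORTFILTER Filter;
+ *  RT_ZERO(Filter);
+ *  Filter.enmType                = DBGFFLOWTRACEREPORTFILTERTYPE_VMCPU_ID;
+ *  Filter.Type.VCpuId.idCpuStart = 0;
+ *  Filter.Type.VCpuId.idCpuLast  = 0;
+ *
+ *  DBGFFLOWTRACEREPORT hReportVCpu0;
+ *  int rc = DBGFR3FlowTraceReportQueryFiltered(hReport, 0, &Filter, 1,
+ *                                              DBGFFLOWTRACEREPORTFILTEROP_AND,
+ *                                              &hReportVCpu0);
+ * @endcode
+ */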
+
+
+/**
+ * Enumerates all records in the given flow trace report calling the supplied
+ * enumeration callback.
+ *
+ * @returns VBox status code, or the first failure status returned by pfnEnum.
+ * @param hFlowTraceReport Flow trace report handle.
+ * @param pfnEnum The callback to call for every record.
+ * @param pvUser Opaque user data to pass to the callback.
+ */
+VMMR3DECL(int) DBGFR3FlowTraceReportEnumRecords(DBGFFLOWTRACEREPORT hFlowTraceReport,
+ PFNDBGFFLOWTRACEREPORTENUMCLBK pfnEnum,
+ void *pvUser)
+{
+ PDBGFFLOWTRACEREPORTINT pReport = hFlowTraceReport;
+ AssertPtrReturn(pReport, VERR_INVALID_HANDLE);
+
+ int rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < pReport->cRecords && RT_SUCCESS(rc); i++)
+ rc = pfnEnum(pReport, pReport->apRec[i], pvUser);
+
+ return rc;
+}
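+
+/*
+ * Illustrative sketch: a minimal enumeration callback printing sequence
+ * numbers.  The signature mirrors the pfnEnum invocation above and is assumed
+ * to match PFNDBGFFLOWTRACEREPORTENUMCLBK.
+ *
+ * @code
+ *  static DECLCALLBACK(int) myEnumRecord(DBGFFLOWTRACEREPORT hFlowTraceReport,
+ *                                        DBGFFLOWTRACERECORD hFlowTraceRecord,
+ *                                        void *pvUser)
+ *  {
+ *      RT_NOREF(hFlowTraceReport, pvUser);
+ *      RTPrintf("record %RU64\n", DBGFR3FlowTraceRecordGetSeqNo(hFlowTraceRecord));
+ *      return VINF_SUCCESS;
+ *  }
+ * @endcode
+ */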
+
+
+/**
+ * Retains a reference to the given flow trace record handle.
+ *
+ * @returns New reference count.
+ * @param hFlowTraceRecord The record handle to retain.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceRecordRetain(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pRecord->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pRecord));
+ return cRefs;
+}
+
+
+/**
+ * Releases a reference of the given flow trace record.
+ *
+ * @returns New reference count, on 0 the record is destroyed.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceRecordRelease(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ if (!pRecord)
+ return 0;
+ AssertPtrReturn(pRecord, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pRecord->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pRecord));
+ if (cRefs == 0)
+ dbgfR3FlowTraceRecordDestroy(pRecord);
+ return cRefs;
+}
+
+
+/**
+ * Gets the sequence number of the given record handle.
+ *
+ * @returns Sequence number.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(uint64_t) DBGFR3FlowTraceRecordGetSeqNo(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, 0);
+
+ return pRecord->u64SeqNo;
+}
+
+
+/**
+ * Returns the timestamp when the record was created.
+ *
+ * @returns Timestamp in nano seconds.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(uint64_t) DBGFR3FlowTraceRecordGetTimestamp(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, 0);
+
+ return pRecord->u64TsCollected;
+}
+
+
+/**
+ * Gets the guest address the record was created at.
+ *
+ * @returns Pointer to the address containing the guest location the record was created at.
+ * @param hFlowTraceRecord Flow trace record handle.
+ * @param pAddr Where to store the guest address.
+ */
+VMMR3DECL(PDBGFADDRESS) DBGFR3FlowTraceRecordGetAddr(DBGFFLOWTRACERECORD hFlowTraceRecord, PDBGFADDRESS pAddr)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, NULL);
+ AssertPtrReturn(pAddr, NULL);
+
+ *pAddr = pRecord->AddrProbe;
+ return pAddr;
+}
+
+
+/**
+ * Returns the handle to the probe for the given record.
+ *
+ * @returns Handle to the probe.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(DBGFFLOWTRACEPROBE) DBGFR3FlowTraceRecordGetProbe(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, NULL);
+
+ DBGFR3FlowTraceProbeRetain(pRecord->pProbe);
+ return pRecord->pProbe;
+}
+
+
+/**
+ * Returns the number of values contained in the record.
+ *
+ * @returns Number of values in the record.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceRecordGetValCount(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, 0);
+
+ return pRecord->pProbe->cEntries;
+}
+
+
+/**
+ * Returns the number of common probe values contained in the record.
+ *
+ * @returns Number of common values in the record, 0 if the trace module has no
+ *          common probe.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3FlowTraceRecordGetValCommonCount(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, 0);
+
+ return pRecord->pProbeCmn ? pRecord->pProbeCmn->cEntries : 0;
+}
+
+
+/**
+ * Returns the values for the given record.
+ *
+ * @returns Pointer to the array of values.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(PCDBGFFLOWTRACEPROBEVAL) DBGFR3FlowTraceRecordGetVals(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, NULL);
+
+ return &pRecord->aVal[0];
+}
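+
+/*
+ * Illustrative sketch: walking the values of a record and printing 64-bit
+ * register values (other value types omitted for brevity; hRecord is assumed
+ * to come from DBGFR3FlowTraceReportQueryRecord()).
+ *
+ * @code
+ *  uint32_t                cVals = DBGFR3FlowTraceRecordGetValCount(hRecord);
+ *  PCDBGFFLOWTRACEPROBEVAL paVal = DBGFR3FlowTraceRecordGetVals(hRecord);
+ *  for (uint32_t i = 0; i < cVals; i++)
+ *      if (   paVal[i].pProbeEntry->enmType == DBGFFLOWTRACEPROBEENTRYTYPE_REG
+ *          && paVal[i].Type.Reg.enmType == DBGFREGVALTYPE_U64)
+ *          RTPrintf("%s=%#RX64\n", paVal[i].Type.Reg.pszName, paVal[i].Type.Reg.Val.u64);
+ * @endcode
+ */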
+
+
+/**
+ * Returns data collected by the common probe for the trace module this record is in if one
+ * is active.
+ *
+ * @returns Pointer to the array of common probe values or NULL if no common probe was specified
+ * for the trace module.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(PCDBGFFLOWTRACEPROBEVAL) DBGFR3FlowTraceRecordGetValsCommon(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, NULL);
+
+ return pRecord->paValCmn;
+}
+
+
+/**
+ * Returns the vCPU ID the record was created on.
+ *
+ * @returns vCPU ID.
+ * @param hFlowTraceRecord Flow trace record handle.
+ */
+VMMR3DECL(VMCPUID) DBGFR3FlowTraceRecordGetCpuId(DBGFFLOWTRACERECORD hFlowTraceRecord)
+{
+ PDBGFFLOWTRACERECORDINT pRecord = hFlowTraceRecord;
+ AssertPtrReturn(pRecord, VMCPUID_ANY);
+
+ return pRecord->idCpu;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3ModInMem.cpp b/src/VBox/VMM/VMMR3/DBGFR3ModInMem.cpp
new file mode 100644
index 00000000..d8a62b33
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3ModInMem.cpp
@@ -0,0 +1,1111 @@
+/* $Id: DBGFR3ModInMem.cpp $ */
+/** @file
+ * DBGFR3ModInMem - In memory module 'loader' (PE, ELF, Mach-O).
+ */
+
+/*
+ * Copyright (C) 2009-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+
+#include <VBox/err.h>
+#include <iprt/ctype.h>
+#include <iprt/ldr.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+#include <iprt/sort.h>
+#include <iprt/formats/pecoff.h>
+#include <iprt/formats/mz.h>
+#include <iprt/formats/elf.h>
+#include <iprt/formats/mach-o.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Entry for mapping file offset to memory location. */
+typedef struct DBGFMODINMEMMAPPING
+{
+ /** The file offset. */
+ uint32_t offFile;
+ /** The file size of this mapping. */
+ uint32_t cbFile;
+ /** The size of this mapping. */
+ uint32_t cbMem;
+ /** The offset to the memory from the start of the image.
+ * @note This can be negative (for mach_kernel). */
+ int32_t offMem;
+} DBGFMODINMEMMAPPING;
+typedef DBGFMODINMEMMAPPING *PDBGFMODINMEMMAPPING;
+typedef DBGFMODINMEMMAPPING const *PCDBGFMODINMEMMAPPING;
+
+/**
+ * Common in-memory reader instance data.
+ */
+typedef struct DBGFMODINMEMRDR
+{
+ /** The VM handle (referenced). */
+ PUVM pUVM;
+ /** The image base. */
+ DBGFADDRESS ImageAddr;
+ /** The file size, based on the offFile and cbFile of the last mapping. */
+ uint32_t cbFile;
+ /** Number of entries in the aMappings table. */
+ uint32_t cMappings;
+ /** Mapping hint. */
+ uint32_t iHint;
+ /** Mapping file offset to memory offsets, ordered by file offset. */
+ DBGFMODINMEMMAPPING aMappings[RT_FLEXIBLE_ARRAY_NESTED];
+} DBGFMODINMEMRDR;
+/** Pointer to the common instance data for an in-memory file reader. */
+typedef DBGFMODINMEMRDR *PDBGFMODINMEMRDR;
+
+/**
+ * The WinNT digger's loader reader instance data.
+ */
+typedef struct DBGFMODPERDR
+{
+ /** The VM handle (referenced). */
+ PUVM pUVM;
+ /** The image base. */
+ DBGFADDRESS ImageAddr;
+ /** The image size. */
+ uint32_t cbImage;
+ /** The file offset of the SizeOfImage field in the optional header if it
+ * needs patching, otherwise set to UINT32_MAX. */
+ uint32_t offSizeOfImage;
+ /** The correct image size. */
+ uint32_t cbCorrectImageSize;
+ /** Number of entries in the aMappings table. */
+ uint32_t cMappings;
+ /** Mapping hint. */
+ uint32_t iHint;
+ /** Mapping file offset to memory offsets, ordered by file offset. */
+ struct
+ {
+ /** The file offset. */
+ uint32_t offFile;
+ /** The size of this mapping. */
+ uint32_t cbMem;
+ /** The offset to the memory from the start of the image. */
+ uint32_t offMem;
+ } aMappings[1];
+} DBGFMODPERDR;
+/** Pointer a WinNT loader reader instance data. */
+typedef DBGFMODPERDR *PDBGFMODPERDR;
+
+/**
+ * Stack buffer.
+ */
+typedef union DBGFMODINMEMBUF
+{
+ uint8_t ab[0x2000];
+ IMAGE_DOS_HEADER DosHdr;
+ IMAGE_NT_HEADERS32 Nt32;
+ IMAGE_NT_HEADERS64 Nt64;
+ mach_header_64 MachoHdr;
+ DBGFMODINMEMMAPPING aMappings[0x2000 / sizeof(DBGFMODINMEMMAPPING)];
+} DBGFMODINMEMBUF;
+/** Pointer to stack buffer. */
+typedef DBGFMODINMEMBUF *PDBGFMODINMEMBUF;
+
+
+
+/**
+ * Normalizes a debug module name.
+ *
+ * @returns Normalized debug module name.
+ * @param pszName The name.
+ * @param pszBuf Buffer to use if work is needed.
+ * @param cbBuf Size of buffer.
+ */
+const char *dbgfR3ModNormalizeName(const char *pszName, char *pszBuf, size_t cbBuf)
+{
+ /*
+ * Skip to the filename in case someone gave us a full filename path.
+ */
+ pszName = RTPathFilenameEx(pszName, RTPATH_STR_F_STYLE_DOS);
+
+ /*
+ * Is it okay?
+ */
+ size_t cchName = strlen(pszName);
+ size_t off = 0;
+ for (;; off++)
+ {
+ char ch = pszName[off];
+ if (ch == '\0')
+ return pszName;
+ if (!RT_C_IS_ALNUM(ch) && ch != '_')
+ break;
+ }
+
+ /*
+     * It's not okay, so morph it.
+ */
+ if (cchName >= cbBuf)
+ cchName = cbBuf - 1;
+ for (off = 0; off < cchName; off++)
+ {
+ char ch = pszName[off];
+ if (!RT_C_IS_ALNUM(ch))
+ ch = '_';
+ pszBuf[off] = ch;
+ }
+ pszBuf[off] = '\0';
+
+ return pszBuf;
+}
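+
+/*
+ * Illustrative example: given "C:\Windows\System32\ntdll.dll" the path is
+ * stripped to "ntdll.dll" and, since '.' is neither alphanumeric nor '_',
+ * the result placed in the caller's buffer is "ntdll_dll".
+ */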
+
+
+/**
+ * @callback_method_impl{PFNRTLDRRDRMEMREAD}
+ */
+static DECLCALLBACK(int) dbgfModInMemCommon_Read(void *pvBuf, size_t cb, size_t off, void *pvUser)
+{
+ PDBGFMODINMEMRDR pThis = (PDBGFMODINMEMRDR)pvUser;
+ uint32_t offFile = (uint32_t)off;
+ AssertReturn(offFile == off, VERR_INVALID_PARAMETER);
+
+ /*
+ * Set i to a mapping that starts at or before the specified offset.
+ * ASSUMING aMappings are sorted by offFile.
+ */
+ uint32_t i = pThis->iHint;
+ if (pThis->aMappings[i].offFile > offFile)
+ {
+ i = pThis->cMappings; /** @todo doesn't need to start from the end here... */
+ while (i-- > 0)
+ if (offFile >= pThis->aMappings[i].offFile)
+ break;
+ pThis->iHint = i;
+ }
+
+ while (cb > 0)
+ {
+ uint32_t offNextMap = i + 1 < pThis->cMappings ? pThis->aMappings[i + 1].offFile
+ : pThis->aMappings[i].offFile + RT_MAX(pThis->aMappings[i].cbFile, pThis->aMappings[i].cbMem);
+ uint32_t offMap = offFile - pThis->aMappings[i].offFile;
+
+ /* Read file bits backed by memory. */
+ if (offMap < pThis->aMappings[i].cbMem)
+ {
+ uint32_t cbToRead = pThis->aMappings[i].cbMem - offMap;
+ if (cbToRead > cb)
+ cbToRead = (uint32_t)cb;
+
+ DBGFADDRESS Addr = pThis->ImageAddr;
+ DBGFR3AddrAdd(&Addr, pThis->aMappings[i].offMem + offMap);
+
+ int rc = DBGFR3MemRead(pThis->pUVM, 0 /*idCpu*/, &Addr, pvBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Done? */
+ if (cbToRead == cb)
+ break;
+
+ offFile += cbToRead;
+ cb -= cbToRead;
+ pvBuf = (char *)pvBuf + cbToRead;
+ }
+
+ /* Mind the gap. */
+ if (offNextMap > offFile)
+ {
+ uint32_t cbZero = offNextMap - offFile;
+ if (cbZero > cb)
+ {
+ RT_BZERO(pvBuf, cb);
+ break;
+ }
+
+ RT_BZERO(pvBuf, cbZero);
+ offFile += cbZero;
+ cb -= cbZero;
+ pvBuf = (char *)pvBuf + cbZero;
+ }
+
+ pThis->iHint = ++i;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{PFNRTLDRRDRMEMDTOR}
+ */
+static DECLCALLBACK(void) dbgfModInMemCommon_Dtor(void *pvUser, size_t cbImage)
+{
+ PDBGFMODINMEMRDR pThis = (PDBGFMODINMEMRDR)pvUser;
+ RT_NOREF(cbImage);
+
+ VMR3ReleaseUVM(pThis->pUVM);
+ pThis->pUVM = NULL;
+
+ RTMemFree(pThis);
+}
+
+
+/**
+ * @callback_method_impl{FNRTSORTCMP}
+ */
+static DECLCALLBACK(int) dbgfModInMemCompMappings(void const *pvElement1, void const *pvElement2, void *pvUser)
+{
+ RT_NOREF(pvUser);
+ PCDBGFMODINMEMMAPPING pElement1 = (PCDBGFMODINMEMMAPPING)pvElement1;
+ PCDBGFMODINMEMMAPPING pElement2 = (PCDBGFMODINMEMMAPPING)pvElement2;
+ if (pElement1->offFile < pElement2->offFile)
+ return -1;
+ if (pElement1->offFile > pElement2->offFile)
+ return 1;
+ if (pElement1->cbFile < pElement2->cbFile)
+ return -1;
+ if (pElement1->cbFile > pElement2->cbFile)
+ return 1;
+ if (pElement1->offMem < pElement2->offMem)
+ return -1;
+ if (pElement1->offMem > pElement2->offMem)
+ return 1;
+ if (pElement1->cbMem < pElement2->cbMem)
+ return -1;
+ if (pElement1->cbMem > pElement2->cbMem)
+ return 1;
+ return 0;
+}
+
+
+static int dbgfModInMemCommon_Init(PDBGFMODINMEMRDR pThis, PUVM pUVM, PCDBGFADDRESS pImageAddr, PCDBGFMODINMEMMAPPING paMappings,
+ uint32_t cMappings, const char *pszName, RTLDRARCH enmArch,
+ PRTLDRMOD phLdrMod, PRTERRINFO pErrInfo)
+{
+ /*
+ * Initialize the reader instance.
+ */
+ VMR3RetainUVM(pUVM);
+ pThis->pUVM = pUVM;
+ pThis->ImageAddr = *pImageAddr;
+ pThis->cMappings = cMappings;
+ pThis->iHint = 0;
+ memcpy(pThis->aMappings, paMappings, cMappings * sizeof(pThis->aMappings[0]));
+ RTSortShell(pThis->aMappings, cMappings, sizeof(pThis->aMappings[0]), dbgfModInMemCompMappings, NULL);
+ pThis->cbFile = pThis->aMappings[cMappings - 1].offFile + pThis->aMappings[cMappings - 1].cbFile;
+
+ /*
+ * Call the loader to open it.
+     * Note! The destructor callback is always called, even on failure.
+ */
+
+ RTLDRMOD hLdrMod;
+ int rc = RTLdrOpenInMemory(pszName, RTLDR_O_FOR_DEBUG, enmArch, pThis->cbFile,
+ dbgfModInMemCommon_Read, dbgfModInMemCommon_Dtor, pThis,
+ &hLdrMod, pErrInfo);
+ if (RT_SUCCESS(rc))
+ *phLdrMod = hLdrMod;
+ else
+ *phLdrMod = NIL_RTLDRMOD;
+ return rc;
+}
+
+
+/**
+ * Handles in-memory ELF images.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pImageAddr The image address.
+ * @param fFlags Flags, DBGFMODINMEM_F_XXX.
+ * @param pszName The module name, optional.
+ * @param pszFilename The image filename, optional.
+ * @param enmArch The image arch if we force it, pass
+ * RTLDRARCH_WHATEVER if you don't care.
+ * @param cbImage Image size. Pass 0 if not known.
+ * @param puBuf The header buffer.
+ * @param phDbgMod Where to return the resulting debug module on success.
+ * @param pErrInfo Where to return extended error info on failure.
+ */
+static int dbgfR3ModInMemElf(PUVM pUVM, PCDBGFADDRESS pImageAddr, uint32_t fFlags, const char *pszName, const char *pszFilename,
+ RTLDRARCH enmArch, uint32_t cbImage, PDBGFMODINMEMBUF puBuf,
+ PRTDBGMOD phDbgMod, PRTERRINFO pErrInfo)
+{
+ RT_NOREF(pUVM, fFlags, pszName, pszFilename, enmArch, cbImage, puBuf, phDbgMod);
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "Found ELF magic at %RGv", pImageAddr->FlatPtr);
+}
+
+
+/**
+ * Handles in-memory Mach-O images.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pImageAddr The image address.
+ * @param fFlags Flags, DBGFMODINMEM_F_XXX.
+ * @param pszName The module name, optional.
+ * @param pszFilename The image filename, optional.
+ * @param enmArch The image arch if we force it, pass
+ * RTLDRARCH_WHATEVER if you don't care.
+ * @param cbImage Image size. Pass 0 if not known.
+ * @param puBuf The header buffer.
+ * @param phDbgMod Where to return the resulting debug module on success.
+ * @param pErrInfo Where to return extended error info on failure.
+ */
+static int dbgfR3ModInMemMachO(PUVM pUVM, PCDBGFADDRESS pImageAddr, uint32_t fFlags, const char *pszName, const char *pszFilename,
+ RTLDRARCH enmArch, uint32_t cbImage, PDBGFMODINMEMBUF puBuf,
+ PRTDBGMOD phDbgMod, PRTERRINFO pErrInfo)
+{
+ RT_NOREF(cbImage, fFlags);
+
+ /*
+ * Match up enmArch.
+ */
+ if (enmArch == RTLDRARCH_AMD64)
+ {
+ if (puBuf->MachoHdr.magic != IMAGE_MACHO64_SIGNATURE)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Wanted AMD64 but header is not 64-bit");
+ if (puBuf->MachoHdr.cputype != CPU_TYPE_X86_64)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Wanted AMD64 but cpu type is %#x instead of %#x",
+ puBuf->MachoHdr.cputype, CPU_TYPE_X86_64);
+ }
+ else if (enmArch == RTLDRARCH_X86_32)
+ {
+ if (puBuf->MachoHdr.magic != IMAGE_MACHO32_SIGNATURE)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Wanted X86_32 but header is not 32-bit");
+ if (puBuf->MachoHdr.cputype != CPU_TYPE_X86)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Wanted X86_32 but cpu type is %#x instead of %#x",
+ puBuf->MachoHdr.cputype, CPU_TYPE_X86);
+ }
+ else if (enmArch != RTLDRARCH_WHATEVER)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Unsupported enmArch value %s (%d)",
+ RTLdrArchName(enmArch), enmArch);
+
+ /*
+ * Guess the module name if not specified and make sure it conforms to DBGC expectations.
+ */
+ char szNormalized[128];
+ if (!pszName)
+ {
+ if (pszFilename)
+ pszName = RTPathFilenameEx(pszFilename, RTPATH_STR_F_STYLE_DOS /*whatever*/);
+ if (!pszName)
+ {
+ RTStrPrintf(szNormalized, sizeof(szNormalized), "image_%#llx", (uint64_t)pImageAddr->FlatPtr);
+ pszName = szNormalized;
+ }
+ }
+ if (pszName != szNormalized)
+ pszName = dbgfR3ModNormalizeName(pszName, szNormalized, sizeof(szNormalized));
+
+ /*
+     * Read the load commands into memory, they follow the header.  Refuse
+     * if there appear to be too many of them or they take up too much space.
+ */
+ uint32_t const cLoadCmds = puBuf->MachoHdr.ncmds;
+ uint32_t const cbLoadCmds = puBuf->MachoHdr.sizeofcmds;
+ if (cLoadCmds > _8K || cLoadCmds < 2)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRMACHO_BAD_HEADER,
+ "ncmds=%u is out of sensible range (2..8192)", cLoadCmds);
+    if (cbLoadCmds > _2M || cbLoadCmds < sizeof(load_command_t) * 2)
+        return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRMACHO_BAD_HEADER,
+                                   "cbLoadCmds=%#x is out of sensible range (16..2MiB)", cbLoadCmds);
+
+ uint8_t *pbLoadCmds = (uint8_t *)RTMemTmpAllocZ(cbLoadCmds);
+ AssertReturn(pbLoadCmds, VERR_NO_TMP_MEMORY);
+
+ uint32_t const cbHdr = puBuf->MachoHdr.magic == IMAGE_MACHO64_SIGNATURE ? sizeof(mach_header_64) : sizeof(mach_header_32);
+ DBGFADDRESS Addr = *pImageAddr;
+ int rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, DBGFR3AddrAdd(&Addr, cbHdr), pbLoadCmds, cbLoadCmds);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+         * Scan it for segments so we can translate file offsets to virtual
+ * memory locations.
+ */
+ RTUUID Uuid = RTUUID_INITIALIZE_NULL;
+ uint32_t cMappings = 0;
+ uint32_t offCmd = 0;
+ for (uint32_t iCmd = 0; iCmd < cLoadCmds; iCmd++)
+ {
+ load_command_t const *pCurCmd = (load_command_t const *)&pbLoadCmds[offCmd];
+ uint32_t const cbCurCmd = offCmd + sizeof(*pCurCmd) <= cbLoadCmds ? pCurCmd->cmdsize : sizeof(*pCurCmd);
+ if (offCmd + cbCurCmd > cbLoadCmds)
+ rc = RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRMACHO_BAD_LOAD_COMMAND,
+ "Load command #%u @ %#x is out of bounds: size %#x, left %#x", iCmd, offCmd, cbCurCmd,
+ cbLoadCmds - offCmd);
+ else if (pCurCmd->cmd == LC_SEGMENT_64)
+ {
+ segment_command_64 const *pSeg = (segment_command_64 const *)pCurCmd;
+ if (cbCurCmd >= sizeof(*pSeg))
+ {
+ if (cMappings >= RT_ELEMENTS(puBuf->aMappings))
+ rc = RTERRINFO_LOG_SET_F(pErrInfo, VERR_OUT_OF_RANGE, "Too many segments!");
+ else
+ {
+ puBuf->aMappings[cMappings].offFile = pSeg->fileoff;
+ puBuf->aMappings[cMappings].cbFile = pSeg->filesize;
+ puBuf->aMappings[cMappings].offMem = pSeg->vmaddr - pImageAddr->FlatPtr;
+ puBuf->aMappings[cMappings].cbMem = pSeg->vmsize;
+ cMappings++;
+ }
+ }
+ else
+ rc = RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRMACHO_BAD_LOAD_COMMAND,
+ "Load command #%u @ %#x is too small for a 64-bit segment: %#x", iCmd, offCmd, cbCurCmd);
+ }
+ else if (pCurCmd->cmd == LC_SEGMENT_32)
+ {
+ segment_command_32 const *pSeg = (segment_command_32 const *)pCurCmd;
+ if (cbCurCmd >= sizeof(*pSeg))
+ {
+ if (cMappings >= RT_ELEMENTS(puBuf->aMappings))
+ rc = RTERRINFO_LOG_SET_F(pErrInfo, VERR_OUT_OF_RANGE, "Too many segments!");
+ else
+ {
+ puBuf->aMappings[cMappings].offFile = pSeg->fileoff;
+ puBuf->aMappings[cMappings].cbFile = pSeg->filesize;
+ puBuf->aMappings[cMappings].offMem = pSeg->vmaddr - pImageAddr->FlatPtr;
+ puBuf->aMappings[cMappings].cbMem = pSeg->vmsize;
+ cMappings++;
+ }
+ }
+ else
+ rc = RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRMACHO_BAD_LOAD_COMMAND,
+ "Load command #%u @ %#x is too small for a 32-bit segment: %#x", iCmd, offCmd, cbCurCmd);
+ }
+ else if (pCurCmd->cmd == LC_UUID && cbCurCmd == sizeof(uuid_command_t))
+ memcpy(&Uuid, ((uuid_command_t const *)pCurCmd)->uuid, sizeof(Uuid));
+
+ if (RT_SUCCESS(rc))
+ offCmd += cbCurCmd;
+ else
+ break;
+ } /* for each command */
+
+ if (RT_SUCCESS(rc))
+ {
+ /*
+             * Create the generic loader module instance (pThis is tied to it,
+             * come rain or shine).
+ */
+ PDBGFMODINMEMRDR pThis = (PDBGFMODINMEMRDR)RTMemAllocZVar(RT_UOFFSETOF_DYN(DBGFMODINMEMRDR, aMappings[cMappings]));
+ if (pThis)
+ {
+ RTLDRMOD hLdrMod;
+ rc = dbgfModInMemCommon_Init(pThis, pUVM, pImageAddr, puBuf->aMappings, cMappings,
+ pszName, enmArch, &hLdrMod, pErrInfo);
+ if (RT_SUCCESS(rc)) /* Don't bother if we don't have a handle. */
+ {
+ RTDBGMOD hMod;
+ rc = RTDbgModCreateFromMachOImage(&hMod, pszFilename ? pszFilename : pszName, pszName, enmArch,
+ &hLdrMod, 0 /*cbImage*/, 0, NULL, &Uuid, DBGFR3AsGetConfig(pUVM), fFlags);
+ if (RT_SUCCESS(rc))
+ *phDbgMod = hMod;
+ }
+ else
+ hLdrMod = NIL_RTLDRMOD;
+
+#if 0 /** @todo later */
+ if (RT_FAILURE(rc) && !(fFlags & DBGFMODINMEM_F_NO_CONTAINER_FALLBACK))
+ {
+ /*
+ * Fallback is a container module.
+ */
+ rc = RTDbgModCreate(&hMod, pszName, cbImage, 0);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgModSymbolAdd(hMod, "Headers", 0 /*iSeg*/, 0, cbImage, 0 /*fFlags*/, NULL);
+ AssertRC(rc);
+ }
+ }
+#endif
+ if (hLdrMod != NIL_RTLDRMOD)
+ RTLdrClose(hLdrMod);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ }
+ else
+ RTERRINFO_LOG_SET_F(pErrInfo, rc, "Failed to read %#x bytes of load commands", cbLoadCmds);
+ RTMemTmpFree(pbLoadCmds);
+ return rc;
+}
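+
+
+/*
+ * Illustrative sketch, not part of the build: how the aMappings table filled
+ * in above can be used to translate a file offset back to a guest address.
+ * The helper name is an assumption; the field names (offFile, cbFile, offMem)
+ * are the ones assigned above.
+ */
+#if 0
+static uint64_t dbgfR3ModInMemExampleFileOffToFlat(PCDBGFADDRESS pImageAddr, DBGFMODINMEMBUF const *puBuf,
+                                                   uint32_t cMappings, uint64_t offFile)
+{
+    /* Find the segment whose file range contains offFile and rebase it. */
+    for (uint32_t i = 0; i < cMappings; i++)
+        if (   offFile >= puBuf->aMappings[i].offFile
+            && offFile <  puBuf->aMappings[i].offFile + puBuf->aMappings[i].cbFile)
+            return pImageAddr->FlatPtr + puBuf->aMappings[i].offMem + (offFile - puBuf->aMappings[i].offFile);
+    return UINT64_MAX; /* Not backed by any segment. */
+}
+#endif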
+
+
+/**
+ * @callback_method_impl{PFNRTLDRRDRMEMREAD}
+ */
+static DECLCALLBACK(int) dbgfModInMemPeRdr_Read(void *pvBuf, size_t cb, size_t off, void *pvUser)
+{
+ PDBGFMODPERDR pThis = (PDBGFMODPERDR)pvUser;
+ uint32_t offFile = (uint32_t)off;
+ AssertReturn(offFile == off, VERR_INVALID_PARAMETER);
+
+ uint32_t i = pThis->iHint;
+ if (pThis->aMappings[i].offFile > offFile)
+ {
+ i = pThis->cMappings;
+ while (i-- > 0)
+ if (offFile >= pThis->aMappings[i].offFile)
+ break;
+ pThis->iHint = i;
+ }
+
+ while (cb > 0)
+ {
+ uint32_t offNextMap = i + 1 < pThis->cMappings ? pThis->aMappings[i + 1].offFile : pThis->cbImage;
+ uint32_t offMap = offFile - pThis->aMappings[i].offFile;
+
+ /* Read file bits backed by memory. */
+ if (offMap < pThis->aMappings[i].cbMem)
+ {
+ uint32_t cbToRead = pThis->aMappings[i].cbMem - offMap;
+ if (cbToRead > cb)
+ cbToRead = (uint32_t)cb;
+
+ DBGFADDRESS Addr = pThis->ImageAddr;
+ DBGFR3AddrAdd(&Addr, pThis->aMappings[i].offMem + offMap);
+
+ int rc = DBGFR3MemRead(pThis->pUVM, 0 /*idCpu*/, &Addr, pvBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Apply SizeOfImage patch? */
+ if ( pThis->offSizeOfImage != UINT32_MAX
+ && offFile < pThis->offSizeOfImage + 4
+ && offFile + cbToRead > pThis->offSizeOfImage)
+ {
+ uint32_t SizeOfImage = pThis->cbCorrectImageSize;
+ uint32_t cbPatch = sizeof(SizeOfImage);
+ int32_t offPatch = pThis->offSizeOfImage - offFile;
+ uint8_t *pbPatch = (uint8_t *)pvBuf + offPatch;
+ if (offFile + cbToRead < pThis->offSizeOfImage + cbPatch)
+ cbPatch = offFile + cbToRead - pThis->offSizeOfImage;
+ while (cbPatch-- > 0)
+ {
+ if (offPatch >= 0)
+ *pbPatch = (uint8_t)SizeOfImage;
+ offPatch++;
+ pbPatch++;
+ SizeOfImage >>= 8;
+ }
+ }
+
+ /* Done? */
+ if (cbToRead == cb)
+ break;
+
+ offFile += cbToRead;
+ cb -= cbToRead;
+ pvBuf = (char *)pvBuf + cbToRead;
+ }
+
+ /* Mind the gap. */
+ if (offNextMap > offFile)
+ {
+ uint32_t cbZero = offNextMap - offFile;
+ if (cbZero > cb)
+ {
+ RT_BZERO(pvBuf, cb);
+ break;
+ }
+
+ RT_BZERO(pvBuf, cbZero);
+ offFile += cbZero;
+ cb -= cbZero;
+ pvBuf = (char *)pvBuf + cbZero;
+ }
+
+ pThis->iHint = ++i;
+ }
+
+ return VINF_SUCCESS;
+}
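+
+
+/*
+ * Illustrative sketch, not part of the build: the SizeOfImage patching above
+ * stores the 32-bit value byte by byte (little endian) so that it still works
+ * when a read only overlaps part of the field.  A minimal stand-alone version
+ * of that loop, with hypothetical parameter names:
+ */
+#if 0
+static void dbgfExamplePatchU32LE(uint8_t *pbBuf, size_t cbBuf, size_t offField, uint32_t uValue)
+{
+    for (unsigned iByte = 0; iByte < sizeof(uValue); iByte++)
+    {
+        size_t const offByte = offField + iByte;
+        if (offByte < cbBuf) /* Only touch bytes that fall inside this buffer. */
+            pbBuf[offByte] = (uint8_t)(uValue >> (iByte * 8));
+    }
+}
+#endif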
+
+
+/**
+ * @callback_method_impl{PFNRTLDRRDRMEMDTOR}
+ */
+static DECLCALLBACK(void) dbgfModInMemPeRdr_Dtor(void *pvUser, size_t cbImage)
+{
+ PDBGFMODPERDR pThis = (PDBGFMODPERDR)pvUser;
+ RT_NOREF(cbImage);
+
+ VMR3ReleaseUVM(pThis->pUVM);
+ pThis->pUVM = NULL;
+ RTMemFree(pvUser);
+}
+
+
+/**
+ * Checks if the section headers look okay.
+ *
+ * @returns VBox status code.
+ * @param paShdrs Pointer to the section headers.
+ * @param cShdrs Number of headers.
+ * @param cbImage The image size reported by NT.
+ * @param   cbImageFromHdr  The image size given by the linker in the header.
+ * @param uRvaRsrc The RVA of the resource directory. UINT32_MAX if
+ * no resource directory.
+ * @param cbSectAlign The section alignment specified in the header.
+ * @param fNt31 Set if NT 3.1. Needed for chopped off HAL.
+ * @param pcbImageCorrect The corrected image size. This is derived from
+ * cbImage and virtual range of the section tables.
+ *
+ * The problem is that NT may choose to drop the
+ * last pages in images it loads early, starting at
+ * the resource directory. These images will have
+ * a page aligned cbImage.
+ *
+ * @param pErrInfo Where to return more error details.
+ */
+static int dbgfR3ModPeCheckSectHdrsAndImgSize(PCIMAGE_SECTION_HEADER paShdrs, uint32_t cShdrs, uint32_t cbImage,
+ uint32_t cbImageFromHdr, uint32_t uRvaRsrc, uint32_t cbSectAlign,
+ bool fNt31, uint32_t *pcbImageCorrect, PRTERRINFO pErrInfo)
+{
+ *pcbImageCorrect = cbImage;
+
+ for (uint32_t i = 0; i < cShdrs; i++)
+ {
+ if (!paShdrs[i].Name[0])
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "Section header #%u has no name", i);
+
+ if (paShdrs[i].Characteristics & IMAGE_SCN_TYPE_NOLOAD)
+ continue;
+
+ /* Tweak to determine the virtual size if the linker didn't set it (NT 3.1). */
+ /** @todo this isn't really perfect. cbImage is kind of wrong... */
+ uint32_t cbVirtual = paShdrs[i].Misc.VirtualSize;
+ if (cbVirtual == 0)
+ {
+ for (uint32_t j = i + 1; j < cShdrs; j++)
+ if ( !(paShdrs[j].Characteristics & IMAGE_SCN_TYPE_NOLOAD)
+ && paShdrs[j].VirtualAddress > paShdrs[i].VirtualAddress)
+ {
+ cbVirtual = paShdrs[j].VirtualAddress - paShdrs[i].VirtualAddress;
+ break;
+ }
+ if (!cbVirtual)
+ {
+ if (paShdrs[i].VirtualAddress < cbImageFromHdr)
+ cbVirtual = cbImageFromHdr - paShdrs[i].VirtualAddress;
+ else if (paShdrs[i].SizeOfRawData > 0)
+ cbVirtual = RT_ALIGN(paShdrs[i].SizeOfRawData, _4K);
+ }
+ }
+
+ /* Check that sizes are within the same range and that both sizes and
+ addresses are within reasonable limits. */
+ if ( RT_ALIGN(cbVirtual, _64K) < RT_ALIGN(paShdrs[i].SizeOfRawData, _64K)
+ || cbVirtual >= _1G
+ || paShdrs[i].SizeOfRawData >= _1G)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
+ "Section header #%u (%.8s) has a VirtualSize=%#x (%#x) and SizeOfRawData=%#x, that's too much data!",
+ i, paShdrs[i].Name, cbVirtual, paShdrs[i].Misc.VirtualSize, paShdrs[i].SizeOfRawData);
+ uint32_t uRvaEnd = paShdrs[i].VirtualAddress + cbVirtual;
+ if (uRvaEnd >= _1G || uRvaEnd < paShdrs[i].VirtualAddress)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
+ "Section header #%u (%.8s) has a VirtualSize=%#x (%#x) and VirtualAddr=%#x, %#x in total, that's too much!",
+ i, paShdrs[i].Name, cbVirtual, paShdrs[i].Misc.VirtualSize, paShdrs[i].VirtualAddress, uRvaEnd);
+
+ /* Check for images chopped off around '.rsrc'. */
+ if ( cbImage < uRvaEnd
+ && uRvaEnd >= uRvaRsrc)
+ cbImage = RT_ALIGN(uRvaEnd, cbSectAlign);
+
+ /* Check that the section is within the image. */
+ if (uRvaEnd > cbImage && fNt31)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
+ "Section header #%u has a virtual address range beyond the image: %#x TO %#x cbImage=%#x",
+ i, paShdrs[i].VirtualAddress, uRvaEnd, cbImage);
+ }
+
+ Assert(*pcbImageCorrect == cbImage || !(*pcbImageCorrect & 0xfff));
+ *pcbImageCorrect = cbImage;
+ return VINF_SUCCESS;
+}
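+
+
+/*
+ * Worked example for the '.rsrc' correction above, with illustrative numbers:
+ * NT reports cbImage = 0x6000 for an early-loaded image whose '.rsrc' section
+ * sits at RVA 0x5000 (= uRvaRsrc) with a virtual size of 0x3000.  That section
+ * ends at uRvaEnd = 0x8000, which is beyond cbImage and at/after uRvaRsrc, so
+ * the image size is corrected to RT_ALIGN(0x8000, cbSectAlign) and the caller
+ * later patches SizeOfImage in the headers via the reader callback.
+ */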
+
+
+/**
+ * Create a loader module for the in-guest-memory PE module.
+ */
+static int dbgfR3ModInMemPeCreateLdrMod(PUVM pUVM, uint32_t fFlags, const char *pszName, PCDBGFADDRESS pImageAddr,
+ uint32_t cbImage, uint32_t cbImageFromHdr, bool f32Bit,
+ uint32_t cShdrs, PCIMAGE_SECTION_HEADER paShdrs, uint32_t cbSectAlign,
+ uint32_t cDataDir, PCIMAGE_DATA_DIRECTORY paDataDir, uint32_t offHdrs,
+ PRTLDRMOD phLdrMod, PRTERRINFO pErrInfo)
+{
+ /*
+ * Allocate and create a reader instance.
+ */
+ PDBGFMODPERDR pRdr = (PDBGFMODPERDR)RTMemAlloc(RT_UOFFSETOF_DYN(DBGFMODPERDR, aMappings[cShdrs + 2]));
+ if (!pRdr)
+ return VERR_NO_MEMORY;
+
+ VMR3RetainUVM(pUVM);
+ pRdr->pUVM = pUVM;
+ pRdr->ImageAddr = *pImageAddr;
+ pRdr->cbImage = cbImage;
+ pRdr->cbCorrectImageSize = cbImage;
+ pRdr->offSizeOfImage = UINT32_MAX;
+ pRdr->iHint = 0;
+
+ /*
+ * Use the section table to construct a more accurate view of the file/image.
+ */
+ uint32_t uRvaRsrc = UINT32_MAX;
+ if ( cDataDir > IMAGE_DIRECTORY_ENTRY_RESOURCE
+ && paDataDir[IMAGE_DIRECTORY_ENTRY_RESOURCE].Size > 0)
+ uRvaRsrc = paDataDir[IMAGE_DIRECTORY_ENTRY_RESOURCE].VirtualAddress;
+
+ int rc = dbgfR3ModPeCheckSectHdrsAndImgSize(paShdrs, cShdrs, cbImage, cbImageFromHdr, uRvaRsrc, cbSectAlign,
+ RT_BOOL(fFlags & DBGFMODINMEM_F_PE_NT31), &pRdr->cbCorrectImageSize, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ pRdr->cMappings = 0;
+
+ for (uint32_t i = 0; i < cShdrs; i++)
+ if ( paShdrs[i].SizeOfRawData > 0
+ && paShdrs[i].PointerToRawData > 0)
+ {
+ uint32_t j = 1;
+ if (!pRdr->cMappings)
+ pRdr->cMappings++;
+ else
+ {
+ while (j < pRdr->cMappings && pRdr->aMappings[j].offFile < paShdrs[i].PointerToRawData)
+ j++;
+ if (j < pRdr->cMappings)
+ memmove(&pRdr->aMappings[j + 1], &pRdr->aMappings[j], (pRdr->cMappings - j) * sizeof(pRdr->aMappings));
+ }
+ pRdr->aMappings[j].offFile = paShdrs[i].PointerToRawData;
+ pRdr->aMappings[j].offMem = paShdrs[i].VirtualAddress;
+ pRdr->aMappings[j].cbMem = i + 1 < cShdrs
+ ? paShdrs[i + 1].VirtualAddress - paShdrs[i].VirtualAddress
+ : paShdrs[i].Misc.VirtualSize;
+ if (j == pRdr->cMappings)
+ pRdr->cbImage = paShdrs[i].PointerToRawData + paShdrs[i].SizeOfRawData;
+ pRdr->cMappings++;
+ }
+
+        /* Insert the mapping for the header area that isn't covered by the section table. */
+ pRdr->aMappings[0].offFile = 0;
+ pRdr->aMappings[0].offMem = 0;
+ pRdr->aMappings[0].cbMem = pRdr->cMappings ? pRdr->aMappings[1].offFile : pRdr->cbImage;
+
+ int j = pRdr->cMappings - 1;
+ while (j-- > 0)
+ {
+ uint32_t cbFile = pRdr->aMappings[j + 1].offFile - pRdr->aMappings[j].offFile;
+ if (pRdr->aMappings[j].cbMem > cbFile)
+ pRdr->aMappings[j].cbMem = cbFile;
+ }
+ }
+ else if (fFlags & DBGFMODINMEM_F_NO_READER_FALLBACK)
+ return rc;
+ else
+ {
+ /*
+ * Fallback, fake identity mapped file data.
+ */
+ pRdr->cMappings = 1;
+ pRdr->aMappings[0].offFile = 0;
+ pRdr->aMappings[0].offMem = 0;
+ pRdr->aMappings[0].cbMem = pRdr->cbImage;
+ }
+
+ /* Enable the SizeOfImage patching if necessary. */
+ if (pRdr->cbCorrectImageSize != cbImage)
+ {
+ Log(("dbgfR3ModInMemPeCreateLdrMod: The image is really %#x bytes long, not %#x as mapped by NT!\n",
+ pRdr->cbCorrectImageSize, cbImage));
+ pRdr->offSizeOfImage = f32Bit
+ ? offHdrs + RT_OFFSETOF(IMAGE_NT_HEADERS32, OptionalHeader.SizeOfImage)
+ : offHdrs + RT_OFFSETOF(IMAGE_NT_HEADERS64, OptionalHeader.SizeOfImage);
+ }
+
+ /*
+ * Call the loader to open the PE image for debugging.
+ * Note! It always calls pfnDtor.
+ */
+ RTLDRMOD hLdrMod;
+ rc = RTLdrOpenInMemory(pszName, RTLDR_O_FOR_DEBUG, RTLDRARCH_WHATEVER, pRdr->cbImage,
+ dbgfModInMemPeRdr_Read, dbgfModInMemPeRdr_Dtor, pRdr,
+ &hLdrMod, pErrInfo);
+ if (RT_SUCCESS(rc))
+ *phLdrMod = hLdrMod;
+ else
+ *phLdrMod = NIL_RTLDRMOD;
+ return rc;
+}
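+
+
+/*
+ * Worked example of the mapping table constructed above, with illustrative
+ * section values:
+ *      [0] headers: offFile=0x0000  offMem=0x0000  cbMem=0x0400 (runs up to the first raw data)
+ *      [1] .text:   offFile=0x0400  offMem=0x1000  cbMem=0x0800 (virtual 0x1000, clipped to the 0x800 file bytes)
+ *      [2] .data:   offFile=0x0c00  offMem=0x2000  cbMem=0x0200
+ * Entry 0 always covers the headers, section entries are kept sorted by file
+ * offset, and cbMem is clipped so that no entry claims file bytes belonging
+ * to the next one.
+ */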
+
+
+/**
+ * Handles in-memory PE images.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pImageAddr The image address.
+ * @param fFlags Flags, DBGFMODINMEM_F_XXX.
+ * @param pszName The module name, optional.
+ * @param pszFilename The image filename, optional.
+ * @param enmArch The image arch if we force it, pass
+ * RTLDRARCH_WHATEVER if you don't care.
+ * @param cbImage Image size. Pass 0 if not known.
+ * @param offPeHdrs Offset of the PE header.
+ * @param   cbPeHdrsPart1   How much was read into @a puBuf at @a offPeHdrs.
+ * @param puBuf The header buffer.
+ * @param phDbgMod Where to return the resulting debug module on success.
+ * @param pErrInfo Where to return extended error info on failure.
+ */
+static int dbgfR3ModInMemPe(PUVM pUVM, PCDBGFADDRESS pImageAddr, uint32_t fFlags, const char *pszName, const char *pszFilename,
+ RTLDRARCH enmArch, uint32_t cbImage, uint32_t offPeHdrs, uint32_t cbPeHdrsPart1,
+ PDBGFMODINMEMBUF puBuf, PRTDBGMOD phDbgMod, PRTERRINFO pErrInfo)
+{
+ /*
+ * Read the optional header and the section table after validating the
+ * info we need from the file header.
+ */
+ /* Check the opt hdr size and number of sections as these are used to determine how much to read next. */
+ if ( puBuf->Nt32.FileHeader.SizeOfOptionalHeader < sizeof(IMAGE_OPTIONAL_HEADER32)
+ || puBuf->Nt32.FileHeader.SizeOfOptionalHeader > sizeof(IMAGE_OPTIONAL_HEADER64) + 128)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "Invalid SizeOfOptionalHeader value: %#RX32",
+ puBuf->Nt32.FileHeader.SizeOfOptionalHeader);
+
+ if ( puBuf->Nt32.FileHeader.NumberOfSections < 1
+ || puBuf->Nt32.FileHeader.NumberOfSections > 190 /* what fits in our 8K buffer */)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "NumberOfSections is out of range: %#RX32 (1..190)",
+ puBuf->Nt32.FileHeader.NumberOfSections);
+
+ /* Read the optional header and section table. */
+ uint32_t const cbHdrs = RT_UOFFSETOF(IMAGE_NT_HEADERS32, OptionalHeader)
+ + puBuf->Nt32.FileHeader.SizeOfOptionalHeader
+ + puBuf->Nt32.FileHeader.NumberOfSections * sizeof(IMAGE_SECTION_HEADER);
+ AssertReturn(cbHdrs <= sizeof(*puBuf), RTERRINFO_LOG_SET_F(pErrInfo, VERR_INTERNAL_ERROR_2, "cbHdrs=%#x", cbHdrs));
+
+ DBGFADDRESS PeHdrPart2Addr = *pImageAddr;
+ DBGFR3AddrAdd(&PeHdrPart2Addr, offPeHdrs + cbPeHdrsPart1);
+ int rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, &PeHdrPart2Addr, &puBuf->ab[cbPeHdrsPart1], cbHdrs - cbPeHdrsPart1);
+ if (RT_FAILURE(rc))
+ return RTERRINFO_LOG_SET_F(pErrInfo, rc,
+ "Failed to read the second part of the PE headers at %RGv (off=%#RX32 + %#RX32): %Rrc",
+ PeHdrPart2Addr.FlatPtr, offPeHdrs, cbPeHdrsPart1, rc);
+
+ /*
+ * Check the image architecture and determine the bitness.
+ */
+ RTLDRARCH enmArchActual;
+ bool f32Bit;
+ switch (puBuf->Nt32.FileHeader.Machine)
+ {
+ case IMAGE_FILE_MACHINE_I386:
+ enmArchActual = RTLDRARCH_X86_32;
+ f32Bit = true;
+ break;
+ case IMAGE_FILE_MACHINE_AMD64:
+ enmArchActual = RTLDRARCH_AMD64;
+ f32Bit = false;
+ break;
+ case IMAGE_FILE_MACHINE_ARM:
+ case IMAGE_FILE_MACHINE_THUMB:
+ case IMAGE_FILE_MACHINE_ARMNT:
+ enmArchActual = RTLDRARCH_ARM32;
+ f32Bit = true;
+ break;
+ case IMAGE_FILE_MACHINE_ARM64:
+ enmArchActual = RTLDRARCH_ARM64;
+ f32Bit = false;
+ break;
+ default:
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Unknown machine: %#x", puBuf->Nt32.FileHeader.Machine);
+ }
+ if ( enmArch != RTLDRARCH_WHATEVER
+ && enmArch != enmArchActual)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDR_ARCH_MISMATCH, "Found %s expected %s",
+ RTLdrArchName(enmArchActual), RTLdrArchName(enmArch));
+
+ /*
+ * Check optional header magic and size.
+ */
+ uint16_t const uOptMagic = f32Bit ? IMAGE_NT_OPTIONAL_HDR32_MAGIC : IMAGE_NT_OPTIONAL_HDR64_MAGIC;
+ if (puBuf->Nt32.OptionalHeader.Magic != uOptMagic)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "Unexpected optional header magic: %#x (expected %#x)",
+ puBuf->Nt32.OptionalHeader.Magic, uOptMagic);
+
+ uint32_t const cDataDir = f32Bit ? puBuf->Nt32.OptionalHeader.NumberOfRvaAndSizes : puBuf->Nt64.OptionalHeader.NumberOfRvaAndSizes;
+ if ( cDataDir <= IMAGE_DIRECTORY_ENTRY_BASERELOC /* a bit random */
+ || cDataDir > 32 /* also random */)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "Unexpected data directory size: %#x", cDataDir);
+
+ uint32_t cbOptHdr = f32Bit ? sizeof(IMAGE_OPTIONAL_HEADER32) : sizeof(IMAGE_OPTIONAL_HEADER64);
+ cbOptHdr -= sizeof(IMAGE_DATA_DIRECTORY) * IMAGE_NUMBEROF_DIRECTORY_ENTRIES;
+ cbOptHdr += sizeof(IMAGE_DATA_DIRECTORY) * cDataDir;
+ if (puBuf->Nt32.FileHeader.SizeOfOptionalHeader != cbOptHdr)
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "Unexpected optional header size: %#x (expected %#x)",
+ puBuf->Nt32.FileHeader.SizeOfOptionalHeader, cbOptHdr);
+
+ uint32_t const cbSectAlign = f32Bit ? puBuf->Nt32.OptionalHeader.SectionAlignment : puBuf->Nt64.OptionalHeader.SectionAlignment;
+ PCIMAGE_SECTION_HEADER pSHdrs = (PCIMAGE_SECTION_HEADER)((uintptr_t)&puBuf->Nt32.OptionalHeader + cbOptHdr);
+ PCIMAGE_DATA_DIRECTORY paDataDir = (PCIMAGE_DATA_DIRECTORY)((uintptr_t)pSHdrs - cDataDir * sizeof(IMAGE_DATA_DIRECTORY));
+
+ /*
+ * Establish the image size.
+ */
+ uint32_t cbImageFromHdr = f32Bit ? puBuf->Nt32.OptionalHeader.SizeOfImage : puBuf->Nt64.OptionalHeader.SizeOfImage;
+ if ( !cbImage
+ || (fFlags & DBGFMODINMEM_F_PE_NT31))
+ cbImage = RT_ALIGN(cbImageFromHdr, _4K);
+ else if (RT_ALIGN(cbImageFromHdr, _4K) != RT_ALIGN(cbImage, _4K))
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_MISMATCH, "Image size mismatch: input=%#x header=%#x", cbImage, cbImageFromHdr);
+
+ /*
+ * Guess the module name if not specified and make sure it conforms to DBGC expectations.
+ */
+ if (!pszName)
+ {
+ if (pszFilename)
+ pszName = RTPathFilenameEx(pszFilename, RTPATH_STR_F_STYLE_DOS);
+ /** @todo */
+ }
+
+ char szNormalized[128];
+ pszName = dbgfR3ModNormalizeName(pszName, szNormalized, sizeof(szNormalized));
+
+ /*
+     * Create the module using the in-memory image first, falling back on a cached image.
+ */
+ RTLDRMOD hLdrMod;
+ rc = dbgfR3ModInMemPeCreateLdrMod(pUVM, fFlags, pszName, pImageAddr, cbImage, cbImageFromHdr, f32Bit,
+ puBuf->Nt32.FileHeader.NumberOfSections, pSHdrs, cbSectAlign, cDataDir, paDataDir,
+ offPeHdrs, &hLdrMod, pErrInfo);
+ if (RT_FAILURE(rc))
+ hLdrMod = NIL_RTLDRMOD;
+
+ RTDBGMOD hMod;
+ rc = RTDbgModCreateFromPeImage(&hMod, pszFilename, pszName, &hLdrMod, cbImageFromHdr,
+ puBuf->Nt32.FileHeader.TimeDateStamp, DBGFR3AsGetConfig(pUVM));
+ if (RT_SUCCESS(rc))
+ *phDbgMod = hMod;
+ else if (!(fFlags & DBGFMODINMEM_F_NO_CONTAINER_FALLBACK))
+ {
+ /*
+ * Fallback is a container module.
+ */
+ rc = RTDbgModCreate(&hMod, pszName, cbImage, 0);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgModSymbolAdd(hMod, "Headers", 0 /*iSeg*/, 0, cbImage, 0 /*fFlags*/, NULL);
+ AssertRC(rc);
+ }
+ }
+ return rc;
+}
+
+
+
+/**
+ * Process an image (PE, ELF or Mach-O) found in guest memory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pImageAddr The image address.
+ * @param fFlags Flags, DBGFMODINMEM_F_XXX.
+ * @param pszName The module name, optional.
+ * @param pszFilename The image filename, optional.
+ * @param enmArch The image arch if we force it, pass
+ * RTLDRARCH_WHATEVER if you don't care.
+ * @param cbImage Image size. Pass 0 if not known.
+ * @param phDbgMod Where to return the resulting debug module on success.
+ * @param pErrInfo Where to return extended error info on failure.
+ */
+VMMR3DECL(int) DBGFR3ModInMem(PUVM pUVM, PCDBGFADDRESS pImageAddr, uint32_t fFlags, const char *pszName, const char *pszFilename,
+ RTLDRARCH enmArch, uint32_t cbImage, PRTDBGMOD phDbgMod, PRTERRINFO pErrInfo)
+{
+ /*
+ * Validate and adjust.
+ */
+ AssertPtrReturn(phDbgMod, VERR_INVALID_POINTER);
+ *phDbgMod = NIL_RTDBGMOD;
+ AssertPtrReturn(pImageAddr, VERR_INVALID_POINTER);
+ AssertMsgReturn(cbImage == 0 || cbImage >= sizeof(IMAGE_NT_HEADERS32) + sizeof(IMAGE_DOS_HEADER),
+ ("cbImage=%#x\n", cbImage), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(fFlags & ~DBGFMODINMEM_F_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
+ if (enmArch == RTLDRARCH_HOST)
+ enmArch = RTLdrGetHostArch();
+
+ /*
+ * Look for an image header we can work with.
+ */
+ DBGFMODINMEMBUF uBuf;
+ RT_ZERO(uBuf);
+
+ int rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, pImageAddr, &uBuf, sizeof(uBuf.DosHdr));
+ if (RT_FAILURE(rc))
+ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "Failed to read DOS header at %RGv: %Rrc", pImageAddr->FlatPtr, rc);
+
+ if (uBuf.ab[0] == ELFMAG0 && uBuf.ab[1] == ELFMAG1 && uBuf.ab[2] == ELFMAG2 && uBuf.ab[3] == ELFMAG3)
+ return dbgfR3ModInMemElf(pUVM, pImageAddr, fFlags, pszName, pszFilename, enmArch, cbImage, &uBuf, phDbgMod, pErrInfo);
+
+ if ( uBuf.MachoHdr.magic == IMAGE_MACHO64_SIGNATURE
+ || uBuf.MachoHdr.magic == IMAGE_MACHO32_SIGNATURE)
+ return dbgfR3ModInMemMachO(pUVM, pImageAddr, fFlags, pszName, pszFilename, enmArch, cbImage, &uBuf, phDbgMod, pErrInfo);
+
+ uint32_t offNewHdrs;
+ if (uBuf.DosHdr.e_magic == IMAGE_DOS_SIGNATURE)
+ {
+ offNewHdrs = uBuf.DosHdr.e_lfanew;
+        if (   offNewHdrs < 16
+            || offNewHdrs > (cbImage ? cbImage - sizeof(IMAGE_NT_HEADERS32) : _2M))
+            return RTERRINFO_LOG_SET_F(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "e_lfanew value is out of range: %RX32 (16..%u)",
+                                       offNewHdrs, (uint32_t)(cbImage ? cbImage - sizeof(IMAGE_NT_HEADERS32) : _2M));
+ }
+ else if (uBuf.Nt32.Signature == IMAGE_NT_SIGNATURE)
+ offNewHdrs = 0;
+ else
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "Unknown image magic at %RGv: %.8Rhxs",
+ pImageAddr->FlatPtr, uBuf.ab);
+
+ /*
+     * Read the next bit of the header, assuming PE, so stop at the end of
+ * the COFF file header.
+ */
+ DBGFADDRESS PeHdrAddr = *pImageAddr;
+ DBGFR3AddrAdd(&PeHdrAddr, offNewHdrs);
+ uint32_t const cbPeHdrsPart1 = RT_UOFFSETOF(IMAGE_NT_HEADERS32, OptionalHeader);
+ rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, &PeHdrAddr, &uBuf, cbPeHdrsPart1);
+ if (RT_FAILURE(rc))
+ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "Failed to read PE/LX/NE headers at %RGv (off=%#RX32): %Rrc",
+ PeHdrAddr.FlatPtr, offNewHdrs, rc);
+
+ if (uBuf.Nt32.Signature == IMAGE_NT_SIGNATURE)
+ return dbgfR3ModInMemPe(pUVM, pImageAddr, fFlags, pszName, pszFilename, enmArch, cbImage, offNewHdrs, cbPeHdrsPart1,
+ &uBuf, phDbgMod, pErrInfo);
+
+ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "No PE/LX/NE header at %RGv (off=%#RX32): %.8Rhxs",
+ PeHdrAddr.FlatPtr, offNewHdrs, uBuf.ab);
+}
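+
+
+/*
+ * Illustrative usage sketch, not part of the build: creating a debug module
+ * for an image discovered at a guest address.  The address and the module
+ * name are made up.
+ */
+#if 0
+static int dbgfR3ModInMemExampleUsage(PUVM pUVM)
+{
+    DBGFADDRESS ImageAddr;
+    DBGFR3AddrFromFlat(pUVM, &ImageAddr, UINT64_C(0xfffff80000400000) /* hypothetical */);
+
+    RTERRINFOSTATIC ErrInfo;
+    RTDBGMOD        hDbgMod = NIL_RTDBGMOD;
+    int rc = DBGFR3ModInMem(pUVM, &ImageAddr, 0 /*fFlags*/, "mykernel" /* hypothetical */, NULL /*pszFilename*/,
+                            RTLDRARCH_WHATEVER, 0 /*cbImage*/, &hDbgMod, RTErrInfoInitStatic(&ErrInfo));
+    if (RT_SUCCESS(rc))
+    {
+        /* Typically the module would be linked into an address space here. */
+        RTDbgModRelease(hDbgMod);
+    }
+    return rc;
+}
+#endif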
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3PlugIn.cpp b/src/VBox/VMM/VMMR3/DBGFR3PlugIn.cpp
new file mode 100644
index 00000000..05408bed
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3PlugIn.cpp
@@ -0,0 +1,627 @@
+/* $Id: DBGFR3PlugIn.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Plug-In Support.
+ */
+
+/*
+ * Copyright (C) 2008-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vmm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/version.h>
+
+#include <iprt/alloca.h>
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/env.h>
+#include <iprt/dir.h>
+#include <iprt/ldr.h>
+#include <iprt/param.h>
+#include <iprt/path.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+#define DBGF_PLUG_IN_READ_LOCK(pUVM) \
+ do { int rcLock = RTCritSectRwEnterShared(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+#define DBGF_PLUG_IN_READ_UNLOCK(pUVM) \
+ do { int rcLock = RTCritSectRwLeaveShared(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+
+#define DBGF_PLUG_IN_WRITE_LOCK(pUVM) \
+ do { int rcLock = RTCritSectRwEnterExcl(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+#define DBGF_PLUG_IN_WRITE_UNLOCK(pUVM) \
+ do { int rcLock = RTCritSectRwLeaveExcl(&pUVM->dbgf.s.CritSect); AssertRC(rcLock); } while (0)
+
+/** Max allowed length of a plug-in name (excludes the path and suffix). */
+#define DBGFPLUGIN_MAX_NAME 64
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Plug-in tracking record.
+ */
+typedef struct DBGFPLUGIN
+{
+ /** Pointer to the next plug-in. */
+ struct DBGFPLUGIN *pNext;
+ /** The loader handle. */
+ RTLDRMOD hLdrMod;
+ /** The plug-in entry point. */
+ PFNDBGFPLUGIN pfnEntry;
+ /** The name length. */
+ uint8_t cchName;
+ /** The plug-in name (variable length). */
+ char szName[1];
+} DBGFPLUGIN;
+/** Pointer to plug-in tracking record. */
+typedef DBGFPLUGIN *PDBGFPLUGIN;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(void) dbgfPlugInUnloadAll(PUVM pUVM);
+static FNDBGFHANDLERINT dbgfR3PlugInInfoList;
+
+
+/**
+ * Internal init routine called by DBGFR3Init().
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3PlugInInit(PUVM pUVM)
+{
+ return DBGFR3InfoRegisterInternal(pUVM->pVM, "plugins", "Lists the debugger plug-ins.", dbgfR3PlugInInfoList);
+}
+
+
+/**
+ * Internal cleanup routine called by DBGFR3Term().
+ *
+ * @param pUVM The user mode VM handle.
+ */
+void dbgfR3PlugInTerm(PUVM pUVM)
+{
+ dbgfPlugInUnloadAll(pUVM);
+}
+
+
+/**
+ * Extracts the plug-in name from a plug-in specifier that may or may not
+ * include path and/or suffix.
+ *
+ * @returns VBox status code.
+ *
+ * @param pszDst Where to return the name. At least DBGFPLUGIN_MAX_NAME
+ * worth of buffer space.
+ * @param pszPlugIn The plug-in module specifier to parse.
+ * @param pErrInfo Optional error information structure.
+ */
+static int dbgfPlugInExtractName(char *pszDst, const char *pszPlugIn, PRTERRINFO pErrInfo)
+{
+ /*
+ * Parse out the name stopping at the extension.
+ */
+ const char *pszName = RTPathFilename(pszPlugIn);
+ if (!pszName || !*pszName)
+ return VERR_INVALID_NAME;
+ if (!RTStrNICmp(pszName, RT_STR_TUPLE(DBGF_PLUG_IN_PREFIX)))
+ {
+ pszName += sizeof(DBGF_PLUG_IN_PREFIX) - 1;
+ if (!*pszName)
+ return RTErrInfoSetF(pErrInfo, VERR_INVALID_NAME, "Invalid plug-in name: nothing after the prefix");
+ }
+
+ int ch;
+ size_t cchName = 0;
+ while ( (ch = pszName[cchName]) != '\0'
+ && ch != '.')
+ {
+ if ( RT_C_IS_ALPHA(ch)
+ || (RT_C_IS_DIGIT(ch) && cchName != 0))
+ cchName++;
+ else
+ {
+ if (!RT_C_IS_DIGIT(ch))
+ return RTErrInfoSetF(pErrInfo, VERR_INVALID_NAME, "Invalid plug-in name: '%c' is not alphanumeric", ch);
+ return RTErrInfoSetF(pErrInfo, VERR_INVALID_NAME,
+ "Invalid plug-in name: Cannot start with a digit (after the prefix)");
+ }
+ }
+
+ if (cchName >= DBGFPLUGIN_MAX_NAME)
+        return RTErrInfoSetF(pErrInfo, VERR_INVALID_NAME, "Invalid plug-in name: too long (max %u)", DBGFPLUGIN_MAX_NAME - 1);
+
+ /*
+ * We're very picky about the extension when present.
+ */
+ if ( ch == '.'
+ && RTStrICmp(&pszName[cchName], RTLdrGetSuff()))
+ return RTErrInfoSetF(pErrInfo, VERR_INVALID_NAME,
+ "Invalid plug-in name: Suffix isn't the default dll/so/dylib one (%s): '%s'",
+ RTLdrGetSuff(), &pszName[cchName]);
+
+ /*
+ * Copy it.
+ */
+ memcpy(pszDst, pszName, cchName);
+ pszDst[cchName] = '\0';
+ return VINF_SUCCESS;
+}
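+
+
+/*
+ * Behaviour sketch for the parser above, assuming for illustration that
+ * DBGF_PLUG_IN_PREFIX is "DbgPlugIn" and RTLdrGetSuff() returns ".so":
+ *      "DbgPlugInSample.so"    -> "Sample"
+ *      "/some/dir/Sample.so"   -> "Sample"
+ *      "Sample"                -> "Sample" (no suffix required)
+ *      "9Lives.so"             -> VERR_INVALID_NAME (leading digit)
+ *      "Sample.xyz"            -> VERR_INVALID_NAME (not the default suffix)
+ */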
+
+
+/**
+ * Locate a loaded plug-in.
+ *
+ * @returns Pointer to the plug-in tracking structure.
+ * @param pUVM Pointer to the user-mode VM structure.
+ * @param pszName The name of the plug-in we're looking for.
+ * @param ppPrev Where to optionally return the pointer to the
+ * previous list member.
+ */
+static PDBGFPLUGIN dbgfR3PlugInLocate(PUVM pUVM, const char *pszName, PDBGFPLUGIN *ppPrev)
+{
+ PDBGFPLUGIN pPrev = NULL;
+ PDBGFPLUGIN pCur = pUVM->dbgf.s.pPlugInHead;
+ while (pCur)
+ {
+ if (!RTStrICmp(pCur->szName, pszName))
+ {
+ if (ppPrev)
+ *ppPrev = pPrev;
+ return pCur;
+ }
+
+ /* advance */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ return NULL;
+}
+
+
+/**
+ * Try load the specified plug-in module.
+ *
+ * @returns VINF_SUCCESS on success, path error or loader error on failure.
+ *
+ * @param   pPlugIn     The plug-in tracking record.
+ * @param pszModule Module name.
+ * @param pErrInfo Optional error information structure.
+ */
+static int dbgfR3PlugInTryLoad(PDBGFPLUGIN pPlugIn, const char *pszModule, PRTERRINFO pErrInfo)
+{
+ /*
+ * Load it and try resolve the entry point.
+ */
+ int rc = SUPR3HardenedVerifyPlugIn(pszModule, pErrInfo);
+ if (RT_SUCCESS(rc))
+ rc = RTLdrLoadEx(pszModule, &pPlugIn->hLdrMod, RTLDRLOAD_FLAGS_LOCAL, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTLdrGetSymbol(pPlugIn->hLdrMod, DBGF_PLUG_IN_ENTRYPOINT, (void **)&pPlugIn->pfnEntry);
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("DBGF: Loaded Plug-In '%s' (%s)\n", pPlugIn->szName, pszModule));
+ return VINF_SUCCESS;
+ }
+
+ RTErrInfoSet(pErrInfo, rc, "Failed to locate plug-in entrypoint (" DBGF_PLUG_IN_ENTRYPOINT ")" );
+ LogRel(("DBGF: RTLdrGetSymbol('%s', '%s',) -> %Rrc\n", pszModule, DBGF_PLUG_IN_ENTRYPOINT, rc));
+
+ RTLdrClose(pPlugIn->hLdrMod);
+ pPlugIn->hLdrMod = NIL_RTLDRMOD;
+ }
+ return rc;
+}
+
+
+/**
+ * RTPathTraverseList callback.
+ *
+ * @returns See FNRTPATHTRAVERSER.
+ *
+ * @param pchPath See FNRTPATHTRAVERSER.
+ * @param cchPath See FNRTPATHTRAVERSER.
+ * @param   pvUser1     The plug-in tracking record.
+ * @param   pvUser2     The optional error info structure.
+ */
+static DECLCALLBACK(int) dbgfR3PlugInLoadCallback(const char *pchPath, size_t cchPath, void *pvUser1, void *pvUser2)
+{
+ PDBGFPLUGIN pPlugIn = (PDBGFPLUGIN)pvUser1;
+ PRTERRINFO pErrInfo = (PRTERRINFO)pvUser2;
+
+ /*
+ * Join the path and the specified plug-in name, adding prefix and suffix.
+ */
+ const char *pszSuff = RTLdrGetSuff();
+ size_t const cchSuff = strlen(pszSuff);
+ size_t const cchModule = cchPath + sizeof(RTPATH_SLASH_STR) + sizeof(DBGF_PLUG_IN_PREFIX) + pPlugIn->cchName + cchSuff + 4;
+ char *pszModule = (char *)alloca(cchModule);
+ AssertReturn(pszModule, VERR_TRY_AGAIN);
+ memcpy(pszModule, pchPath, cchPath);
+ pszModule[cchPath] = '\0';
+
+ int rc = RTPathAppend(pszModule, cchModule, DBGF_PLUG_IN_PREFIX);
+ AssertRCReturn(rc, VERR_TRY_AGAIN);
+ strcat(&pszModule[cchPath], pPlugIn->szName);
+ strcat(&pszModule[cchPath + sizeof(DBGF_PLUG_IN_PREFIX) - 1 + pPlugIn->cchName], pszSuff);
+ Assert(strlen(pszModule) < cchModule - 4);
+
+ if (RTPathExists(pszModule))
+ {
+ rc = dbgfR3PlugInTryLoad(pPlugIn, pszModule, pErrInfo);
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+ }
+
+ return VERR_TRY_AGAIN;
+}
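+
+
+/*
+ * Example of the probing above, with illustrative values: for
+ * pchPath="/usr/lib/virtualbox", DBGF_PLUG_IN_PREFIX="DbgPlugIn",
+ * pPlugIn->szName="Sample" and RTLdrGetSuff()=".so", the callback probes
+ * "/usr/lib/virtualbox/DbgPlugInSample.so" and returns VERR_TRY_AGAIN so
+ * that RTPathTraverseList moves on to the next list member when the file
+ * is absent or fails to load.
+ */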
+
+
+/**
+ * Loads a plug-in.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user-mode VM structure.
+ * @param pszName The plug-in name.
+ * @param   pszMaybeModule  Path to the plug-in, or just the
+ *                      plug-in name as specified by the user.  Ignored
+ *                      when it contains no path component.
+ * @param pErrInfo Optional error information structure.
+ */
+static DECLCALLBACK(int) dbgfR3PlugInLoad(PUVM pUVM, const char *pszName, const char *pszMaybeModule, PRTERRINFO pErrInfo)
+{
+ DBGF_PLUG_IN_WRITE_LOCK(pUVM);
+
+ /*
+ * Check if a plug-in by the given name already exists.
+ */
+ PDBGFPLUGIN pPlugIn = dbgfR3PlugInLocate(pUVM, pszName, NULL);
+ if (pPlugIn)
+ {
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ return RTErrInfoSetF(pErrInfo, VERR_ALREADY_EXISTS, "A plug-in by the name '%s' already exists", pszName);
+ }
+
+ /*
+     * Create a plug-in tracking structure that we can pass around via RTPathTraverseList if needed.
+ */
+ size_t cbName = strlen(pszName) + 1;
+ pPlugIn = (PDBGFPLUGIN)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF, RT_UOFFSETOF_DYN(DBGFPLUGIN, szName[cbName]));
+ if (RT_UNLIKELY(!pPlugIn))
+ {
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ return VERR_NO_MEMORY;
+ }
+ memcpy(pPlugIn->szName, pszName, cbName);
+ pPlugIn->cchName = (uint8_t)cbName - 1;
+ Assert(pPlugIn->cchName == cbName - 1);
+
+ /*
+ * If the caller specified a path, try load exactly what was specified.
+ */
+ int rc;
+ if (RTPathHavePath(pszMaybeModule))
+ rc = dbgfR3PlugInTryLoad(pPlugIn, pszMaybeModule, pErrInfo);
+ else
+ {
+ /*
+ * No path specified, search for the plug-in using the canonical
+ * module name for it.
+ */
+ RTErrInfoClear(pErrInfo);
+
+ /* 1. The private architecture directory. */
+ char szPath[_4K];
+ rc = RTPathAppPrivateArch(szPath, sizeof(szPath));
+ if (RT_SUCCESS(rc))
+ rc = RTPathTraverseList(szPath, '\0', dbgfR3PlugInLoadCallback, pPlugIn, pErrInfo);
+ if (RT_FAILURE_NP(rc))
+ {
+ /* 2. The config value 'PlugInPath' */
+ int rc2 = CFGMR3QueryString(CFGMR3GetChild(CFGMR3GetRootU(pUVM), "/DBGF"), "PlugInPath", szPath, sizeof(szPath));
+ if (RT_SUCCESS(rc2))
+ rc = RTPathTraverseList(szPath, ';', dbgfR3PlugInLoadCallback, pPlugIn, pErrInfo);
+ if (RT_FAILURE_NP(rc))
+ {
+ /* 3. The VBOXDBG_PLUG_IN_PATH environment variable. */
+ rc2 = RTEnvGetEx(RTENV_DEFAULT, "VBOXDBG_PLUG_IN_PATH", szPath, sizeof(szPath), NULL);
+ if (RT_SUCCESS(rc2))
+ rc = RTPathTraverseList(szPath, ';', dbgfR3PlugInLoadCallback, pPlugIn, pErrInfo);
+ }
+ }
+
+ if (rc == VERR_END_OF_STRING)
+ rc = VERR_FILE_NOT_FOUND;
+ if (pErrInfo && !RTErrInfoIsSet(pErrInfo))
+ RTErrInfoSetF(pErrInfo, rc, "Failed to locate '%s'", pPlugIn->szName);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Try initialize it.
+ */
+ rc = pPlugIn->pfnEntry(DBGFPLUGINOP_INIT, pUVM, VMMR3GetVTable(), VBOX_VERSION);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Link it and we're good.
+ */
+ pPlugIn->pNext = pUVM->dbgf.s.pPlugInHead;
+ pUVM->dbgf.s.pPlugInHead = pPlugIn;
+
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ return VINF_SUCCESS;
+ }
+
+ RTErrInfoSet(pErrInfo, rc, "Plug-in init failed");
+ LogRel(("DBGF: Plug-in '%s' failed during init: %Rrc\n", pPlugIn->szName, rc));
+ RTLdrClose(pPlugIn->hLdrMod);
+ }
+ MMR3HeapFree(pPlugIn);
+
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ return rc;
+}
+
+
+/**
+ * Load a debugging plug-in.
+ *
+ * @returns VBox status code.
+ * @retval VERR_ALREADY_EXISTS if the module was already loaded.
+ * @retval VINF_BUFFER_OVERFLOW if the actual plug-in name buffer was too small
+ * (the plug-in was still successfully loaded).
+ * @param pUVM Pointer to the user-mode VM structure.
+ * @param pszPlugIn The plug-in name. This may specify the exact path to
+ * the plug-in module, or it may just specify the core name
+ * of the plug-in without prefix, suffix and path.
+ * @param   pszActual   Buffer to return the actual plug-in name in. Optional.
+ *                      This will be returned on VERR_ALREADY_EXISTS too.
+ * @param cbActual The size of @a pszActual.
+ * @param pErrInfo Optional error information structure.
+ */
+VMMR3DECL(int) DBGFR3PlugInLoad(PUVM pUVM, const char *pszPlugIn, char *pszActual, size_t cbActual, PRTERRINFO pErrInfo)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszPlugIn, VERR_INVALID_PARAMETER);
+
+ /*
+ * Extract the plug-in name. Copy it to the return buffer as we'll want to
+ * return it in the VERR_ALREADY_EXISTS case too.
+ */
+ char szName[DBGFPLUGIN_MAX_NAME];
+ int rc = dbgfPlugInExtractName(szName, pszPlugIn, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ int rc2 = VINF_SUCCESS;
+ if (pszActual)
+ rc2 = RTStrCopy(pszActual, cbActual, szName);
+
+ /*
+         * Write lock the related DBGF bits and try to load it.
+ */
+ rc = VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3PlugInLoad, 4, pUVM, szName, pszPlugIn, pErrInfo);
+ if (rc2 != VINF_SUCCESS && RT_SUCCESS(rc))
+ rc = VINF_BUFFER_OVERFLOW;
+ }
+
+ return rc;
+}
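+
+
+/*
+ * Illustrative usage sketch, not part of the build: loading a plug-in by its
+ * core name and reporting the resolved name.  The plug-in name is made up.
+ */
+#if 0
+static void dbgfR3PlugInExampleUsage(PUVM pUVM)
+{
+    char            szActual[DBGFPLUGIN_MAX_NAME];
+    RTERRINFOSTATIC ErrInfo;
+    int rc = DBGFR3PlugInLoad(pUVM, "sample" /* hypothetical */, szActual, sizeof(szActual),
+                              RTErrInfoInitStatic(&ErrInfo));
+    if (RT_SUCCESS(rc))
+        LogRel(("DBGF: Example: loaded plug-in '%s'\n", szActual));
+    else
+        LogRel(("DBGF: Example: load failed: %Rrc - %s\n", rc, ErrInfo.Core.pszMsg));
+}
+#endif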
+
+
+/**
+ * Load all plug-ins from the architecture-private directory of VBox.
+ *
+ * @param pUVM Pointer to the user-mode VM structure.
+ */
+VMMR3DECL(void) DBGFR3PlugInLoadAll(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN_VOID(pUVM);
+
+ /*
+ * Pass it on to EMT(0) if necessary (thanks to DBGFR3Os*).
+ */
+ if (VMR3GetVMCPUId(pUVM->pVM) != 0)
+ {
+ VMR3ReqPriorityCallVoidWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)DBGFR3PlugInLoadAll, 1, pUVM);
+ return;
+ }
+
+
+ /*
+ * Open the architecture specific directory with a filter on our prefix
+ * and names including a dot.
+ */
+ const char *pszSuff = RTLdrGetSuff();
+ size_t cchSuff = strlen(pszSuff);
+
+ char szPath[RTPATH_MAX];
+ int rc = RTPathAppPrivateArch(szPath, sizeof(szPath) - cchSuff);
+ AssertRCReturnVoid(rc);
+ size_t offDir = strlen(szPath);
+
+ rc = RTPathAppend(szPath, sizeof(szPath) - cchSuff, DBGF_PLUG_IN_PREFIX "*");
+ AssertRCReturnVoid(rc);
+ strcat(szPath, pszSuff);
+
+ RTDIR hDir;
+ rc = RTDirOpenFiltered(&hDir, szPath, RTDIRFILTER_WINNT, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Now read it and try load each of the plug-in modules.
+ */
+ RTDIRENTRY DirEntry;
+ while (RT_SUCCESS(RTDirRead(hDir, &DirEntry, NULL)))
+ {
+ szPath[offDir] = '\0';
+ rc = RTPathAppend(szPath, sizeof(szPath), DirEntry.szName);
+ if (RT_SUCCESS(rc))
+ {
+ char szName[DBGFPLUGIN_MAX_NAME];
+ rc = dbgfPlugInExtractName(szName, DirEntry.szName, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ DBGF_PLUG_IN_WRITE_LOCK(pUVM);
+ dbgfR3PlugInLoad(pUVM, szName, szPath, NULL);
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ }
+ }
+ }
+
+ RTDirClose(hDir);
+ }
+}
+
+
+/**
+ * Unloads a plug-in by name (no path, prefix or suffix).
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the specified plug-in wasn't found.
+ * @param pUVM Pointer to the user-mode VM structure.
+ * @param pszName The name of the plug-in to unload.
+ */
+VMMR3DECL(int) DBGFR3PlugInUnload(PUVM pUVM, const char *pszName)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Pass it on to EMT(0) if necessary (thanks to DBGFR3Os*).
+ */
+ if (VMR3GetVMCPUId(pUVM->pVM) != 0)
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)DBGFR3PlugInUnload, 2, pUVM, pszName);
+
+
+ /*
+ * Find the plug-in.
+ */
+ DBGF_PLUG_IN_WRITE_LOCK(pUVM);
+
+ int rc;
+ PDBGFPLUGIN pPrevPlugIn;
+ PDBGFPLUGIN pPlugIn = dbgfR3PlugInLocate(pUVM, pszName, &pPrevPlugIn);
+ if (pPlugIn)
+ {
+ /*
+ * Unlink, terminate, unload and free the plug-in.
+ */
+ if (pPrevPlugIn)
+ pPrevPlugIn->pNext = pPlugIn->pNext;
+ else
+ pUVM->dbgf.s.pPlugInHead = pPlugIn->pNext;
+
+ pPlugIn->pfnEntry(DBGFPLUGINOP_TERM, pUVM, VMMR3GetVTable(), 0);
+ RTLdrClose(pPlugIn->hLdrMod);
+
+        pPlugIn->pfnEntry = NULL;
+        pPlugIn->hLdrMod = NIL_RTLDRMOD;
+        MMR3HeapFree(pPlugIn);
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NOT_FOUND;
+
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+ return rc;
+}
+
+
+/**
+ * Unload all plug-ins.
+ *
+ * @param pUVM Pointer to the user-mode VM structure.
+ */
+static DECLCALLBACK(void) dbgfPlugInUnloadAll(PUVM pUVM)
+{
+ DBGF_PLUG_IN_WRITE_LOCK(pUVM);
+
+ while (pUVM->dbgf.s.pPlugInHead)
+ {
+ PDBGFPLUGIN pPlugin = pUVM->dbgf.s.pPlugInHead;
+ pUVM->dbgf.s.pPlugInHead = pPlugin->pNext;
+
+ pPlugin->pfnEntry(DBGFPLUGINOP_TERM, pUVM, VMMR3GetVTable(), 0);
+
+ int rc2 = RTLdrClose(pPlugin->hLdrMod);
+ AssertRC(rc2);
+
+ pPlugin->pfnEntry = NULL;
+ pPlugin->hLdrMod = NIL_RTLDRMOD;
+ MMR3HeapFree(pPlugin);
+ }
+
+ DBGF_PLUG_IN_WRITE_UNLOCK(pUVM);
+}
+
+
+/**
+ * Unloads all plug-ins.
+ *
+ * @param pUVM Pointer to the user-mode VM structure.
+ */
+VMMR3DECL(void) DBGFR3PlugInUnloadAll(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN_VOID(pUVM);
+ /* Thanks to DBGFR3Os, this must be done on EMT(0). */
+ VMR3ReqPriorityCallVoidWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfPlugInUnloadAll, 1, pUVM);
+}
+
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERINT, The 'plugins' info item.}
+ */
+static DECLCALLBACK(void) dbgfR3PlugInInfoList(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ PDBGFPLUGIN pPlugIn = pVM->pUVM->dbgf.s.pPlugInHead;
+ RT_NOREF_PV(pszArgs);
+ if (pPlugIn)
+ {
+ pHlp->pfnPrintf(pHlp, "Debugging plug-in%s: %s", pPlugIn->pNext ? "s" : "", pPlugIn->szName);
+ while ((pPlugIn = pPlugIn->pNext) != NULL)
+ pHlp->pfnPrintf(pHlp, ", %s", pPlugIn->szName);
+ pHlp->pfnPrintf(pHlp, "\n");
+    }
+ else
+ pHlp->pfnPrintf(pHlp, "No plug-ins loaded\n");
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3SampleReport.cpp b/src/VBox/VMM/VMMR3/DBGFR3SampleReport.cpp
new file mode 100644
index 00000000..99bef44a
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3SampleReport.cpp
@@ -0,0 +1,782 @@
+/* $Id: DBGFR3SampleReport.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Sample report creation.
+ */
+
+/*
+ * Copyright (C) 2021-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_sample_report DBGFR3SampleReport - Sample Report Interface
+ *
+ * @todo
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/semaphore.h>
+#include <iprt/list.h>
+#include <iprt/mem.h>
+#include <iprt/time.h>
+#include <iprt/timer.h>
+#include <iprt/sort.h>
+#include <iprt/string.h>
+#include <iprt/stream.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+/** Maximum stack frame depth. */
+#define DBGF_SAMPLE_REPORT_FRAME_DEPTH_MAX 64
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+/**
+ * Sample report state.
+ */
+typedef enum DBGFSAMPLEREPORTSTATE
+{
+    /** Invalid state, do not use. */
+ DBGFSAMPLEREPORTSTATE_INVALID = 0,
+ /** The sample report is ready to run. */
+ DBGFSAMPLEREPORTSTATE_READY,
+    /** The sample process is currently running. */
+ DBGFSAMPLEREPORTSTATE_RUNNING,
+ /** The sample process is about to stop. */
+ DBGFSAMPLEREPORTSTATE_STOPPING,
+ /** 32bit hack. */
+ DBGFSAMPLEREPORTSTATE_32BIT_HACK = 0x7fffffff
+} DBGFSAMPLEREPORTSTATE;
+
+/** Pointer to a single sample frame. */
+typedef struct DBGFSAMPLEFRAME *PDBGFSAMPLEFRAME;
+
+/**
+ * Frame information.
+ */
+typedef struct DBGFSAMPLEFRAME
+{
+ /** Frame address. */
+ DBGFADDRESS AddrFrame;
+ /** Number of times this frame was encountered. */
+ uint64_t cSamples;
+ /** Pointer to the array of frames below in the call stack. */
+ PDBGFSAMPLEFRAME paFrames;
+    /** Number of valid entries in the frames array. */
+ uint64_t cFramesValid;
+ /** Maximum number of entries in the frames array. */
+ uint64_t cFramesMax;
+} DBGFSAMPLEFRAME;
+typedef const DBGFSAMPLEFRAME *PCDBGFSAMPLEFRAME;
+
+
+/**
+ * Per VCPU sample report data.
+ */
+typedef struct DBGFSAMPLEREPORTVCPU
+{
+ /** The root frame. */
+ DBGFSAMPLEFRAME FrameRoot;
+} DBGFSAMPLEREPORTVCPU;
+/** Pointer to the per VCPU sample report data. */
+typedef DBGFSAMPLEREPORTVCPU *PDBGFSAMPLEREPORTVCPU;
+/** Pointer to const per VCPU sample report data. */
+typedef const DBGFSAMPLEREPORTVCPU *PCDBGFSAMPLEREPORTVCPU;
+
+
+/**
+ * Internal sample report instance data.
+ */
+typedef struct DBGFSAMPLEREPORTINT
+{
+    /** References held on this sample report. */
+ volatile uint32_t cRefs;
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** State the sample report is currently in. */
+ volatile DBGFSAMPLEREPORTSTATE enmState;
+ /** Flags passed during report creation. */
+ uint32_t fFlags;
+ /** The timer handle for the sample report collector. */
+ PRTTIMER hTimer;
+ /** The sample interval in microseconds. */
+ uint32_t cSampleIntervalUs;
+    /** The progress callback, if set. */
+ PFNDBGFPROGRESS pfnProgress;
+ /** Opaque user data passed with the progress callback. */
+ void *pvProgressUser;
+ /** Number of microseconds left for sampling. */
+ uint64_t cSampleUsLeft;
+ /** The report created after sampling was stopped. */
+ char *pszReport;
+ /** Number of EMTs having a guest sample operation queued. */
+ volatile uint32_t cEmtsActive;
+ /** Array of per VCPU samples collected. */
+ DBGFSAMPLEREPORTVCPU aCpus[1];
+} DBGFSAMPLEREPORTINT;
+/** Pointer to the internal sample report instance data. */
+typedef DBGFSAMPLEREPORTINT *PDBGFSAMPLEREPORTINT;
+/** Pointer to const internal sample report instance data. */
+typedef const DBGFSAMPLEREPORTINT *PCDBGFSAMPLEREPORTINT;
+
+
+/**
+ * Structure to pass to DBGFR3Info() and for doing all other
+ * output while assembling the sample report.
+ */
+typedef struct DBGFSAMPLEREPORTINFOHLP
+{
+ /** The helper core. */
+ DBGFINFOHLP Core;
+ /** Pointer to the allocated character buffer. */
+ char *pachBuf;
+ /** Number of bytes allocated for the character buffer. */
+ size_t cbBuf;
+ /** Offset into the character buffer. */
+ size_t offBuf;
+} DBGFSAMPLEREPORTINFOHLP, *PDBGFSAMPLEREPORTINFOHLP;
+/** Pointer to a const DBGFSAMPLEREPORTINFOHLP structure. */
+typedef const DBGFSAMPLEREPORTINFOHLP *PCDBGFSAMPLEREPORTINFOHLP;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+/**
+ * Print formatted string.
+ *
+ * @param pHlp Pointer to this structure.
+ * @param pszFormat The format string.
+ * @param ... Arguments.
+ */
+static DECLCALLBACK(void) dbgfR3SampleReportInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ va_list args;
+ va_start(args, pszFormat);
+ pHlp->pfnPrintfV(pHlp, pszFormat, args);
+ va_end(args);
+}
+
+
+/**
+ * Print formatted string.
+ *
+ * @param pHlp Pointer to this structure.
+ * @param pszFormat The format string.
+ * @param args Argument list.
+ */
+static DECLCALLBACK(void) dbgfR3SampleReportInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
+{
+ PDBGFSAMPLEREPORTINFOHLP pMyHlp = (PDBGFSAMPLEREPORTINFOHLP)pHlp;
+
+ va_list args2;
+ va_copy(args2, args);
+    ssize_t cch = RTStrPrintf2V(&pMyHlp->pachBuf[pMyHlp->offBuf], pMyHlp->cbBuf - pMyHlp->offBuf, pszFormat, args);
+ if (cch < 0)
+ {
+ /* Try increase the buffer. */
+ char *pachBufNew = (char *)RTMemRealloc(pMyHlp->pachBuf, pMyHlp->cbBuf + RT_MAX(_4K, -cch));
+ if (pachBufNew)
+ {
+ pMyHlp->pachBuf = pachBufNew;
+ pMyHlp->cbBuf += RT_MAX(_4K, -cch);
+ cch = RTStrPrintf2V(&pMyHlp->pachBuf[pMyHlp->offBuf], pMyHlp->cbBuf - pMyHlp->offBuf, pszFormat, args2);
+ Assert(cch > 0);
+ pMyHlp->offBuf += cch;
+ }
+ }
+ else
+ pMyHlp->offBuf += cch;
+ va_end(args2);
+}
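+
+
+/*
+ * Note on the retry above: when the buffer is too small, RTStrPrintf2V is
+ * expected to return the negated size it needed, so the helper grows the
+ * buffer by at least that much (minimum 4KiB) and formats again from the
+ * copied argument list.
+ */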
+
+
+/**
+ * Initializes the sample report output helper.
+ *
+ * @param pHlp The structure to initialize.
+ */
+static void dbgfR3SampleReportInfoHlpInit(PDBGFSAMPLEREPORTINFOHLP pHlp)
+{
+ RT_BZERO(pHlp, sizeof(*pHlp));
+
+ pHlp->Core.pfnPrintf = dbgfR3SampleReportInfoHlp_pfnPrintf;
+ pHlp->Core.pfnPrintfV = dbgfR3SampleReportInfoHlp_pfnPrintfV;
+ pHlp->Core.pfnGetOptError = DBGFR3InfoGenericGetOptError;
+
+ pHlp->pachBuf = (char *)RTMemAllocZ(_4K);
+ if (pHlp->pachBuf)
+ pHlp->cbBuf = _4K;
+}
+
+
+/**
+ * Deletes the sample report output helper.
+ *
+ * @param pHlp The structure to delete.
+ */
+static void dbgfR3SampleReportInfoHlpDelete(PDBGFSAMPLEREPORTINFOHLP pHlp)
+{
+ if (pHlp->pachBuf)
+ RTMemFree(pHlp->pachBuf);
+}
+
+
+/**
+ * Frees the given frame and all its descendants.
+ *
+ * @param pFrame The frame to free.
+ */
+static void dbgfR3SampleReportFrameFree(PDBGFSAMPLEFRAME pFrame)
+{
+ for (uint32_t i = 0; i < pFrame->cFramesValid; i++)
+ dbgfR3SampleReportFrameFree(&pFrame->paFrames[i]); /** @todo Recursion... */
+
+ MMR3HeapFree(pFrame->paFrames);
+ memset(pFrame, 0, sizeof(*pFrame));
+}
+
+
+/**
+ * Destroys the given sample report freeing all allocated resources.
+ *
+ * @param pThis The sample report instance data.
+ */
+static void dbgfR3SampleReportDestroy(PDBGFSAMPLEREPORTINT pThis)
+{
+ for (uint32_t i = 0; i < pThis->pUVM->cCpus; i++)
+ dbgfR3SampleReportFrameFree(&pThis->aCpus[i].FrameRoot);
+ MMR3HeapFree(pThis);
+}
+
+
+/**
+ * Returns the frame belonging to the given address or NULL if not found.
+ *
+ * @returns Pointer to the descendant frame or NULL if not found.
+ * @param pFrame The frame to look for descendants with the matching address.
+ * @param pAddr The guest address to search for.
+ */
+static PDBGFSAMPLEFRAME dbgfR3SampleReportFrameFindByAddr(PCDBGFSAMPLEFRAME pFrame, PCDBGFADDRESS pAddr)
+{
+ for (uint32_t i = 0; i < pFrame->cFramesValid; i++)
+ if (!memcmp(pAddr, &pFrame->paFrames[i].AddrFrame, sizeof(*pAddr)))
+ return &pFrame->paFrames[i];
+
+ return NULL;
+}
+
+
+/**
+ * Adds the given address as a descendant of the given frame.
+ *
+ * @returns Pointer to the newly inserted frame identified by the given address.
+ * @param pUVM The usermode VM handle.
+ * @param pFrame The frame to add the new one to as a descendant.
+ * @param pAddr The guest address to add.
+ */
+static PDBGFSAMPLEFRAME dbgfR3SampleReportAddFrameByAddr(PUVM pUVM, PDBGFSAMPLEFRAME pFrame, PCDBGFADDRESS pAddr)
+{
+ if (pFrame->cFramesValid == pFrame->cFramesMax)
+ {
+ uint32_t cFramesMaxNew = pFrame->cFramesMax + 10;
+ PDBGFSAMPLEFRAME paFramesNew = NULL;
+ if (pFrame->paFrames)
+ paFramesNew = (PDBGFSAMPLEFRAME)MMR3HeapRealloc(pFrame->paFrames, sizeof(*pFrame->paFrames) * cFramesMaxNew);
+ else
+ paFramesNew = (PDBGFSAMPLEFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF, sizeof(*pFrame->paFrames) * cFramesMaxNew);
+
+ if (!paFramesNew)
+ return NULL;
+
+ pFrame->cFramesMax = cFramesMaxNew;
+ pFrame->paFrames = paFramesNew;
+ }
+
+ PDBGFSAMPLEFRAME pFrameNew = &pFrame->paFrames[pFrame->cFramesValid++];
+ pFrameNew->AddrFrame = *pAddr;
+ pFrameNew->cSamples = 1;
+ pFrameNew->paFrames = NULL;
+ pFrameNew->cFramesMax = 0;
+ pFrameNew->cFramesValid = 0;
+ return pFrameNew;
+}
+
+
+/**
+ * @copydoc FNRTSORTCMP
+ */
+static DECLCALLBACK(int) dbgfR3SampleReportFrameSortCmp(void const *pvElement1, void const *pvElement2, void *pvUser)
+{
+ RT_NOREF(pvUser);
+ PCDBGFSAMPLEFRAME pFrame1 = (PCDBGFSAMPLEFRAME)pvElement1;
+ PCDBGFSAMPLEFRAME pFrame2 = (PCDBGFSAMPLEFRAME)pvElement2;
+
+ if (pFrame1->cSamples < pFrame2->cSamples)
+ return 1;
+ if (pFrame1->cSamples > pFrame2->cSamples)
+ return -1;
+
+ return 0;
+}
+
+
+/**
+ * Dumps a single given frame to the release log.
+ *
+ * @param pHlp The debug info helper used for printing.
+ * @param pUVM The usermode VM handle.
+ * @param pFrame The frame to dump.
+ * @param idxFrame The frame number.
+ */
+static void dbgfR3SampleReportDumpFrame(PCDBGFINFOHLP pHlp, PUVM pUVM, PCDBGFSAMPLEFRAME pFrame, uint32_t idxFrame)
+{
+ RTGCINTPTR offDisp;
+ RTDBGMOD hMod;
+ RTDBGSYMBOL SymPC;
+
+ if (DBGFR3AddrIsValid(pUVM, &pFrame->AddrFrame))
+ {
+ int rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_GLOBAL, &pFrame->AddrFrame,
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ &offDisp, &SymPC, &hMod);
+ if (RT_SUCCESS(rc))
+ {
+ const char *pszModName = hMod != NIL_RTDBGMOD ? RTDbgModName(hMod) : NULL;
+
+            pHlp->pfnPrintf(pHlp,
+                            "%*s%RU64 %s+%llx (%s) [%RGv]\n", idxFrame * 4, " ",
+                            pFrame->cSamples,
+                            SymPC.szName, offDisp,
+                            pszModName ? pszModName : "",
+                            pFrame->AddrFrame.FlatPtr);
+ RTDbgModRelease(hMod);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "%*s%RU64 %RGv\n", idxFrame * 4, " ", pFrame->cSamples, pFrame->AddrFrame.FlatPtr);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "%*s%RU64 %RGv\n", idxFrame * 4, " ", pFrame->cSamples, pFrame->AddrFrame.FlatPtr);
+
+ /* Sort by sample count. */
+ RTSortShell(pFrame->paFrames, pFrame->cFramesValid, sizeof(*pFrame->paFrames), dbgfR3SampleReportFrameSortCmp, NULL);
+
+ for (uint32_t i = 0; i < pFrame->cFramesValid; i++)
+ dbgfR3SampleReportDumpFrame(pHlp, pUVM, &pFrame->paFrames[i], idxFrame + 1);
+}
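+
+
+/*
+ * Example of the output produced above, with illustrative symbols and counts:
+ *      4321 idle_loop+0x12 (mykernel) [ffffffff80001012]
+ *          4000 wait_for_irq+0x3c (mykernel) [ffffffff80004a3c]
+ *           321 do_work+0x88 (mykernel) [ffffffff80007788]
+ * Each line shows the sample count, the resolved symbol plus displacement,
+ * the containing module and the flat guest address; descendants are sorted
+ * by sample count and indented four spaces per call depth.
+ */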
+
+
+/**
+ * Worker for dbgfR3SampleReportTakeSample(), doing the work in an EMT rendezvous point on
+ * each VCPU.
+ *
+ * @param pThis Pointer to the sample report instance.
+ */
+static DECLCALLBACK(void) dbgfR3SampleReportSample(PDBGFSAMPLEREPORTINT pThis)
+{
+ PVM pVM = pThis->pUVM->pVM;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+
+ PCDBGFSTACKFRAME pFrameFirst;
+ int rc = DBGFR3StackWalkBegin(pThis->pUVM, pVCpu->idCpu, DBGFCODETYPE_GUEST, &pFrameFirst);
+ if (RT_SUCCESS(rc))
+ {
+ DBGFADDRESS aFrameAddresses[DBGF_SAMPLE_REPORT_FRAME_DEPTH_MAX];
+ uint32_t idxFrame = 0;
+
+ PDBGFSAMPLEFRAME pFrame = &pThis->aCpus[pVCpu->idCpu].FrameRoot;
+ pFrame->cSamples++;
+
+ for (PCDBGFSTACKFRAME pStackFrame = pFrameFirst;
+ pStackFrame && idxFrame < RT_ELEMENTS(aFrameAddresses);
+ pStackFrame = DBGFR3StackWalkNext(pStackFrame))
+ {
+ if (pThis->fFlags & DBGF_SAMPLE_REPORT_F_STACK_REVERSE)
+ {
+ PDBGFSAMPLEFRAME pFrameNext = dbgfR3SampleReportFrameFindByAddr(pFrame, &pStackFrame->AddrPC);
+ if (!pFrameNext)
+ pFrameNext = dbgfR3SampleReportAddFrameByAddr(pThis->pUVM, pFrame, &pStackFrame->AddrPC);
+ else
+ pFrameNext->cSamples++;
+
+ pFrame = pFrameNext;
+ }
+ else
+ aFrameAddresses[idxFrame] = pStackFrame->AddrPC;
+
+ idxFrame++;
+ }
+
+ DBGFR3StackWalkEnd(pFrameFirst);
+
+ if (!(pThis->fFlags & DBGF_SAMPLE_REPORT_F_STACK_REVERSE))
+ {
+ /* Walk the frame stack backwards and construct the call stack. */
+ while (idxFrame--)
+ {
+ PDBGFSAMPLEFRAME pFrameNext = dbgfR3SampleReportFrameFindByAddr(pFrame, &aFrameAddresses[idxFrame]);
+ if (!pFrameNext)
+ pFrameNext = dbgfR3SampleReportAddFrameByAddr(pThis->pUVM, pFrame, &aFrameAddresses[idxFrame]);
+ else
+ pFrameNext->cSamples++;
+
+ pFrame = pFrameNext;
+ }
+ }
+ }
+ else
+ LogRelMax(10, ("Sampling guest stack on VCPU %u failed with rc=%Rrc\n", pVCpu->idCpu, rc));
+
+ /* Last EMT finishes the report when sampling was stopped. */
+ uint32_t cEmtsActive = ASMAtomicDecU32(&pThis->cEmtsActive);
+ if ( ASMAtomicReadU32((volatile uint32_t *)&pThis->enmState) == DBGFSAMPLEREPORTSTATE_STOPPING
+ && !cEmtsActive)
+ {
+ rc = RTTimerDestroy(pThis->hTimer); AssertRC(rc); RT_NOREF(rc);
+ pThis->hTimer = NULL;
+
+ DBGFSAMPLEREPORTINFOHLP Hlp;
+ PCDBGFINFOHLP pHlp = &Hlp.Core;
+
+ dbgfR3SampleReportInfoHlpInit(&Hlp);
+
+ /* Dump the accumulated call stack tree for each vCPU. */
+ for (uint32_t i = 0; i < pThis->pUVM->cCpus; i++)
+ {
+ PCDBGFSAMPLEREPORTVCPU pSampleVCpu = &pThis->aCpus[i];
+
+ pHlp->pfnPrintf(pHlp, "Sample report for vCPU %u:\n", i);
+ dbgfR3SampleReportDumpFrame(pHlp, pThis->pUVM, &pSampleVCpu->FrameRoot, 0);
+ }
+
+ /* Shameless copy from VMMGuruMeditation.cpp */
+ static struct
+ {
+ const char *pszInfo;
+ const char *pszArgs;
+ } const aInfo[] =
+ {
+ { "mappings", NULL },
+ { "mode", "all" },
+ { "handlers", "phys virt hyper stats" },
+ { "timers", NULL },
+ { "activetimers", NULL },
+ };
+ for (unsigned i = 0; i < RT_ELEMENTS(aInfo); i++)
+ {
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! {%s, %s}\n"
+ "!!\n",
+ aInfo[i].pszInfo, aInfo[i].pszArgs);
+ DBGFR3Info(pVM->pUVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
+ }
+
+ /* All other info items */
+ DBGFR3InfoMulti(pVM,
+ "*",
+ "mappings|hma|cpum|cpumguest|cpumguesthwvirt|cpumguestinstr|cpumhyper|cpumhost|cpumvmxfeat|mode|cpuid"
+ "|pgmpd|pgmcr3|timers|activetimers|handlers|help|cfgm",
+ "!!\n"
+ "!! {%s}\n"
+ "!!\n",
+ pHlp);
+
+
+ /* done */
+ pHlp->pfnPrintf(pHlp,
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
+
+ if (pThis->pszReport)
+ RTMemFree(pThis->pszReport);
+ pThis->pszReport = Hlp.pachBuf;
+ Hlp.pachBuf = NULL;
+ dbgfR3SampleReportInfoHlpDelete(&Hlp);
+
+ ASMAtomicXchgU32((volatile uint32_t *)&pThis->enmState, DBGFSAMPLEREPORTSTATE_READY);
+
+ if (pThis->pfnProgress)
+ {
+ pThis->pfnProgress(pThis->pvProgressUser, 100);
+ pThis->pfnProgress = NULL;
+ pThis->pvProgressUser = NULL;
+ }
+
+ DBGFR3SampleReportRelease(pThis);
+ }
+}
+
+
+/**
+ * @copydoc FNRTTIMER
+ */
+static DECLCALLBACK(void) dbgfR3SampleReportTakeSample(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
+{
+ PDBGFSAMPLEREPORTINT pThis = (PDBGFSAMPLEREPORTINT)pvUser;
+
+ if (pThis->cSampleUsLeft != UINT32_MAX)
+ {
+ int rc = VINF_SUCCESS;
+ uint64_t cUsSampled = iTick * pThis->cSampleIntervalUs; /** @todo Wrong if the timer resolution is different from what we've requested. */
+
+ /* Update progress. */
+ if (pThis->pfnProgress)
+ rc = pThis->pfnProgress(pThis->pvProgressUser, cUsSampled * 99 / pThis->cSampleUsLeft);
+
+ if ( cUsSampled >= pThis->cSampleUsLeft
+ || rc == VERR_DBGF_CANCELLED)
+ {
+ /*
+ * Let the EMTs do one last round in order to be able to destroy the timer (can't do this on the timer thread)
+ * and gather information from the devices.
+ */
+ ASMAtomicCmpXchgU32((volatile uint32_t *)&pThis->enmState, DBGFSAMPLEREPORTSTATE_STOPPING,
+ DBGFSAMPLEREPORTSTATE_RUNNING);
+
+ rc = RTTimerStop(pTimer); AssertRC(rc); RT_NOREF(rc);
+ }
+ }
+
+ ASMAtomicAddU32(&pThis->cEmtsActive, pThis->pUVM->cCpus);
+
+ for (uint32_t i = 0; i < pThis->pUVM->cCpus; i++)
+ {
+ int rc = VMR3ReqCallVoidNoWait(pThis->pUVM->pVM, i, (PFNRT)dbgfR3SampleReportSample, 1, pThis);
+ AssertRC(rc);
+ if (RT_FAILURE(rc))
+ ASMAtomicDecU32(&pThis->cEmtsActive);
+ }
+}
+
+
+/**
+ * Creates a new sample report instance for the specified VM.
+ *
+ * @returns VBox status code.
+ * @param pUVM The usermode VM handle.
+ * @param cSampleIntervalUs The sample interval in microseconds.
+ * @param fFlags Combination of DBGF_SAMPLE_REPORT_F_XXX.
+ * @param phSample Where to return the handle to the sample report on success.
+ */
+VMMR3DECL(int) DBGFR3SampleReportCreate(PUVM pUVM, uint32_t cSampleIntervalUs, uint32_t fFlags, PDBGFSAMPLEREPORT phSample)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(!(fFlags & ~DBGF_SAMPLE_REPORT_F_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phSample, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ PDBGFSAMPLEREPORTINT pThis = (PDBGFSAMPLEREPORTINT)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF,
+ RT_UOFFSETOF_DYN(DBGFSAMPLEREPORTINT, aCpus[pUVM->cCpus]));
+ if (RT_LIKELY(pThis))
+ {
+ pThis->cRefs = 1;
+ pThis->pUVM = pUVM;
+ pThis->fFlags = fFlags;
+ pThis->cSampleIntervalUs = cSampleIntervalUs;
+ pThis->enmState = DBGFSAMPLEREPORTSTATE_READY;
+ pThis->cEmtsActive = 0;
+
+ for (uint32_t i = 0; i < pUVM->cCpus; i++)
+ {
+ pThis->aCpus[i].FrameRoot.paFrames = NULL;
+ pThis->aCpus[i].FrameRoot.cSamples = 0;
+ pThis->aCpus[i].FrameRoot.cFramesValid = 0;
+ pThis->aCpus[i].FrameRoot.cFramesMax = 0;
+ }
+
+ *phSample = pThis;
+ return VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Retains a reference to the given sample report handle.
+ *
+ * @returns New reference count.
+ * @param hSample Sample report handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3SampleReportRetain(DBGFSAMPLEREPORT hSample)
+{
+ PDBGFSAMPLEREPORTINT pThis = hSample;
+ AssertPtrReturn(pThis, UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ return cRefs;
+}
+
+
+/**
+ * Release a given sample report handle reference.
+ *
+ * @returns New reference count, on 0 the sample report instance is destroyed.
+ * @param hSample Sample report handle.
+ */
+VMMR3DECL(uint32_t) DBGFR3SampleReportRelease(DBGFSAMPLEREPORT hSample)
+{
+ PDBGFSAMPLEREPORTINT pThis = hSample;
+ if (!pThis)
+ return 0;
+ AssertPtrReturn(pThis, UINT32_MAX);
+ AssertReturn(ASMAtomicReadU32((volatile uint32_t *)&pThis->enmState) == DBGFSAMPLEREPORTSTATE_READY,
+ 0);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pThis));
+ if (cRefs == 0)
+ dbgfR3SampleReportDestroy(pThis);
+ return cRefs;
+}
+
+
+/**
+ * Starts collecting samples for the given sample report.
+ *
+ * @returns VBox status code.
+ * @param hSample Sample report handle.
+ * @param cSampleUs Number of microseconds to sample at the interval given during creation.
+ * Use UINT32_MAX to sample for an indefinite amount of time.
+ * @param pfnProgress Optional progress callback.
+ * @param pvUser Opaque user data to pass to the progress callback.
+ */
+VMMR3DECL(int) DBGFR3SampleReportStart(DBGFSAMPLEREPORT hSample, uint64_t cSampleUs, PFNDBGFPROGRESS pfnProgress, void *pvUser)
+{
+ PDBGFSAMPLEREPORTINT pThis = hSample;
+
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(ASMAtomicCmpXchgU32((volatile uint32_t *)&pThis->enmState, DBGFSAMPLEREPORTSTATE_RUNNING, DBGFSAMPLEREPORTSTATE_READY),
+ VERR_INVALID_STATE);
+
+ pThis->pfnProgress = pfnProgress;
+ pThis->pvProgressUser = pvUser;
+ pThis->cSampleUsLeft = cSampleUs;
+
+ /* Try to detect the guest OS first so we can get more accurate symbols and addressing. */
+ char szName[64];
+ int rc = DBGFR3OSDetect(pThis->pUVM, &szName[0], sizeof(szName));
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("DBGF/SampleReport: Detected guest OS \"%s\"\n", szName));
+ char szVersion[512];
+ int rc2 = DBGFR3OSQueryNameAndVersion(pThis->pUVM, NULL, 0, szVersion, sizeof(szVersion));
+ if (RT_SUCCESS(rc2))
+ LogRel(("DBGF/SampleReport: Version : \"%s\"\n", szVersion));
+ }
+ else
+ LogRel(("DBGF/SampleReport: Couldn't detect guest operating system rc=%Rcr\n", rc));
+
+ /*
+ * We keep an additional reference to ensure that the sample report stays alive,
+ * it will be dropped when the sample process is stopped.
+ */
+ DBGFR3SampleReportRetain(pThis);
+
+ rc = RTTimerCreateEx(&pThis->hTimer, pThis->cSampleIntervalUs * 1000,
+ RTTIMER_FLAGS_CPU_ANY | RTTIMER_FLAGS_HIGH_RES,
+ dbgfR3SampleReportTakeSample, pThis);
+ if (RT_SUCCESS(rc))
+ rc = RTTimerStart(pThis->hTimer, 0 /*u64First*/);
+ if (RT_FAILURE(rc))
+ {
+ if (pThis->hTimer)
+ {
+ int rc2 = RTTimerDestroy(pThis->hTimer);
+ AssertRC(rc2); RT_NOREF(rc2);
+ pThis->hTimer = NULL;
+ }
+
+ bool fXchg = ASMAtomicCmpXchgU32((volatile uint32_t *)&pThis->enmState, DBGFSAMPLEREPORTSTATE_READY,
+ DBGFSAMPLEREPORTSTATE_RUNNING);
+ Assert(fXchg); RT_NOREF(fXchg);
+ DBGFR3SampleReportRelease(pThis);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Stops collecting samples for the given sample report.
+ *
+ * @returns VBox status code.
+ * @param hSample Sample report handle.
+ */
+VMMR3DECL(int) DBGFR3SampleReportStop(DBGFSAMPLEREPORT hSample)
+{
+ PDBGFSAMPLEREPORTINT pThis = hSample;
+
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(ASMAtomicCmpXchgU32((volatile uint32_t *)&pThis->enmState, DBGFSAMPLEREPORTSTATE_STOPPING,
+ DBGFSAMPLEREPORTSTATE_RUNNING),
+ VERR_INVALID_STATE);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps the current sample report to the given file.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_STATE if nothing was sampled so far for reporting.
+ * @param hSample Sample report handle.
+ * @param pszFilename The filename to dump the report to.
+ */
+VMMR3DECL(int) DBGFR3SampleReportDumpToFile(DBGFSAMPLEREPORT hSample, const char *pszFilename)
+{
+ PDBGFSAMPLEREPORTINT pThis = hSample;
+
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->pszReport, VERR_INVALID_STATE);
+
+ PRTSTREAM hStream;
+ int rc = RTStrmOpen(pszFilename, "w", &hStream);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTStrmPutStr(hStream, pThis->pszReport);
+ RTStrmClose(hStream);
+ }
+
+ return rc;
+}
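+
+/* Illustrative usage of the sample report API above (error handling elided;
+ * the interval/duration values are arbitrary and myProgressCallback stands
+ * for a user supplied PFNDBGFPROGRESS implementation):
+ *
+ *     DBGFSAMPLEREPORT hSample;
+ *     rc = DBGFR3SampleReportCreate(pUVM, 1000, 0, &hSample);
+ *     rc = DBGFR3SampleReportStart(hSample, 5000000, myProgressCallback, NULL);
+ *     ... myProgressCallback() is called with 100 once the report is ready ...
+ *     rc = DBGFR3SampleReportDumpToFile(hSample, "/tmp/sample-report.txt");
+ *     DBGFR3SampleReportRelease(hSample);
+ */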
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp b/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp
new file mode 100644
index 00000000..21a6d66e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp
@@ -0,0 +1,458 @@
+/* $Id: DBGFR3Trace.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Tracing.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgftrace.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pdmapi.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include "VMMTracing.h"
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/trace.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(void) dbgfR3TraceInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/**
+ * VMM trace point group translation table.
+ */
+static const struct
+{
+ /** The group name. */
+ const char *pszName;
+ /** The name length. */
+ uint32_t cchName;
+ /** The mask. */
+ uint32_t fMask;
+} g_aVmmTpGroups[] =
+{
+ { RT_STR_TUPLE("em"), VMMTPGROUP_EM },
+ { RT_STR_TUPLE("hm"), VMMTPGROUP_HM },
+ { RT_STR_TUPLE("tm"), VMMTPGROUP_TM },
+};
+
+
+/**
+ * Initializes the tracing.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ * @param cbEntry The trace entry size.
+ * @param cEntries The number of entries.
+ */
+static int dbgfR3TraceEnable(PVM pVM, uint32_t cbEntry, uint32_t cEntries)
+{
+ /*
+ * Don't enable it twice.
+ */
+ if (pVM->hTraceBufR3 != NIL_RTTRACEBUF)
+ return VERR_ALREADY_EXISTS;
+
+ /*
+ * Resolve default parameter values.
+ */
+ int rc;
+ if (!cbEntry)
+ {
+ rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntrySize", &cbEntry, 128);
+ AssertRCReturn(rc, rc);
+ }
+ if (!cEntries)
+ {
+ rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntries", &cEntries, 4096);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Figure the required size: the first RTTraceBufCarve() call probes with a
+ * NULL buffer and is expected to fail with VERR_BUFFER_OVERFLOW, returning
+ * the needed block size in cbBlock.
+ */
+ RTTRACEBUF hTraceBuf;
+ size_t cbBlock = 0;
+ rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, NULL, &cbBlock);
+ if (rc != VERR_BUFFER_OVERFLOW)
+ {
+ AssertReturn(!RT_SUCCESS_NP(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
+ return rc;
+ }
+
+ /*
+ * Allocate a hyper heap block and carve a trace buffer out of it.
+ *
+ * Note! We ASSUME that the returned trace buffer handle has the same value
+ * as the heap block.
+ */
+ cbBlock = RT_ALIGN_Z(cbBlock, HOST_PAGE_SIZE);
+ RTR0PTR pvBlockR0 = NIL_RTR0PTR;
+ void *pvBlockR3 = NULL;
+ rc = SUPR3PageAllocEx(cbBlock >> HOST_PAGE_SHIFT, 0, &pvBlockR3, &pvBlockR0, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, pvBlockR3, &cbBlock);
+ AssertRCReturn(rc, rc);
+ AssertReleaseReturn(hTraceBuf == (RTTRACEBUF)pvBlockR3, VERR_INTERNAL_ERROR_3);
+ AssertReleaseReturn((void *)hTraceBuf == pvBlockR3, VERR_INTERNAL_ERROR_3);
+
+ pVM->hTraceBufR3 = hTraceBuf;
+ pVM->hTraceBufR0 = pvBlockR0;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes the tracing.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ */
+int dbgfR3TraceInit(PVM pVM)
+{
+ /*
+ * Initialize the trace buffer handles.
+ */
+ Assert(NIL_RTTRACEBUF == (RTTRACEBUF)NULL);
+ pVM->hTraceBufR3 = NIL_RTTRACEBUF;
+ pVM->hTraceBufR0 = NIL_RTR0PTR;
+
+ /*
+ * Check the config and enable tracing if requested.
+ */
+ PCFGMNODE pDbgfNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF");
+ /* Note: currently the same defaults for debug and release builds. */
+ bool const fDefault = false;
+ const char * const pszConfigDefault = "";
+ bool fTracingEnabled;
+ int rc = CFGMR3QueryBoolDef(pDbgfNode, "TracingEnabled", &fTracingEnabled, fDefault);
+ AssertRCReturn(rc, rc);
+ if (fTracingEnabled)
+ {
+ rc = dbgfR3TraceEnable(pVM, 0, 0);
+ if (RT_SUCCESS(rc))
+ {
+ if (pDbgfNode)
+ {
+ char *pszTracingConfig;
+ rc = CFGMR3QueryStringAllocDef(pDbgfNode, "TracingConfig", &pszTracingConfig, pszConfigDefault);
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGFR3TraceConfig(pVM, pszTracingConfig);
+ if (RT_FAILURE(rc))
+ rc = VMSetError(pVM, rc, RT_SRC_POS, "TracingConfig=\"%s\" -> %Rrc", pszTracingConfig, rc);
+ MMR3HeapFree(pszTracingConfig);
+ }
+ }
+ else
+ {
+ rc = DBGFR3TraceConfig(pVM, pszConfigDefault);
+ if (RT_FAILURE(rc))
+ rc = VMSetError(pVM, rc, RT_SRC_POS, "TracingConfig=\"%s\" (default) -> %Rrc", pszConfigDefault, rc);
+ }
+ }
+ }
+
+ /*
+ * Register a debug info item that will dump the trace buffer content.
+ */
+ if (RT_SUCCESS(rc))
+ rc = DBGFR3InfoRegisterInternal(pVM, "tracebuf", "Display the trace buffer content. No arguments.", dbgfR3TraceInfo);
+
+ return rc;
+}
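+
+/* The CFGM keys queried above live under the DBGF node; assuming the usual
+ * "VBoxInternal/" extradata mapping they can be set from the host along the
+ * lines of (illustrative):
+ *
+ *     VBoxManage setextradata "MyVM" "VBoxInternal/DBGF/TracingEnabled" 1
+ *     VBoxManage setextradata "MyVM" "VBoxInternal/DBGF/TracingConfig" "em hm"
+ */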
+
+
+/**
+ * Terminates the tracing.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void dbgfR3TraceTerm(PVM pVM)
+{
+ /* nothing to do */
+ NOREF(pVM);
+}
+
+
+/**
+ * Relocates the trace buffer handle in RC.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void dbgfR3TraceRelocate(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Changes the tracing configuration of the VM.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_NOT_FOUND if any of the trace point groups mentioned in the
+ * config string cannot be found. (Or if the string cannot be made
+ * sense of.) No change made.
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_POINTER
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszConfig The configuration change specification.
+ *
+ * Trace point group names, optionally prefixed by a '-' to
+ * indicate that the group is being disabled. A special
+ * group 'all' can be used to enable or disable all trace
+ * points.
+ *
+ * Drivers, devices and USB devices each have their own
+ * trace point group which can be accessed by prefixing
+ * their official PDM name by 'drv', 'dev' or 'usb'
+ * respectively.
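+ *
+ * Examples (illustrative): "em hm" enables the EM and HM groups,
+ * "all -tm" enables everything but the TM group, and "-all"
+ * disables every trace point.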
+ */
+VMMDECL(int) DBGFR3TraceConfig(PVM pVM, const char *pszConfig)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszConfig, VERR_INVALID_POINTER);
+ if (pVM->hTraceBufR3 == NIL_RTTRACEBUF)
+ return VERR_DBGF_NO_TRACE_BUFFER;
+
+ /*
+ * We do this in two passes, the first pass just validates the input string
+ * and the second applies the changes.
+ */
+ for (uint32_t uPass = 0; uPass < 2; uPass++)
+ {
+ char ch;
+ while ((ch = *pszConfig) != '\0')
+ {
+ if (RT_C_IS_SPACE(ch))
+ {
+ pszConfig++;
+ continue;
+ }
+
+ /*
+ * Operation prefix.
+ */
+ bool fNo = false;
+ do
+ {
+ if (ch == 'n' && pszConfig[1] == 'o')
+ {
+ fNo = !fNo;
+ pszConfig++;
+ }
+ else if (ch == '+')
+ fNo = false;
+ else if (ch == '-' || ch == '!' || ch == '~')
+ fNo = !fNo;
+ else
+ break;
+ } while ((ch = *++pszConfig) != '\0');
+ if (ch == '\0')
+ break;
+
+ /*
+ * Extract the name.
+ */
+ const char *pszName = pszConfig;
+ while ( ch != '\0'
+ && !RT_C_IS_SPACE(ch)
+ && !RT_C_IS_PUNCT(ch))
+ ch = *++pszConfig;
+ size_t const cchName = pszConfig - pszName;
+
+ /*
+ * 'all' - special group that enables or disables all trace points.
+ */
+ if (cchName == 3 && !strncmp(pszName, "all", 3))
+ {
+ if (uPass != 0)
+ {
+ uint32_t iCpu = pVM->cCpus;
+ if (!fNo)
+ while (iCpu-- > 0)
+ pVM->apCpusR3[iCpu]->fTraceGroups = UINT32_MAX;
+ else
+ while (iCpu-- > 0)
+ pVM->apCpusR3[iCpu]->fTraceGroups = 0;
+ PDMR3TracingConfig(pVM, NULL, 0, !fNo, uPass > 0);
+ }
+ }
+ else
+ {
+ /*
+ * A specific group, try the VMM first then PDM.
+ */
+ uint32_t i = RT_ELEMENTS(g_aVmmTpGroups);
+ while (i-- > 0)
+ if ( g_aVmmTpGroups[i].cchName == cchName
+ && !strncmp(g_aVmmTpGroups[i].pszName, pszName, cchName))
+ {
+ if (uPass != 0)
+ {
+ uint32_t iCpu = pVM->cCpus;
+ if (!fNo)
+ while (iCpu-- > 0)
+ pVM->apCpusR3[iCpu]->fTraceGroups |= g_aVmmTpGroups[i].fMask;
+ else
+ while (iCpu-- > 0)
+ pVM->apCpusR3[iCpu]->fTraceGroups &= ~g_aVmmTpGroups[i].fMask;
+ }
+ break;
+ }
+
+ if (i == UINT32_MAX)
+ {
+ int rc = PDMR3TracingConfig(pVM, pszName, cchName, !fNo, uPass > 0);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Query the trace configuration specification string.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_POINTER
+ * @retval VERR_BUFFER_OVERFLOW if the buffer is too small. Buffer will be
+ * empty.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszConfig Pointer to the output buffer.
+ * @param cbConfig The size of the output buffer.
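+ *
+ * The returned string uses the syntax accepted by DBGFR3TraceConfig(),
+ * e.g. "all", "-all", or a space separated group list such as "em hm".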
+ */
+VMMDECL(int) DBGFR3TraceQueryConfig(PVM pVM, char *pszConfig, size_t cbConfig)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszConfig, VERR_INVALID_POINTER);
+ if (cbConfig < 1)
+ return VERR_BUFFER_OVERFLOW;
+ *pszConfig = '\0';
+
+ if (pVM->hTraceBufR3 == NIL_RTTRACEBUF)
+ return VERR_DBGF_NO_TRACE_BUFFER;
+
+ int rc = VINF_SUCCESS;
+ uint32_t const fTraceGroups = pVM->apCpusR3[0]->fTraceGroups;
+ if ( fTraceGroups == UINT32_MAX
+ && PDMR3TracingAreAll(pVM, true /*fEnabled*/))
+ rc = RTStrCopy(pszConfig, cbConfig, "all");
+ else if ( fTraceGroups == 0
+ && PDMR3TracingAreAll(pVM, false /*fEnabled*/))
+ rc = RTStrCopy(pszConfig, cbConfig, "-all");
+ else
+ {
+ char *pszDst = pszConfig;
+ size_t cbDst = cbConfig;
+ uint32_t i = RT_ELEMENTS(g_aVmmTpGroups);
+ while (i-- > 0)
+ if (g_aVmmTpGroups[i].fMask & fTraceGroups)
+ {
+ size_t cchThis = g_aVmmTpGroups[i].cchName + (pszDst != pszConfig);
+ if (cchThis >= cbDst)
+ {
+ rc = VERR_BUFFER_OVERFLOW;
+ break;
+ }
+ if (pszDst != pszConfig)
+ {
+ *pszDst = ' ';
+ memcpy(pszDst + 1, g_aVmmTpGroups[i].pszName, g_aVmmTpGroups[i].cchName + 1);
+ }
+ else
+ memcpy(pszDst, g_aVmmTpGroups[i].pszName, g_aVmmTpGroups[i].cchName + 1);
+ pszDst += cchThis;
+ cbDst -= cchThis;
+ }
+
+ if (RT_SUCCESS(rc))
+ rc = PDMR3TracingQueryConfig(pVM, pszDst, cbDst);
+ }
+
+ if (RT_FAILURE(rc))
+ *pszConfig = '\0';
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNRTTRACEBUFCALLBACK}
+ */
+static DECLCALLBACK(int)
+dbgfR3TraceInfoDumpEntry(RTTRACEBUF hTraceBuf, uint32_t iEntry, uint64_t NanoTS, RTCPUID idCpu, const char *pszMsg, void *pvUser)
+{
+ PCDBGFINFOHLP pHlp = (PCDBGFINFOHLP)pvUser;
+ pHlp->pfnPrintf(pHlp, "#%04u/%'llu/%02x: %s\n", iEntry, NanoTS, idCpu, pszMsg);
+ NOREF(hTraceBuf);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERINT, Info handler for displaying the trace buffer content.}
+ */
+static DECLCALLBACK(void) dbgfR3TraceInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RTTRACEBUF hTraceBuf = pVM->hTraceBufR3;
+ if (hTraceBuf == NIL_RTTRACEBUF)
+ pHlp->pfnPrintf(pHlp, "Tracing is disabled\n");
+ else
+ {
+ pHlp->pfnPrintf(pHlp, "Trace buffer %p - %u entries of %u bytes\n",
+ hTraceBuf, RTTraceBufGetEntryCount(hTraceBuf), RTTraceBufGetEntrySize(hTraceBuf));
+ RTTraceBufEnumEntries(hTraceBuf, dbgfR3TraceInfoDumpEntry, (void *)pHlp);
+ }
+ NOREF(pszArgs);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Tracer.cpp b/src/VBox/VMM/VMMR3/DBGFR3Tracer.cpp
new file mode 100644
index 00000000..8e55778f
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3Tracer.cpp
@@ -0,0 +1,1184 @@
+/* $Id: DBGFR3Tracer.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, tracing parts.
+ */
+
+/*
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include "DBGFInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/sup.h>
+
+#include <VBox/version.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/buildconfig.h>
+#include <iprt/alloc.h>
+#include <iprt/alloca.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/path.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/tracelog.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The event descriptors written to the trace log. */
+
+static const RTTRACELOGEVTDESC g_EvtSrcRegisterEvtDesc =
+{
+ "EvtSrc.Register",
+ "An event source was registered",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ 0,
+ NULL
+};
+
+
+static const RTTRACELOGEVTDESC g_EvtSrcDeregisterEvtDesc =
+{
+ "EvtSrc.Deregister",
+ "An event source was de-registered",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ 0,
+ NULL
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevMmioCreateEvtItems[] =
+{
+ {"hMmioRegion", "The MMIO region handle being returned by IOM", RTTRACELOGTYPE_UINT64, 0},
+ {"cbRegion", "Size of the MMIO region in bytes", RTTRACELOGTYPE_UINT64, 0},
+ {"fIomFlags", "Flags passed to IOM", RTTRACELOGTYPE_UINT32, 0},
+ {"iPciRegion", "PCI region used for a PCI device", RTTRACELOGTYPE_UINT32, 0},
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioCreateEvtDesc =
+{
+ "Dev.MmioCreate",
+ "MMIO region of a device is being created",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioCreateEvtItems),
+ &g_DevMmioCreateEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevMmioMapEvtItems[] =
+{
+ {"hMmioRegion", "The MMIO region handle being mapped", RTTRACELOGTYPE_UINT64, 0},
+ {"GCPhysMmioBase", "The guest physical address where the region is mapped", RTTRACELOGTYPE_UINT64, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioMapEvtDesc =
+{
+ "Dev.MmioMap",
+ "MMIO region of a device is being mapped",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioMapEvtItems),
+ &g_DevMmioMapEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevMmioUnmapEvtItems[] =
+{
+ {"hMmioRegion", "The MMIO region handle being unmapped", RTTRACELOGTYPE_UINT64, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioUnmapEvtDesc =
+{
+ "Dev.MmioUnmap",
+ "MMIO region of a device is being unmapped",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioUnmapEvtItems),
+ &g_DevMmioUnmapEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevMmioRwEvtItems[] =
+{
+ {"hMmioRegion", "The MMIO region handle being accessed", RTTRACELOGTYPE_UINT64, 0},
+ {"offMmio", "The offset in the MMIO region being accessed", RTTRACELOGTYPE_UINT64, 0},
+ {"cbXfer", "Number of bytes being transfered", RTTRACELOGTYPE_UINT64, 0},
+ {"u64Val", "The value read or written", RTTRACELOGTYPE_UINT64, 0},
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioReadEvtDesc =
+{
+ "Dev.MmioRead",
+ "MMIO region of a device is being read",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioRwEvtItems),
+ &g_DevMmioRwEvtItems[0]
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioWriteEvtDesc =
+{
+ "Dev.MmioWrite",
+ "MMIO region of a device is being written",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioRwEvtItems),
+ &g_DevMmioRwEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevMmioFillEvtItems[] =
+{
+ {"hMmioRegion", "The MMIO region handle being unmapped", RTTRACELOGTYPE_UINT64, 0},
+ {"offMmio", "The offset in the MMIO region being accessed", RTTRACELOGTYPE_UINT64, 0},
+ {"cbItem", "Item size in bytes", RTTRACELOGTYPE_UINT32, 0},
+ {"cItems", "Number of items being written", RTTRACELOGTYPE_UINT32, 0},
+ {"u32Val", "The value used for filling", RTTRACELOGTYPE_UINT32, 0},
+};
+
+static const RTTRACELOGEVTDESC g_DevMmioFillEvtDesc =
+{
+ "Dev.MmioFill",
+ "MMIO region of a device is being filled",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevMmioFillEvtItems),
+ &g_DevMmioFillEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoPortCreateEvtItems[] =
+{
+ {"hIoPorts", "The I/O port region handle being returned by IOM", RTTRACELOGTYPE_UINT64, 0},
+ {"cPorts", "Size of the region in number of ports", RTTRACELOGTYPE_UINT16, 0},
+ {"fIomFlags", "Flags passed to IOM", RTTRACELOGTYPE_UINT32, 0},
+ {"iPciRegion", "PCI region used for a PCI device", RTTRACELOGTYPE_UINT32, 0},
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortCreateEvtDesc =
+{
+ "Dev.IoPortCreate",
+ "I/O port region of a device is being created",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortCreateEvtItems),
+ &g_DevIoPortCreateEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoPortMapEvtItems[] =
+{
+ {"hIoPorts", "The I/O port region handle being mapped", RTTRACELOGTYPE_UINT64, 0},
+ {"IoPortBase", "The I/O port base address where the region is mapped", RTTRACELOGTYPE_UINT16, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortMapEvtDesc =
+{
+ "Dev.IoPortMap",
+ "I/O port region of a device is being mapped",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortMapEvtItems),
+ &g_DevIoPortMapEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoPortUnmapEvtItems[] =
+{
+ {"hIoPorts", "The I/O port region handle being unmapped", RTTRACELOGTYPE_UINT64, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortUnmapEvtDesc =
+{
+ "Dev.IoPortUnmap",
+ "I/O port region of a device is being unmapped",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortUnmapEvtItems),
+ &g_DevIoPortUnmapEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoPortRwEvtItems[] =
+{
+ {"hIoPorts", "The I/O region handle being accessed", RTTRACELOGTYPE_UINT64, 0},
+ {"offPort", "The offset in the I/O port region being accessed", RTTRACELOGTYPE_UINT16, 0},
+ {"cbXfer", "Number of bytes being transfered", RTTRACELOGTYPE_UINT64, 0},
+ {"u32Val", "The value read or written", RTTRACELOGTYPE_UINT32, 0},
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortReadEvtDesc =
+{
+ "Dev.IoPortRead",
+ "I/O port region of a device is being read",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortRwEvtItems),
+ &g_DevIoPortRwEvtItems[0]
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortWriteEvtDesc =
+{
+ "Dev.IoPortWrite",
+ "I/O port region of a device is being written",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortRwEvtItems),
+ &g_DevIoPortRwEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoPortRwStrEvtItems[] =
+{
+ {"hIoPorts", "The I/O region handle being accesses", RTTRACELOGTYPE_UINT64, 0},
+ {"offPort", "The offset in the I/O port region being accessed", RTTRACELOGTYPE_UINT16, 0},
+ {"cbItem", "Item size for the access", RTTRACELOGTYPE_UINT32, 0},
+ {"cTransfersReq", "Number of transfers requested by the guest", RTTRACELOGTYPE_UINT32, 0},
+ {"cTransfersRet", "Number of transfers executed by the device", RTTRACELOGTYPE_UINT32, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortReadStrEvtDesc =
+{
+ "Dev.IoPortReadStr",
+ "I/O port region of a device is being read using REP INS",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortRwStrEvtItems),
+ &g_DevIoPortRwStrEvtItems[0]
+};
+
+static const RTTRACELOGEVTDESC g_DevIoPortWriteStrEvtDesc =
+{
+ "Dev.IoPortWriteStr",
+ "I/O port region of a device is being written using REP OUTS",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoPortRwStrEvtItems),
+ &g_DevIoPortRwStrEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIrqEvtItems[] =
+{
+ {"iIrq", "The IRQ line", RTTRACELOGTYPE_INT32, 0},
+ {"fIrqLvl", "The IRQ level", RTTRACELOGTYPE_INT32, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevIrqEvtDesc =
+{
+ "Dev.Irq",
+ "Device raised or lowered an IRQ line",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIrqEvtItems),
+ &g_DevIrqEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevIoApicMsiEvtItems[] =
+{
+ {"GCPhys", "Physical guest address being written", RTTRACELOGTYPE_UINT64, 0},
+ {"u32Val", "value being written", RTTRACELOGTYPE_UINT32, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevIoApicMsiEvtDesc =
+{
+ "Dev.IoApicMsi",
+ "Device sent a MSI event through the I/O APIC",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevIoApicMsiEvtItems),
+ &g_DevIoApicMsiEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevGCPhysRwStartEvtItems[] =
+{
+ {"GCPhys", "Physical guest address being accessed", RTTRACELOGTYPE_UINT64, 0},
+ {"cbXfer", "Number of bytes being transfered", RTTRACELOGTYPE_UINT64, 0},
+};
+
+
+static const RTTRACELOGEVTDESC g_DevGCPhysReadEvtDesc =
+{
+ "Dev.GCPhysRead",
+ "Device read data from guest physical memory",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevGCPhysRwStartEvtItems),
+ &g_DevGCPhysRwStartEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTDESC g_DevGCPhysWriteEvtDesc =
+{
+ "Dev.GCPhysWrite",
+ "Device wrote data to guest physical memory",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevGCPhysRwStartEvtItems),
+ &g_DevGCPhysRwStartEvtItems[0]
+};
+
+
+static const RTTRACELOGEVTITEMDESC g_DevRwDataEvtItems[] =
+{
+ {"abData", "The data being read/written", RTTRACELOGTYPE_RAWDATA, 0}
+};
+
+static const RTTRACELOGEVTDESC g_DevRwDataEvtDesc =
+{
+ "Dev.RwData",
+ "The data being read or written",
+ RTTRACELOGEVTSEVERITY_DEBUG,
+ RT_ELEMENTS(g_DevRwDataEvtItems),
+ &g_DevRwDataEvtItems[0]
+};
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+
+/**
+ * Returns an unused guest memory read/write data aggregation structure.
+ *
+ * @returns Pointer to a new aggregation structure or NULL if out of memory.
+ * @param pThis The DBGF tracer instance.
+ */
+static PDBGFTRACERGCPHYSRWAGG dbgfTracerR3EvtRwAggNew(PDBGFTRACERINSR3 pThis)
+{
+ for (uint32_t i = 0; i < RT_ELEMENTS(pThis->aGstMemRwData); i++)
+ {
+ if (pThis->aGstMemRwData[i].idEvtStart == DBGF_TRACER_EVT_HDR_ID_INVALID)
+ return &pThis->aGstMemRwData[i];
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Find the guest memory read/write data aggregation structure for the given event ID.
+ *
+ * @returns Pointer to a new aggregation structure or NULL if not found.
+ * @param pThis The DBGF tracer instance.
+ * @param idEvtPrev The event ID to look for.
+ */
+static PDBGFTRACERGCPHYSRWAGG dbgfTracerR3EvtRwAggFind(PDBGFTRACERINSR3 pThis, uint64_t idEvtPrev)
+{
+ for (uint32_t i = 0; i < RT_ELEMENTS(pThis->aGstMemRwData); i++)
+ {
+ if ( pThis->aGstMemRwData[i].idEvtStart != DBGF_TRACER_EVT_HDR_ID_INVALID
+ && pThis->aGstMemRwData[i].idEvtPrev == idEvtPrev)
+ return &pThis->aGstMemRwData[i];
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Common code for the guest memory and string I/O port read/write events.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pEvtHdr The event header.
+ * @param cbXfer Overall number of bytes of data for this event.
+ * @param pvData Initial data supplied in the event starting the aggregation.
+ * @param cbData Number of initial bytes of data.
+ */
+static int dbgfTracerR3EvtRwStartCommon(PDBGFTRACERINSR3 pThis, PCDBGFTRACEREVTHDR pEvtHdr, size_t cbXfer, const void *pvData, size_t cbData)
+{
+ /* Slow path, find an empty aggregation structure. */
+ int rc = VINF_SUCCESS;
+ PDBGFTRACERGCPHYSRWAGG pDataAgg = dbgfTracerR3EvtRwAggNew(pThis);
+ if (RT_LIKELY(pDataAgg))
+ {
+ /* Initialize it. */
+ pDataAgg->idEvtStart = pEvtHdr->idEvt;
+ pDataAgg->idEvtPrev = pEvtHdr->idEvt;
+ pDataAgg->cbXfer = cbXfer;
+ pDataAgg->cbLeft = pDataAgg->cbXfer;
+ pDataAgg->offBuf = 0;
+
+ /* Need to reallocate the buffer to hold the complete data? */
+ if (RT_UNLIKELY(pDataAgg->cbBufMax < pDataAgg->cbXfer))
+ {
+ uint8_t *pbBufNew = (uint8_t *)RTMemRealloc(pDataAgg->pbBuf, pDataAgg->cbXfer);
+ if (RT_LIKELY(pbBufNew))
+ {
+ pDataAgg->pbBuf = pbBufNew;
+ pDataAgg->cbBufMax = pDataAgg->cbXfer;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ memcpy(pDataAgg->pbBuf, pvData, cbData);
+ pDataAgg->offBuf += cbData;
+ pDataAgg->cbLeft -= cbData;
+ }
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(10, ("DBGF: Creating new data aggregation structure for memory read/write failed with %Rrc, trace log will not contain data for this event!\n", rc));
+
+ /* Write out the finish event without any data. */
+ size_t cbEvtData = 0;
+ rc = RTTraceLogWrEvtAdd(pThis->hTraceLog, &g_DevRwDataEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_FINISH,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, NULL, &cbEvtData);
+ if (pDataAgg) /* Reset the aggregation event. */
+ pDataAgg->idEvtStart = DBGF_TRACER_EVT_HDR_ID_INVALID;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Starts a new guest memory read/write event.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pEvtHdr The event header.
+ * @param pEvtGCPhysRw The guest memory read/write event descriptor.
+ * @param pEvtDesc The event descriptor written to the trace log.
+ */
+static int dbgfTracerR3EvtGCPhysRwStart(PDBGFTRACERINSR3 pThis, PCDBGFTRACEREVTHDR pEvtHdr,
+ PCDBGFTRACEREVTGCPHYS pEvtGCPhysRw, PCRTTRACELOGEVTDESC pEvtDesc)
+{
+ /* Write out the event header first in any case. */
+ int rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, pEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_START,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtGCPhysRw->GCPhys, pEvtGCPhysRw->cbXfer);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * If the amount of data is small enough to fit into a single event, we can skip allocating
+ * an aggregation tracking structure and write the event containing the complete data out immediately.
+ */
+ if (pEvtGCPhysRw->cbXfer <= sizeof(pEvtGCPhysRw->abData))
+ {
+ size_t cbEvtData = pEvtGCPhysRw->cbXfer;
+
+ rc = RTTraceLogWrEvtAdd(pThis->hTraceLog, &g_DevRwDataEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_FINISH,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, &pEvtGCPhysRw->abData[0], &cbEvtData);
+ }
+ else
+ rc = dbgfTracerR3EvtRwStartCommon(pThis, pEvtHdr, pEvtGCPhysRw->cbXfer, &pEvtGCPhysRw->abData[0], sizeof(pEvtGCPhysRw->abData));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Starts a new I/O port string read/write event.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pEvtHdr The event header.
+ * @param pEvtIoPortStrRw The I/O port string read/write event descriptor.
+ * @param cbXfer Number of bytes of valid data for this event.
+ * @param pEvtDesc The event descriptor written to the trace log.
+ */
+static int dbgfTracerR3EvtIoPortStrRwStart(PDBGFTRACERINSR3 pThis, PCDBGFTRACEREVTHDR pEvtHdr,
+ PCDBGFTRACEREVTIOPORTSTR pEvtIoPortStrRw, size_t cbXfer,
+ PCRTTRACELOGEVTDESC pEvtDesc)
+{
+ /* Write out the event header first in any case. */
+ int rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, pEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_START,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoPortStrRw->hIoPorts, pEvtIoPortStrRw->offPort,
+ pEvtIoPortStrRw->cbItem, pEvtIoPortStrRw->cTransfersReq, pEvtIoPortStrRw->cTransfersRet);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * If the amount of data is small enough to fit into a single event, we can skip allocating
+ * an aggregation tracking structure and write the event containing the complete data out immediately.
+ */
+ if (cbXfer <= sizeof(pEvtIoPortStrRw->abData))
+ {
+ size_t cbEvtData = cbXfer;
+
+ rc = RTTraceLogWrEvtAdd(pThis->hTraceLog, &g_DevRwDataEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_FINISH,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, &pEvtIoPortStrRw->abData[0], &cbEvtData);
+ }
+ else
+ rc = dbgfTracerR3EvtRwStartCommon(pThis, pEvtHdr, cbXfer, &pEvtIoPortStrRw->abData[0], sizeof(pEvtIoPortStrRw->abData));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Continues a previously started guest memory or string I/O port read/write event.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pEvtHdr The event header.
+ * @param pvData The data to log.
+ */
+static int dbgfTracerR3EvtRwContinue(PDBGFTRACERINSR3 pThis, PCDBGFTRACEREVTHDR pEvtHdr, void *pvData)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFTRACERGCPHYSRWAGG pDataAgg = dbgfTracerR3EvtRwAggFind(pThis, pEvtHdr->idEvtPrev);
+
+ if (RT_LIKELY(pDataAgg))
+ {
+ size_t cbThisXfer = RT_MIN(pDataAgg->cbLeft, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+ memcpy(pDataAgg->pbBuf + pDataAgg->offBuf, pvData, cbThisXfer);
+ pDataAgg->offBuf += cbThisXfer;
+ pDataAgg->cbLeft -= cbThisXfer;
+
+ if (!pDataAgg->cbLeft)
+ {
+ /* All data aggregated, write it out and reset the structure. */
+ rc = RTTraceLogWrEvtAdd(pThis->hTraceLog, &g_DevRwDataEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_FINISH,
+ pDataAgg->idEvtStart, pEvtHdr->hEvtSrc, pDataAgg->pbBuf, &pDataAgg->cbXfer);
+ pDataAgg->offBuf = 0;
+ pDataAgg->idEvtStart = DBGF_TRACER_EVT_HDR_ID_INVALID;
+ }
+ else
+ pDataAgg->idEvtPrev = pEvtHdr->idEvt; /* So the next event containing more data can find the aggregation structure. */
+ }
+ else /* This can only happen if creating a new structure failed before. */
+ rc = VERR_DBGF_TRACER_IPE_1;
+
+ return rc;
+}
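+
+/* To illustrate the aggregation protocol implemented above and in
+ * dbgfTracerR3EvtRwStartCommon(): assuming e.g. 48 bytes of payload per ring
+ * buffer entry (the actual DBGF_TRACER_EVT_PAYLOAD_SZ may differ), a 130 byte
+ * guest memory write arrives as a start event carrying the descriptor plus
+ * the first data chunk, followed by continuation events linked via the
+ * idEvtPrev field of their headers; once cbLeft reaches zero, the complete
+ * 130 bytes are written out as a single Dev.RwData event which finishes the
+ * trace log group opened by the start event. */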
+
+
+/**
+ * Processes the given event.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pEvtHdr The event to process.
+ */
+static int dbgfR3TracerEvtProcess(PDBGFTRACERINSR3 pThis, PDBGFTRACEREVTHDR pEvtHdr)
+{
+ int rc = VINF_SUCCESS;
+
+ LogFlowFunc(("pThis=%p pEvtHdr=%p{idEvt=%llu,enmEvt=%u}\n",
+ pThis, pEvtHdr, pEvtHdr->idEvt, pEvtHdr->enmEvt));
+
+ switch (pEvtHdr->enmEvt)
+ {
+ case DBGFTRACEREVT_SRC_REGISTER:
+ {
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_EvtSrcRegisterEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_START,
+ pEvtHdr->hEvtSrc, 0 /*uParentGrpId*/);
+ break;
+ }
+ case DBGFTRACEREVT_SRC_DEREGISTER:
+ {
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_EvtSrcDeregisterEvtDesc, RTTRACELOG_WR_ADD_EVT_F_GRP_FINISH,
+ pEvtHdr->hEvtSrc, 0 /*uParentGrpId*/);
+ break;
+ }
+ case DBGFTRACEREVT_MMIO_REGION_CREATE:
+ {
+ PCDBGFTRACEREVTMMIOCREATE pEvtMmioCreate = (PCDBGFTRACEREVTMMIOCREATE)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevMmioCreateEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtMmioCreate->hMmioRegion, pEvtMmioCreate->cbRegion,
+ pEvtMmioCreate->fIomFlags, pEvtMmioCreate->iPciRegion);
+ break;
+ }
+ case DBGFTRACEREVT_MMIO_MAP:
+ {
+ PCDBGFTRACEREVTMMIOMAP pEvtMmioMap = (PCDBGFTRACEREVTMMIOMAP)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevMmioMapEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtMmioMap->hMmioRegion, pEvtMmioMap->GCPhysMmioBase);
+ break;
+ }
+ case DBGFTRACEREVT_MMIO_UNMAP:
+ {
+ PCDBGFTRACEREVTMMIOUNMAP pEvtMmioUnmap = (PCDBGFTRACEREVTMMIOUNMAP)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevMmioUnmapEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtMmioUnmap->hMmioRegion);
+ break;
+ }
+ case DBGFTRACEREVT_MMIO_READ:
+ case DBGFTRACEREVT_MMIO_WRITE:
+ {
+ PCDBGFTRACEREVTMMIO pEvtMmioRw = (PCDBGFTRACEREVTMMIO)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog,
+ pEvtHdr->enmEvt == DBGFTRACEREVT_MMIO_READ
+ ? &g_DevMmioReadEvtDesc
+ : &g_DevMmioWriteEvtDesc,
+ 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtMmioRw->hMmioRegion, pEvtMmioRw->offMmio,
+ pEvtMmioRw->cbXfer, pEvtMmioRw->u64Val);
+ break;
+ }
+ case DBGFTRACEREVT_MMIO_FILL:
+ {
+ PCDBGFTRACEREVTMMIOFILL pEvtMmioFill = (PCDBGFTRACEREVTMMIOFILL)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevMmioFillEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtMmioFill->hMmioRegion, pEvtMmioFill->offMmio,
+ pEvtMmioFill->cbItem, pEvtMmioFill->cItems, pEvtMmioFill->u32Item);
+ break;
+ }
+ case DBGFTRACEREVT_IOPORT_REGION_CREATE:
+ {
+ PCDBGFTRACEREVTIOPORTCREATE pEvtIoPortCreate = (PCDBGFTRACEREVTIOPORTCREATE)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevIoPortCreateEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoPortCreate->hIoPorts, pEvtIoPortCreate->cPorts,
+ pEvtIoPortCreate->fIomFlags, pEvtIoPortCreate->iPciRegion);
+ break;
+ }
+ case DBGFTRACEREVT_IOPORT_MAP:
+ {
+ PCDBGFTRACEREVTIOPORTMAP pEvtIoPortMap = (PCDBGFTRACEREVTIOPORTMAP)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevIoPortMapEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoPortMap->hIoPorts, pEvtIoPortMap->IoPortBase);
+ break;
+ }
+ case DBGFTRACEREVT_IOPORT_UNMAP:
+ {
+ PCDBGFTRACEREVTIOPORTUNMAP pEvtIoPortUnmap = (PCDBGFTRACEREVTIOPORTUNMAP)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevIoPortUnmapEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoPortUnmap->hIoPorts);
+ break;
+ }
+ case DBGFTRACEREVT_IOPORT_READ:
+ case DBGFTRACEREVT_IOPORT_WRITE:
+ {
+ PCDBGFTRACEREVTIOPORT pEvtIoPortRw = (PCDBGFTRACEREVTIOPORT)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog,
+ pEvtHdr->enmEvt == DBGFTRACEREVT_IOPORT_READ
+ ? &g_DevIoPortReadEvtDesc
+ : &g_DevIoPortWriteEvtDesc,
+ 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoPortRw->hIoPorts, pEvtIoPortRw->offPort,
+ pEvtIoPortRw->cbXfer, pEvtIoPortRw->u32Val);
+ break;
+ }
+ case DBGFTRACEREVT_IOPORT_READ_STR:
+ case DBGFTRACEREVT_IOPORT_WRITE_STR:
+ {
+ PCRTTRACELOGEVTDESC pEvtDesc = pEvtHdr->enmEvt == DBGFTRACEREVT_IOPORT_WRITE_STR
+ ? &g_DevIoPortWriteStrEvtDesc
+ : &g_DevIoPortReadStrEvtDesc;
+
+ /* If the previous event ID is invalid this starts a new read/write we have to aggregate all the data for. */
+ if (pEvtHdr->idEvtPrev == DBGF_TRACER_EVT_HDR_ID_INVALID)
+ {
+ PCDBGFTRACEREVTIOPORTSTR pEvtIoPortStrRw = (PCDBGFTRACEREVTIOPORTSTR)(pEvtHdr + 1);
+ size_t cbXfer = pEvtHdr->enmEvt == DBGFTRACEREVT_IOPORT_WRITE_STR
+ ? pEvtIoPortStrRw->cTransfersReq * pEvtIoPortStrRw->cbItem
+ : pEvtIoPortStrRw->cTransfersRet * pEvtIoPortStrRw->cbItem;
+
+ rc = dbgfTracerR3EvtIoPortStrRwStart(pThis, pEvtHdr, pEvtIoPortStrRw, cbXfer, pEvtDesc);
+ }
+ else
+ {
+ /* Continuation of a started read or write, look up the right tracking structure and process the new data. */
+ void *pvData = pEvtHdr + 1;
+ rc = dbgfTracerR3EvtRwContinue(pThis, pEvtHdr, pvData);
+ }
+ break;
+ }
+ case DBGFTRACEREVT_IRQ:
+ {
+ PCDBGFTRACEREVTIRQ pEvtIrq = (PCDBGFTRACEREVTIRQ)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevIrqEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIrq->iIrq, pEvtIrq->fIrqLvl);
+ break;
+ }
+ case DBGFTRACEREVT_IOAPIC_MSI:
+ {
+ PCDBGFTRACEREVTIOAPICMSI pEvtIoApicMsi = (PCDBGFTRACEREVTIOAPICMSI)(pEvtHdr + 1);
+
+ rc = RTTraceLogWrEvtAddL(pThis->hTraceLog, &g_DevIoApicMsiEvtDesc, 0 /*fFlags*/,
+ pEvtHdr->idEvt, pEvtHdr->hEvtSrc, pEvtIoApicMsi->GCPhys, pEvtIoApicMsi->u32Val);
+ break;
+ }
+ case DBGFTRACEREVT_GCPHYS_READ:
+ case DBGFTRACEREVT_GCPHYS_WRITE:
+ {
+ PCRTTRACELOGEVTDESC pEvtDesc = pEvtHdr->enmEvt == DBGFTRACEREVT_GCPHYS_WRITE
+ ? &g_DevGCPhysWriteEvtDesc
+ : &g_DevGCPhysReadEvtDesc;
+
+ /* If the previous event ID is invalid this starts a new read/write we have to aggregate all the data for. */
+ if (pEvtHdr->idEvtPrev == DBGF_TRACER_EVT_HDR_ID_INVALID)
+ {
+ PCDBGFTRACEREVTGCPHYS pEvtGCPhysRw = (PCDBGFTRACEREVTGCPHYS)(pEvtHdr + 1);
+ rc = dbgfTracerR3EvtGCPhysRwStart(pThis, pEvtHdr, pEvtGCPhysRw, pEvtDesc);
+ }
+ else
+ {
+ /* Continuation of a started read or write, look up the right tracking structure and process the new data. */
+ void *pvData = pEvtHdr + 1;
+ rc = dbgfTracerR3EvtRwContinue(pThis, pEvtHdr, pvData);
+ }
+ break;
+ }
+ default:
+ AssertLogRelMsgFailed(("Invalid or unsupported event: %u!\n", pEvtHdr->enmEvt));
+ break;
+ }
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNRTTHREAD,
+ * DBGF Tracer flush thread}
+ */
+static DECLCALLBACK(int) dbgfR3TracerThreadFlush(RTTHREAD ThreadSelf, void *pvUser)
+{
+ PDBGFTRACERINSR3 pThis = (PDBGFTRACERINSR3)pvUser;
+ PDBGFTRACERSHARED pShared = pThis->pSharedR3;
+ PSUPDRVSESSION pSession = pThis->pVMR3->pSession;
+
+ /* Release the waiter. */
+ RTThreadUserSignal(ThreadSelf);
+
+ /*
+ * Process stuff until we're told to terminate.
+ */
+ for (;;)
+ {
+ ASMAtomicXchgBool(&pShared->fFlushThrdActive, false);
+ if (!ASMAtomicXchgBool(&pShared->fEvtsWaiting, false))
+ {
+ int rc = SUPSemEventWaitNoResume(pSession, pShared->hSupSemEvtFlush, RT_INDEFINITE_WAIT);
+ Assert(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED); RT_NOREF(rc);
+
+ if (RT_UNLIKELY(ASMAtomicReadBool(&pThis->fShutdown)))
+ break;
+ }
+
+ ASMAtomicXchgBool(&pShared->fFlushThrdActive, true);
+
+ uint64_t idEvtNow = ASMAtomicReadU64(&pShared->idEvt);
+ uint64_t idEvt = pThis->idEvtLast;
+ size_t cRingBufEvts = pShared->cbRingBuf / DBGF_TRACER_EVT_SZ;
+ while (idEvt < idEvtNow)
+ {
+ uint64_t idxRingBuf = idEvt % cRingBufEvts; /* This gives the index in the ring buffer for the event. */
+ PDBGFTRACEREVTHDR pEvtHdr = (PDBGFTRACEREVTHDR)(pThis->CTX_SUFF(pbRingBuf) + idxRingBuf * DBGF_TRACER_EVT_SZ);
+
+ /*
+ * If the event header contains the invalid ID the producer was interrupted or didn't get that far yet, spin a bit
+ * and wait for the ID to become valid.
+ */
+ while (ASMAtomicReadU64(&pEvtHdr->idEvt) == DBGF_TRACER_EVT_HDR_ID_INVALID)
+ RTThreadYield();
+
+ int rc = dbgfR3TracerEvtProcess(pThis, pEvtHdr);
+ if (RT_FAILURE(rc))
+ LogRelMax(10, ("DBGF: Writing event failed with %Rrc, tracing log will be incomplete!\n", rc));
+
+ ASMAtomicWriteU64(&pEvtHdr->idEvt, DBGF_TRACER_EVT_HDR_ID_INVALID);
+ idEvt++;
+ }
+
+ pThis->idEvtLast = idEvt;
+ ASMAtomicXchgBool(&pShared->fEvtsWaiting, false);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Registers a set of possible event descriptors with the created trace log
+ * for faster subsequent operations.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ */
+static int dbgfR3TracerTraceLogEvtDescRegister(PDBGFTRACERINSR3 pThis)
+{
+ int rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevMmioMapEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevMmioUnmapEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevMmioReadEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevMmioWriteEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIoPortMapEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIoPortUnmapEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIoPortReadEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIoPortWriteEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIrqEvtDesc);
+ if (RT_SUCCESS(rc))
+ rc = RTTraceLogWrAddEvtDesc(pThis->hTraceLog, &g_DevIoApicMsiEvtDesc);
+
+ return rc;
+}
+
+
+/**
+ * Initializes the R3 and shared tracer instance data and spins up the flush thread.
+ *
+ * @returns VBox status code.
+ * @param pThis The DBGF tracer instance.
+ * @param pszTraceFilePath The path of the trace file to create.
+ */
+static int dbgfR3TracerInitR3(PDBGFTRACERINSR3 pThis, const char *pszTraceFilePath)
+{
+ PVM pVM = pThis->pVMR3;
+ PDBGFTRACERSHARED pShared = pThis->pSharedR3;
+
+ pThis->fShutdown = false;
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pThis->aGstMemRwData); i++)
+ pThis->aGstMemRwData[i].idEvtStart = DBGF_TRACER_EVT_HDR_ID_INVALID;
+
+ /* Try to create a file based trace log. */
+ int rc = RTTraceLogWrCreateFile(&pThis->hTraceLog, RTBldCfgVersion(), pszTraceFilePath);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = dbgfR3TracerTraceLogEvtDescRegister(pThis);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Go through the whole ring buffer and initialize the event IDs of all entries
+ * to invalid values.
+ */
+ uint64_t cEvtEntries = pShared->cbRingBuf / DBGF_TRACER_EVT_SZ;
+ PDBGFTRACEREVTHDR pEvtHdr = (PDBGFTRACEREVTHDR)pThis->pbRingBufR3;
+ for (uint64_t i = 0; i < cEvtEntries; i++)
+ {
+ pEvtHdr->idEvt = DBGF_TRACER_EVT_HDR_ID_INVALID;
+ pEvtHdr++;
+ }
+
+ rc = SUPSemEventCreate(pVM->pSession, &pShared->hSupSemEvtFlush);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTThreadCreate(&pThis->hThrdFlush, dbgfR3TracerThreadFlush, pThis, 0 /*cbStack*/, RTTHREADTYPE_IO,
+ RTTHREADFLAGS_WAITABLE, "DBGFTracer");
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTThreadUserWait(pThis->hThrdFlush, 10 * 1000);
+ if (RT_SUCCESS(rc))
+ {
+ return VINF_SUCCESS;
+ }
+ }
+
+ SUPSemEventClose(pVM->pSession, pShared->hSupSemEvtFlush);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Creates a DBGF tracer based on the given config and returns it.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fR0Enabled Flag whether the tracer should have R0 support enabled.
+ * @param pszTraceFilePath The path of the trace file to create.
+ * @param cbRingBuf Size of the ring buffer in bytes.
+ * @param ppDbgfTracerR3 Where to store the pointer to the tracer on success.
+ */
+DECLHIDDEN(int) dbgfR3TracerCreate(PVM pVM, bool fR0Enabled, const char *pszTraceFilePath,
+ uint32_t cbRingBuf, PDBGFTRACERINSR3 *ppDbgfTracerR3)
+{
+ PDBGFTRACERINSR3 pThis = NULL;
+
+ /*
+ * Allocate the tracer instance.
+ */
+ if ((fR0Enabled /*|| fRCEnabled*/) && !SUPR3IsDriverless())
+ {
+ AssertLogRel(fR0Enabled /* not possible to enable just raw-mode atm. */);
+
+ DBGFTRACERCREATEREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pTracerInsR3 = NULL;
+ Req.cbRingBuf = cbRingBuf;
+ Req.fRCEnabled = false; /*fRCEnabled;*/
+ Req.afReserved[0] = false;
+ Req.afReserved[1] = false;
+ Req.afReserved[2] = false;
+ int rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_DBGF_TRACER_CREATE, 0, &Req.Hdr);
+ AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_TRACER_CREATE failed: %Rrc\n", rc), rc);
+ pThis = Req.pTracerInsR3;
+ }
+ else
+ {
+ /* The code in this else branch works by the same rules as the DBGFR0Tracer.cpp
+ code, except there is only the ring-3 components of the tracer instance.
+ Changes here may need to be reflected in DBGFR0Tracer.cpp and vice versa! */
+ uint32_t cb = sizeof(DBGFTRACERINSR3);
+ cb = RT_ALIGN_32(cb, 64);
+ const uint32_t offShared = cb;
+ cb += sizeof(DBGFTRACERSHARED) + cbRingBuf;
+ AssertLogRelMsgReturn(cb <= DBGF_MAX_TRACER_INSTANCE_SIZE_R3,
+ ("Tracer total instance size is to big: %u, max %u\n",
+ cb, DBGF_MAX_TRACER_INSTANCE_SIZE_R3),
+ VERR_ALLOCATION_TOO_BIG);
+
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_DBGF_TRACER, cb, (void **)&pThis);
+ AssertLogRelMsgRCReturn(rc, ("Failed to allocate %zu bytes of instance data for tracer. rc=%Rrc\n",
+ cb, rc), rc);
+
+ /* Initialize it: */
+ pThis->pNextR3 = NULL;
+ pThis->pVMR3 = pVM;
+ pThis->fR0Enabled = false;
+ pThis->pSharedR3 = (PDBGFTRACERSHARED)((uint8_t *)pThis + offShared);
+ pThis->pbRingBufR3 = (uint8_t *)(pThis->pSharedR3 + 1);
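+
+ /* Resulting layout of the single allocation (sketch):
+ * [ DBGFTRACERINSR3 (padded to 64 bytes) | DBGFTRACERSHARED | ring buffer (cbRingBuf bytes) ] */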
+
+ pThis->pSharedR3->idEvt = 0;
+ pThis->pSharedR3->cbRingBuf = cbRingBuf;
+ pThis->pSharedR3->fEvtsWaiting = false;
+ pThis->pSharedR3->fFlushThrdActive = false;
+ }
+
+ /* Initialize the rest of the R3 tracer instance and spin up the flush thread. */
+ int rc = dbgfR3TracerInitR3(pThis, pszTraceFilePath);
+ if (RT_SUCCESS(rc))
+ {
+ *ppDbgfTracerR3 = pThis;
+ return rc;
+ }
+
+ /** @todo Cleanup. */
+ LogFlow(("dbgfR3TracerCreate: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Initializes and configures the tracer if configured.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM pointer.
+ */
+DECLHIDDEN(int) dbgfR3TracerInit(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+
+ pUVM->dbgf.s.pTracerR3 = NULL;
+
+ /*
+ * Check the config and enable tracing if requested.
+ */
+ PCFGMNODE pDbgfNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF");
+ bool fTracerEnabled;
+ int rc = CFGMR3QueryBoolDef(pDbgfNode, "TracerEnabled", &fTracerEnabled, false);
+ AssertRCReturn(rc, rc);
+ if (fTracerEnabled)
+ {
+ bool fR0Enabled;
+ uint32_t cbRingBuf = 0;
+ char *pszTraceFilePath = NULL;
+ rc = CFGMR3QueryBoolDef(pDbgfNode, "TracerR0Enabled", &fR0Enabled, false);
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3QueryU32Def(pDbgfNode, "TracerRingBufSz", &cbRingBuf, _4M);
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3QueryStringAlloc(pDbgfNode, "TracerFilePath", &pszTraceFilePath);
+ if (RT_SUCCESS(rc))
+ {
+ AssertLogRelMsgReturn(cbRingBuf && cbRingBuf == (size_t)cbRingBuf,
+ ("Tracing ringbuffer size %#RX64 is invalid\n", cbRingBuf),
+ VERR_INVALID_PARAMETER);
+
+ rc = dbgfR3TracerCreate(pVM, fR0Enabled, pszTraceFilePath, cbRingBuf, &pUVM->dbgf.s.pTracerR3);
+ }
+
+ if (pszTraceFilePath)
+ {
+ MMR3HeapFree(pszTraceFilePath);
+ pszTraceFilePath = NULL;
+ }
+ }
+
+ return rc;
+}
+
+
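+/* A minimal sketch of feeding the CFGM keys queried above in through the
+ * extra-data interface (the VBoxInternal/ prefix maps to the CFGM root; the
+ * VM name and trace file path are placeholders):
+ *
+ *   VBoxManage setextradata "MyVM" "VBoxInternal/DBGF/TracerEnabled" 1
+ *   VBoxManage setextradata "MyVM" "VBoxInternal/DBGF/TracerRingBufSz" 4194304
+ *   VBoxManage setextradata "MyVM" "VBoxInternal/DBGF/TracerFilePath" "/var/tmp/vm.trace"
+ */
+
+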
+/**
+ * Terminates any configured tracer for the given VM instance.
+ *
+ * @param pVM The cross context VM structure.
+ */
+DECLHIDDEN(void) dbgfR3TracerTerm(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+
+ if (pUVM->dbgf.s.pTracerR3)
+ {
+ PDBGFTRACERINSR3 pThis = pUVM->dbgf.s.pTracerR3;
+ PDBGFTRACERSHARED pSharedR3 = pThis->CTX_SUFF(pShared);
+
+ /* Tear down the flush thread. */
+ ASMAtomicXchgBool(&pThis->fShutdown, true);
+ SUPSemEventSignal(pVM->pSession, pSharedR3->hSupSemEvtFlush);
+
+ int rc = RTThreadWait(pThis->hThrdFlush, RT_MS_30SEC, NULL);
+ AssertLogRelMsgRC(rc, ("DBGF: Waiting for the tracer flush thread to terminate failed with %Rrc\n", rc));
+
+ /* Close the trace log. */
+ rc = RTTraceLogWrDestroy(pThis->hTraceLog);
+ AssertLogRelMsgRC(rc, ("DBGF: Closing the trace log file failed with %Rrc\n", rc));
+
+ SUPSemEventClose(pVM->pSession, pSharedR3->hSupSemEvtFlush);
+ /* The instance memory is freed by MM or when the R0 component terminates. */
+ pUVM->dbgf.s.pTracerR3 = NULL;
+ }
+}
+
+
+/**
+ * Registers a new event source with the given name and returns a tracer event source handle.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName The event source name.
+ * @param phEvtSrc Where to return the handle to the event source on success.
+ */
+VMMR3_INT_DECL(int) DBGFR3TracerRegisterEvtSrc(PVM pVM, const char *pszName, PDBGFTRACEREVTSRC phEvtSrc)
+{
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertReturn(pszName && *pszName != '\0', VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phEvtSrc, VERR_INVALID_POINTER);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFTRACERINSR3 pThis = pUVM->dbgf.s.pTracerR3;
+
+ DBGFTRACEREVTSRC hEvtSrc = ASMAtomicIncU64((volatile uint64_t *)&pThis->hEvtSrcNext) - 1;
+
+ int rc = dbgfTracerR3EvtPostSingle(pVM, pThis, hEvtSrc, DBGFTRACEREVT_SRC_REGISTER,
+ NULL /*pvEvtDesc*/, 0 /*cbEvtDesc*/, NULL /*pidEvt*/);
+ if (RT_SUCCESS(rc))
+ *phEvtSrc = hEvtSrc;
+
+ return rc;
+}
+
+
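+/* A usage sketch (the event source name and the handle variables are
+ * illustrative): an event source is registered once per tracing device
+ * instance and the returned handle is then passed to the DBGFR3TracerEvt*
+ * APIs below:
+ *
+ * @code
+ *     DBGFTRACEREVTSRC hEvtSrc = NIL_DBGFTRACEREVTSRC;
+ *     int rc = DBGFR3TracerRegisterEvtSrc(pVM, "dev/e1000/0", &hEvtSrc);
+ *     if (RT_SUCCESS(rc))
+ *         rc = DBGFR3TracerEvtIoPortCreate(pVM, hEvtSrc, hIoPorts, cPorts,
+ *                                          0 /*fFlags*/, 0 /*iPciRegion*/);
+ * @endcode
+ */
+
+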
+/**
+ * Deregisters the given event source handle.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param hEvtSrc The event source handle to deregister.
+ */
+VMMR3_INT_DECL(int) DBGFR3TracerDeregisterEvtSrc(PVM pVM, DBGFTRACEREVTSRC hEvtSrc)
+{
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertReturn(hEvtSrc != NIL_DBGFTRACEREVTSRC, VERR_INVALID_HANDLE);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFTRACERINSR3 pThis = pUVM->dbgf.s.pTracerR3;
+ return dbgfTracerR3EvtPostSingle(pVM, pThis, hEvtSrc, DBGFTRACEREVT_SRC_DEREGISTER,
+ NULL /*pvEvtDesc*/, 0 /*cbEvtDesc*/, NULL /*pidEvt*/);
+}
+
+
+/**
+ * Registers an I/O port region create event for the given event source.
+ *
+ * @returns VBox status code.
+ * @param pVM The current context VM instance data.
+ * @param hEvtSrc The event source for the posted event.
+ * @param hRegion The I/O port region handle returned from IOM.
+ * @param cPorts Number of ports registered.
+ * @param fFlags Flags passed to IOM.
+ * @param iPciRegion For a PCI device the region index used for the I/O ports.
+ */
+VMMR3_INT_DECL(int) DBGFR3TracerEvtIoPortCreate(PVM pVM, DBGFTRACEREVTSRC hEvtSrc, uint64_t hRegion, RTIOPORT cPorts, uint32_t fFlags,
+ uint32_t iPciRegion)
+{
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertReturn(hEvtSrc != NIL_DBGFTRACEREVTSRC, VERR_INVALID_HANDLE);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFTRACERINSR3 pThis = pUVM->dbgf.s.pTracerR3;
+
+ DBGFTRACEREVTIOPORTCREATE EvtIoPortCreate;
+ RT_ZERO(EvtIoPortCreate);
+ EvtIoPortCreate.hIoPorts = hRegion;
+ EvtIoPortCreate.cPorts = cPorts;
+ EvtIoPortCreate.fIomFlags = fFlags;
+ EvtIoPortCreate.iPciRegion = iPciRegion;
+ return dbgfTracerR3EvtPostSingle(pVM, pThis, hEvtSrc, DBGFTRACEREVT_IOPORT_REGION_CREATE,
+ &EvtIoPortCreate, sizeof(EvtIoPortCreate), NULL /*pidEvt*/);
+}
+
+
+/**
+ * Registers an MMIO region create event for the given event source.
+ *
+ * @returns VBox status code.
+ * @param pVM The current context VM instance data.
+ * @param hEvtSrc The event source for the posted event.
+ * @param hRegion The MMIO region handle returned from IOM.
+ * @param cbRegion Size of the MMIO region in bytes.
+ * @param fFlags Flags passed to IOM.
+ * @param iPciRegion For a PCI device the region index used for the MMIO region.
+ */
+VMMR3_INT_DECL(int) DBGFR3TracerEvtMmioCreate(PVM pVM, DBGFTRACEREVTSRC hEvtSrc, uint64_t hRegion, RTGCPHYS cbRegion, uint32_t fFlags,
+ uint32_t iPciRegion)
+{
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertReturn(hEvtSrc != NIL_DBGFTRACEREVTSRC, VERR_INVALID_HANDLE);
+
+ PUVM pUVM = pVM->pUVM;
+ PDBGFTRACERINSR3 pThis = pUVM->dbgf.s.pTracerR3;
+
+ DBGFTRACEREVTMMIOCREATE EvtMmioCreate;
+ RT_ZERO(EvtMmioCreate);
+ EvtMmioCreate.hMmioRegion = hRegion;
+ EvtMmioCreate.cbRegion = cbRegion;
+ EvtMmioCreate.fIomFlags = fFlags;
+ EvtMmioCreate.iPciRegion = iPciRegion;
+ return dbgfTracerR3EvtPostSingle(pVM, pThis, hEvtSrc, DBGFTRACEREVT_MMIO_REGION_CREATE,
+ &EvtMmioCreate, sizeof(EvtMmioCreate), NULL /*pidEvt*/);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Type.cpp b/src/VBox/VMM/VMMR3/DBGFR3Type.cpp
new file mode 100644
index 00000000..1be242aa
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFR3Type.cpp
@@ -0,0 +1,1287 @@
+/* $Id: DBGFR3Type.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Type Management.
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_dbgf_type DBGFType - Type Management
+ *
+ * The type management system is intended to ease retrieval of values from
+ * structures in the guest OS without having to take care of the size of pointers.
+ *
+ * @todo r=bird: We need to join this up with modules and address spaces. It
+ * cannot be standalone like this. Also, it must be coming from IPRT as
+ * there is no point in duplicating code (been there, done that with
+ * symbols and debug info already). This unfortunately means we need to
+ * find some common way of abstracting DWARF and Codeview type info so we
+ * can extend those debug info parsers to make type information available.
+ */
+
+
+/*********************************************************************************************************************************
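+/* A minimal usage sketch for the APIs below (fields are assigned by name so
+ * no declaration order is assumed; "MyGuestStruct" and the address variable
+ * Addr are made-up examples). Note that the registration code keeps the
+ * pointers to the registration data, so they must stay valid until the type
+ * is deregistered:
+ *
+ * @code
+ *     static DBGFTYPEREGMEMBER s_aMembers[2];
+ *     s_aMembers[0].pszName = "cItems"; s_aMembers[0].pszType = "uint32_t";
+ *     s_aMembers[1].pszName = "abData"; s_aMembers[1].pszType = "uint8_t";
+ *     s_aMembers[1].fFlags  = DBGFTYPEREGMEMBER_F_ARRAY;
+ *     s_aMembers[1].cElements = 16;
+ *
+ *     static DBGFTYPEREG s_TypeReg;
+ *     s_TypeReg.pszType    = "MyGuestStruct";
+ *     s_TypeReg.enmVariant = DBGFTYPEVARIANT_STRUCT;
+ *     s_TypeReg.paMembers  = &s_aMembers[0];
+ *     s_TypeReg.cMembers   = RT_ELEMENTS(s_aMembers);
+ *
+ *     DBGFR3TypeSetSize(pUVM, "ptr_t", sizeof(uint64_t)); // 64-bit guest
+ *     int rc = DBGFR3TypeRegister(pUVM, 1, &s_TypeReg);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         PDBGFTYPEVAL pVal = NULL;
+ *         rc = DBGFR3TypeQueryValByType(pUVM, &Addr, "MyGuestStruct", &pVal);
+ *         if (RT_SUCCESS(rc))
+ *             DBGFR3TypeValFree(pVal);
+ *     }
+ * @endcode
+ */
+
+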
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/param.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+/** Locks the type database for writing. */
+#define DBGF_TYPE_DB_LOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestWrite((pUVM)->dbgf.s.hTypeDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the type database after writing. */
+#define DBGF_TYPE_DB_UNLOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseWrite((pUVM)->dbgf.s.hTypeDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Locks the type database for reading. */
+#define DBGF_TYPE_DB_LOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestRead((pUVM)->dbgf.s.hTypeDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the type database after reading. */
+#define DBGF_TYPE_DB_UNLOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseRead((pUVM)->dbgf.s.hTypeDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * DBGF registered type.
+ */
+typedef struct DBGFTYPE
+{
+ /** String space core. */
+ RTSTRSPACECORE Core;
+ /** Pointer to the registration structure, NULL means builtin type. */
+ PCDBGFTYPEREG pReg;
+ /** How often the type is referenced by other types. */
+ volatile uint32_t cRefs;
+ /** Size of the type. */
+ size_t cbType;
+ /** Builtin type if pReg is NULL (otherwise it is invalid). */
+ DBGFTYPEBUILTIN enmTypeBuiltin;
+} DBGFTYPE;
+/** Pointer to a DBGF type. */
+typedef DBGFTYPE *PDBGFTYPE;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int dbgfR3TypeParseBufferByType(PUVM pUVM, PDBGFTYPE pType, uint8_t *pbBuf, size_t cbBuf,
+ PDBGFTYPEVAL *ppVal, size_t *pcbParsed);
+
+
+/**
+ * Looks up a type by the identifier.
+ *
+ * @returns Pointer to the type structure on success, NULL otherwise.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier.
+ */
+static PDBGFTYPE dbgfR3TypeLookup(PUVM pUVM, const char *pszType)
+{
+ PRTSTRSPACE pTypeSpace = &pUVM->dbgf.s.TypeSpace;
+ return (PDBGFTYPE)RTStrSpaceGet(pTypeSpace, pszType);
+}
+
+
+/**
+ * Calculate the size of the given type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pType The type to calculate the size for.
+ * @param fCalcNested Flag whether to calculate the size for nested
+ * structs if the sizes are 0.
+ */
+static int dbgfR3TypeCalcSize(PUVM pUVM, PDBGFTYPE pType, bool fCalcNested)
+{
+ int rc = VINF_SUCCESS;
+
+ /* Builtin types are never recalculated. */
+ if (pType->pReg)
+ {
+ switch (pType->pReg->enmVariant)
+ {
+ case DBGFTYPEVARIANT_STRUCT:
+ {
+ size_t cbType = 0;
+
+ /* Go through the members and update size. */
+ for (uint32_t i = 0; i < pType->pReg->cMembers && RT_SUCCESS(rc); i++)
+ {
+ PCDBGFTYPEREGMEMBER pMember = &pType->pReg->paMembers[i];
+
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_POINTER)
+ {
+ /* Use the current pointer size. */
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, "ptr_t");
+ if (RT_LIKELY(pTypeMember))
+ {
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ cbType += pMember->cElements * pTypeMember->cbType;
+ else
+ cbType += pTypeMember->cbType;
+ }
+ }
+ else
+ {
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, pMember->pszType);
+ if (RT_LIKELY(pTypeMember))
+ {
+ if ( pTypeMember->cbType == 0
+ && fCalcNested)
+ rc = dbgfR3TypeCalcSize(pUVM, pTypeMember, fCalcNested);
+
+ if (RT_SUCCESS(rc))
+ {
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ cbType += pMember->cElements * pTypeMember->cbType;
+ else
+ cbType += pTypeMember->cbType;
+ }
+ }
+ else
+ rc = VERR_INVALID_STATE;
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ pType->cbType = cbType;
+ break;
+ }
+
+ case DBGFTYPEVARIANT_UNION:
+ {
+ /* Get size of the biggest member and use that one. */
+ size_t cbType = 0;
+
+ for (uint32_t i = 0; i < pType->pReg->cMembers && RT_SUCCESS(rc); i++)
+ {
+ PCDBGFTYPEREGMEMBER pMember = &pType->pReg->paMembers[i];
+
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_POINTER)
+ {
+ /* Use the current pointer size. */
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, "ptr_t");
+ if (RT_LIKELY(pTypeMember))
+ {
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ cbType = RT_MAX(cbType, pMember->cElements * pTypeMember->cbType);
+ else
+ cbType = RT_MAX(cbType, pTypeMember->cbType);
+ }
+ }
+ else
+ {
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, pMember->pszType);
+ if (RT_LIKELY(pTypeMember))
+ {
+ if ( pTypeMember->cbType == 0
+ && fCalcNested)
+ rc = dbgfR3TypeCalcSize(pUVM, pTypeMember, fCalcNested);
+
+ if (RT_SUCCESS(rc))
+ {
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ cbType = RT_MAX(cbType, pMember->cElements * pTypeMember->cbType);
+ else
+ cbType = RT_MAX(cbType, pTypeMember->cbType);
+ }
+ }
+ else
+ rc = VERR_INVALID_STATE;
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ pType->cbType = cbType;
+ break;
+ }
+
+ case DBGFTYPEVARIANT_ALIAS:
+ {
+ /* Get the size of the alias. */
+ PDBGFTYPE pAliased = dbgfR3TypeLookup(pUVM, pType->pReg->pszAliasedType);
+ if (RT_LIKELY(pAliased))
+ {
+ if ( pAliased->cbType == 0
+ && fCalcNested)
+ rc = dbgfR3TypeCalcSize(pUVM, pAliased, fCalcNested);
+
+ if (RT_SUCCESS(rc))
+ pType->cbType = pAliased->cbType;
+ }
+ else
+ rc = VERR_INVALID_STATE;
+ break;
+ }
+
+ default:
+ AssertMsgFailedReturn(("Invalid type variant: %d", pType->pReg->enmVariant), VERR_INVALID_STATE);
+ }
+ }
+
+ return rc;
+}
+
+
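+/* Worked example for the calculation above (a sketch): for a registered
+ * struct { uint32_t u; ptr_t apNext[2]; } on a guest where ptr_t was set to
+ * 8 bytes, the struct case sums the members to 4 + 2 * 8 = 20 bytes, while a
+ * union of the same two members takes the largest one, RT_MAX(4, 2 * 8) = 16.
+ */
+
+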
+/**
+ * Callback for clearing the size of all non built-in types.
+ *
+ * @returns VBox status code.
+ * @param pStr The type structure.
+ * @param pvUser The user mode VM handle.
+ */
+static DECLCALLBACK(int) dbgfR3TypeTraverseClearSize(PRTSTRSPACECORE pStr, void *pvUser)
+{
+ PDBGFTYPE pType = (PDBGFTYPE)pStr;
+
+ if (pType->pReg)
+ pType->cbType = 0;
+
+ NOREF(pvUser);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Callback for calculating the size of all non built-in types.
+ *
+ * @returns VBox status code.
+ * @param pStr The type structure.
+ * @param pvUser The user mode VM handle.
+ */
+static DECLCALLBACK(int) dbgfR3TypeTraverseCalcSize(PRTSTRSPACECORE pStr, void *pvUser)
+{
+ PDBGFTYPE pType = (PDBGFTYPE)pStr;
+
+ if ( pType->pReg
+ && !pType->cbType)
+ dbgfR3TypeCalcSize((PUVM)pvUser, pType, true /* fCalcNested */);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Recalculate the sizes of all registered non builtin types.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+static int dbgfR3TypeRecalculateAllSizes(PUVM pUVM)
+{
+ int rc = VINF_SUCCESS;
+
+ /*
+ * Clear the sizes of all non-builtin types to 0 first so we know which
+ * types we have already visited later on.
+ */
+ rc = RTStrSpaceEnumerate(&pUVM->dbgf.s.TypeSpace, dbgfR3TypeTraverseClearSize, pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /* Now recalculate the size. */
+ rc = RTStrSpaceEnumerate(&pUVM->dbgf.s.TypeSpace, dbgfR3TypeTraverseCalcSize, pUVM);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Validates a given type registration.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The type registration structure.
+ */
+static int dbgfR3TypeValidate(PUVM pUVM, PCDBGFTYPEREG pReg)
+{
+ int rc = VINF_SUCCESS;
+
+ switch (pReg->enmVariant)
+ {
+ case DBGFTYPEVARIANT_ALIAS:
+ if ( pReg->cMembers > 0
+ || pReg->paMembers
+ || !pReg->pszAliasedType)
+ rc = VERR_INVALID_PARAMETER;
+ else
+ {
+ PDBGFTYPE pAlias = dbgfR3TypeLookup(pUVM, pReg->pszAliasedType);
+ if (RT_UNLIKELY(!pAlias))
+ rc = VERR_NOT_FOUND;
+ }
+ break;
+ case DBGFTYPEVARIANT_STRUCT:
+ case DBGFTYPEVARIANT_UNION:
+ if (!pReg->pszAliasedType)
+ {
+ for (uint32_t i = 0; i < pReg->cMembers; i++)
+ {
+ PCDBGFTYPEREGMEMBER pMember = &pReg->paMembers[i];
+
+ /* Use the current pointer size. */
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, pMember->pszType);
+ if (RT_UNLIKELY(!pTypeMember))
+ {
+ rc = VERR_NOT_FOUND;
+ break;
+ }
+
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ {
+ if (pMember->cElements == 0)
+ rc = VERR_INVALID_PARAMETER;
+ }
+ else if (pMember->cElements != 0)
+ rc = VERR_INVALID_PARAMETER;
+ }
+ }
+ else
+ rc = VERR_INVALID_PARAMETER;
+ break;
+ default:
+ AssertMsgFailedBreakStmt(("Invalid type variant: %d", pReg->enmVariant),
+ rc = VERR_INVALID_PARAMETER);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Retains or releases the reference counters to referenced types for the given
+ * type registration structure.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The type registration structure.
+ * @param fRetain Flag whether to retain or release references.
+ */
+static int dbgfR3TypeUpdateRefCnts(PUVM pUVM, PCDBGFTYPEREG pReg, bool fRetain)
+{
+ int rc = VINF_SUCCESS;
+
+ switch (pReg->enmVariant)
+ {
+ case DBGFTYPEVARIANT_ALIAS:
+ {
+ AssertPtr(pReg->pszAliasedType);
+
+ PDBGFTYPE pAlias = dbgfR3TypeLookup(pUVM, pReg->pszAliasedType);
+ AssertPtr(pAlias);
+
+ if (fRetain)
+ pAlias->cRefs++;
+ else
+ pAlias->cRefs--;
+ break;
+ }
+ case DBGFTYPEVARIANT_STRUCT:
+ case DBGFTYPEVARIANT_UNION:
+ {
+ for (uint32_t i = 0; i < pReg->cMembers; i++)
+ {
+ PCDBGFTYPEREGMEMBER pMember = &pReg->paMembers[i];
+
+ /* Use the current pointer size. */
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, pMember->pszType);
+ AssertPtr(pTypeMember);
+
+ if (fRetain)
+ pTypeMember->cRefs++;
+ else
+ pTypeMember->cRefs--;
+ }
+ break;
+ }
+ default:
+ AssertMsgFailedBreakStmt(("Invalid type variant: %d", pReg->enmVariant),
+ rc = VERR_INVALID_PARAMETER);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Registers a single type in the database.
+ *
+ * @returns VBox status code.
+ * @retval VERR_ALREADY_EXISTS if the type exists already.
+ * @param pUVM The user mode VM handle.
+ * @param pReg The type registration structure.
+ */
+static int dbgfR3TypeRegister(PUVM pUVM, PCDBGFTYPEREG pReg)
+{
+ int rc = VINF_SUCCESS;
+
+ LogFlowFunc(("pUVM=%#p pReg=%#p{%s}\n", pUVM, pReg, pReg->pszType));
+
+ if (dbgfR3TypeLookup(pUVM, pReg->pszType) == NULL)
+ {
+ rc = dbgfR3TypeValidate(pUVM, pReg);
+ if (RT_SUCCESS(rc))
+ {
+ PDBGFTYPE pType = (PDBGFTYPE)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_TYPE, sizeof(DBGFTYPE));
+ if (RT_LIKELY(pType))
+ {
+ pType->Core.pszString = pReg->pszType;
+ pType->pReg = pReg;
+ pType->cRefs = 0;
+ pType->enmTypeBuiltin = DBGFTYPEBUILTIN_INVALID;
+ rc = dbgfR3TypeCalcSize(pUVM, pType, false /* fCalcNested */);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3TypeUpdateRefCnts(pUVM, pReg, true /* fRetain */);
+ if (RT_SUCCESS(rc))
+ {
+ bool fSucc = RTStrSpaceInsert(&pUVM->dbgf.s.TypeSpace, &pType->Core);
+ Assert(fSucc);
+ if (!fSucc)
+ {
+ dbgfR3TypeUpdateRefCnts(pUVM, pReg, false /* fRetain */);
+ rc = VERR_ALREADY_EXISTS;
+ }
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ MMR3HeapFree(pType);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ }
+ else
+ rc = VERR_ALREADY_EXISTS;
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Registers a new built-in type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param enmTypeBuiltin The builtin type enum.
+ * @param cbType Size of the type in bytes.
+ * @param pszType The type identifier for the builtin type.
+ */
+static int dbgfR3TypeRegisterBuiltin(PUVM pUVM, DBGFTYPEBUILTIN enmTypeBuiltin,
+ size_t cbType, const char *pszType)
+{
+ LogFlowFunc(("pUVM=%#p enmBuiltin=%d pszType=%s\n", pUVM, enmTypeBuiltin, pszType));
+
+ AssertReturn(!dbgfR3TypeLookup(pUVM, pszType), VERR_INVALID_STATE);
+
+ int rc = VINF_SUCCESS;
+ PDBGFTYPE pType = (PDBGFTYPE)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_TYPE, sizeof(DBGFTYPE));
+ if (RT_LIKELY(pType))
+ {
+ pType->Core.pszString = pszType;
+ pType->pReg = NULL;
+ pType->cRefs = 0;
+ pType->cbType = cbType;
+ pType->enmTypeBuiltin = enmTypeBuiltin;
+ bool fSucc = RTStrSpaceInsert(&pUVM->dbgf.s.TypeSpace, &pType->Core);
+ Assert(fSucc);
+ if (!fSucc)
+ rc = VERR_ALREADY_EXISTS;
+
+ if (RT_FAILURE(rc))
+ MMR3HeapFree(pType);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
+/**
+ * Registers builtin types.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+static int dbgfTypeRegisterBuiltinTypes(PUVM pUVM)
+{
+ int rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_UINT8, sizeof(uint8_t), "uint8_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_INT8, sizeof(int8_t), "int8_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_UINT16, sizeof(uint16_t), "uint16_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_INT16, sizeof(int16_t), "int16_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_UINT32, sizeof(uint32_t), "uint32_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_INT32, sizeof(int32_t), "int32_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_UINT64, sizeof(uint64_t), "uint64_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_INT64, sizeof(int64_t), "int64_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_PTR32, sizeof(uint32_t), "ptr32_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_PTR64, sizeof(uint64_t), "ptr64_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_PTR, 0, "ptr_t");
+ if (RT_SUCCESS(rc))
+ rc = dbgfR3TypeRegisterBuiltin(pUVM, DBGFTYPEBUILTIN_SIZE, 0, "size_t");
+
+ return rc;
+}
+
+
+/**
+ * Parses a single entry for a given type and assigns the value from the byte buffer
+ * to the value entry.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pMember The type member.
+ * @param pValEntry The value entry holding the value on success.
+ * @param pbBuf The raw byte buffer.
+ * @param cbBuf Size of the byte buffer.
+ * @param pcbParsed Where to store the amount of consumed bytes on success.
+ */
+static int dbgfR3TypeParseEntry(PUVM pUVM, PCDBGFTYPEREGMEMBER pMember, PDBGFTYPEVALENTRY pValEntry,
+ uint8_t *pbBuf, size_t cbBuf, size_t *pcbParsed)
+{
+ int rc = VINF_SUCCESS;
+ PDBGFTYPE pTypeMember = dbgfR3TypeLookup(pUVM, pMember->pszType);
+ uint32_t cValBufs = 1;
+ size_t cbParsed = 0;
+ PDBGFTYPEVALBUF pValBuf = &pValEntry->Buf.Val;
+
+ AssertPtrReturn(pTypeMember, VERR_INVALID_STATE);
+
+ if (pMember->fFlags & DBGFTYPEREGMEMBER_F_ARRAY)
+ {
+ cValBufs = pMember->cElements;
+ pValBuf = (PDBGFTYPEVALBUF)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_TYPE, cValBufs * sizeof(DBGFTYPEVALBUF));
+ if (RT_UNLIKELY(!pValBuf))
+ rc = VERR_NO_MEMORY;
+
+ pValEntry->Buf.pVal = pValBuf;
+ pValEntry->cEntries = cValBufs;
+ pValEntry->cbType = pTypeMember->cbType;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ for (uint32_t iValBuf = 0; iValBuf < cValBufs && RT_SUCCESS(rc); iValBuf++)
+ {
+ size_t cbThisParsed = 0;
+
+ if (pTypeMember->pReg)
+ {
+ /* Compound or aliased type */
+ rc = dbgfR3TypeParseBufferByType(pUVM, pTypeMember, pbBuf, cbBuf,
+ &pValBuf->pVal, &cbThisParsed);
+ if (RT_SUCCESS(rc))
+ pValEntry->enmType = DBGFTYPEBUILTIN_COMPOUND;
+ }
+ else
+ {
+ void *pvVal = NULL;
+
+ switch (pTypeMember->enmTypeBuiltin)
+ {
+ case DBGFTYPEBUILTIN_UINT8:
+ pvVal = &pValBuf->u8;
+ cbThisParsed = 1;
+ break;
+ case DBGFTYPEBUILTIN_INT8:
+ pvVal = &pValBuf->i8;
+ cbThisParsed = 1;
+ break;
+ case DBGFTYPEBUILTIN_UINT16:
+ pvVal = &pValBuf->u16;
+ cbThisParsed = 2;
+ break;
+ case DBGFTYPEBUILTIN_INT16:
+ pvVal = &pValBuf->i16;
+ cbThisParsed = 2;
+ break;
+ case DBGFTYPEBUILTIN_UINT32:
+ pvVal = &pValBuf->u32;
+ cbThisParsed = 4;
+ break;
+ case DBGFTYPEBUILTIN_INT32:
+ pvVal = &pValBuf->i32;
+ cbThisParsed = 4;
+ break;
+ case DBGFTYPEBUILTIN_UINT64:
+ pvVal = &pValBuf->u64;
+ cbThisParsed = 8;
+ break;
+ case DBGFTYPEBUILTIN_INT64:
+ pvVal = &pValBuf->i64;
+ cbThisParsed = 8;
+ break;
+ case DBGFTYPEBUILTIN_PTR32:
+ pvVal = &pValBuf->GCPtr;
+ cbThisParsed = 4;
+ break;
+ case DBGFTYPEBUILTIN_PTR64:
+ pvVal = &pValBuf->GCPtr;
+ cbThisParsed = 8;
+ break;
+ case DBGFTYPEBUILTIN_PTR:
+ pvVal = &pValBuf->GCPtr;
+ cbThisParsed = pTypeMember->cbType;
+ break;
+ case DBGFTYPEBUILTIN_SIZE:
+ pvVal = &pValBuf->size;
+ cbThisParsed = pTypeMember->cbType;
+ break;
+ case DBGFTYPEBUILTIN_FLOAT32:
+ case DBGFTYPEBUILTIN_FLOAT64:
+ case DBGFTYPEBUILTIN_COMPOUND:
+ default:
+ AssertMsgFailedBreakStmt(("Invalid built-in type specified: %d\n", pTypeMember->enmTypeBuiltin),
+ rc = VERR_INVALID_STATE);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ pValEntry->enmType = pTypeMember->enmTypeBuiltin;
+ if (cbBuf >= cbThisParsed)
+ memcpy(pvVal, pbBuf, cbThisParsed);
+ else
+ rc = VERR_BUFFER_OVERFLOW;
+ }
+ }
+
+ pValBuf++;
+
+ cbParsed += cbThisParsed;
+ pbBuf += cbThisParsed;
+ cbBuf -= cbThisParsed;
+ }
+ }
+
+ if ( RT_FAILURE(rc)
+ && cValBufs > 1)
+ MMR3HeapFree(pValEntry->Buf.pVal); /* Free the array start, not the advanced cursor. */
+
+ if (RT_SUCCESS(rc))
+ {
+ pValEntry->cEntries = cValBufs;
+ pValEntry->cbType = pTypeMember->cbType; /* Also set for non-array entries. */
+ *pcbParsed = cbParsed;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Parses the given byte buffer and returns the value based on the type information.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pType The type information.
+ * @param pbBuf The byte buffer to parse.
+ * @param cbBuf Size of the buffer.
+ * @param ppVal Where to store the pointer to the value on success.
+ * @param pcbParsed How many bytes of the buffer we consumed.
+ */
+static int dbgfR3TypeParseBufferByType(PUVM pUVM, PDBGFTYPE pType, uint8_t *pbBuf, size_t cbBuf,
+ PDBGFTYPEVAL *ppVal, size_t *pcbParsed)
+{
+ int rc = VINF_SUCCESS;
+ uint32_t cEntries = pType->pReg ? pType->pReg->cMembers : 1;
+ PDBGFTYPEVAL pVal = (PDBGFTYPEVAL)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_TYPE,
+ RT_UOFFSETOF_DYN(DBGFTYPEVAL, aEntries[cEntries]));
+ if (RT_LIKELY(pVal))
+ {
+ size_t cbParsed = 0;
+
+ pVal->pTypeReg = pType->pReg;
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ {
+ PCDBGFTYPEREGMEMBER pMember = &pType->pReg->paMembers[i];
+ PDBGFTYPEVALENTRY pValEntry = &pVal->aEntries[i];
+ size_t cbThisParsed = 0;
+ rc = dbgfR3TypeParseEntry(pUVM, pMember, pValEntry, pbBuf, cbBuf, &cbThisParsed);
+ if (RT_SUCCESS(rc))
+ {
+ pbBuf += cbThisParsed;
+ cbBuf -= cbThisParsed;
+ cbParsed += cbThisParsed; /* Accumulate so *pcbParsed reports the total, not just the last entry. */
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ pVal->cEntries = cEntries;
+ *pcbParsed = cbParsed;
+ *ppVal = pVal;
+ }
+ else
+ MMR3HeapFree(pVal); /** @todo Leak for embedded structs. */
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+
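+/* Result layout sketch for the parser above: a struct of a uint32_t followed
+ * by a uint8_t[16] array yields a DBGFTYPEVAL with cEntries = 2, where
+ * aEntries[0].Buf.Val.u32 holds the first four bytes inline and
+ * aEntries[1].Buf.pVal points to a heap array of 16 DBGFTYPEVALBUF elements,
+ * one byte each in their u8 member.
+ */
+
+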
+/**
+ * Dumps one level of a typed value.
+ *
+ * @returns VBox status code.
+ * @param pVal The value to dump.
+ * @param iLvl The current level.
+ * @param cLvlMax The maximum level.
+ * @param pfnDump The dumper callback.
+ * @param pvUser The opaque user data to pass to the dumper callback.
+ */
+static int dbgfR3TypeValDump(PDBGFTYPEVAL pVal, uint32_t iLvl, uint32_t cLvlMax,
+ PFNDBGFR3TYPEVALDUMP pfnDump, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PCDBGFTYPEREG pType = pVal->pTypeReg;
+
+ for (uint32_t i = 0; i < pVal->cEntries && rc == VINF_SUCCESS; i++)
+ {
+ PCDBGFTYPEREGMEMBER pTypeMember = &pType->paMembers[i];
+ PDBGFTYPEVALENTRY pValEntry = &pVal->aEntries[i];
+ PDBGFTYPEVALBUF pValBuf = pValEntry->cEntries > 1 ? pValEntry->Buf.pVal : &pValEntry->Buf.Val;
+
+ rc = pfnDump(0 /* off */, pTypeMember->pszName, iLvl, pValEntry->enmType, pValEntry->cbType,
+ pValBuf, pValEntry->cEntries, pvUser);
+ if ( rc == VINF_SUCCESS
+ && pValEntry->enmType == DBGFTYPEBUILTIN_COMPOUND
+ && iLvl < cLvlMax)
+ {
+ /* Print embedded structs. */
+ for (uint32_t iValBuf = 0; iValBuf < pValEntry->cEntries && rc == VINF_SUCCESS; iValBuf++)
+ rc = dbgfR3TypeValDump(pValBuf[iValBuf].pVal, iLvl + 1, cLvlMax, pfnDump, pvUser);
+ }
+ }
+
+ return rc;
+}
+
+
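+/* A sketch of a matching value dumper callback (parameter names and types
+ * are inferred from the call site above; returning anything other than
+ * VINF_SUCCESS stops the enumeration):
+ *
+ * @code
+ *     static DECLCALLBACK(int) myValDump(uint32_t off, const char *pszField, uint32_t iLvl,
+ *                                        DBGFTYPEBUILTIN enmType, size_t cbType,
+ *                                        PDBGFTYPEVALBUF pVal, uint32_t cVals, void *pvUser)
+ *     {
+ *         if (enmType == DBGFTYPEBUILTIN_UINT32)
+ *             RTPrintf("%*s%s: %RU32\n", iLvl * 2, "", pszField, pVal->u32);
+ *         RT_NOREF(off, cbType, cVals, pvUser);
+ *         return VINF_SUCCESS;
+ *     }
+ * @endcode
+ */
+
+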
+/**
+ * Dumps one level of a type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pType The type to dump.
+ * @param iLvl The current level.
+ * @param cLvlMax The maximum level.
+ * @param pfnDump The dumper callback.
+ * @param pvUser The opaque user data to pass to the dumper callback.
+ */
+static int dbgfR3TypeDump(PUVM pUVM, PDBGFTYPE pType, uint32_t iLvl, uint32_t cLvlMax,
+ PFNDBGFR3TYPEDUMP pfnDump, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PCDBGFTYPEREG pTypeReg = pType->pReg;
+
+ switch (pTypeReg->enmVariant)
+ {
+ case DBGFTYPEVARIANT_ALIAS:
+ rc = VERR_NOT_IMPLEMENTED;
+ break;
+ case DBGFTYPEVARIANT_STRUCT:
+ case DBGFTYPEVARIANT_UNION:
+ for (uint32_t i = 0; i < pTypeReg->cMembers && rc == VINF_SUCCESS; i++)
+ {
+ PCDBGFTYPEREGMEMBER pTypeMember = &pTypeReg->paMembers[i];
+ PDBGFTYPE pTypeResolved = dbgfR3TypeLookup(pUVM, pTypeMember->pszType);
+
+ rc = pfnDump(0 /* off */, pTypeMember->pszName, iLvl, pTypeMember->pszType,
+ pTypeMember->fFlags, pTypeMember->cElements, pvUser);
+ if ( rc == VINF_SUCCESS
+ && pTypeResolved->pReg
+ && iLvl < cLvlMax)
+ {
+ /* Print embedded structs. */
+ rc = dbgfR3TypeDump(pUVM, pTypeResolved, iLvl + 1, cLvlMax, pfnDump, pvUser);
+ }
+ }
+ break;
+ default:
+ AssertMsgFailed(("Invalid type variant: %u\n", pTypeReg->enmVariant));
+ rc = VERR_INVALID_STATE;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Initializes the type database.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+DECLHIDDEN(int) dbgfR3TypeInit(PUVM pUVM)
+{
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = RTSemRWCreate(&pUVM->dbgf.s.hTypeDbLock);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfTypeRegisterBuiltinTypes(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ RTSemRWDestroy(pUVM->dbgf.s.hTypeDbLock);
+ pUVM->dbgf.s.hTypeDbLock = NIL_RTSEMRW;
+ }
+ }
+ pUVM->dbgf.s.fTypeDbInitialized = RT_SUCCESS(rc);
+ }
+ return rc;
+}
+
+
+/**
+ * Terminates the type database.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+DECLHIDDEN(void) dbgfR3TypeTerm(PUVM pUVM)
+{
+ RTSemRWDestroy(pUVM->dbgf.s.hTypeDbLock);
+ pUVM->dbgf.s.hTypeDbLock = NIL_RTSEMRW;
+ pUVM->dbgf.s.fTypeDbInitialized = false;
+}
+
+
+/**
+ * Registers a new type for lookup.
+ *
+ * @returns VBox status code.
+ * @retval VERR_ALREADY_EXISTS if the type exists already.
+ * @param pUVM The user mode VM handle.
+ * @param cTypes Number of types to register.
+ * @param paTypes The array of type registration structures to register.
+ */
+VMMR3DECL(int) DBGFR3TypeRegister(PUVM pUVM, uint32_t cTypes, PCDBGFTYPEREG paTypes)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(cTypes > 0, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(paTypes, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_WRITE(pUVM);
+ for (uint32_t i = 0; i < cTypes && RT_SUCCESS(rc); i++)
+ {
+ rc = dbgfR3TypeRegister(pUVM, &paTypes[i]);
+ if ( RT_FAILURE(rc)
+ && i > 0)
+ {
+ /* Roll back, deregistering the already added types in reverse order
+ (the entry at index i itself failed and was never added). */
+ while (i-- > 0)
+ {
+ int rc2 = DBGFR3TypeDeregister(pUVM, paTypes[i].pszType);
+ AssertRC(rc2);
+ }
+
+ break;
+ }
+ }
+ DBGF_TYPE_DB_UNLOCK_WRITE(pUVM);
+
+ return rc;
+}
+
+
+/**
+ * Deregisters a previously registered type.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the type is not known.
+ * @retval VERR_RESOURCE_IN_USE if the type is used by another type.
+ * @retval VERR_NOT_SUPPORTED for builtin types, which cannot be deregistered.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier to deregister.
+ */
+VMMR3DECL(int) DBGFR3TypeDeregister(PUVM pUVM, const char *pszType)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_WRITE(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ {
+ if (!pType->cRefs)
+ {
+ /* Unlink the type from the database, drop the references it holds on
+ other types and free it. Builtin types cannot be deregistered. */
+ if (pType->pReg)
+ {
+ PRTSTRSPACECORE pStrCore = RTStrSpaceRemove(&pUVM->dbgf.s.TypeSpace, pszType);
+ Assert(pStrCore == &pType->Core); RT_NOREF(pStrCore);
+ dbgfR3TypeUpdateRefCnts(pUVM, pType->pReg, false /* fRetain */);
+ MMR3HeapFree(pType);
+ }
+ else
+ rc = VERR_NOT_SUPPORTED;
+ }
+ else
+ rc = VERR_RESOURCE_IN_USE;
+ }
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_WRITE(pUVM);
+
+ return rc;
+}
+
+
+/**
+ * Return the type registration structure for the given type identifier.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the type is not known.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier to get the registration structure from.
+ * @param ppTypeReg Where to store the type registration structure on success.
+ */
+VMMR3DECL(int) DBGFR3TypeQueryReg(PUVM pUVM, const char *pszType, PCDBGFTYPEREG *ppTypeReg)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTypeReg, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_READ(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ *ppTypeReg = pType->pReg;
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_READ(pUVM);
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Queries the size a given type would occupy in memory.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the type is not known.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier.
+ * @param pcbType Where to store the amount of memory occupied in bytes.
+ */
+VMMR3DECL(int) DBGFR3TypeQuerySize(PUVM pUVM, const char *pszType, size_t *pcbType)
+{
+ LogFlowFunc(("pUVM=%#p pszType=%s pcbType=%#p\n", pUVM, pszType, pcbType));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertPtrReturn(pcbType, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_READ(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ *pcbType = pType->cbType;
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_READ(pUVM);
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Sets the size of the given type in bytes.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the type is not known.
+ * @retval VERR_NOT_SUPPORTED if changing the size of this type is not supported.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier.
+ * @param cbType The size of the type in bytes.
+ *
+ * @note This currently works only for the builtin pointer and size types
+ * without an explicit size (ptr_t/DBGFTYPEBUILTIN_PTR and
+ * size_t/DBGFTYPEBUILTIN_SIZE).
+ */
+VMMR3DECL(int) DBGFR3TypeSetSize(PUVM pUVM, const char *pszType, size_t cbType)
+{
+ LogFlowFunc(("pUVM=%#p pszType=%s cbType=%zu\n", pUVM, pszType, cbType));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertReturn(cbType > 0, VERR_INVALID_PARAMETER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_WRITE(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ {
+ if ( !pType->pReg
+ && ( pType->enmTypeBuiltin == DBGFTYPEBUILTIN_PTR
+ || pType->enmTypeBuiltin == DBGFTYPEBUILTIN_SIZE))
+ {
+ if (pType->cbType != cbType)
+ {
+ pType->cbType = cbType;
+ rc = dbgfR3TypeRecalculateAllSizes(pUVM);
+ }
+ }
+ else
+ rc = VERR_NOT_SUPPORTED;
+ }
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_WRITE(pUVM);
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Dumps the type information of the given type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszType The type identifier.
+ * @param fFlags Flags to control the dumping (reserved, MBZ).
+ * @param cLvlMax Maximum levels to nest.
+ * @param pfnDump The dumper callback.
+ * @param pvUser Opaque user data.
+ */
+VMMR3DECL(int) DBGFR3TypeDumpEx(PUVM pUVM, const char *pszType, uint32_t fFlags,
+ uint32_t cLvlMax, PFNDBGFR3TYPEDUMP pfnDump, void *pvUser)
+{
+ LogFlowFunc(("pUVM=%#p pszType=%s fFlags=%#x cLvlMax=%u pfnDump=%#p pvUser=%#p\n",
+ pUVM, pszType, fFlags, cLvlMax, pfnDump, pvUser));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertPtrReturn(pfnDump, VERR_INVALID_POINTER);
+ RT_NOREF_PV(fFlags);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_READ(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ rc = dbgfR3TypeDump(pUVM, pType, 0 /* iLvl */, cLvlMax, pfnDump, pvUser);
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_READ(pUVM);
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Returns the value of a memory buffer at the given address formatted for the given
+ * type.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if the type is not known.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address to start reading from.
+ * @param pszType The type identifier.
+ * @param ppVal Where to store the pointer to the value structure
+ * on success.
+ */
+VMMR3DECL(int) DBGFR3TypeQueryValByType(PUVM pUVM, PCDBGFADDRESS pAddress, const char *pszType,
+ PDBGFTYPEVAL *ppVal)
+{
+ LogFlowFunc(("pUVM=%#p pAddress=%#p pszType=%s ppVal=%#p\n", pUVM, pAddress, pszType, ppVal));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pAddress, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppVal, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fTypeDbInitialized)
+ {
+ rc = dbgfR3TypeInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ DBGF_TYPE_DB_LOCK_READ(pUVM);
+ PDBGFTYPE pType = dbgfR3TypeLookup(pUVM, pszType);
+ if (pType)
+ {
+ uint8_t *pbBuf = (uint8_t *)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_TYPE, pType->cbType);
+ if (RT_LIKELY(pbBuf))
+ {
+ rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, pAddress, pbBuf, pType->cbType);
+ if (RT_SUCCESS(rc))
+ {
+ /* Parse the buffer based on the type. */
+ size_t cbParsed = 0;
+ rc = dbgfR3TypeParseBufferByType(pUVM, pType, pbBuf, pType->cbType,
+ ppVal, &cbParsed);
+ }
+
+ MMR3HeapFree(pbBuf);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_NOT_FOUND;
+ DBGF_TYPE_DB_UNLOCK_READ(pUVM);
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Frees all acquired resources of a value previously obtained with
+ * DBGFR3TypeQueryValByType().
+ *
+ * @param pVal The value to free.
+ */
+VMMR3DECL(void) DBGFR3TypeValFree(PDBGFTYPEVAL pVal)
+{
+ AssertPtrReturnVoid(pVal);
+
+ for (uint32_t i = 0; i < pVal->cEntries; i++)
+ {
+ PDBGFTYPEVALENTRY pValEntry = &pVal->aEntries[i];
+ PDBGFTYPEVALBUF pValBuf = pValEntry->cEntries > 1 ? pValEntry->Buf.pVal : &pValEntry->Buf.Val;
+
+ if (pValEntry->enmType == DBGFTYPEBUILTIN_COMPOUND)
+ for (uint32_t iBuf = 0; iBuf < pValEntry->cEntries; iBuf++)
+ DBGFR3TypeValFree(pValBuf[iBuf].pVal); /* Index each element, not just the first. */
+
+ if (pValEntry->cEntries > 1)
+ MMR3HeapFree(pValEntry->Buf.pVal);
+ }
+
+ MMR3HeapFree(pVal);
+}
+
+
+/**
+ * Reads guest memory at the given address and dumps its content according to the given type.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address to start reading from.
+ * @param pszType The type identifier.
+ * @param fFlags Flags for tweaking (reserved, must be zero).
+ * @param cLvlMax Maximum number of levels to expand embedded structs.
+ * @param pfnDump The dumper callback.
+ * @param pvUser The opaque user data to pass to the callback.
+ */
+VMMR3DECL(int) DBGFR3TypeValDumpEx(PUVM pUVM, PCDBGFADDRESS pAddress, const char *pszType, uint32_t fFlags,
+ uint32_t cLvlMax, PFNDBGFR3TYPEVALDUMP pfnDump, void *pvUser)
+{
+ LogFlowFunc(("pUVM=%#p pAddress=%#p pszType=%s fFlags=%#x cLvlMax=%u pfnDump=%#p pvUser=%#p\n",
+ pUVM, pAddress, pszType, fFlags, cLvlMax, pfnDump, pvUser));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pAddress, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+ AssertPtrReturn(pfnDump, VERR_INVALID_POINTER);
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(cLvlMax >= 1, VERR_INVALID_PARAMETER);
+
+ PDBGFTYPEVAL pVal = NULL;
+ int rc = DBGFR3TypeQueryValByType(pUVM, pAddress, pszType, &pVal);
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3TypeValDump(pVal, 0 /* iLvl */, cLvlMax, pfnDump, pvUser);
+ DBGFR3TypeValFree(pVal);
+ }
+
+ LogFlowFunc(("-> rc=%Rrc\n", rc));
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFReg.cpp b/src/VBox/VMM/VMMR3/DBGFReg.cpp
new file mode 100644
index 00000000..c3de8a58
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFReg.cpp
@@ -0,0 +1,2775 @@
+/* $Id: DBGFReg.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Register Methods.
+ */
+
+/*
+ * Copyright (C) 2010-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/ctype.h>
+#include <iprt/string.h>
+#include <iprt/uint128.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Locks the register database for writing. */
+#define DBGF_REG_DB_LOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestWrite((pUVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the register database after writing. */
+#define DBGF_REG_DB_UNLOCK_WRITE(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseWrite((pUVM)->dbgf.s.hRegDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Locks the register database for reading. */
+#define DBGF_REG_DB_LOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWRequestRead((pUVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
+ AssertRC(rcSem); \
+ } while (0)
+
+/** Unlocks the register database after reading. */
+#define DBGF_REG_DB_UNLOCK_READ(pUVM) \
+ do { \
+ int rcSem = RTSemRWReleaseRead((pUVM)->dbgf.s.hRegDbLock); \
+ AssertRC(rcSem); \
+ } while (0)
+
+
+/** The max length of a set, register or sub-field name. */
+#define DBGF_REG_MAX_NAME 40
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Register set registration record type.
+ */
+typedef enum DBGFREGSETTYPE
+{
+ /** Invalid zero value. */
+ DBGFREGSETTYPE_INVALID = 0,
+ /** CPU record. */
+ DBGFREGSETTYPE_CPU,
+ /** Device record. */
+ DBGFREGSETTYPE_DEVICE,
+ /** End of valid record types. */
+ DBGFREGSETTYPE_END
+} DBGFREGSETTYPE;
+
+
+/**
+ * Register set registration record.
+ */
+typedef struct DBGFREGSET
+{
+ /** String space core. */
+ RTSTRSPACECORE Core;
+ /** The registration record type. */
+ DBGFREGSETTYPE enmType;
+ /** The user argument for the callbacks. */
+ union
+ {
+ /** The CPU view. */
+ PVMCPU pVCpu;
+ /** The device view. */
+ PPDMDEVINS pDevIns;
+ /** The general view. */
+ void *pv;
+ } uUserArg;
+
+ /** The register descriptors. */
+ PCDBGFREGDESC paDescs;
+ /** The number of register descriptors. */
+ uint32_t cDescs;
+
+ /** Array of lookup records.
+ * The first part of the array runs parallel to paDescs, the rest covers
+ * aliases and sub-field variations. It's done this way to simplify the
+ * query-all operations. */
+ struct DBGFREGLOOKUP *paLookupRecs;
+ /** The number of lookup records. */
+ uint32_t cLookupRecs;
+
+ /** The register name prefix. */
+ char szPrefix[1];
+} DBGFREGSET;
+/** Pointer to a register registration record. */
+typedef DBGFREGSET *PDBGFREGSET;
+/** Pointer to a const register registration record. */
+typedef DBGFREGSET const *PCDBGFREGSET;
+
+
+/**
+ * Register lookup record.
+ */
+typedef struct DBGFREGLOOKUP
+{
+ /** The string space core. */
+ RTSTRSPACECORE Core;
+ /** Pointer to the set. */
+ PCDBGFREGSET pSet;
+ /** Pointer to the register descriptor. */
+ PCDBGFREGDESC pDesc;
+ /** If an alias this points to the alias descriptor, NULL if not. */
+ PCDBGFREGALIAS pAlias;
+ /** If a sub-field this points to the sub-field descriptor, NULL if not. */
+ PCDBGFREGSUBFIELD pSubField;
+} DBGFREGLOOKUP;
+/** Pointer to a register lookup record. */
+typedef DBGFREGLOOKUP *PDBGFREGLOOKUP;
+/** Pointer to a const register lookup record. */
+typedef DBGFREGLOOKUP const *PCDBGFREGLOOKUP;
+
+
+/**
+ * Argument packet from DBGFR3RegNmQueryAll to dbgfR3RegNmQueryAllWorker.
+ */
+typedef struct DBGFR3REGNMQUERYALLARGS
+{
+ /** The output register array. */
+ PDBGFREGENTRYNM paRegs;
+ /** The number of entries in the output array. */
+ size_t cRegs;
+ /** The current register number when enumerating the string space.
+ * @remarks Only used by EMT(0). */
+ size_t iReg;
+} DBGFR3REGNMQUERYALLARGS;
+/** Pointer to a dbgfR3RegNmQueryAllWorker argument packet. */
+typedef DBGFR3REGNMQUERYALLARGS *PDBGFR3REGNMQUERYALLARGS;
+
+
+/**
+ * Argument packet passed by DBGFR3RegPrintfV to dbgfR3RegPrintfCbOutput and
+ * dbgfR3RegPrintfCbFormat.
+ */
+typedef struct DBGFR3REGPRINTFARGS
+{
+ /** The user mode VM handle. */
+ PUVM pUVM;
+ /** The target CPU. */
+ VMCPUID idCpu;
+ /** Set if we're looking at guest registers. */
+ bool fGuestRegs;
+ /** The output buffer. */
+ char *pszBuf;
+ /** The format string. */
+ const char *pszFormat;
+ /** The va list with format arguments. */
+ va_list va;
+
+ /** The current buffer offset. */
+ size_t offBuf;
+ /** The amount of buffer space left, not counting the terminator char. */
+ size_t cchLeftBuf;
+ /** The status code of the whole operation. The first error encountered
+ * is returned, subsequent ones are suppressed. */
+ int rc;
+} DBGFR3REGPRINTFARGS;
+/** Pointer to a DBGFR3RegPrintfV argument packet. */
+typedef DBGFR3REGPRINTFARGS *PDBGFR3REGPRINTFARGS;
+
+
+
+/**
+ * Initializes the register database.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ */
+int dbgfR3RegInit(PUVM pUVM)
+{
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fRegDbInitialized)
+ {
+ rc = RTSemRWCreate(&pUVM->dbgf.s.hRegDbLock);
+ pUVM->dbgf.s.fRegDbInitialized = RT_SUCCESS(rc);
+ }
+ return rc;
+}
+
+
+/**
+ * Terminates the register database.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+void dbgfR3RegTerm(PUVM pUVM)
+{
+ RTSemRWDestroy(pUVM->dbgf.s.hRegDbLock);
+ pUVM->dbgf.s.hRegDbLock = NIL_RTSEMRW;
+ pUVM->dbgf.s.fRegDbInitialized = false;
+}
+
+
+/**
+ * Validates a register name.
+ *
+ * This is used for prefixes, aliases and field names.
+ *
+ * @returns true if valid, false if not.
+ * @param pszName The register name to validate.
+ * @param chDot Set to '.' if accepted, otherwise 0.
+ */
+static bool dbgfR3RegIsNameValid(const char *pszName, char chDot)
+{
+ const char *psz = pszName;
+ if (!RT_C_IS_ALPHA(*psz))
+ return false;
+ char ch;
+ while ((ch = *++psz))
+ if ( !RT_C_IS_LOWER(ch)
+ && !RT_C_IS_DIGIT(ch)
+ && ch != '_'
+ && ch != chDot)
+ return false;
+ if (psz - pszName > DBGF_REG_MAX_NAME)
+ return false;
+ return true;
+}
+
+
+/**
+ * Common worker for registering a register set.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param paRegisters The register descriptors.
+ * @param enmType The set type.
+ * @param pvUserArg The user argument for the callbacks.
+ * @param pszPrefix The name prefix.
+ * @param iInstance The instance number to be appended to @a
+ * pszPrefix when creating the set name.
+ */
+static int dbgfR3RegRegisterCommon(PUVM pUVM, PCDBGFREGDESC paRegisters, DBGFREGSETTYPE enmType, void *pvUserArg,
+ const char *pszPrefix, uint32_t iInstance)
+{
+ /*
+ * Validate input.
+ */
+ /* The name components. */
+ AssertMsgReturn(dbgfR3RegIsNameValid(pszPrefix, 0), ("%s\n", pszPrefix), VERR_INVALID_NAME);
+ const char *psz = RTStrEnd(pszPrefix, RTSTR_MAX);
+ bool const fNeedUnderscore = RT_C_IS_DIGIT(psz[-1]);
+ size_t const cchPrefix = psz - pszPrefix + fNeedUnderscore;
+ AssertMsgReturn(cchPrefix < RT_SIZEOFMEMB(DBGFREGSET, szPrefix) - 4 - 1, ("%s\n", pszPrefix), VERR_INVALID_NAME);
+
+ AssertMsgReturn(iInstance <= 9999, ("%d\n", iInstance), VERR_INVALID_NAME);
+
+ /* The descriptors. */
+ uint32_t cLookupRecs = 0;
+ uint32_t iDesc;
+ for (iDesc = 0; paRegisters[iDesc].pszName != NULL; iDesc++)
+ {
+ AssertMsgReturn(dbgfR3RegIsNameValid(paRegisters[iDesc].pszName, 0), ("%s (#%u)\n", paRegisters[iDesc].pszName, iDesc), VERR_INVALID_NAME);
+
+ if (enmType == DBGFREGSETTYPE_CPU)
+ AssertMsgReturn(iDesc < (unsigned)DBGFREG_END && (unsigned)paRegisters[iDesc].enmReg == iDesc,
+ ("%d iDesc=%d\n", paRegisters[iDesc].enmReg, iDesc),
+ VERR_INVALID_PARAMETER);
+ else
+ AssertReturn(paRegisters[iDesc].enmReg == DBGFREG_END, VERR_INVALID_PARAMETER);
+ AssertReturn( paRegisters[iDesc].enmType > DBGFREGVALTYPE_INVALID
+ && paRegisters[iDesc].enmType < DBGFREGVALTYPE_END, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(paRegisters[iDesc].fFlags & ~DBGFREG_FLAGS_READ_ONLY),
+ ("%#x (#%u)\n", paRegisters[iDesc].fFlags, iDesc),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(paRegisters[iDesc].pfnGet, VERR_INVALID_PARAMETER);
+ AssertReturn(RT_VALID_PTR(paRegisters[iDesc].pfnSet) || (paRegisters[iDesc].fFlags & DBGFREG_FLAGS_READ_ONLY),
+ VERR_INVALID_PARAMETER);
+
+ uint32_t iAlias = 0;
+ PCDBGFREGALIAS paAliases = paRegisters[iDesc].paAliases;
+ if (paAliases)
+ {
+ AssertPtrReturn(paAliases, VERR_INVALID_PARAMETER);
+ for (; paAliases[iAlias].pszName; iAlias++)
+ {
+ AssertMsgReturn(dbgfR3RegIsNameValid(paAliases[iAlias].pszName, 0), ("%s (%s)\n", paAliases[iAlias].pszName, paRegisters[iDesc].pszName), VERR_INVALID_NAME);
+ AssertReturn( paAliases[iAlias].enmType > DBGFREGVALTYPE_INVALID
+ && paAliases[iAlias].enmType < DBGFREGVALTYPE_END, VERR_INVALID_PARAMETER);
+ }
+ }
+
+ uint32_t iSubField = 0;
+ PCDBGFREGSUBFIELD paSubFields = paRegisters[iDesc].paSubFields;
+ if (paSubFields)
+ {
+ AssertPtrReturn(paSubFields, VERR_INVALID_PARAMETER);
+ for (; paSubFields[iSubField].pszName; iSubField++)
+ {
+ AssertMsgReturn(dbgfR3RegIsNameValid(paSubFields[iSubField].pszName, '.'), ("%s (%s)\n", paSubFields[iSubField].pszName, paRegisters[iDesc].pszName), VERR_INVALID_NAME);
+ AssertReturn(paSubFields[iSubField].iFirstBit + paSubFields[iSubField].cBits <= 128, VERR_INVALID_PARAMETER);
+ AssertReturn(paSubFields[iSubField].cBits + paSubFields[iSubField].cShift <= 128, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(paSubFields[iSubField].pfnGet, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(paSubFields[iSubField].pfnSet, VERR_INVALID_POINTER);
+ }
+ }
+
+ cLookupRecs += (1 + iAlias) * (1 + iSubField);
+ }
+
+ /* Check the instance number of the CPUs. */
+ AssertReturn(enmType != DBGFREGSETTYPE_CPU || iInstance < pUVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /*
+ * Allocate a new record and all associated lookup records.
+ */
+ size_t cbRegSet = RT_UOFFSETOF_DYN(DBGFREGSET, szPrefix[cchPrefix + 4 + 1]);
+ cbRegSet = RT_ALIGN_Z(cbRegSet, 32);
+ size_t const offLookupRecArray = cbRegSet;
+ cbRegSet += cLookupRecs * sizeof(DBGFREGLOOKUP);
+
+ PDBGFREGSET pRegSet = (PDBGFREGSET)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_REG, cbRegSet);
+ if (!pRegSet)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize the new record.
+ */
+ pRegSet->Core.pszString = pRegSet->szPrefix;
+ pRegSet->enmType = enmType;
+ pRegSet->uUserArg.pv = pvUserArg;
+ pRegSet->paDescs = paRegisters;
+ pRegSet->cDescs = iDesc;
+ pRegSet->cLookupRecs = cLookupRecs;
+ pRegSet->paLookupRecs = (PDBGFREGLOOKUP)((uintptr_t)pRegSet + offLookupRecArray);
+ if (fNeedUnderscore)
+ RTStrPrintf(pRegSet->szPrefix, cchPrefix + 4 + 1, "%s_%u", pszPrefix, iInstance);
+ else
+ RTStrPrintf(pRegSet->szPrefix, cchPrefix + 4 + 1, "%s%u", pszPrefix, iInstance);
+
+
+ /*
+ * Initialize the lookup records. See DBGFREGSET::paLookupRecs.
+ */
+ char szName[DBGF_REG_MAX_NAME * 3 + 16];
+ strcpy(szName, pRegSet->szPrefix);
+ char *pszReg = strchr(szName, '\0');
+ *pszReg++ = '.';
+
+ /* Array parallel to the descriptors. */
+ int rc = VINF_SUCCESS;
+ PDBGFREGLOOKUP pLookupRec = &pRegSet->paLookupRecs[0];
+ for (iDesc = 0; paRegisters[iDesc].pszName != NULL && RT_SUCCESS(rc); iDesc++)
+ {
+ strcpy(pszReg, paRegisters[iDesc].pszName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
+ if (!pLookupRec->Core.pszString)
+ rc = VERR_NO_STR_MEMORY;
+ pLookupRec->pSet = pRegSet;
+ pLookupRec->pDesc = &paRegisters[iDesc];
+ pLookupRec->pAlias = NULL;
+ pLookupRec->pSubField = NULL;
+ pLookupRec++;
+ }
+
+ /* Aliases and sub-fields. */
+ for (iDesc = 0; paRegisters[iDesc].pszName != NULL && RT_SUCCESS(rc); iDesc++)
+ {
+ PCDBGFREGALIAS pCurAlias = NULL; /* NULL the first time around, so sub-fields are added for the real register name first. */
+ PCDBGFREGALIAS pNextAlias = paRegisters[iDesc].paAliases;
+ const char *pszRegName = paRegisters[iDesc].pszName;
+ while (RT_SUCCESS(rc))
+ {
+ /* Add sub-field records. */
+ PCDBGFREGSUBFIELD paSubFields = paRegisters[iDesc].paSubFields;
+ if (paSubFields)
+ {
+ size_t cchReg = strlen(pszRegName);
+ memcpy(pszReg, pszRegName, cchReg);
+ char *pszSub = &pszReg[cchReg];
+ *pszSub++ = '.';
+ for (uint32_t iSubField = 0; paSubFields[iSubField].pszName && RT_SUCCESS(rc); iSubField++)
+ {
+ strcpy(pszSub, paSubFields[iSubField].pszName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
+ if (!pLookupRec->Core.pszString)
+ rc = VERR_NO_STR_MEMORY;
+ pLookupRec->pSet = pRegSet;
+ pLookupRec->pDesc = &paRegisters[iDesc];
+ pLookupRec->pAlias = pCurAlias;
+ pLookupRec->pSubField = &paSubFields[iSubField];
+ pLookupRec++;
+ }
+ }
+
+ /* Advance to the next alias. */
+ pCurAlias = pNextAlias++;
+ if (!pCurAlias)
+ break;
+ pszRegName = pCurAlias->pszName;
+ if (!pszRegName)
+ break;
+
+ /* The alias record. */
+ strcpy(pszReg, pszRegName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
+ if (!pLookupRec->Core.pszString)
+ rc = VERR_NO_STR_MEMORY;
+ pLookupRec->pSet = pRegSet;
+ pLookupRec->pDesc = &paRegisters[iDesc];
+ pLookupRec->pAlias = pCurAlias;
+ pLookupRec->pSubField = NULL;
+ pLookupRec++;
+ }
+ }
+ Assert(pLookupRec == &pRegSet->paLookupRecs[pRegSet->cLookupRecs]);
+
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Insert the record into the register set string space and optionally into
+ * the CPU register set cache.
+ */
+ DBGF_REG_DB_LOCK_WRITE(pUVM);
+
+ bool fInserted = RTStrSpaceInsert(&pUVM->dbgf.s.RegSetSpace, &pRegSet->Core);
+ if (fInserted)
+ {
+ pUVM->dbgf.s.cRegs += pRegSet->cDescs;
+ if (enmType == DBGFREGSETTYPE_CPU)
+ {
+ if (pRegSet->cDescs > DBGFREG_ALL_COUNT)
+ pUVM->dbgf.s.cRegs -= pRegSet->cDescs - DBGFREG_ALL_COUNT;
+ if (!strcmp(pszPrefix, "cpu"))
+ pUVM->aCpus[iInstance].dbgf.s.pGuestRegSet = pRegSet;
+ else
+ pUVM->aCpus[iInstance].dbgf.s.pHyperRegSet = pRegSet;
+ }
+
+ PDBGFREGLOOKUP paLookupRecs = pRegSet->paLookupRecs;
+ uint32_t iLookupRec = pRegSet->cLookupRecs;
+ while (iLookupRec-- > 0)
+ {
+ bool fInserted2 = RTStrSpaceInsert(&pUVM->dbgf.s.RegSpace, &paLookupRecs[iLookupRec].Core);
+ AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString)); NOREF(fInserted2);
+ }
+
+ DBGF_REG_DB_UNLOCK_WRITE(pUVM);
+ return VINF_SUCCESS;
+ }
+
+ DBGF_REG_DB_UNLOCK_WRITE(pUVM);
+ rc = VERR_DUPLICATE;
+ }
+
+ /*
+ * Bail out.
+ */
+ for (uint32_t i = 0; i < pRegSet->cLookupRecs; i++)
+ MMR3HeapFree((char *)pRegSet->paLookupRecs[i].Core.pszString);
+ MMR3HeapFree(pRegSet);
+
+ return rc;
+}
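+
+/* Illustration (sketch, hypothetical device set): registering the prefix
+ "mydev", instance 0, with a register "status" that has an alias "stat"
+ and a sub-field "busy" produces one lookup record per name:
+ mydev0.status
+ mydev0.status.busy
+ mydev0.stat
+ mydev0.stat.busy
+ (fNeedUnderscore above inserts a '_' between the prefix and the instance
+ number when needed to keep such names unambiguous.) */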
+
+
+/**
+ * Registers a set of registers for a CPU.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param paRegisters The register descriptors.
+ * @param fGuestRegs Set if it's the guest registers, clear if
+ * hypervisor registers.
+ */
+VMMR3_INT_DECL(int) DBGFR3RegRegisterCpu(PVM pVM, PVMCPU pVCpu, PCDBGFREGDESC paRegisters, bool fGuestRegs)
+{
+ PUVM pUVM = pVM->pUVM;
+ if (!pUVM->dbgf.s.fRegDbInitialized)
+ {
+ int rc = dbgfR3RegInit(pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return dbgfR3RegRegisterCommon(pUVM, paRegisters, DBGFREGSETTYPE_CPU, pVCpu,
+ fGuestRegs ? "cpu" : "hypercpu", pVCpu->idCpu);
+}
+
+
+/**
+ * Registers a set of registers for a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param paRegisters The register descriptors.
+ * @param pDevIns The device instance. This will be the callback user
+ * argument.
+ * @param pszPrefix The device name.
+ * @param iInstance The device instance.
+ */
+VMMR3_INT_DECL(int) DBGFR3RegRegisterDevice(PVM pVM, PCDBGFREGDESC paRegisters, PPDMDEVINS pDevIns,
+ const char *pszPrefix, uint32_t iInstance)
+{
+ AssertPtrReturn(paRegisters, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszPrefix, VERR_INVALID_POINTER);
+
+ return dbgfR3RegRegisterCommon(pVM->pUVM, paRegisters, DBGFREGSETTYPE_DEVICE, pDevIns, pszPrefix, iInstance);
+}
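+
+/* Usage sketch (example only, not built): a device registering a
+ hypothetical descriptor table from its constructor. The table must be
+ terminated by an entry with a NULL name, since the registration loops
+ above stop at pszName == NULL. */
+#if 0
+static int dbgfRegExampleRegisterDevice(PVM pVM, PPDMDEVINS pDevIns, PCDBGFREGDESC paMyDevRegs)
+{
+ return DBGFR3RegRegisterDevice(pVM, paMyDevRegs, pDevIns, "mydev", pDevIns->iInstance);
+}
+#endif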
+
+
+/**
+ * Clears the register value variable.
+ *
+ * @param pValue The variable to clear.
+ */
+DECLINLINE(void) dbgfR3RegValClear(PDBGFREGVAL pValue)
+{
+ pValue->au64[0] = 0;
+ pValue->au64[1] = 0;
+ pValue->au64[2] = 0;
+ pValue->au64[3] = 0;
+ pValue->au64[4] = 0;
+ pValue->au64[5] = 0;
+ pValue->au64[6] = 0;
+ pValue->au64[7] = 0;
+}
+
+
+/**
+ * Sets an 80-bit floating point variable to a 64-bit unsigned integer value.
+ *
+ * @param pValue The value.
+ * @param u64 The integer value.
+ */
+DECLINLINE(void) dbgfR3RegValR80SetU64(PDBGFREGVAL pValue, uint64_t u64)
+{
+ /** @todo fixme */
+ pValue->r80.s.fSign = 0;
+ pValue->r80.s.uExponent = 16383;
+ pValue->r80.s.uMantissa = u64;
+}
+
+
+/**
+ * Sets an 80-bit floating point variable to a 128-bit unsigned integer value.
+ *
+ * @param pValue The value.
+ * @param u128 The integer value.
+ */
+DECLINLINE(void) dbgfR3RegValR80SetU128(PDBGFREGVAL pValue, RTUINT128U u128)
+{
+ /** @todo fixme */
+ pValue->r80.s.fSign = 0;
+ pValue->r80.s.uExponent = 16383;
+ pValue->r80.s.uMantissa = u128.s.Lo;
+}
+
+
+/**
+ * Gets an 80-bit floating point variable as a 64-bit unsigned integer.
+ *
+ * @returns 64-bit unsigned integer.
+ * @param pValue The value.
+ */
+DECLINLINE(uint64_t) dbgfR3RegValR80GetU64(PCDBGFREGVAL pValue)
+{
+ /** @todo stupid, stupid MSC. */
+ return pValue->r80.s.uMantissa;
+}
+
+
+/**
+ * Gets an 80-bit floating point variable as a 128-bit unsigned integer.
+ *
+ * @returns 128-bit unsigned integer.
+ * @param pValue The value.
+ */
+DECLINLINE(RTUINT128U) dbgfR3RegValR80GetU128(PCDBGFREGVAL pValue)
+{
+ /** @todo stupid, stupid MSC. */
+ RTUINT128U uRet;
+#if 0
+ uRet.s.Lo = (uint64_t)InVal.lrd;
+ uRet.s.Hi = (uint64_t)InVal.lrd / _4G / _4G;
+#else
+ uRet.s.Lo = pValue->r80.s.uMantissa;
+ uRet.s.Hi = 0;
+#endif
+ return uRet;
+}
+
+
+/**
+ * Performs a cast between register value types.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ *
+ * @param pValue The value to cast (input + output).
+ * @param enmFromType The type of the input value.
+ * @param enmToType The desired output type.
+ */
+static int dbgfR3RegValCast(PDBGFREGVAL pValue, DBGFREGVALTYPE enmFromType, DBGFREGVALTYPE enmToType)
+{
+ DBGFREGVAL const InVal = *pValue;
+ dbgfR3RegValClear(pValue);
+
+ /* Note! No default cases here as gcc warnings about missing enum values
+ are desired. */
+ switch (enmFromType)
+ {
+ case DBGFREGVALTYPE_U8:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u8; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128.s.Lo = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.Words.w0 = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.Words.w0 = InVal.u8; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU64(pValue, InVal.u8); return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U16:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u16; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u16; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u16; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u16; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128.s.Lo = InVal.u16; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.Words.w0 = InVal.u16; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.Words.w0 = InVal.u16; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU64(pValue, InVal.u16); return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U32:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u32; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u32; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u32; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u32; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128.s.Lo = InVal.u32; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.DWords.dw0 = InVal.u32; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.DWords.dw0 = InVal.u32; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU64(pValue, InVal.u32); return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U64:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u64; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u64; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u64; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u64; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U128: pValue->u128.s.Lo = InVal.u64; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.QWords.qw0 = InVal.u64; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.QWords.qw0 = InVal.u64; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU64(pValue, InVal.u64); return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U128:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u128.s.Lo; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u128.s.Lo; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u128.s.Lo; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u128.s.Lo; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128 = InVal.u128; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U256: pValue->u256.DQWords.dqw0 = InVal.u128; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.DQWords.dqw0 = InVal.u128; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU128(pValue, InVal.u128); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U256:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u256.Words.w0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u256.Words.w0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u256.DWords.dw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u256.QWords.qw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128 = InVal.u256.DQWords.dqw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256 = InVal.u256; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_U512: pValue->u512.OWords.ow0 = InVal.u256; return VINF_DBGF_ZERO_EXTENDED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU128(pValue, InVal.u256.DQWords.dqw0); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_U512:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.u512.Words.w0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.u512.Words.w0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.u512.DWords.dw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.u512.QWords.qw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128 = InVal.u512.DQWords.dqw0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256 = InVal.u512.OWords.ow0; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512 = InVal.u512; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU128(pValue, InVal.u512.DQWords.dqw0); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_R80:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = (uint8_t )dbgfR3RegValR80GetU64(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = (uint16_t)dbgfR3RegValR80GetU64(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = (uint32_t)dbgfR3RegValR80GetU64(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = (uint64_t)dbgfR3RegValR80GetU64(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128 = dbgfR3RegValR80GetU128(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.DQWords.dqw0 = dbgfR3RegValR80GetU128(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.DQWords.dqw0 = dbgfR3RegValR80GetU128(&InVal); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_R80: pValue->r80 = InVal.r80; return VINF_SUCCESS;
+ case DBGFREGVALTYPE_DTR: return VERR_DBGF_UNSUPPORTED_CAST;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_DTR:
+ switch (enmToType)
+ {
+ case DBGFREGVALTYPE_U8: pValue->u8 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U16: pValue->u16 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U32: pValue->u32 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U64: pValue->u64 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U128: pValue->u128.s.Lo = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U256: pValue->u256.QWords.qw0 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_U512: pValue->u512.QWords.qw0 = InVal.dtr.u64Base; return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_R80: dbgfR3RegValR80SetU64(pValue, InVal.dtr.u64Base); return VINF_DBGF_TRUNCATED_REGISTER;
+ case DBGFREGVALTYPE_DTR: pValue->dtr = InVal.dtr; return VINF_SUCCESS;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ }
+ break;
+
+ case DBGFREGVALTYPE_INVALID:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_32BIT_HACK:
+ break;
+ }
+
+ AssertMsgFailed(("%d / %d\n", enmFromType, enmToType));
+ return VERR_DBGF_UNSUPPORTED_CAST;
+}
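+
+/* Worked example (sketch): narrowing keeps the low bits and reports the
+ loss, widening reports zero extension. */
+#if 0
+static void dbgfRegExampleCast(void)
+{
+ DBGFREGVAL Val;
+ dbgfR3RegValClear(&Val);
+ Val.u32 = UINT32_C(0x12345678);
+ int rc = dbgfR3RegValCast(&Val, DBGFREGVALTYPE_U32, DBGFREGVALTYPE_U16);
+ /* rc == VINF_DBGF_TRUNCATED_REGISTER and Val.u16 == 0x5678. */
+ rc = dbgfR3RegValCast(&Val, DBGFREGVALTYPE_U16, DBGFREGVALTYPE_U64);
+ /* rc == VINF_DBGF_ZERO_EXTENDED_REGISTER and Val.u64 == 0x5678. */
+ NOREF(rc);
+}
+#endif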
+
+
+/**
+ * Worker for the CPU register queries.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The virtual CPU ID.
+ * @param enmReg The register to query.
+ * @param enmType The desired return type.
+ * @param fGuestRegs Query guest CPU registers if set (true),
+ * hypervisor CPU registers if clear (false).
+ * @param pValue Where to return the register value.
+ */
+static DECLCALLBACK(int) dbgfR3RegCpuQueryWorkerOnCpu(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType,
+ bool fGuestRegs, PDBGFREGVAL pValue)
+{
+ int rc = VINF_SUCCESS;
+ DBGF_REG_DB_LOCK_READ(pUVM);
+
+ /*
+ * Look up the register set of the specified CPU.
+ */
+ PDBGFREGSET pSet = fGuestRegs
+ ? pUVM->aCpus[idCpu].dbgf.s.pGuestRegSet
+ : pUVM->aCpus[idCpu].dbgf.s.pHyperRegSet;
+ if (RT_LIKELY(pSet))
+ {
+ /*
+ * Look up the register and get the register value.
+ */
+ if (RT_LIKELY(pSet->cDescs > (size_t)enmReg))
+ {
+ PCDBGFREGDESC pDesc = &pSet->paDescs[enmReg];
+
+ pValue->au64[0] = pValue->au64[1] = 0;
+ rc = pDesc->pfnGet(pSet->uUserArg.pv, pDesc, pValue);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do the cast if the desired return type doesn't match what
+ * the getter returned.
+ */
+ if (pDesc->enmType == enmType)
+ rc = VINF_SUCCESS;
+ else
+ rc = dbgfR3RegValCast(pValue, pDesc->enmType, enmType);
+ }
+ }
+ else
+ rc = VERR_DBGF_REGISTER_NOT_FOUND;
+ }
+ else
+ rc = VERR_INVALID_CPU_ID;
+
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
+ return rc;
+}
+
+
+/**
+ * Internal worker for the CPU register query functions.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The virtual CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register to query.
+ * @param enmType The desired return type.
+ * @param pValue Where to return the register value.
+ */
+static int dbgfR3RegCpuQueryWorker(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType, PDBGFREGVAL pValue)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertMsgReturn(enmReg >= DBGFREG_AL && enmReg <= DBGFREG_END, ("%d\n", enmReg), VERR_INVALID_PARAMETER);
+
+ bool const fGuestRegs = !(idCpu & DBGFREG_HYPER_VMCPUID);
+ idCpu &= ~DBGFREG_HYPER_VMCPUID;
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryWorkerOnCpu, 6,
+ pUVM, idCpu, enmReg, enmType, fGuestRegs, pValue);
+}
+
+
+/**
+ * Queries a 8-bit CPU register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register that's being queried.
+ * @param pu8 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryU8(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint8_t *pu8)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U8, &Value);
+ if (RT_SUCCESS(rc))
+ *pu8 = Value.u8;
+ else
+ *pu8 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 16-bit CPU register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register that's being queried.
+ * @param pu16 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryU16(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint16_t *pu16)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U16, &Value);
+ if (RT_SUCCESS(rc))
+ *pu16 = Value.u16;
+ else
+ *pu16 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 32-bit CPU register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register that's being queried.
+ * @param pu32 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryU32(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint32_t *pu32)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U32, &Value);
+ if (RT_SUCCESS(rc))
+ *pu32 = Value.u32;
+ else
+ *pu32 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 64-bit CPU register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register that's being queried.
+ * @param pu64 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryU64(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint64_t *pu64)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U64, &Value);
+ if (RT_SUCCESS(rc))
+ *pu64 = Value.u64;
+ else
+ *pu64 = 0;
+ return rc;
+}
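+
+/* Usage sketch (example only, not built): reading the guest RIP of the
+ first virtual CPU via the enum based API. */
+#if 0
+static void dbgfRegExampleCpuQuery(PUVM pUVM)
+{
+ uint64_t u64Rip = 0;
+ int rc = DBGFR3RegCpuQueryU64(pUVM, 0 /*idCpu*/, DBGFREG_RIP, &u64Rip);
+ /* On success u64Rip holds the guest RIP, otherwise it is set to zero. */
+ NOREF(rc);
+}
+#endif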
+
+
+/**
+ * Queries a descriptor table register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param enmReg The register that's being queried.
+ * @param pu64Base Where to store the register base value.
+ * @param pu16Limit Where to store the register limit value.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryXdtr(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint64_t *pu64Base, uint16_t *pu16Limit)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_DTR, &Value);
+ if (RT_SUCCESS(rc))
+ {
+ *pu64Base = Value.dtr.u64Base;
+ *pu16Limit = Value.dtr.u32Limit;
+ }
+ else
+ {
+ *pu64Base = 0;
+ *pu16Limit = 0;
+ }
+ return rc;
+}
+
+
+#if 0 /* rewrite / remove */
+
+/**
+ * Wrapper around CPUMQueryGuestMsr for dbgfR3RegCpuQueryBatchWorker.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pReg Where to store the register value and
+ * size.
+ * @param idMsr The MSR to get.
+ */
+static void dbgfR3RegGetMsrBatch(PVMCPU pVCpu, PDBGFREGENTRY pReg, uint32_t idMsr)
+{
+ pReg->enmType = DBGFREGVALTYPE_U64;
+ int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &pReg->Val.u64);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsg(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc));
+ pReg->Val.u64 = 0;
+ }
+}
+
+
+static DECLCALLBACK(int) dbgfR3RegCpuQueryBatchWorker(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+{
+#if 0
+ PVMCPU pVCpu = &pUVM->pVM->aCpus[idCpu];
+ PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ PDBGFREGENTRY pReg = paRegs - 1;
+ while (cRegs-- > 0)
+ {
+ pReg++;
+ pReg->Val.au64[0] = 0;
+ pReg->Val.au64[1] = 0;
+
+ DBGFREG const enmReg = pReg->enmReg;
+ AssertMsgReturn(enmReg >= 0 && enmReg <= DBGFREG_END, ("%d (%#x)\n", enmReg, enmReg), VERR_DBGF_REGISTER_NOT_FOUND);
+ if (enmReg != DBGFREG_END)
+ {
+ PCDBGFREGDESC pDesc = &g_aDbgfRegDescs[enmReg];
+ if (!pDesc->pfnGet)
+ {
+ PCRTUINT128U pu = (PCRTUINT128U)((uintptr_t)pCtx + pDesc->offCtx);
+ pReg->enmType = pDesc->enmType;
+ switch (pDesc->enmType)
+ {
+ case DBGFREGVALTYPE_U8: pReg->Val.u8 = pu->au8[0]; break;
+ case DBGFREGVALTYPE_U16: pReg->Val.u16 = pu->au16[0]; break;
+ case DBGFREGVALTYPE_U32: pReg->Val.u32 = pu->au32[0]; break;
+ case DBGFREGVALTYPE_U64: pReg->Val.u64 = pu->au64[0]; break;
+ case DBGFREGVALTYPE_U128:
+ pReg->Val.au64[0] = pu->au64[0];
+ pReg->Val.au64[1] = pu->au64[1];
+ break;
+ case DBGFREGVALTYPE_R80:
+ pReg->Val.au64[0] = pu->au64[0];
+ pReg->Val.au16[5] = pu->au16[5];
+ break;
+ default:
+ AssertMsgFailedReturn(("%s %d\n", pDesc->pszName, pDesc->enmType), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ }
+ }
+ else
+ {
+ int rc = pDesc->pfnGet(pVCpu, pDesc, pCtx, &pReg->Val.u);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+ }
+ return VINF_SUCCESS;
+#else
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
+
+
+/**
+ * Query a batch of registers.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param paRegs Pointer to an array of @a cRegs elements. On
+ * input the enmReg members indicate which
+ * registers to query. On successful return the
+ * other members are set. DBGFREG_END can be used
+ * as a filler.
+ * @param cRegs The number of entries in @a paRegs.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ if (!cRegs)
+ return VINF_SUCCESS;
+ AssertReturn(cRegs < _1M, VERR_OUT_OF_RANGE);
+ AssertPtrReturn(paRegs, VERR_INVALID_POINTER);
+ size_t iReg = cRegs;
+ while (iReg-- > 0)
+ {
+ DBGFREG enmReg = paRegs[iReg].enmReg;
+ AssertMsgReturn(enmReg <= DBGFREG_END && enmReg >= DBGFREG_AL, ("%d (%#x)", enmReg, enmReg), VERR_DBGF_REGISTER_NOT_FOUND);
+ }
+
+ return VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pUVM, idCpu, paRegs, cRegs);
+}
+
+
+/**
+ * Query all registers for a Virtual CPU.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param paRegs Pointer to an array of @a cRegs elements.
+ * These will be filled with the CPU register
+ * values. Overflowing entries will be set to
+ * DBGFREG_END. The returned registers can be
+ * accessed by using the DBGFREG values as index.
+ * @param cRegs The number of entries in @a paRegs. The
+ * recommended value is DBGFREG_ALL_COUNT.
+ */
+VMMR3DECL(int) DBGFR3RegCpuQueryAll(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ if (!cRegs)
+ return VINF_SUCCESS;
+ AssertReturn(cRegs < _1M, VERR_OUT_OF_RANGE);
+ AssertPtrReturn(paRegs, VERR_INVALID_POINTER);
+
+ /*
+ * Convert it into a batch query (lazy bird).
+ */
+ unsigned iReg = 0;
+ while (iReg < cRegs && iReg < DBGFREG_ALL_COUNT)
+ {
+ paRegs[iReg].enmReg = (DBGFREG)iReg;
+ iReg++;
+ }
+ while (iReg < cRegs)
+ paRegs[iReg++].enmReg = DBGFREG_END;
+
+ return VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pUVM, idCpu, paRegs, cRegs);
+}
+
+#endif /* rewrite or remove? */
+
+/**
+ * Gets the name of a register.
+ *
+ * @returns Pointer to read-only register name (lower case). NULL if the
+ * parameters are invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param enmReg The register identifier.
+ * @param enmType The register type. This is for sorting out
+ * aliases. Pass DBGFREGVALTYPE_INVALID to get
+ * the standard name.
+ */
+VMMR3DECL(const char *) DBGFR3RegCpuName(PUVM pUVM, DBGFREG enmReg, DBGFREGVALTYPE enmType)
+{
+ AssertReturn(enmReg >= DBGFREG_AL && enmReg < DBGFREG_END, NULL);
+ AssertReturn(enmType >= DBGFREGVALTYPE_INVALID && enmType < DBGFREGVALTYPE_END, NULL);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+
+ PCDBGFREGSET pSet = pUVM->aCpus[0].dbgf.s.pGuestRegSet;
+ if (RT_UNLIKELY(!pSet))
+ return NULL;
+
+ PCDBGFREGDESC pDesc = &pSet->paDescs[enmReg];
+ PCDBGFREGALIAS pAlias = pDesc->paAliases;
+ if ( pAlias
+ && pDesc->enmType != enmType
+ && enmType != DBGFREGVALTYPE_INVALID)
+ {
+ while (pAlias->pszName)
+ {
+ if (pAlias->enmType == enmType)
+ return pAlias->pszName;
+ pAlias++;
+ }
+ }
+
+ return pDesc->pszName;
+}
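+
+/* Example (sketch): with an x86 guest register set where rax carries the
+ U32 alias eax, DBGFR3RegCpuName(pUVM, DBGFREG_RAX, DBGFREGVALTYPE_U32)
+ returns "eax", while passing DBGFREGVALTYPE_INVALID yields the standard
+ name "rax". */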
+
+
+/**
+ * Fold the string to lower case and copy it into the destination buffer.
+ *
+ * @returns Number of folded characters, -1 on overflow.
+ * @param pszSrc The source string.
+ * @param cchSrc How much to fold and copy.
+ * @param pszDst The output buffer.
+ * @param cbDst The size of the output buffer.
+ */
+static ssize_t dbgfR3RegCopyToLower(const char *pszSrc, size_t cchSrc, char *pszDst, size_t cbDst)
+{
+ ssize_t cchFolded = 0;
+ char ch;
+ while (cchSrc-- > 0 && (ch = *pszSrc++))
+ {
+ if (RT_UNLIKELY(cbDst <= 1))
+ return -1;
+ cbDst--;
+
+ char chLower = RT_C_TO_LOWER(ch);
+ cchFolded += chLower != ch;
+ *pszDst++ = chLower;
+ }
+ if (RT_UNLIKELY(!cbDst))
+ return -1;
+ *pszDst = '\0';
+ return cchFolded;
+}
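+
+/* Example: folding "GDTR.Base" produces "gdtr.base" and returns 5, the
+ number of characters whose case actually changed; folding "eax" returns 0. */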
+
+
+/**
+ * Resolves the register name.
+ *
+ * @returns Lookup record.
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default CPU ID, used for the register set prefix.
+ * @param pszReg The register name.
+ * @param fGuestRegs Default to guest CPU registers if set, the
+ * hypervisor CPU registers if clear.
+ */
+static PCDBGFREGLOOKUP dbgfR3RegResolve(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, bool fGuestRegs)
+{
+ DBGF_REG_DB_LOCK_READ(pUVM);
+
+ /* Try looking up the name without any case folding or cpu prefixing. */
+ PRTSTRSPACE pRegSpace = &pUVM->dbgf.s.RegSpace;
+ PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, pszReg);
+ if (!pLookupRec)
+ {
+ char szName[DBGF_REG_MAX_NAME * 4 + 16];
+
+ /* Lower case it and try again. */
+ ssize_t cchFolded = dbgfR3RegCopyToLower(pszReg, RTSTR_MAX, szName, sizeof(szName) - DBGF_REG_MAX_NAME);
+ if (cchFolded > 0)
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
+ if ( !pLookupRec
+ && cchFolded >= 0
+ && idDefCpu != VMCPUID_ANY)
+ {
+ /* Prefix it with the specified CPU set. */
+ size_t cchCpuSet = RTStrPrintf(szName, sizeof(szName), fGuestRegs ? "cpu%u." : "hypercpu%u.", idDefCpu);
+ dbgfR3RegCopyToLower(pszReg, RTSTR_MAX, &szName[cchCpuSet], sizeof(szName) - cchCpuSet);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
+ }
+ }
+
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
+ return pLookupRec;
+}
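+
+/* Resolution order example (sketch): for pszReg "RFlags.ZF" with
+ idDefCpu 1 and fGuestRegs set, the lookups are:
+ 1. "RFlags.ZF" - tried verbatim,
+ 2. "rflags.zf" - tried after case folding,
+ 3. "cpu1.rflags.zf" - tried with the default CPU set prefix. */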
+
+
+/**
+ * Validates the register name.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if the register was found.
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND if not found.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default CPU.
+ * @param pszReg The register name.
+ */
+VMMR3DECL(int) DBGFR3RegNmValidate(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
+
+ /*
+ * Resolve the register.
+ */
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
+ if (!pLookupRec)
+ return VERR_DBGF_REGISTER_NOT_FOUND;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * On CPU worker for the register queries, used by dbgfR3RegNmQueryWorker and
+ * dbgfR3RegPrintfCbFormatNormal.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pLookupRec The register lookup record.
+ * @param enmType The desired return type.
+ * @param pValue Where to return the register value.
+ * @param penmType Where to store the register value type.
+ * Optional.
+ */
+static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PUVM pUVM, PCDBGFREGLOOKUP pLookupRec, DBGFREGVALTYPE enmType,
+ PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
+{
+ PCDBGFREGDESC pDesc = pLookupRec->pDesc;
+ PCDBGFREGSET pSet = pLookupRec->pSet;
+ PCDBGFREGSUBFIELD pSubField = pLookupRec->pSubField;
+ DBGFREGVALTYPE enmValueType = pDesc->enmType;
+ int rc;
+
+ NOREF(pUVM);
+
+ /*
+ * Get the register or sub-field value.
+ */
+ dbgfR3RegValClear(pValue);
+ if (!pSubField)
+ {
+ rc = pDesc->pfnGet(pSet->uUserArg.pv, pDesc, pValue);
+ if ( pLookupRec->pAlias
+ && pLookupRec->pAlias->enmType != enmValueType
+ && RT_SUCCESS(rc))
+ {
+ rc = dbgfR3RegValCast(pValue, enmValueType, pLookupRec->pAlias->enmType);
+ enmValueType = pLookupRec->pAlias->enmType;
+ }
+ }
+ else
+ {
+ if (pSubField->pfnGet)
+ {
+ rc = pSubField->pfnGet(pSet->uUserArg.pv, pSubField, &pValue->u128);
+ enmValueType = DBGFREGVALTYPE_U128;
+ }
+ else
+ {
+ rc = pDesc->pfnGet(pSet->uUserArg.pv, pDesc, pValue);
+ if ( pLookupRec->pAlias
+ && pLookupRec->pAlias->enmType != enmValueType
+ && RT_SUCCESS(rc))
+ {
+ rc = dbgfR3RegValCast(pValue, enmValueType, pLookupRec->pAlias->enmType);
+ enmValueType = pLookupRec->pAlias->enmType;
+ }
+ if (RT_SUCCESS(rc))
+ {
+ rc = dbgfR3RegValCast(pValue, enmValueType, DBGFREGVALTYPE_U128);
+ if (RT_SUCCESS(rc))
+ {
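+ /* Note: the negative count below makes the shift-left helper shift
+ right, moving the sub-field at iFirstBit down to bit 0. */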
+ RTUInt128AssignShiftLeft(&pValue->u128, -pSubField->iFirstBit);
+ RTUInt128AssignAndNFirstBits(&pValue->u128, pSubField->cBits);
+ if (pSubField->cShift)
+ RTUInt128AssignShiftLeft(&pValue->u128, pSubField->cShift);
+ }
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ unsigned const cBits = pSubField->cBits + pSubField->cShift;
+ if (cBits <= 8)
+ enmValueType = DBGFREGVALTYPE_U8;
+ else if (cBits <= 16)
+ enmValueType = DBGFREGVALTYPE_U16;
+ else if (cBits <= 32)
+ enmValueType = DBGFREGVALTYPE_U32;
+ else if (cBits <= 64)
+ enmValueType = DBGFREGVALTYPE_U64;
+ else
+ enmValueType = DBGFREGVALTYPE_U128;
+ rc = dbgfR3RegValCast(pValue, DBGFREGVALTYPE_U128, enmValueType);
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do the cast if the desired return type doesn't match what
+ * the getter returned.
+ */
+ if ( enmValueType == enmType
+ || enmType == DBGFREGVALTYPE_END)
+ {
+ rc = VINF_SUCCESS;
+ if (penmType)
+ *penmType = enmValueType;
+ }
+ else
+ {
+ rc = dbgfR3RegValCast(pValue, enmValueType, enmType);
+ if (penmType)
+ *penmType = RT_SUCCESS(rc) ? enmType : enmValueType;
+ }
+ }
+
+ return rc;
+}
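+
+/* Worked example (sketch, hypothetical sub-field): with iFirstBit=11,
+ cBits=1 and cShift=0, the code above shifts the 128-bit value right by
+ 11 bits, masks it down to a single bit, and returns it as a U8 since
+ cBits + cShift <= 8. */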
+
+
+/**
+ * Worker for the register queries.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The virtual CPU ID for the default CPU register
+ * set. Can be OR'ed with DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register to query.
+ * @param enmType The desired return type.
+ * @param pValue Where to return the register value.
+ * @param penmType Where to store the register value type.
+ * Optional.
+ */
+static int dbgfR3RegNmQueryWorker(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, DBGFREGVALTYPE enmType,
+ PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
+
+ Assert(enmType > DBGFREGVALTYPE_INVALID && enmType <= DBGFREGVALTYPE_END);
+ AssertPtr(pValue);
+
+ /*
+ * Resolve the register and call the getter on the relevant CPU.
+ */
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
+ if (pLookupRec)
+ {
+ if (pLookupRec->pSet->enmType == DBGFREGSETTYPE_CPU)
+ idDefCpu = pLookupRec->pSet->uUserArg.pVCpu->idCpu;
+ else if (idDefCpu != VMCPUID_ANY)
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ return VMR3ReqPriorityCallWaitU(pUVM, idDefCpu, (PFNRT)dbgfR3RegNmQueryWorkerOnCpu, 5,
+ pUVM, pLookupRec, enmType, pValue, penmType);
+ }
+ return VERR_DBGF_REGISTER_NOT_FOUND;
+}
+
+
+/**
+ * Queries a descriptor table register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pValue Where to store the register value.
+ * @param penmType Where to store the register value type.
+ */
+VMMR3DECL(int) DBGFR3RegNmQuery(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
+{
+ return dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_END, pValue, penmType);
+}
+
+
+/**
+ * Queries an 8-bit register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu8 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryU8(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint8_t *pu8)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U8, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *pu8 = Value.u8;
+ else
+ *pu8 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 16-bit register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu16 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryU16(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint16_t *pu16)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U16, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *pu16 = Value.u16;
+ else
+ *pu16 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 32-bit register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu32 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryU32(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint32_t *pu32)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U32, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *pu32 = Value.u32;
+ else
+ *pu32 = 0;
+ return rc;
+}
+
+
+/**
+ * Queries a 64-bit register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu64 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryU64(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U64, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *pu64 = Value.u64;
+ else
+ *pu64 = 0;
+ return rc;
+}
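+
+/* Usage sketch (example only, not built): querying by name; for a plain
+ CPU register name the idDefCpu argument selects the register set prefix. */
+#if 0
+static void dbgfRegExampleNmQuery(PUVM pUVM)
+{
+ uint64_t u64Cr3 = 0;
+ int rc = DBGFR3RegNmQueryU64(pUVM, 0 /*idDefCpu*/, "cr3", &u64Cr3);
+ NOREF(rc);
+}
+#endif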
+
+
+/**
+ * Queries a 128-bit register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu128 Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryU128(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PRTUINT128U pu128)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U128, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *pu128 = Value.u128;
+ else
+ pu128->s.Hi = pu128->s.Lo = 0;
+ return rc;
+}
+
+
+#if 0
+/**
+ * Queries a long double register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param plrd Where to store the register value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryLrd(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, long double *plrd)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_R80, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ *plrd = Value.lrd;
+ else
+ *plrd = 0;
+ return rc;
+}
+#endif
+
+
+/**
+ * Queries a descriptor table register value.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
+ * applicable. Can be OR'ed with
+ * DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register that's being queried. Except for
+ * CPU registers, this must be of the form
+ * "set.reg[.sub]".
+ * @param pu64Base Where to store the register base value.
+ * @param pu16Limit Where to store the register limit value.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryXdtr(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64Base, uint16_t *pu16Limit)
+{
+ DBGFREGVAL Value;
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_DTR, &Value, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ *pu64Base = Value.dtr.u64Base;
+ *pu16Limit = Value.dtr.u32Limit;
+ }
+ else
+ {
+ *pu64Base = 0;
+ *pu16Limit = 0;
+ }
+ return rc;
+}
+
+
+/// @todo VMMR3DECL(int) DBGFR3RegNmQueryBatch(PUVM pUVM, VMCPUID idDefCpu, DBGFREGENTRYNM paRegs, size_t cRegs);
+
+
+/**
+ * Gets the number of registers returned by DBGFR3RegNmQueryAll.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pcRegs Where to return the register count.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryAllCount(PUVM pUVM, size_t *pcRegs)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ *pcRegs = pUVM->dbgf.s.cRegs;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Pad register entries.
+ *
+ * @param paRegs The output array.
+ * @param cRegs The size of the output array.
+ * @param iReg The first register to pad.
+ * @param cRegsToPad The number of registers to pad.
+ */
+static void dbgfR3RegNmQueryAllPadEntries(PDBGFREGENTRYNM paRegs, size_t cRegs, size_t iReg, size_t cRegsToPad)
+{
+ if (iReg < cRegs)
+ {
+ size_t iEndReg = iReg + cRegsToPad;
+ if (iEndReg > cRegs)
+ iEndReg = cRegs;
+ while (iReg < iEndReg)
+ {
+ paRegs[iReg].pszName = NULL;
+ paRegs[iReg].enmType = DBGFREGVALTYPE_END;
+ dbgfR3RegValClear(&paRegs[iReg].Val);
+ iReg++;
+ }
+ }
+}
+
+
+/**
+ * Query all registers in a set.
+ *
+ * @param pSet The set.
+ * @param cRegsToQuery The number of registers to query.
+ * @param paRegs The output array.
+ * @param cRegs The size of the output array.
+ */
+static void dbgfR3RegNmQueryAllInSet(PCDBGFREGSET pSet, size_t cRegsToQuery, PDBGFREGENTRYNM paRegs, size_t cRegs)
+{
+ if (cRegsToQuery > pSet->cDescs)
+ cRegsToQuery = pSet->cDescs;
+ if (cRegsToQuery > cRegs)
+ cRegsToQuery = cRegs;
+
+ for (size_t iReg = 0; iReg < cRegsToQuery; iReg++)
+ {
+ paRegs[iReg].enmType = pSet->paDescs[iReg].enmType;
+ paRegs[iReg].pszName = pSet->paLookupRecs[iReg].Core.pszString;
+ dbgfR3RegValClear(&paRegs[iReg].Val);
+ int rc2 = pSet->paDescs[iReg].pfnGet(pSet->uUserArg.pv, &pSet->paDescs[iReg], &paRegs[iReg].Val);
+ AssertRCSuccess(rc2);
+ if (RT_FAILURE(rc2))
+ dbgfR3RegValClear(&paRegs[iReg].Val);
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNRTSTRSPACECALLBACK, Worker used by
+ * dbgfR3RegNmQueryAllWorker}
+ */
+static DECLCALLBACK(int) dbgfR3RegNmQueryAllEnum(PRTSTRSPACECORE pStr, void *pvUser)
+{
+ PCDBGFREGSET pSet = (PCDBGFREGSET)pStr;
+ if (pSet->enmType != DBGFREGSETTYPE_CPU)
+ {
+ PDBGFR3REGNMQUERYALLARGS pArgs = (PDBGFR3REGNMQUERYALLARGS)pvUser;
+ if (pArgs->iReg < pArgs->cRegs)
+ dbgfR3RegNmQueryAllInSet(pSet, pSet->cDescs, &pArgs->paRegs[pArgs->iReg], pArgs->cRegs - pArgs->iReg);
+ pArgs->iReg += pSet->cDescs;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker used by DBGFR3RegNmQueryAll}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ PDBGFR3REGNMQUERYALLARGS pArgs = (PDBGFR3REGNMQUERYALLARGS)pvUser;
+ PDBGFREGENTRYNM paRegs = pArgs->paRegs;
+ size_t const cRegs = pArgs->cRegs;
+ PUVM pUVM = pVM->pUVM;
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
+
+ DBGF_REG_DB_LOCK_READ(pUVM);
+
+ /*
+ * My guest CPU registers.
+ */
+ size_t iCpuReg = pVCpu->idCpu * DBGFREG_ALL_COUNT;
+ if (pUVCpu->dbgf.s.pGuestRegSet)
+ {
+ if (iCpuReg < cRegs)
+ dbgfR3RegNmQueryAllInSet(pUVCpu->dbgf.s.pGuestRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
+ }
+ else
+ dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, iCpuReg, DBGFREG_ALL_COUNT);
+
+ /*
+ * My hypervisor CPU registers.
+ */
+ iCpuReg = pUVM->cCpus * DBGFREG_ALL_COUNT + pUVCpu->idCpu * DBGFREG_ALL_COUNT;
+ if (pUVCpu->dbgf.s.pHyperRegSet)
+ {
+ if (iCpuReg < cRegs)
+ dbgfR3RegNmQueryAllInSet(pUVCpu->dbgf.s.pHyperRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
+ }
+ else
+ dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, iCpuReg, DBGFREG_ALL_COUNT);
+
+ /*
+ * The primary CPU does all the other registers.
+ */
+ if (pUVCpu->idCpu == 0)
+ {
+ pArgs->iReg = pUVM->cCpus * DBGFREG_ALL_COUNT * 2;
+ RTStrSpaceEnumerate(&pUVM->dbgf.s.RegSetSpace, dbgfR3RegNmQueryAllEnum, pArgs);
+ dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, pArgs->iReg, cRegs);
+ }
+
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
+ return VINF_SUCCESS; /* Ignore errors. */
+}
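+
+/* Output layout sketch for a 2-CPU VM (c = DBGFREG_ALL_COUNT):
+ [0 .. c-1] guest registers of CPU 0,
+ [c .. 2c-1] guest registers of CPU 1,
+ [2c .. 3c-1] hypervisor registers of CPU 0,
+ [3c .. 4c-1] hypervisor registers of CPU 1,
+ [4c .. ] all non-CPU register sets, then NULL-name padding. */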
+
+
+/**
+ * Queries all registers.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param paRegs The output register value array. The register
+ * name string is read only and shall not be freed
+ * or modified.
+ * @param cRegs The number of entries in @a paRegs. The
+ * correct size can be obtained by calling
+ * DBGFR3RegNmQueryAllCount.
+ */
+VMMR3DECL(int) DBGFR3RegNmQueryAll(PUVM pUVM, PDBGFREGENTRYNM paRegs, size_t cRegs)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(paRegs, VERR_INVALID_POINTER);
+ AssertReturn(cRegs > 0, VERR_OUT_OF_RANGE);
+
+ DBGFR3REGNMQUERYALLARGS Args;
+ Args.paRegs = paRegs;
+ Args.cRegs = cRegs;
+
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3RegNmQueryAllWorker, &Args);
+}
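+
+/* Usage sketch (example only, not built): size the array with
+ DBGFR3RegNmQueryAllCount, then fetch everything in one call. */
+#if 0
+static void dbgfRegExampleQueryAll(PUVM pUVM)
+{
+ size_t cRegs = 0;
+ int rc = DBGFR3RegNmQueryAllCount(pUVM, &cRegs);
+ if (RT_SUCCESS(rc) && cRegs)
+ {
+ PDBGFREGENTRYNM paRegs = (PDBGFREGENTRYNM)RTMemAllocZ(cRegs * sizeof(paRegs[0]));
+ if (paRegs)
+ {
+ rc = DBGFR3RegNmQueryAll(pUVM, paRegs, cRegs);
+ /* On success paRegs[i].pszName, .enmType and .Val describe each register. */
+ RTMemFree(paRegs);
+ }
+ }
+}
+#endif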
+
+
+/**
+ * On CPU worker for the register modifications, used by DBGFR3RegNmSet.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pLookupRec The register lookup record. May be modified,
+ * so please pass a copy of the user's one.
+ * @param pValue The new register value.
+ * @param pMask Indicate which bits to modify.
+ */
+static DECLCALLBACK(int) dbgfR3RegNmSetWorkerOnCpu(PUVM pUVM, PDBGFREGLOOKUP pLookupRec,
+ PCDBGFREGVAL pValue, PCDBGFREGVAL pMask)
+{
+ RT_NOREF_PV(pUVM);
+ PCDBGFREGSUBFIELD pSubField = pLookupRec->pSubField;
+ if (pSubField && pSubField->pfnSet)
+ return pSubField->pfnSet(pLookupRec->pSet->uUserArg.pv, pSubField, pValue->u128, pMask->u128);
+ return pLookupRec->pDesc->pfnSet(pLookupRec->pSet->uUserArg.pv, pLookupRec->pDesc, pValue, pMask);
+}
+
+
+/**
+ * Sets the value of a named register.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The virtual CPU ID for the default CPU register
+ * set. Can be OR'ed with DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register to set.
+ * @param pValue The value to set.
+ * @param enmType How to interpret the value in @a pValue.
+ */
+VMMR3DECL(int) DBGFR3RegNmSet(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
+ AssertReturn(enmType > DBGFREGVALTYPE_INVALID && enmType < DBGFREGVALTYPE_END, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pValue, VERR_INVALID_PARAMETER);
+
+ /*
+ * Resolve the register and check that it is writable.
+ */
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
+ if (pLookupRec)
+ {
+ PCDBGFREGDESC pDesc = pLookupRec->pDesc;
+ PCDBGFREGSET pSet = pLookupRec->pSet;
+ PCDBGFREGSUBFIELD pSubField = pLookupRec->pSubField;
+
+ if ( !(pDesc->fFlags & DBGFREG_FLAGS_READ_ONLY)
+ && (pSubField
+ ? !(pSubField->fFlags & DBGFREGSUBFIELD_FLAGS_READ_ONLY)
+ && (pSubField->pfnSet != NULL || pDesc->pfnSet != NULL)
+ : pDesc->pfnSet != NULL) )
+ {
+ /*
+ * Calculate the modification mask and cast the input value to the
+ * type of the target register.
+ */
+ DBGFREGVAL Mask = DBGFREGVAL_INITIALIZE_ZERO;
+ DBGFREGVAL Value = DBGFREGVAL_INITIALIZE_ZERO;
+ switch (enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ Value.u8 = pValue->u8;
+ Mask.u8 = UINT8_MAX;
+ break;
+ case DBGFREGVALTYPE_U16:
+ Value.u16 = pValue->u16;
+ Mask.u16 = UINT16_MAX;
+ break;
+ case DBGFREGVALTYPE_U32:
+ Value.u32 = pValue->u32;
+ Mask.u32 = UINT32_MAX;
+ break;
+ case DBGFREGVALTYPE_U64:
+ Value.u64 = pValue->u64;
+ Mask.u64 = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U128:
+ Value.u128 = pValue->u128;
+ Mask.u128.s.Lo = UINT64_MAX;
+ Mask.u128.s.Hi = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U256:
+ Value.u256 = pValue->u256;
+ Mask.u256.QWords.qw0 = UINT64_MAX;
+ Mask.u256.QWords.qw1 = UINT64_MAX;
+ Mask.u256.QWords.qw2 = UINT64_MAX;
+ Mask.u256.QWords.qw3 = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U512:
+ Value.u512 = pValue->u512;
+ Mask.u512.QWords.qw0 = UINT64_MAX;
+ Mask.u512.QWords.qw1 = UINT64_MAX;
+ Mask.u512.QWords.qw2 = UINT64_MAX;
+ Mask.u512.QWords.qw3 = UINT64_MAX;
+ Mask.u512.QWords.qw4 = UINT64_MAX;
+ Mask.u512.QWords.qw5 = UINT64_MAX;
+ Mask.u512.QWords.qw6 = UINT64_MAX;
+ Mask.u512.QWords.qw7 = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_R80:
+#ifdef RT_COMPILER_WITH_80BIT_LONG_DOUBLE
+ Value.r80Ex.lrd = pValue->r80Ex.lrd;
+#else
+ Value.r80Ex.au64[0] = pValue->r80Ex.au64[0];
+ Value.r80Ex.au16[4] = pValue->r80Ex.au16[4];
+#endif
+ Mask.r80Ex.au64[0] = UINT64_MAX;
+ Mask.r80Ex.au16[4] = UINT16_MAX;
+ break;
+ case DBGFREGVALTYPE_DTR:
+ Value.dtr.u32Limit = pValue->dtr.u32Limit;
+ Value.dtr.u64Base = pValue->dtr.u64Base;
+ Mask.dtr.u32Limit = UINT32_MAX;
+ Mask.dtr.u64Base = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ AssertFailedReturn(VERR_INTERNAL_ERROR_3);
+ }
+
+ int rc = VINF_SUCCESS;
+ DBGFREGVALTYPE enmRegType = pDesc->enmType;
+ if (pSubField)
+ {
+ unsigned const cBits = pSubField->cBits + pSubField->cShift;
+ if (cBits <= 8)
+ enmRegType = DBGFREGVALTYPE_U8;
+ else if (cBits <= 16)
+ enmRegType = DBGFREGVALTYPE_U16;
+ else if (cBits <= 32)
+ enmRegType = DBGFREGVALTYPE_U32;
+ else if (cBits <= 64)
+ enmRegType = DBGFREGVALTYPE_U64;
+ else if (cBits <= 128)
+ enmRegType = DBGFREGVALTYPE_U128;
+ else if (cBits <= 256)
+ enmRegType = DBGFREGVALTYPE_U256;
+ else
+ enmRegType = DBGFREGVALTYPE_U512;
+ }
+ else if (pLookupRec->pAlias)
+ {
+ /* Restrict the input to the size of the alias register. */
+ DBGFREGVALTYPE enmAliasType = pLookupRec->pAlias->enmType;
+ if (enmAliasType != enmType)
+ {
+ rc = dbgfR3RegValCast(&Value, enmType, enmAliasType);
+ if (RT_FAILURE(rc))
+ return rc;
+ dbgfR3RegValCast(&Mask, enmType, enmAliasType);
+ enmType = enmAliasType;
+ }
+ }
+
+ if (enmType != enmRegType)
+ {
+ int rc2 = dbgfR3RegValCast(&Value, enmType, enmRegType);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
+ rc = rc2;
+ dbgfR3RegValCast(&Mask, enmType, enmRegType);
+ }
+
+ /*
+ * Subfields need some extra processing if there is no subfield
+ * setter, since we'll be feeding it to the normal register setter
+ * instead. The mask and value must be shifted and truncated to the
+ * subfield position.
+ */
+ if (pSubField && !pSubField->pfnSet)
+ {
+ /* The shift factor is for displaying a subfield value
+ 2**cShift times larger than the stored value. We have
+ to undo this before adjusting value and mask. */
+ if (pSubField->cShift)
+ {
+ /* Warn about truncation of the lower bits that get
+ shifted out below. */
+ if (rc == VINF_SUCCESS)
+ {
+ DBGFREGVAL Value2 = Value;
+ RTUInt128AssignAndNFirstBits(&Value2.u128, pSubField->cShift);
+ if (!RTUInt128BitAreAllClear(&Value2.u128))
+ rc = VINF_DBGF_TRUNCATED_REGISTER;
+ }
+ RTUInt128AssignShiftRight(&Value.u128, pSubField->cShift);
+ }
+
+ DBGFREGVAL const ValueUnmasked = Value;
+ RTUInt128AssignAndNFirstBits(&Value.u128, pSubField->cBits);
+ if (rc == VINF_SUCCESS && RTUInt128IsNotEqual(&Value.u128, &ValueUnmasked.u128))
+ rc = VINF_DBGF_TRUNCATED_REGISTER;
+ RTUInt128AssignAndNFirstBits(&Mask.u128, pSubField->cBits);
+
+ RTUInt128AssignShiftLeft(&Value.u128, pSubField->iFirstBit);
+ RTUInt128AssignShiftLeft(&Mask.u128, pSubField->iFirstBit);
+ }
+
+ /*
+ * Do the actual work on an EMT.
+ */
+ if (pSet->enmType == DBGFREGSETTYPE_CPU)
+ idDefCpu = pSet->uUserArg.pVCpu->idCpu;
+ else if (idDefCpu != VMCPUID_ANY)
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+
+ int rc2 = VMR3ReqPriorityCallWaitU(pUVM, idDefCpu, (PFNRT)dbgfR3RegNmSetWorkerOnCpu, 4,
+ pUVM, pLookupRec, &Value, &Mask);
+
+ if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
+ rc = rc2;
+ return rc;
+ }
+ return VERR_DBGF_READ_ONLY_REGISTER;
+ }
+ return VERR_DBGF_REGISTER_NOT_FOUND;
+}
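+
+/* Usage sketch (example only, not built): setting a register by name. The
+ input is cast to the register type and only the masked bits are written. */
+#if 0
+static void dbgfRegExampleNmSet(PUVM pUVM)
+{
+ DBGFREGVAL Value;
+ dbgfR3RegValClear(&Value);
+ Value.u64 = UINT64_C(0x1000);
+ int rc = DBGFR3RegNmSet(pUVM, 0 /*idDefCpu*/, "rip", &Value, DBGFREGVALTYPE_U64);
+ NOREF(rc);
+}
+#endif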
+
+
+/**
+ * Set a given set of registers.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The virtual CPU ID for the default CPU register
+ * set. Can be OR'ed with DBGFREG_HYPER_VMCPUID.
+ * @param paRegs The array of registers to set.
+ * @param cRegs Number of registers in the array.
+ *
+ * @todo This is a _very_ lazy implementation by a lazy developer; some semantics
+ * need to be figured out before the real implementation, especially how and
+ * when errors and informational status codes like VINF_DBGF_TRUNCATED_REGISTER
+ * should be returned (think of an error right in the middle of the batch; should we
+ * save the state and roll back?).
+ */
+VMMR3DECL(int) DBGFR3RegNmSetBatch(PUVM pUVM, VMCPUID idDefCpu, PCDBGFREGENTRYNM paRegs, size_t cRegs)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(paRegs, VERR_INVALID_PARAMETER);
+ AssertReturn(cRegs > 0, VERR_INVALID_PARAMETER);
+
+ for (size_t i = 0; i < cRegs; i++)
+ {
+ int rc = DBGFR3RegNmSet(pUVM, idDefCpu, paRegs[i].pszName, &paRegs[i].Val, paRegs[i].enmType);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
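+
+/* Illustrative usage sketch for the batch setter above. pUVM, idDefCpu and
+ the register names are assumptions made for the example, not definitions
+ from this file:
+
+ DBGFREGENTRYNM aRegs[2];
+ aRegs[0].pszName = "rax"; aRegs[0].enmType = DBGFREGVALTYPE_U64; aRegs[0].Val.u64 = 0;
+ aRegs[1].pszName = "rbx"; aRegs[1].enmType = DBGFREGVALTYPE_U64; aRegs[1].Val.u64 = 1;
+ int rc = DBGFR3RegNmSetBatch(pUVM, idDefCpu, aRegs, RT_ELEMENTS(aRegs));
+
+ Note that a failure in the middle of the batch leaves the earlier entries
+ applied (see the @todo above). */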
+
+
+/**
+ * Internal worker for DBGFR3RegFormatValue; cbBuf is assumed sufficient.
+ *
+ * @copydoc DBGFR3RegFormatValueEx
+ */
+DECLINLINE(ssize_t) dbgfR3RegFormatValueInt(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType,
+ unsigned uBase, signed int cchWidth, signed int cchPrecision, uint32_t fFlags)
+{
+ switch (enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ return RTStrFormatU8(pszBuf, cbBuf, pValue->u8, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U16:
+ return RTStrFormatU16(pszBuf, cbBuf, pValue->u16, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U32:
+ return RTStrFormatU32(pszBuf, cbBuf, pValue->u32, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U64:
+ return RTStrFormatU64(pszBuf, cbBuf, pValue->u64, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U128:
+ return RTStrFormatU128(pszBuf, cbBuf, &pValue->u128, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U256:
+ return RTStrFormatU256(pszBuf, cbBuf, &pValue->u256, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_U512:
+ return RTStrFormatU512(pszBuf, cbBuf, &pValue->u512, uBase, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_R80:
+ return RTStrFormatR80u2(pszBuf, cbBuf, &pValue->r80Ex, cchWidth, cchPrecision, fFlags);
+ case DBGFREGVALTYPE_DTR:
+ {
+ ssize_t cch = RTStrFormatU64(pszBuf, cbBuf, pValue->dtr.u64Base,
+ 16, 2+16, 0, RTSTR_F_SPECIAL | RTSTR_F_ZEROPAD);
+ AssertReturn(cch > 0, VERR_DBGF_REG_IPE_1);
+ pszBuf[cch++] = ':';
+ cch += RTStrFormatU64(&pszBuf[cch], cbBuf - cch, pValue->dtr.u32Limit,
+ 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
+ return cch;
+ }
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ /* no default, want gcc warnings */
+ }
+
+ RTStrPrintf(pszBuf, cbBuf, "!enmType=%d!", enmType);
+ return VERR_DBGF_REG_IPE_2;
+}
+
+
+/**
+ * Format a register value, extended version.
+ *
+ * @returns The number of bytes returned, VERR_BUFFER_OVERFLOW on failure.
+ * @param pszBuf The output buffer.
+ * @param cbBuf The size of the output buffer.
+ * @param pValue The value to format.
+ * @param enmType The value type.
+ * @param uBase The base (ignored if not applicable).
+ * @param cchWidth The width if RTSTR_F_WIDTH is set, otherwise
+ * ignored.
+ * @param cchPrecision The precision if RTSTR_F_PRECISION is set, otherwise
+ * ignored.
+ * @param fFlags String formatting flags, RTSTR_F_XXX.
+ */
+VMMR3DECL(ssize_t) DBGFR3RegFormatValueEx(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType,
+ unsigned uBase, signed int cchWidth, signed int cchPrecision, uint32_t fFlags)
+{
+ /*
+ * Format to temporary buffer using worker shared with dbgfR3RegPrintfCbFormatNormal.
+ */
+ char szTmp[160];
+ ssize_t cchOutput = dbgfR3RegFormatValueInt(szTmp, sizeof(szTmp), pValue, enmType, uBase, cchWidth, cchPrecision, fFlags);
+ if (cchOutput > 0)
+ {
+ if ((size_t)cchOutput < cbBuf)
+ memcpy(pszBuf, szTmp, cchOutput + 1);
+ else
+ {
+ if (cbBuf)
+ {
+ memcpy(pszBuf, szTmp, cbBuf - 1);
+ pszBuf[cbBuf - 1] = '\0';
+ }
+ cchOutput = VERR_BUFFER_OVERFLOW;
+ }
+ }
+ return cchOutput;
+}
+
+
+/**
+ * Format a register value as hexadecimal and with default width according to
+ * the type.
+ *
+ * @returns The number of bytes returned, VERR_BUFFER_OVERFLOW on failure.
+ * @param pszBuf The output buffer.
+ * @param cbBuf The size of the output buffer.
+ * @param pValue The value to format.
+ * @param enmType The value type.
+ * @param fSpecial Same as RTSTR_F_SPECIAL.
+ */
+VMMR3DECL(ssize_t) DBGFR3RegFormatValue(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType, bool fSpecial)
+{
+ int cchWidth = 0;
+ switch (enmType)
+ {
+ case DBGFREGVALTYPE_U8: cchWidth = 2 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U16: cchWidth = 4 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U32: cchWidth = 8 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U64: cchWidth = 16 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U128: cchWidth = 32 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U256: cchWidth = 64 + fSpecial*2; break;
+ case DBGFREGVALTYPE_U512: cchWidth = 128 + fSpecial*2; break;
+ case DBGFREGVALTYPE_R80: cchWidth = 0; break;
+ case DBGFREGVALTYPE_DTR: cchWidth = 16+1+4 + fSpecial*2; break;
+
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ break;
+ /* no default, want gcc warnings */
+ }
+ uint32_t fFlags = RTSTR_F_ZEROPAD;
+ if (fSpecial)
+ fFlags |= RTSTR_F_SPECIAL;
+ if (cchWidth != 0)
+ fFlags |= RTSTR_F_WIDTH;
+ return DBGFR3RegFormatValueEx(pszBuf, cbBuf, pValue, enmType, 16, cchWidth, 0, fFlags);
+}
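+
+/* Example (a sketch; the value and buffer are assumptions): with fSpecial set,
+ the default width above includes room for the "0x" prefix, so a U64 should
+ format as a fixed-width, zero padded string like "0x0000000000001234":
+
+ char szBuf[64];
+ DBGFREGVAL Val;
+ Val.u64 = UINT64_C(0x1234);
+ ssize_t cch = DBGFR3RegFormatValue(szBuf, sizeof(szBuf), &Val, DBGFREGVALTYPE_U64, true);
+ */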
+
+
+/**
+ * Formats a register using special hacks as well as sub-field specifications
+ * (the latter aren't implemented yet).
+ */
+static size_t
+dbgfR3RegPrintfCbFormatField(PDBGFR3REGPRINTFARGS pThis, PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
+ PCDBGFREGLOOKUP pLookupRec, int cchWidth, int cchPrecision, unsigned fFlags)
+{
+ char szTmp[160];
+
+ NOREF(cchWidth); NOREF(cchPrecision); NOREF(fFlags);
+
+ /*
+ * Retrieve the register value.
+ */
+ DBGFREGVAL Value;
+ DBGFREGVALTYPE enmType;
+ int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pUVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
+ if (RT_FAILURE(rc))
+ {
+ ssize_t cchDefine = RTErrQueryDefine(rc, szTmp, sizeof(szTmp), true /*fFailIfUnknown*/);
+ if (cchDefine <= 0)
+ cchDefine = RTStrPrintf(szTmp, sizeof(szTmp), "rc=%d", rc);
+ return pfnOutput(pvArgOutput, szTmp, cchDefine);
+ }
+
+ char *psz = szTmp;
+
+ /*
+ * Special case: Format eflags.
+ */
+ if ( pLookupRec->pSet->enmType == DBGFREGSETTYPE_CPU
+ && pLookupRec->pDesc->enmReg == DBGFREG_RFLAGS
+ && pLookupRec->pSubField == NULL)
+ {
+ rc = dbgfR3RegValCast(&Value, enmType, DBGFREGVALTYPE_U32);
+ AssertRC(rc);
+ uint32_t const efl = Value.u32;
+
+ /* the iopl */
+ psz += RTStrPrintf(psz, sizeof(szTmp) / 2, "iopl=%u ", X86_EFL_GET_IOPL(efl));
+
+ /* add flags */
+ static const struct
+ {
+ const char *pszSet;
+ const char *pszClear;
+ uint32_t fFlag;
+ } aFlags[] =
+ {
+ { "vip",NULL, X86_EFL_VIP },
+ { "vif",NULL, X86_EFL_VIF },
+ { "ac", NULL, X86_EFL_AC },
+ { "vm", NULL, X86_EFL_VM },
+ { "rf", NULL, X86_EFL_RF },
+ { "nt", NULL, X86_EFL_NT },
+ { "ov", "nv", X86_EFL_OF },
+ { "dn", "up", X86_EFL_DF },
+ { "ei", "di", X86_EFL_IF },
+ { "tf", NULL, X86_EFL_TF },
+ { "ng", "pl", X86_EFL_SF },
+ { "zr", "nz", X86_EFL_ZF },
+ { "ac", "na", X86_EFL_AF },
+ { "po", "pe", X86_EFL_PF },
+ { "cy", "nc", X86_EFL_CF },
+ };
+ for (unsigned i = 0; i < RT_ELEMENTS(aFlags); i++)
+ {
+ const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
+ if (pszAdd)
+ {
+ *psz++ = *pszAdd++;
+ *psz++ = *pszAdd++;
+ if (*pszAdd)
+ *psz++ = *pszAdd++;
+ *psz++ = ' ';
+ }
+ }
+
+ /* drop trailing space */
+ psz--;
+ }
+ else
+ {
+ /*
+ * General case.
+ */
+ AssertMsgFailed(("Not implemented: %s\n", pLookupRec->Core.pszString));
+ return pfnOutput(pvArgOutput, pLookupRec->Core.pszString, pLookupRec->Core.cchString);
+ }
+
+ /* Output the string. */
+ return pfnOutput(pvArgOutput, szTmp, psz - &szTmp[0]);
+}
+
+
+/**
+ * Formats a register having parsed up to the register name.
+ */
+static size_t
+dbgfR3RegPrintfCbFormatNormal(PDBGFR3REGPRINTFARGS pThis, PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
+ PCDBGFREGLOOKUP pLookupRec, unsigned uBase, int cchWidth, int cchPrecision, unsigned fFlags)
+{
+ char szTmp[160];
+
+ /*
+ * Get the register value.
+ */
+ DBGFREGVAL Value;
+ DBGFREGVALTYPE enmType;
+ int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pUVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
+ if (RT_FAILURE(rc))
+ {
+ ssize_t cchDefine = RTErrQueryDefine(rc, szTmp, sizeof(szTmp), true /*fFailIfUnknown*/);
+ if (cchDefine <= 0)
+ cchDefine = RTStrPrintf(szTmp, sizeof(szTmp), "rc=%d", rc);
+ return pfnOutput(pvArgOutput, szTmp, cchDefine);
+ }
+
+ /*
+ * Format the value.
+ */
+ ssize_t cchOutput = dbgfR3RegFormatValueInt(szTmp, sizeof(szTmp), &Value, enmType, uBase, cchWidth, cchPrecision, fFlags);
+ if (RT_UNLIKELY(cchOutput <= 0))
+ {
+ AssertFailed();
+ return pfnOutput(pvArgOutput, "internal-error", sizeof("internal-error") - 1);
+ }
+ return pfnOutput(pvArgOutput, szTmp, cchOutput);
+}
+
+
+/**
+ * @callback_method_impl{FNSTRFORMAT}
+ */
+static DECLCALLBACK(size_t)
+dbgfR3RegPrintfCbFormat(void *pvArg, PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
+ const char **ppszFormat, va_list *pArgs, int cchWidth,
+ int cchPrecision, unsigned fFlags, char chArgSize)
+{
+ NOREF(pArgs); NOREF(chArgSize);
+
+ /*
+ * Parse the format type and hand the job to the appropriate worker.
+ */
+ PDBGFR3REGPRINTFARGS pThis = (PDBGFR3REGPRINTFARGS)pvArg;
+ const char *pszFormat = *ppszFormat;
+ if ( pszFormat[0] != 'V'
+ || pszFormat[1] != 'R')
+ {
+ AssertMsgFailed(("'%s'\n", pszFormat));
+ return 0;
+ }
+ unsigned offCurly = 2;
+ if (pszFormat[offCurly] != '{')
+ {
+ AssertMsgReturn(pszFormat[offCurly], ("'%s'\n", pszFormat), 0);
+ offCurly++;
+ AssertMsgReturn(pszFormat[offCurly] == '{', ("'%s'\n", pszFormat), 0);
+ }
+ const char *pachReg = &pszFormat[offCurly + 1];
+
+ /*
+ * The end and length of the register.
+ */
+ const char *pszEnd = strchr(pachReg, '}');
+ AssertMsgReturn(pszEnd, ("Missing closing curly bracket: '%s'\n", pszFormat), 0);
+ size_t const cchReg = pszEnd - pachReg;
+
+ /*
+ * Look up the register - same as dbgfR3RegResolve, except for locking and
+ * input string termination.
+ */
+ PRTSTRSPACE pRegSpace = &pThis->pUVM->dbgf.s.RegSpace;
+ /* Try looking up the name without any case folding or cpu prefixing. */
+ PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGetN(pRegSpace, pachReg, cchReg);
+ if (!pLookupRec)
+ {
+ /* Lower case it and try again. */
+ char szName[DBGF_REG_MAX_NAME * 4 + 16];
+ ssize_t cchFolded = dbgfR3RegCopyToLower(pachReg, cchReg, szName, sizeof(szName) - DBGF_REG_MAX_NAME);
+ if (cchFolded > 0)
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
+ if ( !pLookupRec
+ && cchFolded >= 0
+ && pThis->idCpu != VMCPUID_ANY)
+ {
+ /* Prefix it with the specified CPU set. */
+ size_t cchCpuSet = RTStrPrintf(szName, sizeof(szName), pThis->fGuestRegs ? "cpu%u." : "hypercpu%u.", pThis->idCpu);
+ dbgfR3RegCopyToLower(pachReg, cchReg, &szName[cchCpuSet], sizeof(szName) - cchCpuSet);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
+ }
+ }
+ AssertMsgReturn(pLookupRec, ("'%s'\n", pszFormat), 0);
+ AssertMsgReturn( pLookupRec->pSet->enmType != DBGFREGSETTYPE_CPU
+ || pLookupRec->pSet->uUserArg.pVCpu->idCpu == pThis->idCpu,
+ ("'%s' idCpu=%u, pSet/cpu=%u\n", pszFormat, pThis->idCpu, pLookupRec->pSet->uUserArg.pVCpu->idCpu),
+ 0);
+
+ /*
+ * Commit the parsed format string. Up to this point it is nice to know
+ * which register lookup failed and such, so we've delayed committing.
+ */
+ *ppszFormat = pszEnd + 1;
+
+ /*
+ * Call the responsible worker.
+ */
+ switch (pszFormat[offCurly - 1])
+ {
+ case 'R': /* %VR{} */
+ case 'X': /* %VRX{} */
+ return dbgfR3RegPrintfCbFormatNormal(pThis, pfnOutput, pvArgOutput, pLookupRec,
+ 16, cchWidth, cchPrecision, fFlags);
+ case 'U':
+ return dbgfR3RegPrintfCbFormatNormal(pThis, pfnOutput, pvArgOutput, pLookupRec,
+ 10, cchWidth, cchPrecision, fFlags);
+ case 'O':
+ return dbgfR3RegPrintfCbFormatNormal(pThis, pfnOutput, pvArgOutput, pLookupRec,
+ 8, cchWidth, cchPrecision, fFlags);
+ case 'B':
+ return dbgfR3RegPrintfCbFormatNormal(pThis, pfnOutput, pvArgOutput, pLookupRec,
+ 2, cchWidth, cchPrecision, fFlags);
+ case 'F':
+ return dbgfR3RegPrintfCbFormatField(pThis, pfnOutput, pvArgOutput, pLookupRec, cchWidth, cchPrecision, fFlags);
+ default:
+ AssertFailed();
+ return 0;
+ }
+}
+
+
+
+/**
+ * @callback_method_impl{FNRTSTROUTPUT}
+ */
+static DECLCALLBACK(size_t)
+dbgfR3RegPrintfCbOutput(void *pvArg, const char *pachChars, size_t cbChars)
+{
+ PDBGFR3REGPRINTFARGS pArgs = (PDBGFR3REGPRINTFARGS)pvArg;
+ size_t cbToCopy = cbChars;
+ if (cbToCopy >= pArgs->cchLeftBuf)
+ {
+ if (RT_SUCCESS(pArgs->rc))
+ pArgs->rc = VERR_BUFFER_OVERFLOW;
+ cbToCopy = pArgs->cchLeftBuf;
+ }
+ if (cbToCopy > 0)
+ {
+ memcpy(&pArgs->pszBuf[pArgs->offBuf], pachChars, cbToCopy);
+ pArgs->offBuf += cbToCopy;
+ pArgs->cchLeftBuf -= cbToCopy;
+ pArgs->pszBuf[pArgs->offBuf] = '\0';
+ }
+ return cbToCopy;
+}
+
+
+/**
+ * On CPU worker for the register formatting, used by DBGFR3RegPrintfV.
+ *
+ * @returns VBox status code.
+ *
+ * @param pArgs The argument package and state.
+ */
+static DECLCALLBACK(int) dbgfR3RegPrintfWorkerOnCpu(PDBGFR3REGPRINTFARGS pArgs)
+{
+ DBGF_REG_DB_LOCK_READ(pArgs->pUVM);
+ RTStrFormatV(dbgfR3RegPrintfCbOutput, pArgs, dbgfR3RegPrintfCbFormat, pArgs, pArgs->pszFormat, pArgs->va);
+ DBGF_REG_DB_UNLOCK_READ(pArgs->pUVM);
+ return pArgs->rc;
+}
+
+
+/**
+ * Formats registers.
+ *
+ * This is restricted to registers from one CPU, that specified by @a idCpu.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU ID of any CPU registers that may be
+ * printed, pass VMCPUID_ANY if not applicable.
+ * @param pszBuf The output buffer.
+ * @param cbBuf The size of the output buffer.
+ * @param pszFormat The format string. Register names are given by
+ * %VR{name}, they take no arguments.
+ * @param va Other format arguments.
+ */
+VMMR3DECL(int) DBGFR3RegPrintfV(PUVM pUVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, va_list va)
+{
+ AssertPtrReturn(pszBuf, VERR_INVALID_POINTER);
+ AssertReturn(cbBuf > 0, VERR_BUFFER_OVERFLOW);
+ *pszBuf = '\0';
+
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pszFormat, VERR_INVALID_POINTER);
+
+ /*
+ * Set up an argument package and execute the formatting on the
+ * specified CPU.
+ */
+ DBGFR3REGPRINTFARGS Args;
+ Args.pUVM = pUVM;
+ Args.idCpu = idCpu != VMCPUID_ANY ? idCpu & ~DBGFREG_HYPER_VMCPUID : idCpu;
+ Args.fGuestRegs = idCpu != VMCPUID_ANY && !(idCpu & DBGFREG_HYPER_VMCPUID);
+ Args.pszBuf = pszBuf;
+ Args.pszFormat = pszFormat;
+ va_copy(Args.va, va);
+ Args.offBuf = 0;
+ Args.cchLeftBuf = cbBuf - 1;
+ Args.rc = VINF_SUCCESS;
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, Args.idCpu, (PFNRT)dbgfR3RegPrintfWorkerOnCpu, 1, &Args);
+ va_end(Args.va);
+ return rc;
+}
+
+
+/**
+ * Formats registers.
+ *
+ * This is restricted to registers from one CPU, that specified by @a idCpu.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU ID of any CPU registers that may be
+ * printed, pass VMCPUID_ANY if not applicable.
+ * @param pszBuf The output buffer.
+ * @param cbBuf The size of the output buffer.
+ * @param pszFormat The format string. Register names are given by
+ * %VR{name}, %VRU{name}, %VRO{name} and
+ * %VRB{name}, which are hexadecimal, (unsigned)
+ * decimal, octal and binary representation. None
+ * of these types takes any arguments.
+ * @param ... Other format arguments.
+ */
+VMMR3DECL(int) DBGFR3RegPrintf(PUVM pUVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = DBGFR3RegPrintfV(pUVM, idCpu, pszBuf, cbBuf, pszFormat, va);
+ va_end(va);
+ return rc;
+}
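+
+/* Usage sketch (illustrative; pUVM, the CPU id and the register names are
+ assumptions):
+
+ char szBuf[256];
+ int rc = DBGFR3RegPrintf(pUVM, 0, szBuf, sizeof(szBuf), "rax=%VR{rax} %VRF{rflags}");
+
+ %VR{rax} renders the register in hexadecimal, while %VRF{rflags} invokes the
+ field formatter above (iopl and flag mnemonics for eflags/rflags). */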
+
diff --git a/src/VBox/VMM/VMMR3/DBGFStack.cpp b/src/VBox/VMM/VMMR3/DBGFStack.cpp
new file mode 100644
index 00000000..664692b0
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/DBGFStack.cpp
@@ -0,0 +1,1163 @@
+/* $Id: DBGFStack.cpp $ */
+/** @file
+ * DBGF - Debugger Facility, Call Stack Analyser.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DBGF
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include "DBGFInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/param.h>
+#include <iprt/assert.h>
+#include <iprt/alloca.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/formats/pecoff.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
+
+/**
+ * Unwind context.
+ *
+ * @note Using a constructor and destructor here for simple+safe cleanup.
+ */
+typedef struct DBGFUNWINDCTX
+{
+ PUVM m_pUVM;
+ VMCPUID m_idCpu;
+ RTDBGAS m_hAs;
+ PCCPUMCTX m_pInitialCtx;
+ bool m_fIsHostRing0;
+ uint64_t m_uOsScratch; /**< For passing to DBGFOSREG::pfnStackUnwindAssist. */
+
+ RTDBGMOD m_hCached;
+ RTUINTPTR m_uCachedMapping;
+ RTUINTPTR m_cbCachedMapping;
+ RTDBGSEGIDX m_idxCachedSegMapping;
+
+ RTDBGUNWINDSTATE m_State;
+
+ DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
+ {
+ m_State.u32Magic = RTDBGUNWINDSTATE_MAGIC;
+ m_State.enmArch = RTLDRARCH_AMD64;
+ m_State.pfnReadStack = dbgfR3StackReadCallback;
+ m_State.pvUser = this;
+ RT_ZERO(m_State.u);
+ if (pInitialCtx)
+ {
+ m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
+ m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
+ m_State.u.x86.auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
+ m_State.u.x86.auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
+ m_State.u.x86.auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
+ m_State.u.x86.auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
+ m_State.u.x86.auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
+ m_State.u.x86.auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
+ m_State.u.x86.auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
+ m_State.u.x86.auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
+ m_State.u.x86.auRegs[X86_GREG_x10] = pInitialCtx->r10;
+ m_State.u.x86.auRegs[X86_GREG_x11] = pInitialCtx->r11;
+ m_State.u.x86.auRegs[X86_GREG_x12] = pInitialCtx->r12;
+ m_State.u.x86.auRegs[X86_GREG_x13] = pInitialCtx->r13;
+ m_State.u.x86.auRegs[X86_GREG_x14] = pInitialCtx->r14;
+ m_State.u.x86.auRegs[X86_GREG_x15] = pInitialCtx->r15;
+ m_State.uPc = pInitialCtx->rip;
+ m_State.u.x86.uRFlags = pInitialCtx->rflags.u;
+ m_State.u.x86.auSegs[X86_SREG_ES] = pInitialCtx->es.Sel;
+ m_State.u.x86.auSegs[X86_SREG_CS] = pInitialCtx->cs.Sel;
+ m_State.u.x86.auSegs[X86_SREG_SS] = pInitialCtx->ss.Sel;
+ m_State.u.x86.auSegs[X86_SREG_DS] = pInitialCtx->ds.Sel;
+ m_State.u.x86.auSegs[X86_SREG_GS] = pInitialCtx->gs.Sel;
+ m_State.u.x86.auSegs[X86_SREG_FS] = pInitialCtx->fs.Sel;
+ m_State.u.x86.fRealOrV86 = CPUMIsGuestInRealOrV86ModeEx(pInitialCtx);
+ }
+ else if (hAs == DBGF_AS_R0)
+ VMMR3InitR0StackUnwindState(pUVM, idCpu, &m_State);
+
+ m_pUVM = pUVM;
+ m_idCpu = idCpu;
+ m_hAs = DBGFR3AsResolveAndRetain(pUVM, hAs);
+ m_pInitialCtx = pInitialCtx;
+ m_fIsHostRing0 = hAs == DBGF_AS_R0;
+ m_uOsScratch = 0;
+
+ m_hCached = NIL_RTDBGMOD;
+ m_uCachedMapping = 0;
+ m_cbCachedMapping = 0;
+ m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
+ }
+
+ ~DBGFUNWINDCTX();
+
+} DBGFUNWINDCTX;
+/** Pointer to unwind context. */
+typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
+
+
+static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
+{
+ if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
+ {
+ RTDbgModRelease(pUnwindCtx->m_hCached);
+ pUnwindCtx->m_hCached = NIL_RTDBGMOD;
+ }
+ pUnwindCtx->m_cbCachedMapping = 0;
+ pUnwindCtx->m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
+}
+
+
+DBGFUNWINDCTX::~DBGFUNWINDCTX()
+{
+ dbgfR3UnwindCtxFlushCache(this);
+ if (m_hAs != NIL_RTDBGAS)
+ {
+ RTDbgAsRelease(m_hAs);
+ m_hAs = NIL_RTDBGAS;
+ }
+}
+
+
+/**
+ * @interface_method_impl{RTDBGUNWINDSTATE,pfnReadStack}
+ */
+static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
+{
+ Assert( pThis->enmArch == RTLDRARCH_AMD64
+ || pThis->enmArch == RTLDRARCH_X86_32);
+
+ PDBGFUNWINDCTX pUnwindCtx = (PDBGFUNWINDCTX)pThis->pvUser;
+ DBGFADDRESS SrcAddr;
+ int rc = VINF_SUCCESS;
+ if (pUnwindCtx->m_fIsHostRing0)
+ DBGFR3AddrFromHostR0(&SrcAddr, uSp);
+ else
+ {
+ if ( pThis->enmArch == RTLDRARCH_X86_32
+ || pThis->enmArch == RTLDRARCH_X86_16)
+ {
+ if (!pThis->u.x86.fRealOrV86)
+ rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pThis->u.x86.auSegs[X86_SREG_SS], uSp);
+ else
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp + ((uint32_t)pThis->u.x86.auSegs[X86_SREG_SS] << 4));
+ }
+ else
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
+ }
+ if (RT_SUCCESS(rc))
+ rc = DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pvDst, cbToRead);
+ if (RT_SUCCESS(rc))
+ return rc;
+ return -rc; /* Ignore read errors. */
+}
+
+
+/**
+ * Sets PC and SP.
+ *
+ * @returns true.
+ * @param pUnwindCtx The unwind context.
+ * @param pAddrPC The program counter (PC) value to set.
+ * @param pAddrStack The stack pointer (SP) value to set.
+ */
+static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
+{
+ Assert( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
+ || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);
+
+ if (!DBGFADDRESS_IS_FAR(pAddrPC))
+ pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
+ else
+ {
+ pUnwindCtx->m_State.uPc = pAddrPC->off;
+ pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS] = pAddrPC->Sel;
+ }
+ if (!DBGFADDRESS_IS_FAR(pAddrStack))
+ pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
+ else
+ {
+ pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->off;
+ pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] = pAddrStack->Sel;
+ }
+ return true;
+}
+
+
+/**
+ * Tries to unwind one frame using unwind info.
+ *
+ * @returns true on success, false on failure.
+ * @param pUnwindCtx The unwind context.
+ */
+static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx)
+{
+ /*
+ * Need to load it into the cache?
+ */
+ RTUINTPTR offCache = pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping;
+ if (offCache >= pUnwindCtx->m_cbCachedMapping)
+ {
+ RTDBGMOD hDbgMod = NIL_RTDBGMOD;
+ RTUINTPTR uBase = 0;
+ RTDBGSEGIDX idxSeg = NIL_RTDBGSEGIDX;
+ int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_State.uPc, &hDbgMod, &uBase, &idxSeg);
+ if (RT_SUCCESS(rc))
+ {
+ dbgfR3UnwindCtxFlushCache(pUnwindCtx);
+ pUnwindCtx->m_hCached = hDbgMod;
+ pUnwindCtx->m_uCachedMapping = uBase;
+ pUnwindCtx->m_idxCachedSegMapping = idxSeg;
+ pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
+ : RTDbgModSegmentSize(hDbgMod, idxSeg);
+ offCache = pUnwindCtx->m_State.uPc - uBase;
+ }
+ else
+ return false;
+ }
+
+ /*
+ * Do the lookup.
+ */
+ AssertCompile(UINT32_MAX == NIL_RTDBGSEGIDX);
+ int rc = RTDbgModUnwindFrame(pUnwindCtx->m_hCached, pUnwindCtx->m_idxCachedSegMapping, offCache, &pUnwindCtx->m_State);
+ if (RT_SUCCESS(rc))
+ return true;
+ return false;
+}
+
+
+/**
+ * Reads stack memory, initializing the entire buffer.
+ */
+DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
+{
+ int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
+ if (RT_FAILURE(rc))
+ {
+ /* Fallback: read byte by byte and zero the bytes we fail to read. */
+ size_t cbRead;
+ for (cbRead = 0; cbRead < cb; cbRead++)
+ {
+ DBGFADDRESS Addr = *pSrcAddr;
+ rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ if (cbRead)
+ rc = VINF_SUCCESS;
+ memset((char *)pvBuf + cbRead, 0, cb - cbRead);
+ *pcbRead = cbRead;
+ }
+ else
+ *pcbRead = cb;
+ return rc;
+}
+
+/**
+ * Collects sure registers on frame exit.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param pUVM The user mode VM handle for the allocation.
+ * @param pFrame The frame in question.
+ * @param pState The unwind state.
+ */
+static int dbgfR3StackWalkCollectRegisterChanges(PUVM pUVM, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState)
+{
+ pFrame->cSureRegs = 0;
+ pFrame->paSureRegs = NULL;
+
+ if ( pState->enmArch == RTLDRARCH_AMD64
+ || pState->enmArch == RTLDRARCH_X86_32
+ || pState->enmArch == RTLDRARCH_X86_16)
+ {
+ if (pState->u.x86.Loaded.fAll)
+ {
+ /*
+ * Count relevant registers.
+ */
+ uint32_t cRegs = 0;
+ if (pState->u.x86.Loaded.s.fRegs)
+ for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auRegs)); f <<= 1)
+ if (pState->u.x86.Loaded.s.fRegs & f)
+ cRegs++;
+ if (pState->u.x86.Loaded.s.fSegs)
+ for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auSegs)); f <<= 1)
+ if (pState->u.x86.Loaded.s.fSegs & f)
+ cRegs++;
+ if (pState->u.x86.Loaded.s.fRFlags)
+ cRegs++;
+ if (pState->u.x86.Loaded.s.fErrCd)
+ cRegs++;
+ if (cRegs > 0)
+ {
+ /*
+ * Allocate the arrays.
+ */
+ PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
+ AssertReturn(paSureRegs, VERR_NO_MEMORY);
+ pFrame->paSureRegs = paSureRegs;
+ pFrame->cSureRegs = cRegs;
+
+ /*
+ * Populate the arrays.
+ */
+ uint32_t iReg = 0;
+ if (pState->u.x86.Loaded.s.fRegs)
+ for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auRegs); i++)
+ if (pState->u.x86.Loaded.s.fRegs & RT_BIT(i))
+ {
+ paSureRegs[iReg].Value.u64 = pState->u.x86.auRegs[i];
+ paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
+ paSureRegs[iReg].enmReg = (DBGFREG)(DBGFREG_RAX + i);
+ iReg++;
+ }
+
+ if (pState->u.x86.Loaded.s.fSegs)
+ for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auSegs); i++)
+ if (pState->u.x86.Loaded.s.fSegs & RT_BIT(i))
+ {
+ paSureRegs[iReg].Value.u16 = pState->u.x86.auSegs[i];
+ paSureRegs[iReg].enmType = DBGFREGVALTYPE_U16;
+ switch (i)
+ {
+ case X86_SREG_ES: paSureRegs[iReg].enmReg = DBGFREG_ES; break;
+ case X86_SREG_CS: paSureRegs[iReg].enmReg = DBGFREG_CS; break;
+ case X86_SREG_SS: paSureRegs[iReg].enmReg = DBGFREG_SS; break;
+ case X86_SREG_DS: paSureRegs[iReg].enmReg = DBGFREG_DS; break;
+ case X86_SREG_FS: paSureRegs[iReg].enmReg = DBGFREG_FS; break;
+ case X86_SREG_GS: paSureRegs[iReg].enmReg = DBGFREG_GS; break;
+ default: AssertFailedBreak();
+ }
+ iReg++;
+ }
+
+ if (iReg < cRegs)
+ {
+ if (pState->u.x86.Loaded.s.fRFlags)
+ {
+ paSureRegs[iReg].Value.u64 = pState->u.x86.uRFlags;
+ paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
+ paSureRegs[iReg].enmReg = DBGFREG_RFLAGS;
+ iReg++;
+ }
+ if (pState->u.x86.Loaded.s.fErrCd)
+ {
+ paSureRegs[iReg].Value.u64 = pState->u.x86.uErrCd;
+ paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
+ paSureRegs[iReg].enmReg = DBGFREG_END;
+ paSureRegs[iReg].pszName = "trap-errcd";
+ iReg++;
+ }
+ }
+ Assert(iReg == cRegs);
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
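+
+/* A consumer of the frame may walk the collected registers along these lines
+ (a sketch). Entries with enmReg == DBGFREG_END are named pseudo registers,
+ identified by pszName instead (e.g. "trap-errcd" above):
+
+ for (uint32_t i = 0; i < pFrame->cSureRegs; i++)
+ if (pFrame->paSureRegs[i].enmReg != DBGFREG_END)
+ ProcessRegister(pFrame->paSureRegs[i].enmReg, &pFrame->paSureRegs[i].Value);
+ else
+ ProcessNamedValue(pFrame->paSureRegs[i].pszName, &pFrame->paSureRegs[i].Value);
+
+ ProcessRegister and ProcessNamedValue are hypothetical consumer callbacks. */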
+
+
+/**
+ * Internal worker routine.
+ *
+ * On x86 the typical stack frame layout is like this:
+ * .. ..
+ * 16 parameter 2
+ * 12 parameter 1
+ * 8 parameter 0
+ * 4 return address
+ * 0 old ebp; current ebp points here
+ */
+DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
+{
+ /*
+ * Stop if we got a read error in the previous run.
+ */
+ if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
+ return VERR_NO_MORE_FILES;
+
+ /*
+ * Advance the frame (except for the first).
+ */
+ if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
+ {
+ /* Frame, PC and stack are taken from the existing frame's return members. */
+ pFrame->AddrFrame = pFrame->AddrReturnFrame;
+ pFrame->AddrPC = pFrame->AddrReturnPC;
+ pFrame->pSymPC = pFrame->pSymReturnPC;
+ pFrame->pLinePC = pFrame->pLineReturnPC;
+
+ /* increment the frame number. */
+ pFrame->iFrame++;
+
+ /* UNWIND_INFO_RET -> USED_UNWIND; return type */
+ if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
+ pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
+ else
+ {
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
+ pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
+ if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
+ {
+ pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
+ pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
+ }
+ }
+ pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
+ }
+
+ /*
+ * Figure the return address size and use the old PC to guess stack item size.
+ */
+ /** @todo this is bogus... */
+ unsigned cbRetAddr = RTDbgReturnTypeSize(pFrame->enmReturnType);
+ unsigned cbStackItem;
+ switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
+ {
+ case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
+ case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
+ case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
+ case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
+ default:
+ switch (pFrame->enmReturnType)
+ {
+ case RTDBGRETURNTYPE_FAR16:
+ case RTDBGRETURNTYPE_IRET16:
+ case RTDBGRETURNTYPE_IRET32_V86:
+ case RTDBGRETURNTYPE_NEAR16: cbStackItem = 2; break;
+
+ case RTDBGRETURNTYPE_FAR32:
+ case RTDBGRETURNTYPE_IRET32:
+ case RTDBGRETURNTYPE_IRET32_PRIV:
+ case RTDBGRETURNTYPE_NEAR32: cbStackItem = 4; break;
+
+ case RTDBGRETURNTYPE_FAR64:
+ case RTDBGRETURNTYPE_IRET64:
+ case RTDBGRETURNTYPE_NEAR64: cbStackItem = 8; break;
+
+ default:
+ AssertMsgFailed(("%d\n", pFrame->enmReturnType));
+ cbStackItem = 4;
+ break;
+ }
+ }
+
+ /*
+ * Read the raw frame data.
+ * We double cbRetAddr in case we have a far return.
+ */
+ union
+ {
+ uint64_t *pu64;
+ uint32_t *pu32;
+ uint16_t *pu16;
+ uint8_t *pb;
+ void *pv;
+ } u, uRet, uArgs, uBp;
+ size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
+ u.pv = alloca(cbRead);
+ uBp = u;
+ uRet.pb = u.pb + cbStackItem;
+ uArgs.pb = u.pb + cbStackItem + cbRetAddr;
+
+ Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
+ int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
+ if ( RT_FAILURE(rc)
+ || cbRead < cbRetAddr + cbStackItem)
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
+
+ /*
+ * Return Frame address.
+ *
+ * If we used unwind info to get here, the unwind register context will be
+ * positioned after the return instruction has been executed. We start by
+ * picking up the rBP register here for return frame and will try improve
+ * on it further down by using unwind info.
+ */
+ pFrame->AddrReturnFrame = pFrame->AddrFrame;
+ if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
+ {
+ if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
+ || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
+ pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
+ else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
+ ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
+ + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
+ else
+ {
+ pFrame->AddrReturnFrame.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP];
+ pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
+ }
+ }
+ else
+ {
+ switch (cbStackItem)
+ {
+ case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
+ case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
+ case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
+ default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
+ }
+
+ /* Watcom tries to keep the frame pointer odd for far returns. */
+ if ( cbStackItem <= 4
+ && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
+ {
+ if (pFrame->AddrReturnFrame.off & 1)
+ {
+ pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
+ if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR16)
+ {
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
+ pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
+ cbRetAddr = 4;
+ }
+ else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
+ {
+#if 1
+ /* Assumes returning 32-bit code. */
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
+ pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
+ cbRetAddr = 8;
+#else
+ /* Assumes returning 16-bit code. */
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
+ pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
+ cbRetAddr = 4;
+#endif
+ }
+ }
+ else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
+ {
+ if (pFrame->enmReturnType == RTDBGRETURNTYPE_FAR16)
+ {
+ pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
+ cbRetAddr = 2;
+ }
+ else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
+ {
+ pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
+ cbRetAddr = 4;
+ }
+ pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
+ }
+ uArgs.pb = u.pb + cbStackItem + cbRetAddr;
+ }
+
+ pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
+ }
+
+ /*
+ * Return Stack Address.
+ */
+ pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
+ if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
+ {
+ if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
+ || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
+ pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
+ else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
+ ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
+ + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
+ else
+ {
+ pFrame->AddrReturnStack.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP];
+ pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
+ }
+ }
+ else
+ {
+ pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
+ pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
+ }
+
+ /*
+ * Return PC.
+ */
+ pFrame->AddrReturnPC = pFrame->AddrPC;
+ if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
+ {
+ if (RTDbgReturnTypeIsNear(pFrame->enmReturnType))
+ {
+ pFrame->AddrReturnPC.off = pUnwindCtx->m_State.uPc;
+ pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
+ }
+ else
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
+ pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
+ }
+ else
+ {
+ int rc2;
+ switch (pFrame->enmReturnType)
+ {
+ case RTDBGRETURNTYPE_NEAR16:
+ if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
+ {
+ pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
+ pFrame->AddrReturnPC.off = *uRet.pu16;
+ }
+ else
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
+ break;
+ case RTDBGRETURNTYPE_NEAR32:
+ if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
+ {
+ pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
+ pFrame->AddrReturnPC.off = *uRet.pu32;
+ }
+ else
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
+ break;
+ case RTDBGRETURNTYPE_NEAR64:
+ if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
+ {
+ pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
+ pFrame->AddrReturnPC.off = *uRet.pu64;
+ }
+ else
+ DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
+ break;
+ case RTDBGRETURNTYPE_FAR16:
+ rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
+ if (RT_SUCCESS(rc2))
+ break;
+ rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu16[0]);
+ if (RT_SUCCESS(rc2))
+ pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
+ else
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
+ break;
+ case RTDBGRETURNTYPE_FAR32:
+ rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ if (RT_SUCCESS(rc2))
+ break;
+ rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu32[0]);
+ if (RT_SUCCESS(rc2))
+ pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR32;
+ else
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ break;
+ case RTDBGRETURNTYPE_FAR64:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
+ break;
+ case RTDBGRETURNTYPE_IRET16:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
+ break;
+ case RTDBGRETURNTYPE_IRET32:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ break;
+ case RTDBGRETURNTYPE_IRET32_PRIV:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ break;
+ case RTDBGRETURNTYPE_IRET32_V86:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ break;
+ case RTDBGRETURNTYPE_IRET64:
+ DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
+ break;
+ default:
+ AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
+ return VERR_INVALID_PARAMETER;
+ }
+ }
+
+
+ pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ NULL /*poffDisp*/, NULL /*phMod*/);
+ pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
+ NULL /*poffDisp*/, NULL /*phMod*/);
+
+ /*
+ * Frame bitness flag.
+ */
+ /** @todo use previous return type for this? */
+ pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
+ switch (cbStackItem)
+ {
+ case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
+ case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
+ case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
+ default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
+ }
+
+ /*
+ * The arguments.
+ */
+ memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
+
+ /*
+ * Collect register changes.
+ * Then call the OS layer to assist us (e.g. NT trap frames).
+ */
+ if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
+ {
+ rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ if ( pUnwindCtx->m_pInitialCtx
+ && pUnwindCtx->m_hAs != NIL_RTDBGAS)
+ {
+ rc = dbgfR3OSStackUnwindAssist(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, pFrame, &pUnwindCtx->m_State,
+ pUnwindCtx->m_pInitialCtx, pUnwindCtx->m_hAs, &pUnwindCtx->m_uOsScratch);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+
+ /*
+ * Try use unwind information to locate the return frame pointer (for the
+ * next loop iteration).
+ */
+ Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
+ pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
+ if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
+ {
+ /* Set PC and SP if we didn't unwind our way here (the context will then
+ already point at the return PC and SP). */
+ if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
+ {
+ dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
+ pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP] = pFrame->AddrReturnFrame.off;
+ }
+ /** @todo Reevaluate CS if the previous frame return type isn't near. */
+ if ( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
+ || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32
+ || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_16)
+ pUnwindCtx->m_State.u.x86.Loaded.fAll = 0;
+ else
+ AssertFailed();
+ if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
+ {
+ if (pUnwindCtx->m_fIsHostRing0)
+ DBGFR3AddrFromHostR0(&pFrame->AddrReturnFrame, pUnwindCtx->m_State.u.x86.FrameAddr.off);
+ else
+ {
+ DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
+ rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
+ pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
+ if (RT_SUCCESS(rc))
+ pFrame->AddrReturnFrame = AddrReturnFrame;
+ }
+ pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
+ pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Walks the entire stack allocating memory as we walk.
+ */
+static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
+ DBGFCODETYPE enmCodeType,
+ PCDBGFADDRESS pAddrFrame,
+ PCDBGFADDRESS pAddrStack,
+ PCDBGFADDRESS pAddrPC,
+ RTDBGRETURNTYPE enmReturnType,
+ PCDBGFSTACKFRAME *ppFirstFrame)
+{
+ DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);
+
+ /* alloc first frame. */
+ PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
+ if (!pCur)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize the frame.
+ */
+ pCur->pNextInternal = NULL;
+ pCur->pFirstInternal = pCur;
+
+ int rc = VINF_SUCCESS;
+ if (pAddrPC)
+ pCur->AddrPC = *pAddrPC;
+ else if (enmCodeType != DBGFCODETYPE_GUEST)
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
+ else
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
+ if (RT_SUCCESS(rc))
+ {
+ uint64_t fAddrMask;
+ if (enmCodeType == DBGFCODETYPE_RING0)
+ fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
+ else if (enmCodeType == DBGFCODETYPE_HYPER)
+ fAddrMask = UINT32_MAX;
+ else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
+ fAddrMask = UINT16_MAX;
+ else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
+ fAddrMask = UINT32_MAX;
+ else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
+ fAddrMask = UINT64_MAX;
+ else
+ {
+ PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
+ CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
+ if (enmCpuMode == CPUMMODE_REAL)
+ {
+ fAddrMask = UINT16_MAX;
+ if (enmReturnType == RTDBGRETURNTYPE_INVALID)
+ pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16;
+ }
+ else if ( enmCpuMode == CPUMMODE_PROTECTED
+ || !CPUMIsGuestIn64BitCode(pVCpu))
+ {
+ fAddrMask = UINT32_MAX;
+ if (enmReturnType == RTDBGRETURNTYPE_INVALID)
+ pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
+ }
+ else
+ {
+ fAddrMask = UINT64_MAX;
+ if (enmReturnType == RTDBGRETURNTYPE_INVALID)
+ pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
+ }
+ }
+
+ if (enmReturnType == RTDBGRETURNTYPE_INVALID)
+ switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
+ {
+ case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16; break;
+ case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32; break;
+ case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64; break;
+ case DBGFADDRESS_FLAGS_RING0:
+ pCur->enmReturnType = HC_ARCH_BITS == 64 ? RTDBGRETURNTYPE_NEAR64 : RTDBGRETURNTYPE_NEAR32;
+ break;
+ default:
+ pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
+ break;
+ }
+
+
+ if (pAddrStack)
+ pCur->AddrStack = *pAddrStack;
+ else if (enmCodeType != DBGFCODETYPE_GUEST)
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
+ else
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);
+
+ Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
+ if (pAddrFrame)
+ pCur->AddrFrame = *pAddrFrame;
+ else if (enmCodeType != DBGFCODETYPE_GUEST)
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
+ else if (RT_SUCCESS(rc))
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);
+
+ /*
+ * Try unwind and get a better frame pointer and state.
+ */
+ if ( RT_SUCCESS(rc)
+ && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
+ && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx))
+ {
+ pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
+ pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
+ if (!UnwindCtx.m_fIsHostRing0)
+ rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
+ UnwindCtx.m_State.u.x86.FrameAddr.sel, UnwindCtx.m_State.u.x86.FrameAddr.off);
+ else
+ DBGFR3AddrFromHostR0(&pCur->AddrFrame, UnwindCtx.m_State.u.x86.FrameAddr.off);
+ }
+ /*
+ * The first frame.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
+ {
+ pCur->pSymPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ NULL /*poffDisp*/, NULL /*phMod*/);
+ pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
+ }
+
+ rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
+ }
+ }
+ else
+ pCur->enmReturnType = enmReturnType;
+ if (RT_FAILURE(rc))
+ {
+ DBGFR3StackWalkEnd(pCur);
+ return rc;
+ }
+
+ /*
+ * The other frames.
+ */
+ DBGFSTACKFRAME Next = *pCur;
+ while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
+ {
+ Next.cSureRegs = 0;
+ Next.paSureRegs = NULL;
+
+ /* try walk. */
+ rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
+ if (RT_FAILURE(rc))
+ break;
+
+ /* add the next frame to the chain. */
+ PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
+ if (!pNext)
+ {
+ DBGFR3StackWalkEnd(pCur);
+ return VERR_NO_MEMORY;
+ }
+ *pNext = Next;
+ pCur->pNextInternal = pNext;
+ pCur = pNext;
+ Assert(pCur->pNextInternal == NULL);
+
+ /* check for loop */
+ for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
+ pLoop && pLoop != pCur;
+ pLoop = pLoop->pNextInternal)
+ if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
+ {
+ pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
+ break;
+ }
+
+ /* check for insane recursion */
+ if (pCur->iFrame >= 2048)
+ pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
+ }
+
+ *ppFirstFrame = pCur->pFirstInternal;
+ return rc;
+}
+
+
+/**
+ * Common worker for DBGFR3StackWalkBeginEx and DBGFR3StackWalkBegin.
+ */
+static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
+ VMCPUID idCpu,
+ DBGFCODETYPE enmCodeType,
+ PCDBGFADDRESS pAddrFrame,
+ PCDBGFADDRESS pAddrStack,
+ PCDBGFADDRESS pAddrPC,
+ RTDBGRETURNTYPE enmReturnType,
+ PCDBGFSTACKFRAME *ppFirstFrame)
+{
+ /*
+ * Validate parameters.
+ */
+ *ppFirstFrame = NULL;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ if (pAddrFrame)
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
+ if (pAddrStack)
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
+ if (pAddrPC)
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
+ AssertReturn(enmReturnType >= RTDBGRETURNTYPE_INVALID && enmReturnType < RTDBGRETURNTYPE_END, VERR_INVALID_PARAMETER);
+
+ /*
+ * Get the CPUM context pointer and pass it on the specified EMT.
+ */
+ RTDBGAS hAs;
+ PCCPUMCTX pCtx;
+ switch (enmCodeType)
+ {
+ case DBGFCODETYPE_GUEST:
+ pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
+ hAs = DBGF_AS_GLOBAL;
+ break;
+ case DBGFCODETYPE_HYPER:
+ pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
+ hAs = DBGF_AS_RC_AND_GC_GLOBAL;
+ break;
+ case DBGFCODETYPE_RING0:
+ pCtx = NULL; /* No valid context present. */
+ hAs = DBGF_AS_R0;
+ break;
+ default:
+ AssertFailedReturn(VERR_INVALID_PARAMETER);
+ }
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
+ pUVM, idCpu, pCtx, hAs, enmCodeType,
+ pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
+}
+
+
+/**
+ * Begins a stack walk, extended version.
+ *
+ * This will walk the current stack, constructing a list of info frames which is
+ * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
+ * list and DBGFR3StackWalkEnd to release it.
+ *
+ * @returns VINF_SUCCESS on success.
+ * @returns VERR_NO_MEMORY if we're out of memory.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the virtual CPU which stack we want to walk.
+ * @param enmCodeType The code type (guest, hyper or ring-0).
+ * @param pAddrFrame Frame address to start at. (Optional)
+ * @param pAddrStack Stack address to start at. (Optional)
+ * @param pAddrPC Program counter to start at. (Optional)
+ * @param enmReturnType The return address type. (Optional)
+ * @param ppFirstFrame Where to return the pointer to the first info frame.
+ */
+VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
+ VMCPUID idCpu,
+ DBGFCODETYPE enmCodeType,
+ PCDBGFADDRESS pAddrFrame,
+ PCDBGFADDRESS pAddrStack,
+ PCDBGFADDRESS pAddrPC,
+ RTDBGRETURNTYPE enmReturnType,
+ PCDBGFSTACKFRAME *ppFirstFrame)
+{
+ return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
+}
+
+
+/**
+ * Begins a stack walk.
+ *
+ * This will walk the current stack, constructing a list of info frames which is
+ * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
+ * list and DBGFR3StackWalkEnd to release it.
+ *
+ * @returns VINF_SUCCESS on success.
+ * @returns VERR_NO_MEMORY if we're out of memory.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the virtual CPU which stack we want to walk.
+ * @param enmCodeType The code type (guest, hyper or ring-0).
+ * @param ppFirstFrame Where to return the pointer to the first info frame.
+ */
+VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
+{
+ return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, RTDBGRETURNTYPE_INVALID, ppFirstFrame);
+}
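+
+/* Typical walk loop over the stack walker API (a sketch; pUVM and idCpu are
+ assumptions):
+
+ PCDBGFSTACKFRAME pFirstFrame;
+ int rc = DBGFR3StackWalkBegin(pUVM, idCpu, DBGFCODETYPE_GUEST, &pFirstFrame);
+ if (RT_SUCCESS(rc))
+ {
+ for (PCDBGFSTACKFRAME pFrame = pFirstFrame; pFrame; pFrame = DBGFR3StackWalkNext(pFrame))
+ ProcessFrame(pFrame);
+ DBGFR3StackWalkEnd(pFirstFrame);
+ }
+
+ ProcessFrame is a hypothetical consumer callback; interesting members include
+ AddrPC, AddrFrame, pSymPC and pLinePC. */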
+
+/**
+ * Gets the next stack frame.
+ *
+ * @returns Pointer to the info for the next stack frame.
+ * NULL if no more frames.
+ *
+ * @param pCurrent Pointer to the current stack frame.
+ *
+ */
+VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
+{
+ return pCurrent
+ ? pCurrent->pNextInternal
+ : NULL;
+}
+
+
+/**
+ * Ends a stack walk process.
+ *
+ * This *must* be called after a successful first call to any of the stack
+ * walker functions. If not called we will leak memory or other resources.
+ *
+ * @param pFirstFrame The frame returned by one of the begin functions.
+ */
+VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
+{
+ if ( !pFirstFrame
+ || !pFirstFrame->pFirstInternal)
+ return;
+
+ PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
+ while (pFrame)
+ {
+ PDBGFSTACKFRAME pCur = pFrame;
+ pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
+ if (pFrame)
+ {
+ if (pCur->pSymReturnPC == pFrame->pSymPC)
+ pFrame->pSymPC = NULL;
+ if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
+ pFrame->pSymReturnPC = NULL;
+
+ if (pCur->pSymPC == pFrame->pSymPC)
+ pFrame->pSymPC = NULL;
+ if (pCur->pSymPC == pFrame->pSymReturnPC)
+ pFrame->pSymReturnPC = NULL;
+
+ if (pCur->pLineReturnPC == pFrame->pLinePC)
+ pFrame->pLinePC = NULL;
+ if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
+ pFrame->pLineReturnPC = NULL;
+
+ if (pCur->pLinePC == pFrame->pLinePC)
+ pFrame->pLinePC = NULL;
+ if (pCur->pLinePC == pFrame->pLineReturnPC)
+ pFrame->pLineReturnPC = NULL;
+ }
+
+ RTDbgSymbolFree(pCur->pSymPC);
+ RTDbgSymbolFree(pCur->pSymReturnPC);
+ RTDbgLineFree(pCur->pLinePC);
+ RTDbgLineFree(pCur->pLineReturnPC);
+
+ if (pCur->paSureRegs)
+ {
+ MMR3HeapFree(pCur->paSureRegs);
+ pCur->paSureRegs = NULL;
+ pCur->cSureRegs = 0;
+ }
+
+ pCur->pNextInternal = NULL;
+ pCur->pFirstInternal = NULL;
+ pCur->fFlags = 0;
+ MMR3HeapFree(pCur);
+ }
+}
+
diff --git a/src/VBox/VMM/VMMR3/EM.cpp b/src/VBox/VMM/VMMR3/EM.cpp
new file mode 100644
index 00000000..bd15b887
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/EM.cpp
@@ -0,0 +1,2759 @@
+/* $Id: EM.cpp $ */
+/** @file
+ * EM - Execution Monitor / Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_em EM - The Execution Monitor / Manager
+ *
+ * The Execution Monitor/Manager is responsible for running the VM, scheduling
+ * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
+ * Interpreted), and keeping the CPU states in sync. The function
+ * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
+ * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
+ * emR3RemExecute).
+ *
+ * The interpreted execution is only used to avoid switching between
+ * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
+ * The interpretation is thus implemented as part of EM.
+ *
+ * @see grp_em
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmqueue.h>
+#include <VBox/vmm/hm.h>
+#include "EMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/cpumdis.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/err.h>
+#include "VMMTracing.h"
+
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <iprt/stream.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
+static const char *emR3GetStateName(EMSTATE enmState);
+#endif
+static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
+#if defined(VBOX_WITH_REM) || defined(DEBUG)
+static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
+#endif
+static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+
+
+/**
+ * Initializes the EM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
+{
+ LogFlow(("EMR3Init\n"));
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, em.s, 32);
+ AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
+ AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
+ AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
+
+ /*
+ * Init the structure.
+ */
+ PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
+ PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
+
+ int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
+#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN)
+ true
+#else
+ false
+#endif
+ );
+ AssertLogRelRCReturn(rc, rc);
+
+ bool fEnabled;
+ rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
+ AssertLogRelRCReturn(rc, rc);
+ pVM->em.s.fGuruOnTripleFault = !fEnabled;
+ if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
+ {
+ LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
+ pVM->em.s.fGuruOnTripleFault = true;
+ }
+
+ LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
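+
+    /*
+     * Note (editorial): the CFGM keys above hang off the VM's configuration
+     * root, so on a typical installation they can be set from the host shell
+     * via the extra-data interface, which maps "VBoxInternal/" onto the CFGM
+     * tree, e.g.:
+     *     VBoxManage setextradata "myvm" "VBoxInternal/EM/TripleFaultReset" 1
+     * ("myvm" being a placeholder for the VM name.)
+     */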
+
+ /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
+ * Whether to try correlate exit history in any context, detect hot spots and
+ * try optimize these using IEM if there are other exits close by. This
+ * overrides the context specific settings. */
+ bool fExitOptimizationEnabled = true;
+ rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
+ * Whether to optimize exits in ring-0. Setting this to false will also disable
+ * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
+ * capabilities of the host kernel, this optimization may be unavailable. */
+ bool fExitOptimizationEnabledR0 = true;
+ rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
+ AssertLogRelRCReturn(rc, rc);
+ fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
+
+ /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
+     * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
+ * hooks are in effect). */
+ /** @todo change the default to true here */
+ bool fExitOptimizationEnabledR0PreemptDisabled = true;
+ rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
+ AssertLogRelRCReturn(rc, rc);
+ fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
+
+ /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
+     * Maximum number of instructions to let EMHistoryExec execute in one go. */
+ uint16_t cHistoryExecMaxInstructions = 8192;
+ rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
+ AssertLogRelRCReturn(rc, rc);
+ if (cHistoryExecMaxInstructions < 16)
+ return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
+
+ /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
+     * Maximum number of instructions between exits during probing. */
+ uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
+#ifdef RT_OS_WINDOWS
+ if (VM_IS_NEM_ENABLED(pVM))
+ cHistoryProbeMaxInstructionsWithoutExit = 32;
+#endif
+ rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
+ cHistoryProbeMaxInstructionsWithoutExit);
+ AssertLogRelRCReturn(rc, rc);
+ if (cHistoryProbeMaxInstructionsWithoutExit < 2)
+ return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
+                          "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
+
+ /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
+ * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
+ uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
+ ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
+ rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
+ cHistoryProbeMinInstructions);
+ AssertLogRelRCReturn(rc, rc);
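+    /* E.g. with the HM default of 24 above this yields (24 + 1) * 3 = 75, and
+       with the NEM default of 32 it yields (32 + 1) * 3 = 99. */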
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
+ pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
+ pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
+ pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
+ pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
+ pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
+ }
+
+ /*
+ * Saved state.
+ */
+ rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
+ NULL, NULL, NULL,
+ NULL, emR3Save, NULL,
+ NULL, emR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
+ pVCpu->em.s.enmPrevState = EMSTATE_NONE;
+ pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
+ pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
+
+# define EM_REG_COUNTER(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
+ AssertRC(rc);
+
+# define EM_REG_COUNTER_USED(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
+ AssertRC(rc);
+
+# define EM_REG_PROFILE(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
+ AssertRC(rc);
+
+# define EM_REG_PROFILE_ADV(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
+ AssertRC(rc);
+
+ /*
+ * Statistics.
+ */
+#ifdef VBOX_WITH_STATISTICS
+ EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
+        EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed over to IEM in ring-3.");
+
+ /* these should be considered for release statistics. */
+        EM_REG_PROFILE(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
+        EM_REG_PROFILE(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
+#endif
+ EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
+        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
+#ifdef VBOX_WITH_STATISTICS
+ EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
+ EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
+#endif
+ EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
+        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
+#ifdef VBOX_WITH_STATISTICS
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
+#endif /* VBOX_WITH_STATISTICS */
+
+        EM_REG_PROFILE(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
+        EM_REG_PROFILE(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
+        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
+        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
+        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
+
+ EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
+
+ rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
+ AssertRC(rc);
+
+ /* History record statistics */
+ rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
+ AssertRC(rc);
+
+ for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
+ {
+ rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
+ AssertRC(rc);
+            rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+                                 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", idCpu, iStep);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
+ AssertRC(rc);
+ }
+
+ EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
+ }
+
+ emR3InitDbg(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when a VM initialization stage is completed.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat The initialization state that was completed.
+ */
+VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ if (enmWhat == VMINITCOMPLETED_RING0)
+ LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
+ pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
+ pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
+{
+ LogFlow(("EMR3Relocate\n"));
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Reset the EM state for a CPU.
+ *
+ * Called by EMR3Reset and hot plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
+{
+ /* Reset scheduling state. */
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
+
+ /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
+ out of the HALTED state here so that enmPrevState doesn't end up as
+ HALTED when EMR3Execute returns. */
+ if (pVCpu->em.s.enmState == EMSTATE_HALTED)
+ {
+ Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
+ pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
+ }
+}
+
+
+/**
+ * Reset notification.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
+{
+ Log(("EMR3Reset: \n"));
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ EMR3ResetCpu(pVM->apCpusR3[idCpu]);
+}
+
+
+/**
+ * Terminates the EM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ SSMR3PutBool(pSSM, false /*fForceRAW*/);
+
+ Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
+ Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
+ SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
+
+ /* Save mwait state. */
+ SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
+ SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
+ SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
+ SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
+ SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
+ int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
+ AssertRCReturn(rc, rc);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ /*
+ * Validate version.
+ */
+ if ( uVersion > EM_SAVED_STATE_VERSION
+ || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
+ {
+ AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Load the saved state.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ bool fForceRAWIgnored;
+ int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
+ AssertRCReturn(rc, rc);
+
+ if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
+ {
+ SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
+ Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
+
+ pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
+ }
+ if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
+ {
+ /* Load mwait state. */
+ rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
+ AssertRCReturn(rc, rc);
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Argument packet for emR3SetExecutionPolicy.
+ */
+struct EMR3SETEXECPOLICYARGS
+{
+ EMEXECPOLICY enmPolicy;
+ bool fEnforce;
+};
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ /*
+ * Only the first CPU changes the variables.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
+ switch (pArgs->enmPolicy)
+ {
+ case EMEXECPOLICY_RECOMPILE_RING0:
+ case EMEXECPOLICY_RECOMPILE_RING3:
+ break;
+ case EMEXECPOLICY_IEM_ALL:
+ pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
+
+ /* For making '.alliem 1' useful during debugging, transition the
+ EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpuX = pVM->apCpusR3[i];
+ switch (pVCpuX->em.s.enmState)
+ {
+ case EMSTATE_DEBUG_GUEST_RAW:
+ case EMSTATE_DEBUG_GUEST_HM:
+ case EMSTATE_DEBUG_GUEST_NEM:
+ case EMSTATE_DEBUG_GUEST_REM:
+ Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
+ pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
+ break;
+ case EMSTATE_DEBUG_GUEST_IEM:
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ AssertFailedReturn(VERR_INVALID_PARAMETER);
+ }
+ Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
+ }
+
+ /*
+ * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
+ */
+ return pVCpu->em.s.enmState == EMSTATE_RAW
+ || pVCpu->em.s.enmState == EMSTATE_HM
+ || pVCpu->em.s.enmState == EMSTATE_NEM
+ || pVCpu->em.s.enmState == EMSTATE_IEM
+ || pVCpu->em.s.enmState == EMSTATE_REM
+ || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
+ ? VINF_EM_RESCHEDULE
+ : VINF_SUCCESS;
+}
+
+
+/**
+ * Changes an execution scheduling policy parameter.
+ *
+ * This is used to enable or disable raw-mode / hardware-virtualization
+ * execution of user and supervisor code.
+ *
+ * @returns VINF_SUCCESS on success.
+ * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
+ * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param enmPolicy The scheduling policy to change.
+ * @param fEnforce Whether to enforce the policy or not.
+ */
+VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
+
+ struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
+}
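+
+/*
+ * Usage sketch (editorial; assumes a valid user mode VM handle):
+ *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
+ * This is essentially what the '.alliem 1' debugger command referred to in
+ * emR3SetExecutionPolicy() boils down to.
+ */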
+
+
+/**
+ * Queries an execution scheduling policy parameter.
+ *
+ * @returns VBox status code
+ * @param pUVM The user mode VM handle.
+ * @param enmPolicy The scheduling policy to query.
+ * @param pfEnforced Where to return the current value.
+ */
+VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
+{
+ AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /* No need to bother EMTs with a query. */
+ switch (enmPolicy)
+ {
+ case EMEXECPOLICY_RECOMPILE_RING0:
+ case EMEXECPOLICY_RECOMPILE_RING3:
+ *pfEnforced = false;
+ break;
+ case EMEXECPOLICY_IEM_ALL:
+ *pfEnforced = pVM->em.s.fIemExecutesAll;
+ break;
+ default:
+ AssertFailedReturn(VERR_INTERNAL_ERROR_2);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Queries the main execution engine of the VM.
+ *
+ * @returns VBox status code
+ * @param pUVM The user mode VM handle.
+ * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
+ */
+VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
+{
+ AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
+ *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
+
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ *pbMainExecutionEngine = pVM->bMainExecutionEngine;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Raise a fatal error.
+ *
+ * Safely terminates the VM with a full state report. This function
+ * never returns.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc VBox status code.
+ */
+VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
+{
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ longjmp(pVCpu->em.s.u.FatalLongJump, rc);
+}
+
+
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
+/**
+ * Gets the EM state name.
+ *
+ * @returns pointer to read only state name,
+ * @param enmState The state.
+ */
+static const char *emR3GetStateName(EMSTATE enmState)
+{
+ switch (enmState)
+ {
+ case EMSTATE_NONE: return "EMSTATE_NONE";
+ case EMSTATE_RAW: return "EMSTATE_RAW";
+ case EMSTATE_HM: return "EMSTATE_HM";
+ case EMSTATE_IEM: return "EMSTATE_IEM";
+ case EMSTATE_REM: return "EMSTATE_REM";
+ case EMSTATE_HALTED: return "EMSTATE_HALTED";
+ case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
+ case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
+ case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
+ case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
+ case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
+ case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
+ case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
+ case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
+ case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
+ case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
+ case EMSTATE_NEM: return "EMSTATE_NEM";
+ case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
+ default: return "Unknown!";
+ }
+}
+#endif /* LOG_ENABLED || VBOX_STRICT */
+
+
+/**
+ * Handle pending ring-3 I/O port write.
+ *
+ * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
+ * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
+{
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
+
+ /* Get and clear the pending data. */
+ RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
+ uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
+ uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
+ uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
+ pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
+
+ /* Assert sanity. */
+ switch (cbValue)
+ {
+        case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
+        case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
+ case 4: break;
+ default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
+ }
+ AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
+
+ /* Do the work.*/
+ VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
+ LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
+ if (IOM_SUCCESS(rcStrict))
+ {
+ pVCpu->cpum.GstCtx.rip += cbInstr;
+ pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
+ }
+ return rcStrict;
+}
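+
+/* For reference (editorial sketch; see the EMRZSetPendingIoPortWrite code for
+ * the real thing): the ring-0/raw-mode side queues the access roughly like
+ * this before returning VINF_EM_PENDING_R3_IOPORT_WRITE to ring-3:
+ *
+ *     pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
+ *     pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
+ *     pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
+ *     pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
+ */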
+
+
+/**
+ * Handle pending ring-3 I/O port read.
+ *
+ * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
+ * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
+{
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
+
+ /* Get and clear the pending data. */
+ RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
+ uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
+ uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
+ pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
+
+ /* Assert sanity. */
+ switch (cbValue)
+ {
+ case 1: break;
+ case 2: break;
+ case 4: break;
+ default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
+ }
+ AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
+ AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
+
+ /* Do the work.*/
+ uint32_t uValue = 0;
+ VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
+ LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
+ if (IOM_SUCCESS(rcStrict))
+ {
+ if (cbValue == 4)
+ pVCpu->cpum.GstCtx.rax = uValue;
+ else if (cbValue == 2)
+ pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
+ else
+ pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
+ pVCpu->cpum.GstCtx.rip += cbInstr;
+ pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
+ }
+ return rcStrict;
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS,
+ * Worker for emR3ExecuteSplitLockInstruction}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ /* Only execute on the specified EMT. */
+ if (pVCpu == (PVMCPU)pvUser)
+ {
+ LogFunc(("\n"));
+ VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
+ LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ if (rcStrict == VINF_IEM_RAISED_XCPT)
+ rcStrict = VINF_SUCCESS;
+ return rcStrict;
+ }
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handle an instruction causing a split cacheline lock access in SMP VMs.
+ *
+ * Generally we only get here if the host has split-lock detection enabled and
+ * this caused an \#AC because of something the guest did. If we interpret the
+ * instruction as-is, we'll likely just repeat the split-lock access and
+ * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
+ * changes on context switching (costs a tiny bit). Assuming these \#ACs are
+ * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
+ * disregard the lock prefix when emulating the instruction.
+ *
+ * Yes, we could probably modify the MSR (or MSRs) controlling the detection
+ * feature when entering guest context, but support for the feature isn't
+ * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
+ * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
+ * Thus the approach is to just deal with the spurious \#ACs first and maybe add
+ * proper detection to SUPDrv later if we find it necessary.
+ *
+ * @see @bugref{10052}
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
+{
+ LogFunc(("\n"));
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
+}
+
+
+/**
+ * Debug loop.
+ *
+ * @returns VBox status code for EM.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc Current EM VBox status code.
+ */
+static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
+{
+ for (;;)
+ {
+ Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
+ const VBOXSTRICTRC rcLast = rc;
+
+ /*
+ * Debug related RC.
+ */
+ switch (VBOXSTRICTRC_VAL(rc))
+ {
+ /*
+ * Single step an instruction.
+ */
+ case VINF_EM_DBG_STEP:
+ if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
+ || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
+                    AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
+ else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
+ rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
+ else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
+ rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
+#ifdef VBOX_WITH_REM /** @todo fix me? */
+ else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
+ rc = emR3RemStep(pVM, pVCpu);
+#endif
+ else
+ {
+ rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
+ if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
+ rc = VINF_EM_DBG_STEPPED;
+ }
+ break;
+
+ /*
+ * Simple events: stepped, breakpoint, stop/assertion.
+ */
+ case VINF_EM_DBG_STEPPED:
+ rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
+ break;
+
+ case VINF_EM_DBG_BREAKPOINT:
+ rc = DBGFR3BpHit(pVM, pVCpu);
+ break;
+
+ case VINF_EM_DBG_STOP:
+ rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
+ break;
+
+ case VINF_EM_DBG_EVENT:
+ rc = DBGFR3EventHandlePending(pVM, pVCpu);
+ break;
+
+ case VINF_EM_DBG_HYPER_STEPPED:
+ rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
+ break;
+
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
+ break;
+
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
+ RTLogFlush(NULL);
+ rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
+ break;
+
+ /*
+ * Guru meditation.
+ */
+ case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
+ rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
+ break;
+ case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
+ rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
+ break;
+ case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
+ rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
+ break;
+
+ default: /** @todo don't use default for guru, but make special errors code! */
+ {
+ LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
+ rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
+ break;
+ }
+ }
+
+ /*
+ * Process the result.
+ */
+ switch (VBOXSTRICTRC_VAL(rc))
+ {
+ /*
+ * Continue the debugging loop.
+ */
+ case VINF_EM_DBG_STEP:
+ case VINF_EM_DBG_STOP:
+ case VINF_EM_DBG_EVENT:
+ case VINF_EM_DBG_STEPPED:
+ case VINF_EM_DBG_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ break;
+
+ /*
+ * Resuming execution (in some form) has to be done here if we got
+ * a hypervisor debug event.
+ */
+ case VINF_SUCCESS:
+ case VINF_EM_RESUME:
+ case VINF_EM_SUSPEND:
+ case VINF_EM_RESCHEDULE:
+ case VINF_EM_RESCHEDULE_RAW:
+ case VINF_EM_RESCHEDULE_REM:
+ case VINF_EM_HALT:
+ if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
+ AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
+ if (rc == VINF_SUCCESS)
+ rc = VINF_EM_RESCHEDULE;
+ return rc;
+
+ /*
+ * The debugger isn't attached.
+ * We'll simply turn the thing off since that's the easiest thing to do.
+ */
+ case VERR_DBGF_NOT_ATTACHED:
+ switch (VBOXSTRICTRC_VAL(rcLast))
+ {
+ case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ case VERR_TRPM_PANIC:
+ case VERR_TRPM_DONT_PANIC:
+ case VERR_VMM_RING0_ASSERTION:
+ case VERR_VMM_HYPER_CR3_MISMATCH:
+ case VERR_VMM_RING3_CALL_DISABLED:
+ return rcLast;
+ }
+ return VINF_EM_OFF;
+
+ /*
+ * Status codes terminating the VM in one or another sense.
+ */
+ case VINF_EM_TERMINATE:
+ case VINF_EM_OFF:
+ case VINF_EM_RESET:
+ case VINF_EM_NO_MEMORY:
+ case VINF_EM_RAW_STALE_SELECTOR:
+ case VINF_EM_RAW_IRET_TRAP:
+ case VERR_TRPM_PANIC:
+ case VERR_TRPM_DONT_PANIC:
+ case VERR_IEM_INSTR_NOT_IMPLEMENTED:
+ case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
+ case VERR_VMM_RING0_ASSERTION:
+ case VERR_VMM_HYPER_CR3_MISMATCH:
+ case VERR_VMM_RING3_CALL_DISABLED:
+ case VERR_INTERNAL_ERROR:
+ case VERR_INTERNAL_ERROR_2:
+ case VERR_INTERNAL_ERROR_3:
+ case VERR_INTERNAL_ERROR_4:
+ case VERR_INTERNAL_ERROR_5:
+ case VERR_IPE_UNEXPECTED_STATUS:
+ case VERR_IPE_UNEXPECTED_INFO_STATUS:
+ case VERR_IPE_UNEXPECTED_ERROR_STATUS:
+ return rc;
+
+ /*
+ * The rest is unexpected, and will keep us here.
+ */
+ default:
+ AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
+ break;
+ }
+ } /* debug for ever */
+}
+
+
+#if defined(VBOX_WITH_REM) || defined(DEBUG)
+/**
+ * Steps recompiled code.
+ *
+ * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
+ * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
+{
+ Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+
+ int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
+
+ Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+ return rc;
+}
+#endif /* VBOX_WITH_REM || DEBUG */
+
+
+/**
+ * Executes recompiled code.
+ *
+ * This function contains the recompiler version of the inner
+ * execution loop (the outer loop being in EMR3ExecuteVM()).
+ *
+ * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
+ * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pfFFDone Where to store an indicator telling whether or not
+ * FFs were done before returning.
+ *
+ */
+static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+#ifdef LOG_ENABLED
+ uint32_t cpl = CPUMGetGuestCPL(pVCpu);
+
+ if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+ Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ else
+ Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
+#endif
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
+
+ /*
+     * Spin until we get a forced action which returns anything but VINF_SUCCESS,
+     * or until REM suggests raw-mode execution.
+ */
+ *pfFFDone = false;
+ uint32_t cLoops = 0;
+ int rc = VINF_SUCCESS;
+ for (;;)
+ {
+ /*
+ * Execute REM.
+ */
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
+ {
+ STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
+ rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
+ }
+ else
+ {
+ /* Give up this time slice; virtual time continues */
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
+ RTThreadSleep(5);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
+ rc = VINF_SUCCESS;
+ }
+
+ /*
+ * Deal with high priority post execution FFs before doing anything
+ * else. Sync back the state and leave the lock to be on the safe side.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
+
+ /*
+ * Process the returned status code.
+ */
+ if (rc != VINF_SUCCESS)
+ {
+ if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
+ break;
+ if (rc != VINF_REM_INTERRUPED_FF)
+ {
+                /* Try to dodge unimplemented IEM trouble by rescheduling. */
+ if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
+ || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+ {
+ EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
+ if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
+ {
+ rc = VINF_EM_RESCHEDULE;
+ break;
+ }
+ }
+
+ /*
+ * Anything which is not known to us means an internal error
+ * and the termination of the VM!
+ */
+ AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
+ break;
+ }
+ }
+
+
+ /*
+ * Check and execute forced actions.
+ *
+ * Sync back the VM state and leave the lock before calling any of
+ * these, you never know what's going to happen here.
+ */
+#ifdef VBOX_HIGH_RES_TIMERS_HACK
+ TMTimerPollVoid(pVM, pVCpu);
+#endif
+ AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
+ {
+ STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
+ rc = emR3ForcedActions(pVM, pVCpu, rc);
+ VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
+ STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
+ if ( rc != VINF_SUCCESS
+ && rc != VINF_EM_RESCHEDULE_REM)
+ {
+ *pfFFDone = true;
+ break;
+ }
+ }
+
+ /*
+ * Have to check if we can get back to fast execution mode every so often.
+ */
+ if (!(++cLoops & 7))
+ {
+ EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
+ if ( enmCheck != EMSTATE_REM
+ && enmCheck != EMSTATE_IEM_THEN_REM)
+ {
+ LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
+ return VINF_EM_RESCHEDULE;
+ }
+ Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
+ }
+
+ } /* The Inner Loop, recompiled execution mode version. */
+
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
+ return rc;
+}
+
+
+#ifdef DEBUG
+
+int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
+{
+ EMSTATE enmOldState = pVCpu->em.s.enmState;
+
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
+
+ Log(("Single step BEGIN:\n"));
+ for (uint32_t i = 0; i < cIterations; i++)
+ {
+ DBGFR3PrgStep(pVCpu);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
+ emR3RemStep(pVM, pVCpu);
+ if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
+ break;
+ }
+ Log(("Single step END:\n"));
+ CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
+ pVCpu->em.s.enmState = enmOldState;
+ return VINF_EM_RESCHEDULE;
+}
+
+#endif /* DEBUG */
+
+
+/**
+ * Try execute the problematic code in IEM first, then fall back on REM if there
+ * is too much of it or if IEM doesn't implement something.
+ *
+ * @returns Strict VBox status code from IEMExecLots.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pfFFDone Force flags done indicator.
+ *
+ * @thread EMT(pVCpu)
+ */
+static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+ LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+ *pfFFDone = false;
+
+ /*
+ * Execute in IEM for a while.
+ */
+ while (pVCpu->em.s.cIemThenRemInstructions < 1024)
+ {
+ uint32_t cInstructions;
+ VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
+ UINT32_MAX/2 /*cPollRate*/, &cInstructions);
+ pVCpu->em.s.cIemThenRemInstructions += cInstructions;
+ if (rcStrict != VINF_SUCCESS)
+ {
+ if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
+ || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+ break;
+
+ Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
+ VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
+ return rcStrict;
+ }
+
+ EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
+ if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
+ {
+ LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
+ enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
+ pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
+ pVCpu->em.s.enmState = enmNewState;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Check for pending actions.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Switch to REM.
+ */
+ Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
+ pVCpu->em.s.enmState = EMSTATE_REM;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Decides which execution state to use next (wait-for-SIPI, HM, NEM, IEM or IEM-then-REM).
+ *
+ * @returns new EM state
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * We stay in the wait for SIPI state unless explicitly told otherwise.
+ */
+ if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
+ return EMSTATE_WAIT_SIPI;
+
+ /*
+ * Execute everything in IEM?
+ */
+ if ( pVM->em.s.fIemExecutesAll
+ || VM_IS_EXEC_ENGINE_IEM(pVM))
+ return EMSTATE_IEM;
+
+ if (VM_IS_HM_ENABLED(pVM))
+ {
+ if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+ return EMSTATE_HM;
+ }
+ else if (NEMR3CanExecuteGuest(pVM, pVCpu))
+ return EMSTATE_NEM;
+
+ /*
+ * Note! Raw mode and hw accelerated mode are incompatible. The latter
+ * turns off monitoring features essential for raw mode!
+ */
+ return EMSTATE_IEM_THEN_REM;
+}
+
+
+/**
+ * Executes all high priority post execution force actions.
+ *
+ * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
+ * fatal error status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc The current strict VBox status code rc.
+ */
+VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
+{
+ VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
+
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
+ PDMCritSectBothFF(pVM, pVCpu);
+
+ /* Update CR3 (Nested Paging case for HM). */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
+ int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+ }
+
+ /* IEM has pending work (typically memory write after INS instruction). */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
+ rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
+
+    /* IOM has pending work (committing an I/O or MMIO write). */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
+ {
+ rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
+ if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
+ { /* half likely, or at least it's a line shorter. */ }
+ else if (rc == VINF_SUCCESS)
+ rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
+ else
+ pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
+ }
+
+ if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ {
+ if ( rc > VINF_EM_NO_MEMORY
+ && rc <= VINF_EM_LAST)
+ rc = VINF_EM_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
+ *
+ * @returns VBox status code.
+ * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /* Handle the "external interrupt" VM-exit intercept. */
+ if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
+ {
+ VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
+ AssertMsg( rcStrict != VINF_VMX_VMEXIT
+ && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+ return VBOXSTRICTRC_TODO(rcStrict);
+ }
+#else
+ RT_NOREF(pVCpu);
+#endif
+ return VINF_NO_CHANGE;
+}
+
+
+/**
+ * Helper for emR3ForcedActions() for SVM interrupt intercept.
+ *
+ * @returns VBox status code.
+ * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
+ if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
+ {
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
+ VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+ if (RT_SUCCESS(rcStrict))
+ {
+ AssertMsg( rcStrict != VINF_SVM_VMEXIT
+ && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+
+ AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return VINF_EM_TRIPLE_FAULT;
+ }
+#else
+ NOREF(pVCpu);
+#endif
+ return VINF_NO_CHANGE;
+}
+
+
+/**
+ * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
+ *
+ * @returns VBox status code.
+ * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
+ {
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
+ VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
+ if (RT_SUCCESS(rcStrict))
+ {
+ Assert(rcStrict != VINF_SVM_VMEXIT);
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+ AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return VINF_EM_TRIPLE_FAULT;
+ }
+#else
+ NOREF(pVCpu);
+#endif
+ return VINF_NO_CHANGE;
+}
+
+
+/**
+ * Executes all pending forced actions.
+ *
+ * Forced actions can cause execution delays and execution
+ * rescheduling. The first we deal with using action priority, so
+ * that for instance pending timers aren't scheduled and run until
+ * right before execution. The rescheduling we deal with using
+ * return codes. The same goes for VM termination, only in that case
+ * we exit everything.
+ *
+ * @returns VBox status code of equal or greater importance/severity than rc.
+ * The most important ones are: VINF_EM_RESCHEDULE,
+ * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc The current rc.
+ *
+ */
+int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
+{
+ STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
+#ifdef VBOX_STRICT
+ int rcIrq = VINF_SUCCESS;
+#endif
+ int rc2;
+#define UPDATE_RC() \
+ do { \
+ AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
+ if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
+ break; \
+ if (!rc || rc2 < rc) \
+ rc = rc2; \
+ } while (0)
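+    /* Illustration (editorial): within VINF_EM_FIRST..VINF_EM_LAST lower values
+       are more important, so UPDATE_RC() keeps the lowest non-zero code seen so
+       far and never replaces an error (rc < VINF_SUCCESS) with an info code. */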
+ VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
+
+ /*
+ * Post execution chunk first.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
+ || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
+ {
+ /*
+ * EMT Rendezvous (must be serviced before termination).
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
+ UPDATE_RC();
+ /** @todo HACK ALERT! The following test is to make sure EM+TM
+ * thinks the VM is stopped/reset before the next VM state change
+ * is made. We need a better solution for this, or at least make it
+ * possible to do: (rc >= VINF_EM_FIRST && rc <=
+ * VINF_EM_SUSPEND). */
+ if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc;
+ }
+ }
+
+ /*
+ * State change request (cleared by vmR3SetStateLocked).
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
+ {
+ VMSTATE enmState = VMR3GetState(pVM);
+ switch (enmState)
+ {
+ case VMSTATE_FATAL_ERROR:
+ case VMSTATE_FATAL_ERROR_LS:
+ case VMSTATE_GURU_MEDITATION:
+ case VMSTATE_GURU_MEDITATION_LS:
+ Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return VINF_EM_SUSPEND;
+
+ case VMSTATE_DESTROYING:
+ Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return VINF_EM_TERMINATE;
+
+ default:
+ AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
+ }
+ }
+
+ /*
+ * Debugger Facility polling.
+ */
+ if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
+ /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
+ * somewhere before we get here, I would think. */
+ if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
+ rc = rc2;
+ else
+ UPDATE_RC();
+ }
+
+ /*
+ * Postponed reset request.
+ */
+ if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
+ UPDATE_RC();
+ }
+
+ /*
+ * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ {
+ rc2 = PGMR3PhysAllocateHandyPages(pVM);
+ UPDATE_RC();
+ if (rc == VINF_EM_NO_MEMORY)
+ return rc;
+ }
+
+ /* check that we got them all */
+ AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
+ AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
+ }
+
+ /*
+ * Normal priority then.
+ * (Executed in no particular order.)
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
+ {
+ /*
+ * PDM Queues are pending.
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
+ PDMR3QueueFlushAll(pVM);
+
+ /*
+ * PDM DMA transfers are pending.
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
+ PDMR3DmaRun(pVM);
+
+ /*
+ * EMT Rendezvous (make sure they are handled before the requests).
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
+ UPDATE_RC();
+ /** @todo HACK ALERT! The following test is to make sure EM+TM
+ * thinks the VM is stopped/reset before the next VM state change
+ * is made. We need a better solution for this, or at least make it
+ * possible to do: (rc >= VINF_EM_FIRST && rc <=
+ * VINF_EM_SUSPEND). */
+ if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc;
+ }
+ }
+
+ /*
+ * Requests from other threads.
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
+ if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc2;
+ }
+ UPDATE_RC();
+ /** @todo HACK ALERT! The following test is to make sure EM+TM
+ * thinks the VM is stopped/reset before the next VM state change
+ * is made. We need a better solution for this, or at least make it
+ * possible to do: (rc >= VINF_EM_FIRST && rc <=
+ * VINF_EM_SUSPEND). */
+ if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc;
+ }
+ }
+
+ /* check that we got them all */
+ AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
+ }
+
+ /*
+ * Normal priority then. (per-VCPU)
+ * (Executed in no particular order.)
+ */
+ if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
+ && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
+ {
+ /*
+ * Requests from other threads.
+ */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
+ if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc2;
+ }
+ UPDATE_RC();
+ /** @todo HACK ALERT! The following test is to make sure EM+TM
+ * thinks the VM is stopped/reset before the next VM state change
+ * is made. We need a better solution for this, or at least make it
+ * possible to do: (rc >= VINF_EM_FIRST && rc <=
+ * VINF_EM_SUSPEND). */
+ if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc;
+ }
+ }
+
+ /* check that we got them all */
+ Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
+ }
+
+ /*
+ * High priority pre execution chunk last.
+ * (Executed in ascending priority order.)
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
+ {
+ /*
+ * Timers before interrupts.
+ */
+ if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
+ && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ TMR3TimerQueuesDo(pVM);
+
+ /*
+ * Pick up asynchronously posted interrupts into the APIC.
+ */
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+ APICUpdatePendingInterrupts(pVCpu);
+
+ /*
+ * The instruction following an emulated STI should *always* be executed!
+ *
+ * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
+ * the eip is the same as the inhibited instr address. Before we
+ * are able to execute this instruction in raw mode (iret to
+ * guest code) an external interrupt might force a world switch
+ * again. Possibly allowing a guest interrupt to be dispatched
+ * in the process. This could break the guest. Sounds very
+ * unlikely, but such timing sensitive problem are not as rare as
+ * you might think.
+ *
+ * Note! This used to be a force action flag. Can probably ditch this code.
+ */
+ if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
+ && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ {
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
+ if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
+ {
+ CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
+ Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
+ (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
+ }
+ else
+ Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
+ }
+
+ /** @todo SMIs. If we implement SMIs, this is where they will have to be
+ * delivered. */
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
+ {
+ /*
+ * VMX Nested-guest APIC-write pending (can cause VM-exits).
+             * Takes priority even over SMI and INIT signals.
+ * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
+ */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
+ {
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
+ if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+ UPDATE_RC();
+ }
+
+ /*
+ * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
+ * Takes priority over "Traps on the previous instruction".
+ * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
+ */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
+ {
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
+ Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ UPDATE_RC();
+ }
+
+ /*
+ * VMX Nested-guest preemption timer VM-exit.
+ * Takes priority over NMI-window VM-exits.
+ */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
+ {
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
+ Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ UPDATE_RC();
+ }
+ Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
+ }
+#endif
+
+ /*
+ * Guest event injection.
+ */
+ Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
+ bool fWakeupPending = false;
+ if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
+ | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+ && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
+ && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
+ && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
+ /** @todo r=bird: But interrupt shadows probably do not block vmexits due to host interrupts... */
+ && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
+ {
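+            /* GIF is AMD-V's global interrupt flag: while it is clear, no
+               interrupts of any kind are delivered to the guest. On
+               configurations without SVM nested hw.virt it reads as set. */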
+ if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
+ {
+ bool fInVmxNonRootMode;
+ bool fInSvmHwvirtMode;
+ if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
+ {
+ fInVmxNonRootMode = false;
+ fInSvmHwvirtMode = false;
+ }
+ else
+ {
+ fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
+ fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
+ }
+
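+                /* Note! The otherwise pointless 'if (0)' anchors the else-if
+                   chain below so that every conditionally compiled branch can
+                   start with 'else if', no matter which of the
+                   VBOX_WITH_NESTED_HWVIRT_* blocks are built in. */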
+ if (0)
+ { }
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /*
+ * VMX NMI-window VM-exit.
+ * Takes priority over non-maskable interrupts (NMIs).
+ * Interrupt shadows block NMI-window VM-exits.
+ * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
+ *
+ * See Intel spec. 25.2 "Other Causes Of VM Exits".
+ * See Intel spec. 26.7.6 "NMI-Window Exiting".
+ */
+ else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
+ && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
+ {
+ Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
+ Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
+ AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
+ && rc2 != VINF_VMX_VMEXIT
+ && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
+ UPDATE_RC();
+ }
+#endif
+ /*
+ * NMIs (take priority over external interrupts).
+ */
+ else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+ && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
+ {
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if ( fInVmxNonRootMode
+ && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
+ {
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
+ Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ UPDATE_RC();
+ }
+ else
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ if ( fInSvmHwvirtMode
+ && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
+ {
+ rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
+ AssertMsg( rc2 != VINF_SVM_VMEXIT
+ && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
+ UPDATE_RC();
+ }
+ else
+#endif
+ {
+ rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
+ if (rc2 == VINF_SUCCESS)
+ {
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+ fWakeupPending = true;
+ if (pVM->em.s.fIemExecutesAll)
+ rc2 = VINF_EM_RESCHEDULE;
+ else
+ {
+ rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
+ : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
+ : VINF_EM_RESCHEDULE_REM;
+ }
+ }
+ UPDATE_RC();
+ }
+ }
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /*
+ * VMX Interrupt-window VM-exits.
+ * Takes priority over external interrupts.
+ */
+ else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
+ && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
+ {
+ Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
+ Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
+ rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
+ AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
+ && rc2 != VINF_VMX_VMEXIT
+ && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
+ UPDATE_RC();
+ }
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+            /** @todo NSTSVM: Handle this for SVM here too later, not when an
+             *        interrupt is actually pending as we currently do. */
+#endif
+ /*
+ * External interrupts.
+ */
+ else
+ {
+ /*
+                 * VMX: virtual interrupts take priority over physical interrupts.
+                 * SVM: physical interrupts take priority over virtual interrupts.
+ */
+ if ( fInVmxNonRootMode
+ && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+ && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
+ {
+ /** @todo NSTVMX: virtual-interrupt delivery. */
+ rc2 = VINF_SUCCESS;
+ }
+ else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+ && CPUMIsGuestPhysIntrEnabled(pVCpu))
+ {
+ Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
+ if (fInVmxNonRootMode)
+ rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
+ else if (fInSvmHwvirtMode)
+ rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
+ else
+ rc2 = VINF_NO_CHANGE;
+
+ if (rc2 == VINF_NO_CHANGE)
+ {
+ bool fInjected = false;
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+ /** @todo this really isn't nice, should properly handle this */
+ /* Note! This can still cause a VM-exit (on Intel). */
+ LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
+ pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
+ rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
+ fWakeupPending = true;
+ if ( pVM->em.s.fIemExecutesAll
+ && ( rc2 == VINF_EM_RESCHEDULE_REM
+ || rc2 == VINF_EM_RESCHEDULE_HM
+ || rc2 == VINF_EM_RESCHEDULE_RAW))
+ {
+ rc2 = VINF_EM_RESCHEDULE;
+ }
+#ifdef VBOX_STRICT
+ if (fInjected)
+ rcIrq = rc2;
+#endif
+ }
+ UPDATE_RC();
+ }
+ else if ( fInSvmHwvirtMode
+ && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+ && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
+ {
+ rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
+ if (rc2 == VINF_NO_CHANGE)
+ {
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+ uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
+ AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
+ TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
+ Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
+ rc2 = VINF_EM_RESCHEDULE;
+#ifdef VBOX_STRICT
+ rcIrq = rc2;
+#endif
+ }
+ UPDATE_RC();
+ }
+ }
+ } /* CPUMGetGuestGif */
+ }
+
+ /*
+ * Allocate handy pages.
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
+ {
+ rc2 = PGMR3PhysAllocateHandyPages(pVM);
+ UPDATE_RC();
+ }
+
+ /*
+ * Debugger Facility request.
+ */
+ if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
+ && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
+ UPDATE_RC();
+ }
+
+ /*
+ * EMT Rendezvous (must be serviced before termination).
+ */
+ if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
+ && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
+ rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
+ UPDATE_RC();
+        /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
+         *  stopped/reset before the next VM state change is made. We need a better
+         *  solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
+         *  && rc <= VINF_EM_SUSPEND). */
+ if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
+ {
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return rc;
+ }
+ }
+
+ /*
+ * State change request (cleared by vmR3SetStateLocked).
+ */
+ if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
+ && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
+ {
+ VMSTATE enmState = VMR3GetState(pVM);
+ switch (enmState)
+ {
+ case VMSTATE_FATAL_ERROR:
+ case VMSTATE_FATAL_ERROR_LS:
+ case VMSTATE_GURU_MEDITATION:
+ case VMSTATE_GURU_MEDITATION_LS:
+ Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return VINF_EM_SUSPEND;
+
+ case VMSTATE_DESTROYING:
+ Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+ return VINF_EM_TERMINATE;
+
+ default:
+ AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
+ }
+ }
+
+ /*
+ * Out of memory? Since most of our fellow high priority actions may cause us
+ * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
+ * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
+ * than us since we can terminate without allocating more memory.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ {
+ rc2 = PGMR3PhysAllocateHandyPages(pVM);
+ UPDATE_RC();
+ if (rc == VINF_EM_NO_MEMORY)
+ return rc;
+ }
+
+ /*
+ * If the virtual sync clock is still stopped, make TM restart it.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
+ TMR3VirtualSyncFF(pVM, pVCpu);
+
+#ifdef DEBUG
+ /*
+ * Debug, pause the VM.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
+ {
+ VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
+ Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
+ return VINF_EM_SUSPEND;
+ }
+#endif
+
+ /* check that we got them all */
+ AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
+ AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
+ }
+
+#undef UPDATE_RC
+ Log2(("emR3ForcedActions: returns %Rrc\n", rc));
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
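+    /* Strict builds: check that the status of an injected interrupt (rcIrq)
+       wasn't overridden by a later, lower-priority status update. */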
+ Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
+ return rc;
+}
+
+
+/**
+ * Checks whether the preset execution time cap still allows guest execution
+ * to be scheduled.
+ *
+ * @returns true if execution is allowed, false otherwise.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
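+ *
+ * @remarks Worked example (illustrative values): with an EM_TIME_SLICE of
+ *          100 ms and uCpuExecutionCap set to 50, the EMT may burn at most
+ *          50 ms of combined kernel+user time per 100 ms wall-clock slice
+ *          before this function starts returning false.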
+ */
+bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
+{
+ uint64_t u64UserTime, u64KernelTime;
+
+ if ( pVM->uCpuExecutionCap != 100
+ && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
+ {
+ uint64_t u64TimeNow = RTTimeMilliTS();
+ if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
+ {
+ /* New time slice. */
+ pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
+ pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
+ pVCpu->em.s.u64TimeSliceExec = 0;
+ }
+ pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
+
+ Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
+ if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
+ return false;
+ }
+ return true;
+}
+
+
+/**
+ * Execute VM.
+ *
+ * This function is the main loop of the VM. The emulation thread
+ * calls this function when the VM has been successfully constructed
+ * and we're ready to execute the VM.
+ *
+ * Returning from this function means that the VM is turned off or
+ * suspended (state already saved) and deconstruction is next in line.
+ *
+ * All interaction from other threads is done using forced actions
+ * and signalling of the wait object.
+ *
+ * @returns VBox status code; informational status codes may indicate failure.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
+{
+ Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
+ pVM,
+ pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
+ pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
+ pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
+ VM_ASSERT_EMT(pVM);
+ AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
+ || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
+ || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
+ ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
+
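+    /* setjmp returns 0 on the initial call; a non-zero rc here means that
+       some deeper code longjmp'ed back with a fatal error status. */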
+ int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
+ if (rc == 0)
+ {
+ /*
+ * Start the virtual time.
+ */
+ TMR3NotifyResume(pVM, pVCpu);
+
+ /*
+ * The Outer Main Loop.
+ */
+ bool fFFDone = false;
+
+ /* Reschedule right away to start in the right state. */
+ rc = VINF_SUCCESS;
+
+        /* If resuming after a pause or a state load, restore the previous
+           state so we don't start executing code right away; otherwise just
+           reschedule. */
+ if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
+ && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
+ || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
+ pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
+ else
+ pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
+
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
+ for (;;)
+ {
+ /*
+             * Before we can schedule anything (we're here because
+             * scheduling is required), we must service any pending
+             * forced actions to avoid a pending action causing
+             * immediate rescheduling upon entering an inner loop.
+ *
+ * Do forced actions.
+ */
+ if ( !fFFDone
+ && RT_SUCCESS(rc)
+ && rc != VINF_EM_TERMINATE
+ && rc != VINF_EM_OFF
+ && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
+ {
+ rc = emR3ForcedActions(pVM, pVCpu, rc);
+ VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
+ }
+ else if (fFFDone)
+ fFFDone = false;
+
+#ifdef VBOX_STRICT
+ CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
+#endif
+
+ /*
+ * Now what to do?
+ */
+ Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
+ EMSTATE const enmOldState = pVCpu->em.s.enmState;
+ switch (rc)
+ {
+ /*
+ * Keep doing what we're currently doing.
+ */
+ case VINF_SUCCESS:
+ break;
+
+ /*
+ * Reschedule - to raw-mode execution.
+ */
+/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
+ case VINF_EM_RESCHEDULE_RAW:
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+ AssertLogRelFailed();
+ pVCpu->em.s.enmState = EMSTATE_NONE;
+ break;
+
+ /*
+ * Reschedule - to HM or NEM.
+ */
+ case VINF_EM_RESCHEDULE_HM:
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+ if (VM_IS_HM_ENABLED(pVM))
+ {
+ if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+ {
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
+ pVCpu->em.s.enmState = EMSTATE_HM;
+ }
+ else
+ {
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_IEM_THEN_REM)\n", enmOldState, EMSTATE_IEM_THEN_REM));
+ pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
+ }
+ }
+ else if (VM_IS_NEM_ENABLED(pVM))
+ {
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
+ pVCpu->em.s.enmState = EMSTATE_NEM;
+ }
+ else
+ {
+ AssertLogRelFailed();
+ pVCpu->em.s.enmState = EMSTATE_NONE;
+ }
+ break;
+
+ /*
+ * Reschedule - to recompiled execution.
+ */
+ case VINF_EM_RESCHEDULE_REM:
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
+ enmOldState, EMSTATE_IEM_THEN_REM));
+ if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
+ {
+ pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ }
+ break;
+
+ /*
+ * Resume.
+ */
+ case VINF_EM_RESUME:
+ Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
+ /* Don't reschedule in the halted or wait for SIPI case. */
+ if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
+ || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
+ {
+ pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
+ break;
+ }
+ /* fall through and get scheduled. */
+ RT_FALL_THRU();
+
+ /*
+ * Reschedule.
+ */
+ case VINF_EM_RESCHEDULE:
+ {
+ EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
+ if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ pVCpu->em.s.enmState = enmState;
+ break;
+ }
+
+ /*
+ * Halted.
+ */
+ case VINF_EM_HALT:
+ Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
+ pVCpu->em.s.enmState = EMSTATE_HALTED;
+ break;
+
+ /*
+                 * Switch to the wait-for-SIPI state (application processors only).
+ */
+ case VINF_EM_WAIT_SIPI:
+ Assert(pVCpu->idCpu != 0);
+ Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
+ pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
+ break;
+
+
+ /*
+ * Suspend.
+ */
+ case VINF_EM_SUSPEND:
+ Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
+ Assert(enmOldState != EMSTATE_SUSPENDED);
+ pVCpu->em.s.enmPrevState = enmOldState;
+ pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
+ break;
+
+ /*
+ * Reset.
+                 * We might end up doing a double reset for now; we'll have to clean up the mess later.
+ */
+ case VINF_EM_RESET:
+ {
+ if (pVCpu->idCpu == 0)
+ {
+ EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
+ Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
+ if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ pVCpu->em.s.enmState = enmState;
+ }
+ else
+ {
+ /* All other VCPUs go into the wait for SIPI state. */
+ pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
+ }
+ break;
+ }
+
+ /*
+ * Power Off.
+ */
+ case VINF_EM_OFF:
+ pVCpu->em.s.enmState = EMSTATE_TERMINATING;
+ Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ return rc;
+
+ /*
+ * Terminate the VM.
+ */
+ case VINF_EM_TERMINATE:
+ pVCpu->em.s.enmState = EMSTATE_TERMINATING;
+ Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
+ if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ return rc;
+
+
+ /*
+ * Out of memory, suspend the VM and stuff.
+ */
+ case VINF_EM_NO_MEMORY:
+ Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
+ Assert(enmOldState != EMSTATE_SUSPENDED);
+ pVCpu->em.s.enmPrevState = enmOldState;
+ pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+
+ rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
+ N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
+ if (rc != VINF_EM_SUSPEND)
+ {
+ if (RT_SUCCESS_NP(rc))
+ {
+ AssertLogRelMsgFailed(("%Rrc\n", rc));
+ rc = VERR_EM_INTERNAL_ERROR;
+ }
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ }
+ return rc;
+
+ /*
+ * Guest debug events.
+ */
+ case VINF_EM_DBG_STEPPED:
+ case VINF_EM_DBG_STOP:
+ case VINF_EM_DBG_EVENT:
+ case VINF_EM_DBG_BREAKPOINT:
+ case VINF_EM_DBG_STEP:
+ if (enmOldState == EMSTATE_RAW)
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
+ }
+ else if (enmOldState == EMSTATE_HM)
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
+ }
+ else if (enmOldState == EMSTATE_NEM)
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
+ }
+ else if (enmOldState == EMSTATE_REM)
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
+ }
+ else
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
+ }
+ break;
+
+ /*
+ * Hypervisor debug events.
+ */
+ case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
+ break;
+
+ /*
+ * Triple fault.
+ */
+ case VINF_EM_TRIPLE_FAULT:
+ if (!pVM->em.s.fGuruOnTripleFault)
+ {
+ Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
+ rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
+ Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
+ continue;
+ }
+ /* Else fall through and trigger a guru. */
+ RT_FALL_THRU();
+
+ case VERR_VMM_RING0_ASSERTION:
+ Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ break;
+
+ /*
+                 * Any error code showing up here other than the ones we
+                 * know and process above is considered to be FATAL.
+ *
+ * Unknown warnings and informational status codes are also
+ * included in this.
+ */
+ default:
+ if (RT_SUCCESS_NP(rc))
+ {
+ AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
+ rc = VERR_EM_INTERNAL_ERROR;
+ }
+ Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ break;
+ }
+
+ /*
+ * Act on state transition.
+ */
+ EMSTATE const enmNewState = pVCpu->em.s.enmState;
+ if (enmOldState != enmNewState)
+ {
+ VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
+
+ /* Clear MWait flags and the unhalt FF. */
+ if ( enmOldState == EMSTATE_HALTED
+ && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
+ && ( enmNewState == EMSTATE_RAW
+ || enmNewState == EMSTATE_HM
+ || enmNewState == EMSTATE_NEM
+ || enmNewState == EMSTATE_REM
+ || enmNewState == EMSTATE_IEM_THEN_REM
+ || enmNewState == EMSTATE_DEBUG_GUEST_RAW
+ || enmNewState == EMSTATE_DEBUG_GUEST_HM
+ || enmNewState == EMSTATE_DEBUG_GUEST_NEM
+ || enmNewState == EMSTATE_DEBUG_GUEST_IEM
+ || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
+ {
+ if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
+ {
+ LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
+ pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
+ }
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
+ {
+ LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
+ }
+ }
+ }
+ else
+ VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
+
+ STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
+ STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
+
+ /*
+ * Act on the new state.
+ */
+ switch (enmNewState)
+ {
+ /*
+ * Execute raw.
+ */
+ case EMSTATE_RAW:
+ AssertLogRelMsgFailed(("%Rrc\n", rc));
+ rc = VERR_EM_INTERNAL_ERROR;
+ break;
+
+ /*
+ * Execute hardware accelerated raw.
+ */
+ case EMSTATE_HM:
+ rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
+ break;
+
+ /*
+                 * Execute using the native execution manager (NEM).
+ */
+ case EMSTATE_NEM:
+ rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
+ break;
+
+ /*
+ * Execute recompiled.
+ */
+ case EMSTATE_REM:
+ rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
+ Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
+ break;
+
+ /*
+ * Execute in the interpreter.
+ */
+ case EMSTATE_IEM:
+ {
+ uint32_t cInstructions = 0;
+#if 0 /* For testing purposes. */
+ STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
+ rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
+ if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
+ rc = VINF_SUCCESS;
+ else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
+#endif
+ rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
+ if (pVM->em.s.fIemExecutesAll)
+ {
+ Assert(rc != VINF_EM_RESCHEDULE_REM);
+ Assert(rc != VINF_EM_RESCHEDULE_RAW);
+ Assert(rc != VINF_EM_RESCHEDULE_HM);
+#ifdef VBOX_HIGH_RES_TIMERS_HACK
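+                    /* If IEM executed fewer than ~2048 instructions it may not
+                       have reached its internal poll point (cPollRate above),
+                       so poll the timers here. (Assumed rationale.) */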
+ if (cInstructions < 2048)
+ TMTimerPollVoid(pVM, pVCpu);
+#endif
+ }
+ fFFDone = false;
+ break;
+ }
+
+ /*
+             * Execute in IEM, hoping we can quickly switch back to HM
+             * or RAW execution. If our hopes fail, we go to REM.
+ */
+ case EMSTATE_IEM_THEN_REM:
+ {
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
+ rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
+ break;
+ }
+
+ /*
+ * Application processor execution halted until SIPI.
+ */
+ case EMSTATE_WAIT_SIPI:
+ /* no break */
+ /*
+ * hlt - execution halted until interrupt.
+ */
+ case EMSTATE_HALTED:
+ {
+ STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
+                    /* If HM (or someone else) stores a pending interrupt in
+                       TRPM, it must be dispatched ASAP without any halting.
+                       Anything pending in TRPM has been accepted and the CPU
+                       should already be in the right state to receive it. */
+ if (TRPMHasTrap(pVCpu))
+ rc = VINF_EM_RESCHEDULE;
+ /* MWAIT has a special extension where it's woken up when
+ an interrupt is pending even when IF=0. */
+ else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
+ == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
+ {
+ rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
+ if (rc == VINF_SUCCESS)
+ {
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+ APICUpdatePendingInterrupts(pVCpu);
+
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+ | VMCPU_FF_INTERRUPT_NESTED_GUEST
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
+ {
+ Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
+ rc = VINF_EM_RESCHEDULE;
+ }
+ }
+ }
+ else
+ {
+ rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
+                        /* We're only interested in NMIs/SMIs here, which have their own
+                           FFs, so we don't need to check VMCPU_FF_UPDATE_APIC. */
+ if ( rc == VINF_SUCCESS
+ && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
+ {
+ Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
+ rc = VINF_EM_RESCHEDULE;
+ }
+ }
+
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
+ break;
+ }
+
+ /*
+ * Suspended - return to VM.cpp.
+ */
+ case EMSTATE_SUSPENDED:
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
+ return VINF_EM_SUSPEND;
+
+ /*
+ * Debugging in the guest.
+ */
+ case EMSTATE_DEBUG_GUEST_RAW:
+ case EMSTATE_DEBUG_GUEST_HM:
+ case EMSTATE_DEBUG_GUEST_NEM:
+ case EMSTATE_DEBUG_GUEST_IEM:
+ case EMSTATE_DEBUG_GUEST_REM:
+ TMR3NotifySuspend(pVM, pVCpu);
+ rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
+ TMR3NotifyResume(pVM, pVCpu);
+ Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+ break;
+
+ /*
+ * Debugging in the hypervisor.
+ */
+ case EMSTATE_DEBUG_HYPER:
+ {
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+
+ rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
+ Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+ if (rc != VINF_SUCCESS)
+ {
+ if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
+ pVCpu->em.s.enmState = EMSTATE_TERMINATING;
+ else
+ {
+ /* switch to guru meditation mode */
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
+ VMMR3FatalDump(pVM, pVCpu, rc);
+ }
+ Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
+ return rc;
+ }
+
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
+ TMR3NotifyResume(pVM, pVCpu);
+ break;
+ }
+
+ /*
+ * Guru meditation takes place in the debugger.
+ */
+ case EMSTATE_GURU_MEDITATION:
+ {
+ TMR3NotifySuspend(pVM, pVCpu);
+ VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
+ VMMR3FatalDump(pVM, pVCpu, rc);
+ emR3Debug(pVM, pVCpu, rc);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
+ return rc;
+ }
+
+ /*
+ * The states we don't expect here.
+ */
+ case EMSTATE_NONE:
+ case EMSTATE_TERMINATING:
+ default:
+ AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ TMR3NotifySuspend(pVM, pVCpu);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
+ return VERR_EM_INTERNAL_ERROR;
+ }
+ } /* The Outer Main Loop */
+ }
+ else
+ {
+ /*
+ * Fatal error.
+ */
+ Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
+ TMR3NotifySuspend(pVM, pVCpu);
+ VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
+ VMMR3FatalDump(pVM, pVCpu, rc);
+ emR3Debug(pVM, pVCpu, rc);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
+ /** @todo change the VM state! */
+ return rc;
+ }
+
+ /* not reached */
+}
+
diff --git a/src/VBox/VMM/VMMR3/EMHM.cpp b/src/VBox/VMM/VMMR3/EMHM.cpp
new file mode 100644
index 00000000..4095d39d
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/EMHM.cpp
@@ -0,0 +1,487 @@
+/* $Id: EMHM.cpp $ */
+/** @file
+ * EM - Execution Monitor / Manager - hardware virtualization
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmqueue.h>
+#include <VBox/vmm/hm.h>
+#include "EMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpumdis.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/err.h>
+#include "VMMTracing.h"
+
+#include <iprt/asm.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, int rc);
+DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
+static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
+static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu);
+
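+/* EMHandleRCTmpl.h is a code template: with EMHANDLERC_WITH_HM and the two
+   emR3Execute* mappings below defined, including it instantiates the HM
+   flavour of emR3HmHandleRC() forward declared above. */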
+#define EMHANDLERC_WITH_HM
+#define emR3ExecuteInstruction emR3HmExecuteInstruction
+#define emR3ExecuteIOInstruction emR3HmExecuteIOInstruction
+#include "EMHandleRCTmpl.h"
+
+
+/**
+ * Executes instruction in HM mode if we can.
+ *
+ * This is somewhat comparable to REMR3EmulateInstruction.
+ *
+ * @returns VBox strict status code.
+ * @retval VINF_EM_DBG_STEPPED on success.
+ * @retval VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
+ * HM right now.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure for the calling EMT.
+ * @param fFlags Combinations of EM_ONE_INS_FLAGS_XXX.
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
+
+ if (!HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+ return VINF_EM_RESCHEDULE;
+
+ uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
+ for (;;)
+ {
+ /*
+ * Service necessary FFs before going into HM.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ {
+ VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu);
+ if (rcStrict != VINF_SUCCESS)
+ {
+ Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+ }
+ }
+
+ /*
+ * Go execute it.
+ */
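+    /* Enable single-instruction mode for this run, restoring the previous
+       setting afterwards (HMSetSingleInstruction returns the old value). */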
+ bool fOld = HMSetSingleInstruction(pVM, pVCpu, true);
+ VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
+ HMSetSingleInstruction(pVM, pVCpu, fOld);
+ LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+ /*
+     * Handle high priority FFs and informational status codes. We don't do
+     * normal FF processing; the caller or the next call can deal with them.
+ */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ {
+ rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
+ LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
+ {
+ rcStrict = emR3HmHandleRC(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+ Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ /*
+ * Done?
+ */
+ if ( (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
+ || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
+ || pVCpu->cpum.GstCtx.rip != uOldRip)
+ {
+ if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.rip != uOldRip)
+ rcStrict = VINF_EM_DBG_STEPPED;
+ Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pVCpu->cpum.GstCtx.rip));
+ CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
+ return rcStrict;
+ }
+ }
+}
+
+
+/**
+ * Executes one (or perhaps a few more) instruction(s).
+ *
+ * @returns VBox status code suitable for EM.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rcRC Return code from RC.
+ * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
+ * instruction and prefix the log output with this text.
+ */
+#if defined(LOG_ENABLED) || defined(DOXYGEN_RUNNING)
+static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
+#else
+static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
+#endif
+{
+ RT_NOREF(rcRC, pVM);
+
+#ifdef LOG_ENABLED
+ /*
+ * Log it.
+ */
+ Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
+ if (pszPrefix)
+ {
+ DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
+ }
+#endif
+
+ /*
+     * Use IEM and fall back on REM if the functionality is missing.
+     * Once IEM gets mature enough, nothing should ever fall back.
+ */
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
+ VBOXSTRICTRC rcStrict;
+ uint32_t idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
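+    /* idxContinueExitRec comes from exit-history state and is treated as
+       untrusted: the fences below keep a speculated out-of-bounds index from
+       being used before the range check is resolved (Spectre v1 style
+       hardening, as the RT_UNTRUSTED_* names suggest). */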
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
+ {
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+ rcStrict = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
+ }
+ else
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+ rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
+ LogFlow(("emR3HmExecuteInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);
+
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
+
+/**
+ * Executes one (or perhaps a few more) instruction(s).
+ * This is just a wrapper for discarding pszPrefix in non-logging builds.
+ *
+ * @returns VBox status code suitable for EM.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
+ * instruction and prefix the log output with this text.
+ * @param rcGC GC return code
+ */
+DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
+{
+#ifdef LOG_ENABLED
+ return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
+#else
+ RT_NOREF_PV(pszPrefix);
+ return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC);
+#endif
+}
+
+
+/**
+ * Executes one (or perhaps a few more) IO instruction(s).
+ *
+ * @returns VBox status code suitable for EM.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
+{
+ RT_NOREF(pVM);
+ STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
+
+ VBOXSTRICTRC rcStrict;
+ uint32_t idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
+ {
+ /*
+ * Hand it over to the interpreter.
+ */
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+ rcStrict = IEMExecOne(pVCpu);
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ else
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+ rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
+ STAM_COUNTER_INC(&pVCpu->em.s.StatIoRestarted);
+ }
+
+ STAM_COUNTER_INC(&pVCpu->em.s.StatIoIem);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
+
+/**
+ * Process HM specific forced actions.
+ *
+ * This function is called when any FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
+ * and/or VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
+ *
+ * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
+ * EM statuses.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Sync page directory.
+ */
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+ {
+ CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
+ Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
+ int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Prefetch pages for EIP and ESP. */
+ /** @todo This is rather expensive. Should investigate if it really helps at all. */
+ /** @todo this should be skipped! */
+ CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS);
+ rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVCpu, X86_SREG_CS, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rip));
+ if (rc == VINF_SUCCESS)
+ rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVCpu, X86_SREG_SS, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rsp));
+ if (rc != VINF_SUCCESS)
+ {
+ if (rc != VINF_PGM_SYNC_CR3)
+ {
+ AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
+ return rc;
+ }
+ rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ /** @todo maybe prefetch the supervisor stack page as well */
+ }
+
+ /*
+ * Allocate handy pages (just in case the above actions have consumed some pages).
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
+ {
+ int rc = PGMR3PhysAllocateHandyPages(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Check whether we're out of memory now.
+ *
+ * This may stem from some of the above actions or from operations that have
+ * been executed since we last ran the FFs. The handy page allocation, for
+ * instance, must always be followed by this check.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ return VINF_EM_NO_MEMORY;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
+ *
+ * This function contains the hardware-accelerated version of the inner
+ * execution loop (the outer loop being in EMR3ExecuteVM()).
+ *
+ * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
+ * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pfFFDone Where to store an indicator telling whether or not
+ * FFs were done before returning.
+ */
+int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+ int rc = VERR_IPE_UNINITIALIZED_STATUS;
+
+ LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+ *pfFFDone = false;
+
+ STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHMExecuteCalled);
+
+ /*
+     * Spin until a forced action or status code makes us leave (anything but VINF_SUCCESS).
+ */
+ for (;;)
+ {
+ STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHMEntry, a);
+
+ /* Check if a forced reschedule is pending. */
+ if (HMR3IsRescheduleRequired(pVM, &pVCpu->cpum.GstCtx))
+ {
+ rc = VINF_EM_RESCHEDULE;
+ break;
+ }
+
+ /*
+ * Process high priority pre-execution raw-mode FFs.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ {
+ rc = emR3HmForcedActions(pVM, pVCpu);
+ if (rc != VINF_SUCCESS)
+ break;
+ }
+
+#ifdef LOG_ENABLED
+ /*
+ * Log important stuff before entering GC.
+ */
+ if (TRPMHasTrap(pVCpu))
+ Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+
+ uint32_t cpl = CPUMGetGuestCPL(pVCpu);
+ if (pVM->cCpus == 1)
+ {
+ if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+ Log(("HWV86: %08X IF=%d\n", pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
+ Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ else
+ Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ }
+ else
+ {
+ if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+ Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
+ Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ else
+ Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ }
+#endif /* LOG_ENABLED */
+
+ /*
+ * Execute the code.
+ */
+ STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHMEntry, a);
+
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
+ {
+ STAM_REL_PROFILE_START(&pVCpu->em.s.StatHMExec, x);
+ rc = VMMR3HmRunGC(pVM, pVCpu);
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHMExec, x);
+ }
+ else
+ {
+ /* Give up this time slice; virtual time continues */
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
+ RTThreadSleep(5);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
+ rc = VINF_SUCCESS;
+ }
+
+
+ /*
+ * Deal with high priority post execution FFs before doing anything else.
+ */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
+
+ /*
+ * Process the returned status code.
+ */
+ if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
+ break;
+
+ rc = emR3HmHandleRC(pVM, pVCpu, rc);
+ if (rc != VINF_SUCCESS)
+ break;
+
+ /*
+ * Check and execute forced actions.
+ */
+#ifdef VBOX_HIGH_RES_TIMERS_HACK
+ TMTimerPollVoid(pVM, pVCpu);
+#endif
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_MASK))
+ {
+ rc = emR3ForcedActions(pVM, pVCpu, rc);
+ VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
+ if ( rc != VINF_SUCCESS
+ && rc != VINF_EM_RESCHEDULE_HM)
+ {
+ *pfFFDone = true;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Return to outer loop.
+ */
+#if defined(LOG_ENABLED) && defined(DEBUG)
+ RTLogFlush(NULL);
+#endif
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/EMR3Dbg.cpp b/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
new file mode 100644
index 00000000..71f8cad0
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
@@ -0,0 +1,350 @@
+/* $Id: EMR3Dbg.cpp $ */
+/** @file
+ * EM - Execution Monitor / Manager, Debugger Related Bits.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/dbg.h>
+#include "EMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <iprt/string.h>
+#include <iprt/ctype.h>
+
+
+/** @callback_method_impl{FNDBGCCMD,
+ * Implements the '.alliem' command. }
+ */
+static DECLCALLBACK(int) emR3DbgCmdAllIem(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ int rc;
+ bool f;
+
+ if (cArgs == 0)
+ {
+ rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "EMR3QueryExecutionPolicy(,EMEXECPOLICY_IEM_ALL,");
+ DBGCCmdHlpPrintf(pCmdHlp, f ? "alliem: enabled\n" : "alliem: disabled\n");
+ }
+ else
+ {
+ rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
+ rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "EMR3SetExecutionPolicy(,EMEXECPOLICY_IEM_ALL,%RTbool)", f);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/** Describes an optional boolean argument. */
+static DBGCVARDESC const g_BoolArg = { 0, 1, DBGCVAR_CAT_ANY, 0, "boolean", "Boolean value." };
+
+/** Commands. */
+static DBGCCMD const g_aCmds[] =
+{
+ {
+        "alliem", 0, 1, &g_BoolArg, 1, 0, emR3DbgCmdAllIem, "[boolean]",
+        "Enables or disables executing ALL code in IEM; when no argument is given, the current status is displayed."
+ },
+};
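+
+/* Illustrative debugger-console usage (boolean argument forms per
+   DBGCCmdHlpVarToBool, assumed):
+        alliem          - show whether the all-in-IEM policy is active
+        alliem 1        - execute ALL guest code in IEM
+        alliem 0        - return to normal execution scheduling */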
+
+
+/**
+ * Translates EMEXITTYPE into a name.
+ *
+ * @returns Pointer to read-only name, NULL if unknown type.
+ * @param enmExitType The exit type to name.
+ */
+VMM_INT_DECL(const char *) EMR3GetExitTypeName(EMEXITTYPE enmExitType)
+{
+ switch (enmExitType)
+ {
+ case EMEXITTYPE_INVALID: return "invalid";
+ case EMEXITTYPE_IO_PORT_READ: return "I/O port read";
+ case EMEXITTYPE_IO_PORT_WRITE: return "I/O port write";
+ case EMEXITTYPE_IO_PORT_STR_READ: return "I/O port string read";
+ case EMEXITTYPE_IO_PORT_STR_WRITE: return "I/O port string write";
+ case EMEXITTYPE_MMIO: return "MMIO access";
+ case EMEXITTYPE_MMIO_READ: return "MMIO read";
+ case EMEXITTYPE_MMIO_WRITE: return "MMIO write";
+ case EMEXITTYPE_MSR_READ: return "MSR read";
+ case EMEXITTYPE_MSR_WRITE: return "MSR write";
+ case EMEXITTYPE_CPUID: return "CPUID";
+ case EMEXITTYPE_RDTSC: return "RDTSC";
+ case EMEXITTYPE_MOV_CRX: return "MOV CRx";
+ case EMEXITTYPE_MOV_DRX: return "MOV DRx";
+ case EMEXITTYPE_VMREAD: return "VMREAD";
+ case EMEXITTYPE_VMWRITE: return "VMWRITE";
+
+ /* Raw-mode only: */
+ case EMEXITTYPE_INVLPG: return "INVLPG";
+ case EMEXITTYPE_LLDT: return "LLDT";
+ case EMEXITTYPE_RDPMC: return "RDPMC";
+ case EMEXITTYPE_CLTS: return "CLTS";
+ case EMEXITTYPE_STI: return "STI";
+ case EMEXITTYPE_INT: return "INT";
+ case EMEXITTYPE_SYSCALL: return "SYSCALL";
+ case EMEXITTYPE_SYSENTER: return "SYSENTER";
+ case EMEXITTYPE_HLT: return "HLT";
+ }
+ return NULL;
+}
+
+
+/**
+ * Translates flags+type into an exit name.
+ *
+ * @returns Exit name.
+ * @param uFlagsAndType The exit to name.
+ * @param pszFallback Buffer for formatting a numeric fallback.
+ * @param cbFallback Size of fallback buffer.
+ */
+static const char *emR3HistoryGetExitName(uint32_t uFlagsAndType, char *pszFallback, size_t cbFallback)
+{
+ const char *pszExitName;
+ switch (uFlagsAndType & EMEXIT_F_KIND_MASK)
+ {
+ case EMEXIT_F_KIND_EM:
+ pszExitName = EMR3GetExitTypeName((EMEXITTYPE)(uFlagsAndType & EMEXIT_F_TYPE_MASK));
+ break;
+
+ case EMEXIT_F_KIND_VMX:
+ pszExitName = HMGetVmxExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
+ break;
+
+ case EMEXIT_F_KIND_SVM:
+ pszExitName = HMGetSvmExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
+ break;
+
+ case EMEXIT_F_KIND_NEM:
+ pszExitName = NEMR3GetExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
+ break;
+
+ case EMEXIT_F_KIND_XCPT:
+ switch (uFlagsAndType & EMEXIT_F_TYPE_MASK)
+ {
+ case X86_XCPT_DE: return "Xcpt #DE";
+ case X86_XCPT_DB: return "Xcpt #DB";
+ case X86_XCPT_NMI: return "Xcpt #NMI";
+ case X86_XCPT_BP: return "Xcpt #BP";
+ case X86_XCPT_OF: return "Xcpt #OF";
+ case X86_XCPT_BR: return "Xcpt #BR";
+ case X86_XCPT_UD: return "Xcpt #UD";
+ case X86_XCPT_NM: return "Xcpt #NM";
+ case X86_XCPT_DF: return "Xcpt #DF";
+ case X86_XCPT_CO_SEG_OVERRUN: return "Xcpt #CO_SEG_OVERRUN";
+ case X86_XCPT_TS: return "Xcpt #TS";
+ case X86_XCPT_NP: return "Xcpt #NP";
+ case X86_XCPT_SS: return "Xcpt #SS";
+ case X86_XCPT_GP: return "Xcpt #GP";
+ case X86_XCPT_PF: return "Xcpt #PF";
+ case X86_XCPT_MF: return "Xcpt #MF";
+ case X86_XCPT_AC: return "Xcpt #AC";
+ case X86_XCPT_MC: return "Xcpt #MC";
+ case X86_XCPT_XF: return "Xcpt #XF";
+ case X86_XCPT_VE: return "Xcpt #VE";
+ case X86_XCPT_SX: return "Xcpt #SX";
+ default:
+ pszExitName = NULL;
+ break;
+ }
+ break;
+
+ default:
+ AssertFailed();
+ pszExitName = NULL;
+ break;
+ }
+ if (pszExitName)
+ return pszExitName;
+ RTStrPrintf(pszFallback, cbFallback, "%#06x", uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_TYPE_MASK));
+ return pszFallback;
+}
+
+
+/**
+ * Displays the VM-exit history.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param   pszArgs     Arguments: an optional number of entries to dump
+ *                      and/or one of 'asc', 'ascending' or 'reverse'.
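+ *
+ * @remarks Example (assumed invocation syntax): 'info exithistory 16 asc'
+ *          dumps the 16 most recent exits, oldest first.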
+ */
+static DECLCALLBACK(void) emR3InfoExitHistory(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+
+ /*
+ * Figure out target cpu and parse arguments.
+ */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+ bool fReverse = true;
+ uint32_t cLeft = RT_ELEMENTS(pVCpu->em.s.aExitHistory);
+
+ while (pszArgs && *pszArgs)
+ {
+ pszArgs = RTStrStripL(pszArgs);
+ if (!*pszArgs)
+ break;
+ if (RT_C_IS_DIGIT(*pszArgs))
+ {
+ /* The number to dump. */
+ uint32_t uValue = cLeft;
+ RTStrToUInt32Ex(pszArgs, (char **)&pszArgs, 0, &uValue);
+ if (uValue > 0)
+ cLeft = RT_MIN(uValue, RT_ELEMENTS(pVCpu->em.s.aExitHistory));
+ }
+ else if (RTStrCmp(pszArgs, "reverse") == 0)
+ {
+ pszArgs += 7;
+ fReverse = true;
+ }
+ else if (RTStrCmp(pszArgs, "ascending") == 0)
+ {
+ pszArgs += 9;
+ fReverse = false;
+ }
+ else if (RTStrCmp(pszArgs, "asc") == 0)
+ {
+ pszArgs += 3;
+ fReverse = false;
+ }
+ else
+ {
+ const char *pszStart = pszArgs;
+ while (*pszArgs && !RT_C_IS_SPACE(*pszArgs))
+ pszArgs++;
+            pHlp->pfnPrintf(pHlp, "Unknown option: %.*s\n", pszArgs - pszStart, pszStart);
+ }
+ }
+
+ /*
+ * Do the job.
+ */
+ uint64_t idx = pVCpu->em.s.iNextExit;
+ if (idx == 0)
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: VM-exit history: empty\n", pVCpu->idCpu);
+ else
+ {
+ /*
+ * Print header.
+ */
+ pHlp->pfnPrintf(pHlp,
+ "CPU[%u]: VM-exit history:\n"
+ " Exit No.: TSC timestamp / delta RIP (Flat/*) Exit Name\n"
+ , pVCpu->idCpu);
+
+ /*
+ * Adjust bounds if ascending order.
+ */
+ if (!fReverse)
+ {
+ if (idx > cLeft)
+ idx -= cLeft;
+ else
+ {
+ cLeft = idx;
+ idx = 0;
+ }
+ }
+
+ /*
+ * Print the entries.
+ */
+ uint64_t uPrevTimestamp = 0;
+ do
+ {
+ if (fReverse)
+ idx -= 1;
+ PCEMEXITENTRY const pEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)idx & 0xff];
+
+ /* Get the exit name. */
+ char szExitName[16];
+ const char *pszExitName = emR3HistoryGetExitName(pEntry->uFlagsAndType, szExitName, sizeof(szExitName));
+
+ /* Calc delta (negative if reverse order, positive ascending). */
+ int64_t offDelta = uPrevTimestamp != 0 && pEntry->uTimestamp != 0 ? pEntry->uTimestamp - uPrevTimestamp : 0;
+ uPrevTimestamp = pEntry->uTimestamp;
+
+ char szPC[32];
+ if (!(pEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)))
+ RTStrPrintf(szPC, sizeof(szPC), "%016RX64 ", pEntry->uFlatPC);
+ else if (pEntry->uFlagsAndType & EMEXIT_F_UNFLATTENED_PC)
+ RTStrPrintf(szPC, sizeof(szPC), "%016RX64*", pEntry->uFlatPC);
+ else
+ RTStrPrintf(szPC, sizeof(szPC), "%04x:%08RX32* ", (uint32_t)(pEntry->uFlatPC >> 32), (uint32_t)pEntry->uFlatPC);
+
+ /* Do the printing. */
+ if (pEntry->idxSlot == UINT32_MAX)
+ pHlp->pfnPrintf(pHlp, " %10RU64: %#018RX64/%+-9RI64 %s %#07x %s\n",
+ idx, pEntry->uTimestamp, offDelta, szPC, pEntry->uFlagsAndType, pszExitName);
+ else
+ {
+ /** @todo more on this later */
+ pHlp->pfnPrintf(pHlp, " %10RU64: %#018RX64/%+-9RI64 %s %#07x %s slot=%#x\n",
+ idx, pEntry->uTimestamp, offDelta, szPC, pEntry->uFlagsAndType, pszExitName, pEntry->idxSlot);
+ }
+
+ /* Advance if ascending. */
+ if (!fReverse)
+ idx += 1;
+ } while (--cLeft > 0 && idx > 0);
+ }
+}
+
+
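+/**
+ * Initializes the debug related bits of the EM state: registers the VM-exit
+ * history info handlers and, when the debugger is compiled in, the EM
+ * debugger commands.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */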
+int emR3InitDbg(PVM pVM)
+{
+ /*
+ * Register info dumpers.
+ */
+ const char *pszExitsDesc = "Dumps the VM-exit history. Arguments: Number of entries; 'asc', 'ascending' or 'reverse'.";
+ int rc = DBGFR3InfoRegisterInternalEx(pVM, "exits", pszExitsDesc, emR3InfoExitHistory, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertLogRelRCReturn(rc, rc);
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "exithistory", pszExitsDesc, emR3InfoExitHistory, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertLogRelRCReturn(rc, rc);
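+ /* These handlers can be invoked from the debugger console, e.g. 'info exits
+ 25 asc', or from the host shell along the lines of (sketched invocation,
+ hypothetical VM name): VBoxManage debugvm "myvm" info exithistory. */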
+
+#ifdef VBOX_WITH_DEBUGGER
+ /*
+ * Register debugger commands.
+ */
+ rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ AssertLogRelRCReturn(rc, rc);
+#endif
+
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/EMR3Nem.cpp b/src/VBox/VMM/VMMR3/EMR3Nem.cpp
new file mode 100644
index 00000000..cc977299
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/EMR3Nem.cpp
@@ -0,0 +1,487 @@
+/* $Id: EMR3Nem.cpp $ */
+/** @file
+ * EM - Execution Monitor / Manager - NEM interface.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmqueue.h>
+#include "EMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpumdis.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/err.h>
+#include <VBox/vmm/dbgf.h>
+#include "VMMTracing.h"
+
+#include <iprt/asm.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, int rc);
+DECLINLINE(int) emR3NemExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
+static int emR3NemExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
+static int emR3NemForcedActions(PVM pVM, PVMCPU pVCpu);
+
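+/* Instantiate the shared status-code handling template (EMHandleRCTmpl.h) for
+ NEM: EMHANDLERC_WITH_NEM selects the NEM paths and the defines below map the
+ template's executor names onto the local NEM implementations before the
+ textual inclusion. */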
+#define EMHANDLERC_WITH_NEM
+#define emR3ExecuteInstruction emR3NemExecuteInstruction
+#define emR3ExecuteIOInstruction emR3NemExecuteIOInstruction
+#include "EMHandleRCTmpl.h"
+
+
+/**
+ * Executes instruction in NEM mode if we can.
+ *
+ * This is somewhat comparable to REMR3EmulateInstruction.
+ *
+ * @returns VBox strict status code.
+ * @retval VINF_EM_DBG_STEPPED on success.
+ * @retval VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
+ * NEM right now.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure for the calling EMT.
+ * @param fFlags Combinations of EM_ONE_INS_FLAGS_XXX.
+ * @thread EMT.
+ */
+VBOXSTRICTRC emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
+
+ if (!NEMR3CanExecuteGuest(pVM, pVCpu))
+ return VINF_EM_RESCHEDULE;
+
+ uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
+ for (;;)
+ {
+ /*
+ * Service necessary FFs before going into HM.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ {
+ VBOXSTRICTRC rcStrict = emR3NemForcedActions(pVM, pVCpu);
+ if (rcStrict != VINF_SUCCESS)
+ {
+ Log(("emR3NemSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+ }
+ }
+
+ /*
+ * Go execute it.
+ */
+ bool fOld = NEMR3SetSingleInstruction(pVM, pVCpu, true);
+ VBOXSTRICTRC rcStrict = NEMR3RunGC(pVM, pVCpu);
+ NEMR3SetSingleInstruction(pVM, pVCpu, fOld);
+ LogFlow(("emR3NemSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+ /*
+ * Handle high priority FFs and informational status codes. We don't do
+ * normal FF processing; the caller or the next call can deal with them.
+ */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ {
+ rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
+ LogFlow(("emR3NemSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
+ {
+ rcStrict = emR3NemHandleRC(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+ Log(("emR3NemSingleInstruction: emR3NemHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ /*
+ * Done?
+ */
+ CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
+ if ( (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
+ || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
+ || pVCpu->cpum.GstCtx.rip != uOldRip)
+ {
+ if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.rip != uOldRip)
+ rcStrict = VINF_EM_DBG_STEPPED;
+ Log(("emR3NemSingleInstruction: returns %Rrc (rip %llx -> %llx)\n",
+ VBOXSTRICTRC_VAL(rcStrict), uOldRip, pVCpu->cpum.GstCtx.rip));
+ CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
+ return rcStrict;
+ }
+ }
+}
+
+
+/**
+ * Executes one (or perhaps a few more) instruction(s).
+ *
+ * @returns VBox status code suitable for EM.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rcRC Return code from RC.
+ * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
+ * instruction and prefix the log output with this text.
+ */
+#if defined(LOG_ENABLED) || defined(DOXYGEN_RUNNING)
+static int emR3NemExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
+#else
+static int emR3NemExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
+#endif
+{
+ NOREF(rcRC);
+
+#ifdef LOG_ENABLED
+ /*
+ * Log it.
+ */
+ Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
+ if (pszPrefix)
+ {
+ DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
+ }
+#endif
+
+ /*
+ * Use IEM and fall back on REM if the functionality is missing.
+ * Once IEM gets mature enough, nothing should ever fall back.
+ */
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
+
+ VBOXSTRICTRC rcStrict;
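+ /* The continue-exit-record index is derived from guest-influenced state, so
+ it is treated as untrusted: copy it, fence, bounds-check, and fence again
+ after validation to harden against speculative out-of-bounds use. */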
+ uint32_t idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
+ {
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+ rcStrict = IEMExecOne(pVCpu);
+ }
+ else
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+ rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
+ LogFlow(("emR3NemExecuteInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);
+
+ NOREF(pVM);
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
+
+/**
+ * Executes one (or perhaps a few more) instruction(s).
+ * This is just a wrapper for discarding pszPrefix in non-logging builds.
+ *
+ * @returns VBox status code suitable for EM.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
+ * instruction and prefix the log output with this text.
+ * @param rcGC GC return code.
+ */
+DECLINLINE(int) emR3NemExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
+{
+#ifdef LOG_ENABLED
+ return emR3NemExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
+#else
+ RT_NOREF_PV(pszPrefix);
+ return emR3NemExecuteInstructionWorker(pVM, pVCpu, rcGC);
+#endif
+}
+
+
+/**
+ * Executes one (or perhaps a few more) IO instruction(s).
+ *
+ * @returns VBox status code suitable for EM.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3NemExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
+{
+ RT_NOREF_PV(pVM);
+ STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
+
+ /*
+ * Hand it over to the interpreter.
+ */
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+ VBOXSTRICTRC rcStrict;
+ uint32_t idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
+ {
+ rcStrict = IEMExecOne(pVCpu);
+ LogFlow(("emR3NemExecuteIOInstruction: %Rrc (IEMExecOne)\n", VBOXSTRICTRC_VAL(rcStrict)));
+ STAM_COUNTER_INC(&pVCpu->em.s.StatIoIem);
+ }
+ else
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+ rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
+ LogFlow(("emR3NemExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
+ STAM_COUNTER_INC(&pVCpu->em.s.StatIoRestarted);
+ }
+
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
+
+/**
+ * Process NEM specific forced actions.
+ *
+ * This function is called when any FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
+ * and/or VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
+ *
+ * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
+ * EM statuses.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int emR3NemForcedActions(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Sync page directory should not happen in NEM mode.
+ */
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+ {
+ Log(("NEM: TODO: Make VMCPU_FF_PGM_SYNC_CR3 / VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL quiet! (%#RX64)\n", (uint64_t)pVCpu->fLocalForcedActions));
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+ }
+
+ /*
+ * Allocate handy pages (just in case the above actions have consumed some pages).
+ */
+ if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
+ {
+ int rc = PGMR3PhysAllocateHandyPages(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Check whether we're out of memory now.
+ *
+ * This may stem from some of the above actions or operations that have been
+ * executed since we ran FFs. The handy page allocation must, for instance,
+ * always be followed by this check.
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
+ return VINF_EM_NO_MEMORY;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Executes guest code using the native execution manager (NEM).
+ *
+ * This function contains the NEM version of the inner
+ * execution loop (the outer loop being in EMR3ExecuteVM()).
+ *
+ * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
+ * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pfFFDone Where to store an indicator telling whether or not
+ * FFs were done before returning.
+ */
+VBOXSTRICTRC emR3NemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+ VBOXSTRICTRC rcStrict = VERR_IPE_UNINITIALIZED_STATUS;
+
+ LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+ *pfFFDone = false;
+
+ STAM_REL_COUNTER_INC(&pVCpu->em.s.StatNEMExecuteCalled);
+
+ /*
+ * Spin till we get a forced action which returns anything but VINF_SUCCESS.
+ */
+ for (;;)
+ {
+ STAM_PROFILE_ADV_START(&pVCpu->em.s.StatNEMEntry, a);
+
+ /*
+ * Check that we can execute in NEM mode.
+ */
+ if (NEMR3CanExecuteGuest(pVM, pVCpu))
+ { /* likely */ }
+ else
+ {
+ rcStrict = VINF_EM_RESCHEDULE_REM;
+ break;
+ }
+
+ /*
+ * Process high priority pre-execution raw-mode FFs.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ {
+ rcStrict = emR3NemForcedActions(pVM, pVCpu);
+ if (rcStrict != VINF_SUCCESS)
+ break;
+ }
+
+#ifdef LOG_ENABLED
+ /*
+ * Log important stuff before entering GC.
+ */
+ if (TRPMHasTrap(pVCpu))
+ Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+
+ if (!(pVCpu->cpum.GstCtx.fExtrn & ( CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
+ | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER)))
+ {
+ uint32_t cpl = CPUMGetGuestCPL(pVCpu);
+ if (pVM->cCpus == 1)
+ {
+ if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+ Log(("NEMV86: %08x IF=%d\n", pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
+ Log(("NEMR%d: %04x:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ else
+ Log(("NEMR%d: %04x:%08x ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ }
+ else
+ {
+ if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+ Log(("NEMV86-CPU%d: %08x IF=%d\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
+ Log(("NEMR%d-CPU%d: %04x:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ else
+ Log(("NEMR%d-CPU%d: %04x:%08x ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
+ }
+ }
+ else if (pVM->cCpus == 1)
+ Log(("NEMRx: -> NEMR3RunGC\n"));
+ else
+ Log(("NEMRx-CPU%u: -> NEMR3RunGC\n", pVCpu->idCpu));
+#endif /* LOG_ENABLED */
+
+ /*
+ * Execute the code.
+ */
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
+ {
+ STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatNEMEntry, a);
+ STAM_REL_PROFILE_START(&pVCpu->em.s.StatNEMExec, x);
+ rcStrict = NEMR3RunGC(pVM, pVCpu);
+ STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatNEMExec, x);
+ }
+ else
+ {
+ /* Give up this time slice; virtual time continues */
+ STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatNEMEntry, a);
+ STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
+ RTThreadSleep(5);
+ STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
+ rcStrict = VINF_SUCCESS;
+ }
+
+ /*
+ * Deal with high priority post execution FFs before doing anything else.
+ */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
+
+ /*
+ * Process the returned status code.
+ */
+ if (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
+ break;
+
+ rcStrict = emR3NemHandleRC(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+ if (rcStrict != VINF_SUCCESS)
+ break;
+
+ /*
+ * Check and execute forced actions.
+ */
+#ifdef VBOX_HIGH_RES_TIMERS_HACK
+ TMTimerPollVoid(pVM, pVCpu);
+#endif
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_MASK))
+ {
+ rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+ VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
+ if ( rcStrict != VINF_SUCCESS
+ && rcStrict != VINF_EM_RESCHEDULE_HM)
+ {
+ *pfFFDone = true;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Return to outer loop, making sure we fetch all state as we leave.
+ *
+ * Note! Not using CPUM_IMPORT_EXTRN_RET here, to prioritize an rcStrict error
+ * status over import errors.
+ */
+ if (pVCpu->cpum.GstCtx.fExtrn)
+ {
+ int rcImport = NEMImportStateOnDemand(pVCpu, pVCpu->cpum.GstCtx.fExtrn);
+ AssertReturn(RT_SUCCESS(rcImport) || RT_FAILURE_NP(rcStrict), rcImport);
+ }
+#if defined(LOG_ENABLED) && defined(DEBUG)
+ RTLogFlush(NULL);
+#endif
+ return rcStrict;
+}
+
diff --git a/src/VBox/VMM/VMMR3/GCM.cpp b/src/VBox/VMM/VMMR3/GCM.cpp
new file mode 100644
index 00000000..e2077d1e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GCM.cpp
@@ -0,0 +1,258 @@
+/* $Id: GCM.cpp $ */
+/** @file
+ * GCM - Guest Compatibility Manager.
+ */
+
+/*
+ * Copyright (C) 2022-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_gcm GCM - The Guest Compatibility Manager
+ *
+ * The Guest Compatibility Manager provides run-time compatibility fixes
+ * for certain known guest bugs.
+ *
+ * @see grp_gcm
+ *
+ *
+ * @section sec_gcm_fixer Fixers
+ *
+ * A GCM fixer implements a collection of run-time helpers/patches suitable for
+ * a specific guest type. Several fixers can be active at the same time; for
+ * example, OS/2 or Windows 9x need their own fixers, but can also run DOS
+ * applications which need DOS-specific fixers.
+ *
+ * The concept of fixers exists to reduce the number of false positives to a
+ * minimum. Heuristics are used to decide whether a particular fix should be
+ * applied or not; restricting the number of applicable fixes minimizes the
+ * chance that a fix could be misapplied.
+ *
+ * The fixers are invisible to a guest. A common problem is division by zero
+ * caused by a software timing loop which cannot deal with fast CPUs (where
+ * "fast" very much depends on the era when the software was written). A fixer
+ * intercepts division by zero, recognizes known register contents and code
+ * sequences, modifies one or more registers to avoid a divide error, and
+ * restarts the instruction.
+ *
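+ * By way of illustration, such a fixer boils down to logic of roughly this
+ * shape (a minimal sketch with hypothetical names, not the actual fixer
+ * code):
+ *
+ * @code
+ * VBOXSTRICTRC gcmSketchXcptDE(PVMCPU pVCpu, PCPUMCTX pCtx)
+ * {
+ * // Match a known-buggy timing loop, e.g. a 'div cx' whose divisor
+ * // wrapped to zero because the host CPU is too fast for the loop.
+ * if (gcmSketchIsKnownPattern(pCtx))
+ * {
+ * pCtx->rcx |= 1; // Force a non-zero divisor...
+ * return VINF_SUCCESS; // ...and restart the faulting instruction.
+ * }
+ * return VERR_NOT_FOUND; // Unknown code sequence: deliver \#DE as usual.
+ * }
+ * @endcode
+ *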
+ * It is not expected that the set of active fixers would be changed during
+ * the lifetime of the VM.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GIM
+#include <VBox/vmm/gcm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmdev.h>
+#include "GCMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/log.h>
+
+#include <iprt/err.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNSSMINTSAVEEXEC gcmR3Save;
+static FNSSMINTLOADEXEC gcmR3Load;
+
+
+/**
+ * Initializes the GCM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) GCMR3Init(PVM pVM)
+{
+ LogFlow(("GCMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompile(sizeof(pVM->gcm.s) <= sizeof(pVM->gcm.padding));
+
+ /*
+ * Register the saved state data unit.
+ */
+ int rc = SSMR3RegisterInternal(pVM, "GCM", 0 /* uInstance */, GCM_SAVED_STATE_VERSION, sizeof(GCM),
+ NULL /* pfnLivePrep */, NULL /* pfnLiveExec */, NULL /* pfnLiveVote*/,
+ NULL /* pfnSavePrep */, gcmR3Save, NULL /* pfnSaveDone */,
+ NULL /* pfnLoadPrep */, gcmR3Load, NULL /* pfnLoadDone */);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "GCM/");
+
+ /*
+ * Validate the GCM settings.
+ */
+ rc = CFGMR3ValidateConfig(pCfgNode, "/GCM/", /* pszNode */
+ "FixerSet", /* pszValidValues */
+ "", /* pszValidNodes */
+ "GCM", /* pszWho */
+ 0); /* uInstance */
+ if (RT_FAILURE(rc))
+ return rc;
+
+#if 1
+ /** @cfgm{/GCM/FixerSet, uint32_t, 0}
+ * The set (bit mask) of enabled fixers. See GCMFIXERID.
+ */
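+ /* Host-side note: this is typically set through extradata, sketched (with a
+ hypothetical VM name) as: VBoxManage setextradata "myvm"
+ VBoxInternal/GCM/FixerSet 3 (VBoxInternal/ extradata keys are injected
+ into the CFGM tree this code reads). */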
+ uint32_t u32FixerIds;
+ rc = CFGMR3QueryU32Def(pCfgNode, "FixerSet", &u32FixerIds, 0);
+ AssertRCReturn(rc, rc);
+
+ /* Check for unknown bits. */
+ uint32_t u32BadBits = u32FixerIds & ~(GCMFIXER_DBZ_DOS | GCMFIXER_DBZ_OS2 | GCMFIXER_DBZ_WIN9X);
+
+ if (u32BadBits)
+ return VMR3SetError(pVM->pUVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS,
+ "Unsupported GCM fixer bits (%#x) set.", u32BadBits);
+ pVM->gcm.s.enmFixerIds = u32FixerIds;
+#else
+ pVM->gcm.s.enmFixerIds = GCMFIXER_DBZ_OS2 | GCMFIXER_DBZ_DOS | GCMFIXER_DBZ_WIN9X;
+#endif
+ LogRel(("GCM: Initialized (fixer bits: %#x)\n", pVM->gcm.s.enmFixerIds));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Finalize the GCM initialization.
+ *
+ * This is called after initializing HM and most other VMM components.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) GCMR3InitCompleted(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTSAVEEXEC}
+ */
+static DECLCALLBACK(int) gcmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pSSM, VERR_SSM_INVALID_STATE);
+
+ int rc = VINF_SUCCESS;
+
+ /*
+ * Save per-VM data.
+ */
+ SSMR3PutU32(pSSM, pVM->gcm.s.enmFixerIds);
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADEXEC}
+ */
+static DECLCALLBACK(int) gcmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ if (uPass != SSM_PASS_FINAL)
+ return VINF_SUCCESS;
+ if (uVersion != GCM_SAVED_STATE_VERSION)
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+
+ int rc;
+
+ /*
+ * Load per-VM data.
+ */
+ uint32_t uFixerIds;
+
+ rc = SSMR3GetU32(pSSM, &uFixerIds);
+ AssertRCReturn(rc, rc);
+
+ if ((GCMFIXERID)uFixerIds != pVM->gcm.s.enmFixerIds)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Saved GCM fixer set %#X differs from the configured one (%#X)."),
+ uFixerIds, pVM->gcm.s.enmFixerIds);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the GCM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is, at this point, powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) GCMR3Term(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) GCMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ RT_NOREF(pVM);
+ RT_NOREF(offDelta);
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * Do whatever fixer-specific resetting that needs to be done.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) GCMR3Reset(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
diff --git a/src/VBox/VMM/VMMR3/GIM.cpp b/src/VBox/VMM/VMMR3/GIM.cpp
new file mode 100644
index 00000000..9e9a40f9
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GIM.cpp
@@ -0,0 +1,705 @@
+/* $Id: GIM.cpp $ */
+/** @file
+ * GIM - Guest Interface Manager.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_gim GIM - The Guest Interface Manager
+ *
+ * The Guest Interface Manager abstracts an interface provider through which
+ * guests may interact with the hypervisor.
+ *
+ * @see grp_gim
+ *
+ *
+ * @section sec_gim_provider Providers
+ *
+ * A GIM provider implements a particular hypervisor interface such as Microsoft
+ * Hyper-V, Linux KVM and so on. It hooks into various components in the VMM to
+ * ease the guest in running under a recognized, virtualized environment.
+ *
+ * The GIM provider configured for the VM needs to be recognized by the guest OS
+ * in order to make use of features supported by the interface. Since it
+ * requires co-operation from the guest OS, a GIM provider may also be referred to
+ * as a paravirtualization interface.
+ *
+ * One of the goals of having a paravirtualized interface is for enabling guests
+ * to be more accurate and efficient when operating in a virtualized
+ * environment. For instance, a guest OS which interfaces to VirtualBox through
+ * a GIM provider may rely on the provider for supplying the correct TSC
+ * frequency of the host processor. The guest can then avoid calibrating the
+ * TSC itself, resulting in higher accuracy and better performance.
+ *
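+ * As an illustration of the guest side of this handshake, guests typically
+ * probe for the active provider through the hypervisor CPUID leaves
+ * (simplified sketch; real guests validate further leaves as well):
+ *
+ * @code
+ * uint32_t uEax, uEbx, uEcx, uEdx;
+ * ASMCpuId(0x40000000, &uEax, &uEbx, &uEcx, &uEdx);
+ * char szSig[13];
+ * memcpy(&szSig[0], &uEbx, 4); // The vendor signature is returned in
+ * memcpy(&szSig[4], &uEcx, 4); // EBX:ECX:EDX, e.g. "Microsoft Hv" for
+ * memcpy(&szSig[8], &uEdx, 4); // Hyper-V or "KVMKVMKVM" for KVM.
+ * szSig[12] = '\0';
+ * @endcode
+ *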
+ * At most one GIM provider can be active for a running VM, and the provider
+ * cannot be changed during the lifetime of the VM.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GIM
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmdev.h>
+#include "GIMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/log.h>
+
+#include <iprt/err.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+
+/* Include all GIM providers. */
+#include "GIMMinimalInternal.h"
+#include "GIMHvInternal.h"
+#include "GIMKvmInternal.h"
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNSSMINTSAVEEXEC gimR3Save;
+static FNSSMINTLOADEXEC gimR3Load;
+static FNSSMINTLOADDONE gimR3LoadDone;
+
+
+/**
+ * Initializes the GIM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) GIMR3Init(PVM pVM)
+{
+ LogFlow(("GIMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompile(sizeof(pVM->gim.s) <= sizeof(pVM->gim.padding));
+ AssertCompile(sizeof(pVM->apCpusR3[0]->gim.s) <= sizeof(pVM->apCpusR3[0]->gim.padding));
+
+ /*
+ * Initialize members.
+ */
+ pVM->gim.s.hSemiReadOnlyMmio2Handler = NIL_PGMPHYSHANDLERTYPE;
+
+ /*
+ * Register the saved state data unit.
+ */
+ int rc = SSMR3RegisterInternal(pVM, "GIM", 0 /* uInstance */, GIM_SAVED_STATE_VERSION, sizeof(GIM),
+ NULL /* pfnLivePrep */, NULL /* pfnLiveExec */, NULL /* pfnLiveVote*/,
+ NULL /* pfnSavePrep */, gimR3Save, NULL /* pfnSaveDone */,
+ NULL /* pfnLoadPrep */, gimR3Load, gimR3LoadDone);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "GIM/");
+
+ /*
+ * Validate the GIM settings.
+ */
+ rc = CFGMR3ValidateConfig(pCfgNode, "/GIM/", /* pszNode */
+ "Provider" /* pszValidValues */
+ "|Version",
+ "HyperV", /* pszValidNodes */
+ "GIM", /* pszWho */
+ 0); /* uInstance */
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /** @cfgm{/GIM/Provider, string}
+ * The name of the GIM provider. The default is "None". Valid values are
+ * "None", "Minimal", "HyperV" and "KVM"; frontends typically derive this from
+ * the VM's paravirtualization provider setting. */
+ char szProvider[64];
+ rc = CFGMR3QueryStringDef(pCfgNode, "Provider", szProvider, sizeof(szProvider), "None");
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/GIM/Version, uint32_t}
+ * The interface version. The default is 0, which means "provide the most
+ * up-to-date implementation". */
+ uint32_t uVersion;
+ rc = CFGMR3QueryU32Def(pCfgNode, "Version", &uVersion, 0 /* default */);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Setup the GIM provider for this VM.
+ */
+ LogRel(("GIM: Using provider '%s' (Implementation version: %u)\n", szProvider, uVersion));
+ if (!RTStrCmp(szProvider, "None"))
+ pVM->gim.s.enmProviderId = GIMPROVIDERID_NONE;
+ else
+ {
+ pVM->gim.s.u32Version = uVersion;
+ /** @todo r=bird: Because u32Version is saved, it should be translated to the
+ * 'most up-to-date implementation' version number when 0. Otherwise,
+ * we'll have ambiguities when loading the state of older VMs. */
+ if (!RTStrCmp(szProvider, "Minimal"))
+ {
+ pVM->gim.s.enmProviderId = GIMPROVIDERID_MINIMAL;
+ rc = gimR3MinimalInit(pVM);
+ }
+ else if (!RTStrCmp(szProvider, "HyperV"))
+ {
+ pVM->gim.s.enmProviderId = GIMPROVIDERID_HYPERV;
+ rc = gimR3HvInit(pVM, pCfgNode);
+ }
+ else if (!RTStrCmp(szProvider, "KVM"))
+ {
+ pVM->gim.s.enmProviderId = GIMPROVIDERID_KVM;
+ rc = gimR3KvmInit(pVM);
+ }
+ else
+ rc = VMR3SetError(pVM->pUVM, VERR_GIM_INVALID_PROVIDER, RT_SRC_POS, "Provider '%s' unknown.", szProvider);
+ }
+
+ /*
+ * Statistics.
+ */
+ STAM_REL_REG_USED(pVM, &pVM->gim.s.StatDbgXmit, STAMTYPE_COUNTER, "/GIM/Debug/Transmit", STAMUNIT_OCCURENCES, "Debug packets sent.");
+ STAM_REL_REG_USED(pVM, &pVM->gim.s.StatDbgXmitBytes, STAMTYPE_COUNTER, "/GIM/Debug/TransmitBytes", STAMUNIT_OCCURENCES, "Debug bytes sent.");
+ STAM_REL_REG_USED(pVM, &pVM->gim.s.StatDbgRecv, STAMTYPE_COUNTER, "/GIM/Debug/Receive", STAMUNIT_OCCURENCES, "Debug packets received.");
+ STAM_REL_REG_USED(pVM, &pVM->gim.s.StatDbgRecvBytes, STAMTYPE_COUNTER, "/GIM/Debug/ReceiveBytes", STAMUNIT_OCCURENCES, "Debug bytes received.");
+
+ STAM_REL_REG_USED(pVM, &pVM->gim.s.StatHypercalls, STAMTYPE_COUNTER, "/GIM/Hypercalls", STAMUNIT_OCCURENCES, "Number of hypercalls initiated.");
+ return rc;
+}
+
+
+/**
+ * Initializes the remaining bits of the GIM provider.
+ *
+ * This is called after initializing HM and most other VMM components.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) GIMR3InitCompleted(PVM pVM)
+{
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_MINIMAL:
+ return gimR3MinimalInitCompleted(pVM);
+
+ case GIMPROVIDERID_HYPERV:
+ return gimR3HvInitCompleted(pVM);
+
+ case GIMPROVIDERID_KVM:
+ return gimR3KvmInitCompleted(pVM);
+
+ default:
+ break;
+ }
+
+ if (!TMR3CpuTickIsFixedRateMonotonic(pVM, true /* fWithParavirtEnabled */))
+ LogRel(("GIM: Warning!!! Host TSC is unstable. The guest may behave unpredictably with a paravirtualized clock.\n"));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTSAVEEXEC}
+ */
+static DECLCALLBACK(int) gimR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pSSM, VERR_SSM_INVALID_STATE);
+
+ int rc = VINF_SUCCESS;
+#if 0
+ /* Save per-CPU data. */
+ SSMR3PutU32(pSSM, pVM->cCpus);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ rc = SSMR3PutXYZ(pSSM, pVCpu->gim.s.XYZ);
+ }
+#endif
+
+ /*
+ * Save per-VM data.
+ */
+ SSMR3PutU32(pSSM, pVM->gim.s.enmProviderId);
+ SSMR3PutU32(pSSM, pVM->gim.s.u32Version);
+
+ /*
+ * Save provider-specific data.
+ */
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ rc = gimR3HvSave(pVM, pSSM);
+ AssertRCReturn(rc, rc);
+ break;
+
+ case GIMPROVIDERID_KVM:
+ rc = gimR3KvmSave(pVM, pSSM);
+ AssertRCReturn(rc, rc);
+ break;
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADEXEC}
+ */
+static DECLCALLBACK(int) gimR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ if (uPass != SSM_PASS_FINAL)
+ return VINF_SUCCESS;
+ if (uVersion != GIM_SAVED_STATE_VERSION)
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+
+ int rc;
+#if 0
+ /* Load per-CPU data. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ rc = SSMR3GetXYZ(pSSM, &pVCpu->gim.s.XYZ);
+ }
+#endif
+
+ /*
+ * Load per-VM data.
+ */
+ uint32_t uProviderId;
+ uint32_t uProviderVersion;
+
+ SSMR3GetU32(pSSM, &uProviderId);
+ rc = SSMR3GetU32(pSSM, &uProviderVersion);
+ AssertRCReturn(rc, rc);
+
+ if ((GIMPROVIDERID)uProviderId != pVM->gim.s.enmProviderId)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Saved GIM provider %u differs from the configured one (%u)."),
+ uProviderId, pVM->gim.s.enmProviderId);
+#if 0 /** @todo r=bird: Figure out what you mean to do here with the version. */
+ if (uProviderVersion != pVM->gim.s.u32Version)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Saved GIM provider version %u differs from the configured one (%u)."),
+ uProviderVersion, pVM->gim.s.u32Version);
+#else
+ pVM->gim.s.u32Version = uProviderVersion;
+#endif
+
+ /*
+ * Load provider-specific data.
+ */
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ rc = gimR3HvLoad(pVM, pSSM);
+ AssertRCReturn(rc, rc);
+ break;
+
+ case GIMPROVIDERID_KVM:
+ rc = gimR3KvmLoad(pVM, pSSM);
+ AssertRCReturn(rc, rc);
+ break;
+
+ default:
+ break;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADDONE}
+ */
+static DECLCALLBACK(int) gimR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
+{
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ return gimR3HvLoadDone(pVM, pSSM);
+
+ default:
+ return VINF_SUCCESS;
+ }
+}
+
+
+/**
+ * Terminates the GIM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is, at this point, powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) GIMR3Term(PVM pVM)
+{
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ return gimR3HvTerm(pVM);
+
+ case GIMPROVIDERID_KVM:
+ return gimR3KvmTerm(pVM);
+
+ default:
+ break;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) GIMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ gimR3HvRelocate(pVM, offDelta);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the GIM component this means unmapping and unregistering MMIO2 regions
+ * and other provider-specific resets.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) GIMR3Reset(PVM pVM)
+{
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ return gimR3HvReset(pVM);
+
+ case GIMPROVIDERID_KVM:
+ return gimR3KvmReset(pVM);
+
+ default:
+ break;
+ }
+}
+
+
+/**
+ * Registers the GIM device with VMM.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Pointer to the GIM device instance.
+ * @param pDbg Pointer to the GIM device debug structure, can be
+ * NULL.
+ */
+VMMR3DECL(void) GIMR3GimDeviceRegister(PVM pVM, PPDMDEVINS pDevIns, PGIMDEBUG pDbg)
+{
+ pVM->gim.s.pDevInsR3 = pDevIns;
+ pVM->gim.s.pDbgR3 = pDbg;
+}
+
+
+/**
+ * Gets debug setup specified by the provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDbgSetup Where to store the debug setup details.
+ */
+VMMR3DECL(int) GIMR3GetDebugSetup(PVM pVM, PGIMDEBUGSETUP pDbgSetup)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pDbgSetup, VERR_INVALID_PARAMETER);
+
+ switch (pVM->gim.s.enmProviderId)
+ {
+ case GIMPROVIDERID_HYPERV:
+ return gimR3HvGetDebugSetup(pVM, pDbgSetup);
+ default:
+ break;
+ }
+ return VERR_GIM_NO_DEBUG_CONNECTION;
+}
+
+
+/**
+ * Read data from a host debug session.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pvRead The read buffer.
+ * @param pcbRead The size of the read buffer as well as where to store
+ * the number of bytes read.
+ * @param pfnReadComplete Callback when the buffer has been read and
+ * before signalling reading of the next buffer.
+ * Optional, can be NULL.
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3DebugRead(PVM pVM, void *pvRead, size_t *pcbRead, PFNGIMDEBUGBUFREADCOMPLETED pfnReadComplete)
+{
+ PGIMDEBUG pDbg = pVM->gim.s.pDbgR3;
+ if (pDbg)
+ {
+ if (ASMAtomicReadBool(&pDbg->fDbgRecvBufRead) == true)
+ {
+ STAM_REL_COUNTER_INC(&pVM->gim.s.StatDbgRecv);
+ STAM_REL_COUNTER_ADD(&pVM->gim.s.StatDbgRecvBytes, pDbg->cbDbgRecvBufRead);
+
+ memcpy(pvRead, pDbg->pvDbgRecvBuf, pDbg->cbDbgRecvBufRead);
+ *pcbRead = pDbg->cbDbgRecvBufRead;
+ if (pfnReadComplete)
+ pfnReadComplete(pVM);
+ RTSemEventMultiSignal(pDbg->hDbgRecvThreadSem);
+ ASMAtomicWriteBool(&pDbg->fDbgRecvBufRead, false);
+ return VINF_SUCCESS;
+ }
+ else
+ *pcbRead = 0;
+ return VERR_NO_DATA;
+ }
+ return VERR_GIM_NO_DEBUG_CONNECTION;
+}
+
+
+/**
+ * Write data to a host debug session.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pvWrite The write buffer.
+ * @param pcbWrite The size of the write buffer as well as where to store
+ * the number of bytes written.
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3DebugWrite(PVM pVM, void *pvWrite, size_t *pcbWrite)
+{
+ PGIMDEBUG pDbg = pVM->gim.s.pDbgR3;
+ if (pDbg)
+ {
+ PPDMISTREAM pDbgStream = pDbg->pDbgDrvStream;
+ if (pDbgStream)
+ {
+ size_t cbWrite = *pcbWrite;
+ int rc = pDbgStream->pfnWrite(pDbgStream, pvWrite, pcbWrite);
+ if ( RT_SUCCESS(rc)
+ && *pcbWrite == cbWrite)
+ {
+ STAM_REL_COUNTER_INC(&pVM->gim.s.StatDbgXmit);
+ STAM_REL_COUNTER_ADD(&pVM->gim.s.StatDbgXmitBytes, *pcbWrite);
+ }
+ return rc;
+ }
+ }
+ return VERR_GIM_NO_DEBUG_CONNECTION;
+}
+
+#if 0 /* ??? */
+
+/**
+ * @callback_method_impl{FNPGMPHYSHANDLER,
+ * Write access handler for mapped MMIO2 pages. Currently ignores writes.}
+ *
+ * @todo In the future we might want to let the GIM provider decide what the
+ * handler should do (like throwing \#GP faults).
+ */
+static DECLCALLBACK(VBOXSTRICTRC) gimR3Mmio2WriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
+ size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin,
+ void *pvUser)
+{
+ RT_NOREF6(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf);
+ RT_NOREF3(enmAccessType, enmOrigin, pvUser);
+
+ /*
+ * Ignore writes to the mapped MMIO2 page.
+ */
+ Assert(enmAccessType == PGMACCESSTYPE_WRITE);
+ return VINF_SUCCESS; /** @todo Hyper-V says we should \#GP(0) fault for writes to the Hypercall and TSC page. */
+}
+
+
+/**
+ * Unmaps a registered MMIO2 region in the guest address space and removes any
+ * access handlers for it.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegion Pointer to the GIM MMIO2 region.
+ */
+VMMR3_INT_DECL(int) gimR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion)
+{
+ AssertPtr(pVM);
+ AssertPtr(pRegion);
+
+ PPDMDEVINS pDevIns = pVM->gim.s.pDevInsR3;
+ AssertPtr(pDevIns);
+ if (pRegion->fMapped)
+ {
+ int rc = PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
+ AssertRC(rc);
+
+ rc = PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, pRegion->GCPhysPage);
+ if (RT_SUCCESS(rc))
+ {
+ pRegion->fMapped = false;
+ pRegion->GCPhysPage = NIL_RTGCPHYS;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Maps a registered MMIO2 region in the guest address space.
+ *
+ * The region will be made read-only and writes from the guest will be ignored.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegion Pointer to the GIM MMIO2 region.
+ * @param GCPhysRegion Where in the guest address space to map the region.
+ */
+VMMR3_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion)
+{
+ PPDMDEVINS pDevIns = pVM->gim.s.pDevInsR3;
+ AssertPtr(pDevIns);
+
+ /* The guest-physical address must be page-aligned. */
+ if (GCPhysRegion & GUEST_PAGE_OFFSET_MASK)
+ {
+ LogFunc(("%s: %#RGp not paging aligned\n", pRegion->szDescription, GCPhysRegion));
+ return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+ }
+
+ /* Allow only normal pages to be overlaid using our MMIO2 pages (disallow MMIO, ROM, reserved pages). */
+ /** @todo Hyper-V doesn't seem to be very strict about this, maybe relax
+ * later if some guest really requires it. */
+ if (!PGMPhysIsGCPhysNormal(pVM, GCPhysRegion))
+ {
+ LogFunc(("%s: %#RGp is not normal memory\n", pRegion->szDescription, GCPhysRegion));
+ return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+ }
+
+ if (!pRegion->fRegistered)
+ {
+ LogFunc(("%s: Region has not been registered.\n", pRegion->szDescription));
+ return VERR_GIM_IPE_1;
+ }
+
+ /*
+ * Map the MMIO2 region over the specified guest-physical address.
+ */
+ int rc = PDMDevHlpMMIOExMap(pDevIns, NULL, pRegion->iRegion, GCPhysRegion);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Install access-handlers for the mapped page to prevent (ignore) writes to it
+ * from the guest.
+ */
+ if (pVM->gim.s.hSemiReadOnlyMmio2Handler == NIL_PGMPHYSHANDLERTYPE)
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE,
+ gimR3Mmio2WriteHandler,
+ NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NULL /* pszPfHandlerR0 */,
+ NULL /* pszModRC */, NULL /* pszHandlerRC */, NULL /* pszPfHandlerRC */,
+ "GIM read-only MMIO2 handler",
+ &pVM->gim.s.hSemiReadOnlyMmio2Handler);
+ if (RT_SUCCESS(rc))
+ {
+ rc = PGMHandlerPhysicalRegister(pVM, GCPhysRegion, GCPhysRegion + (pRegion->cbRegion - 1),
+ pVM->gim.s.hSemiReadOnlyMmio2Handler,
+ NULL /* pvUserR3 */, NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */,
+ pRegion->szDescription);
+ if (RT_SUCCESS(rc))
+ {
+ pRegion->fMapped = true;
+ pRegion->GCPhysPage = GCPhysRegion;
+ return rc;
+ }
+ }
+
+ PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, GCPhysRegion);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Registers the physical handler for the registered and mapped MMIO2 region.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegion Pointer to the GIM MMIO2 region.
+ */
+VMMR3_INT_DECL(int) gimR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion)
+{
+ AssertPtr(pRegion);
+ AssertReturn(pRegion->fRegistered, VERR_GIM_IPE_2);
+ AssertReturn(pRegion->fMapped, VERR_GIM_IPE_3);
+
+ return PGMR3HandlerPhysicalRegister(pVM,
+ PGMPHYSHANDLERKIND_WRITE,
+ pRegion->GCPhysPage, pRegion->GCPhysPage + (pRegion->cbRegion - 1),
+ gimR3Mmio2WriteHandler, NULL /* pvUserR3 */,
+ NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NIL_RTR0PTR /* pvUserR0 */,
+ NULL /* pszModRC */, NULL /* pszHandlerRC */, NIL_RTRCPTR /* pvUserRC */,
+ pRegion->szDescription);
+}
+
+
+/**
+ * Deregisters the physical handler for the MMIO2 region.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegion Pointer to the GIM MMIO2 region.
+ */
+VMMR3_INT_DECL(int) gimR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion)
+{
+ return PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
+}
+
+#endif
+
diff --git a/src/VBox/VMM/VMMR3/GIMHv.cpp b/src/VBox/VMM/VMMR3/GIMHv.cpp
new file mode 100644
index 00000000..0452facb
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GIMHv.cpp
@@ -0,0 +1,2277 @@
+/* $Id: GIMHv.cpp $ */
+/** @file
+ * GIM - Guest Interface Manager, Hyper-V implementation.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GIM
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/em.h>
+#include "GIMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <VBox/version.h>
+
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <iprt/mem.h>
+#include <iprt/semaphore.h>
+#include <iprt/spinlock.h>
+#include <iprt/zero.h>
+#ifdef DEBUG_ramshankar
+# include <iprt/udp.h>
+#endif
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/**
+ * GIM Hyper-V saved-state version.
+ */
+#define GIM_HV_SAVED_STATE_VERSION UINT32_C(4)
+/** Saved states, prior to saving debug UDP source/destination ports. */
+#define GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG_UDP_PORTS UINT32_C(3)
+/** Saved states, prior to any synthetic interrupt controller support. */
+#define GIM_HV_SAVED_STATE_VERSION_PRE_SYNIC UINT32_C(2)
+/** Vanilla saved states, prior to any debug support. */
+#define GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG UINT32_C(1)
+
+#ifdef VBOX_WITH_STATISTICS
+# define GIMHV_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Gim, kCpumMsrWrFn_Gim, 0, 0, 0, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
+#else
+# define GIMHV_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Gim, kCpumMsrWrFn_Gim, 0, 0, 0, 0, 0, a_szName }
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/**
+ * Array of MSR ranges supported by Hyper-V.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_HyperV[] =
+{
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE0_FIRST, MSR_GIM_HV_RANGE0_LAST, "Hyper-V range 0"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE1_FIRST, MSR_GIM_HV_RANGE1_LAST, "Hyper-V range 1"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE2_FIRST, MSR_GIM_HV_RANGE2_LAST, "Hyper-V range 2"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE3_FIRST, MSR_GIM_HV_RANGE3_LAST, "Hyper-V range 3"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE4_FIRST, MSR_GIM_HV_RANGE4_LAST, "Hyper-V range 4"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE5_FIRST, MSR_GIM_HV_RANGE5_LAST, "Hyper-V range 5"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE6_FIRST, MSR_GIM_HV_RANGE6_LAST, "Hyper-V range 6"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE7_FIRST, MSR_GIM_HV_RANGE7_LAST, "Hyper-V range 7"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE8_FIRST, MSR_GIM_HV_RANGE8_LAST, "Hyper-V range 8"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE9_FIRST, MSR_GIM_HV_RANGE9_LAST, "Hyper-V range 9"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE10_FIRST, MSR_GIM_HV_RANGE10_LAST, "Hyper-V range 10"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE11_FIRST, MSR_GIM_HV_RANGE11_LAST, "Hyper-V range 11"),
+ GIMHV_MSRRANGE(MSR_GIM_HV_RANGE12_FIRST, MSR_GIM_HV_RANGE12_LAST, "Hyper-V range 12")
+};
+#undef GIMHV_MSRRANGE
+
+/**
+ * DHCP OFFER packet response to the guest (client) over the Hyper-V debug
+ * transport.
+ *
+ * - MAC: Destination: broadcast.
+ * - MAC: Source: 00:00:00:00:00:01 (hypervisor). It's important that it's
+ * different from the client's MAC address which is all 0's.
+ * - IP: Source: 10.0.5.1 (hypervisor).
+ * - IP: Destination: broadcast.
+ * - IP: Checksum included.
+ * - BOOTP: Client IP address: 10.0.5.5.
+ * - BOOTP: Server IP address: 10.0.5.1.
+ * - DHCP options: Subnet mask, router, lease-time, DHCP server identifier.
+ * Options are kept to a minimum required for making Windows guests happy.
+ */
+#define GIMHV_DEBUGCLIENT_IPV4 RT_H2N_U32_C(0x0a000505) /* 10.0.5.5 */
+#define GIMHV_DEBUGSERVER_IPV4 RT_H2N_U32_C(0x0a000501) /* 10.0.5.1 */
+static const uint8_t g_abDhcpOffer[] =
+{
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x45, 0x10,
+ 0x01, 0x28, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11, 0x6a, 0xb5, 0x0a, 0x00, 0x05, 0x01, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x43, 0x00, 0x44, 0x01, 0x14, 0x00, 0x00, 0x02, 0x01, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x05, 0x05, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x01, 0x04, 0xff,
+ 0xff, 0xff, 0x00, 0x03, 0x04, 0x0a, 0x00, 0x05, 0x01, 0x33, 0x04, 0xff, 0xff, 0xff, 0xff, 0x36,
+ 0x04, 0x0a, 0x00, 0x05, 0x01, 0xff
+};
+
+/**
+ * DHCP ACK packet response to the guest (client) over the Hyper-V debug
+ * transport.
+ *
+ * - MAC: Destination: 00:00:00:00:00:00 (client).
+ * - IP: Destination: 10.0.5.5 (client).
+ * - The rest is mostly similar to the DHCP offer.
+ */
+static const uint8_t g_abDhcpAck[] =
+{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x45, 0x10,
+ 0x01, 0x28, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11, 0x5b, 0xb0, 0x0a, 0x00, 0x05, 0x01, 0x0a, 0x00,
+ 0x05, 0x05, 0x00, 0x43, 0x00, 0x44, 0x01, 0x14, 0x00, 0x00, 0x02, 0x01, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x05, 0x05, 0x0a, 0x00, 0x05, 0x05, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x05, 0x01, 0x04, 0xff,
+ 0xff, 0xff, 0x00, 0x03, 0x04, 0x0a, 0x00, 0x05, 0x01, 0x33, 0x04, 0xff, 0xff, 0xff, 0xff, 0x36,
+ 0x04, 0x0a, 0x00, 0x05, 0x01, 0xff
+};
+
+/**
+ * ARP reply to the guest (client) over the Hyper-V debug transport.
+ *
+ * - MAC: Destination: 00:00:00:00:00:00 (client)
+ * - MAC: Source: 00:00:00:00:00:01 (hypervisor)
+ * - ARP: Reply: 10.0.5.1 is at Source MAC address.
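+ * - Total frame size: 14-byte Ethernet header + 28-byte ARP payload = 42 bytes.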
+ */
+static const uint8_t g_abArpReply[] =
+{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x06, 0x00, 0x01,
+ 0x08, 0x00, 0x06, 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x00, 0x05, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x05, 0x05
+};
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int gimR3HvInitHypercallSupport(PVM pVM);
+static void gimR3HvTermHypercallSupport(PVM pVM);
+static int gimR3HvInitDebugSupport(PVM pVM);
+static void gimR3HvTermDebugSupport(PVM pVM);
+static DECLCALLBACK(void) gimR3HvTimerCallback(PVM pVM, TMTIMERHANDLE pTimer, void *pvUser);
+
+/**
+ * Initializes the Hyper-V GIM provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pGimCfg The GIM CFGM node.
+ */
+VMMR3_INT_DECL(int) gimR3HvInit(PVM pVM, PCFGMNODE pGimCfg)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_HYPERV, VERR_INTERNAL_ERROR_5);
+
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+
+ /*
+ * Initialize timer handles and such.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
+ for (uint8_t idxStimer = 0; idxStimer < RT_ELEMENTS(pHvCpu->aStimers); idxStimer++)
+ pHvCpu->aStimers[idxStimer].hTimer = NIL_TMTIMERHANDLE;
+ }
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgHv = CFGMR3GetChild(pGimCfg, "HyperV");
+ if (pCfgHv)
+ {
+ /*
+ * Validate the Hyper-V settings.
+ */
+ int rc2 = CFGMR3ValidateConfig(pCfgHv, "/HyperV/",
+ "VendorID"
+ "|VSInterface"
+ "|HypercallDebugInterface",
+ "" /* pszValidNodes */, "GIM/HyperV" /* pszWho */, 0 /* uInstance */);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ }
+
+ /** @cfgm{/GIM/HyperV/VendorID, string, 'VBoxVBoxVBox'}
+ * The Hyper-V vendor signature, must be 12 characters. */
+ char szVendor[13];
+ int rc = CFGMR3QueryStringDef(pCfgHv, "VendorID", szVendor, sizeof(szVendor), "VBoxVBoxVBox");
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn(strlen(szVendor) == 12,
+ ("The VendorID config value must be exactly 12 chars, '%s' isn't!\n", szVendor),
+ VERR_INVALID_PARAMETER);
+
+ LogRel(("GIM: HyperV: Reporting vendor as '%s'\n", szVendor));
+ /** @todo r=bird: GIM_HV_VENDOR_MICROSOFT is 12 chars and the string is max
+ * 12 + terminator, so the NCmp is a little bit misleading. */
+ if (!RTStrNCmp(szVendor, GIM_HV_VENDOR_MICROSOFT, sizeof(GIM_HV_VENDOR_MICROSOFT) - 1))
+ {
+ LogRel(("GIM: HyperV: Warning! Posing as the Microsoft vendor may alter guest behaviour!\n"));
+ pHv->fIsVendorMsHv = true;
+ }
+
+ /** @cfgm{/GIM/HyperV/VSInterface, bool, false}
+ * The Microsoft virtualization service interface (debugging). */
+ rc = CFGMR3QueryBoolDef(pCfgHv, "VSInterface", &pHv->fIsInterfaceVs, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/GIM/HyperV/HypercallDebugInterface, bool, false}
+ * Whether we tell the guest to use hypercalls for debugging rather than MSRs. */
+ rc = CFGMR3QueryBoolDef(pCfgHv, "HypercallDebugInterface", &pHv->fDbgHypercallInterface, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Determine interface capabilities based on the version.
+ */
+ if (!pVM->gim.s.u32Version)
+ {
+ /* Basic features. */
+ pHv->uBaseFeat = 0
+ //| GIM_HV_BASE_FEAT_VP_RUNTIME_MSR
+ | GIM_HV_BASE_FEAT_PART_TIME_REF_COUNT_MSR
+ //| GIM_HV_BASE_FEAT_BASIC_SYNIC_MSRS // Both required for synthetic timers
+ //| GIM_HV_BASE_FEAT_STIMER_MSRS // Both required for synthetic timers
+ | GIM_HV_BASE_FEAT_APIC_ACCESS_MSRS
+ | GIM_HV_BASE_FEAT_HYPERCALL_MSRS
+ | GIM_HV_BASE_FEAT_VP_ID_MSR
+ | GIM_HV_BASE_FEAT_VIRT_SYS_RESET_MSR
+ //| GIM_HV_BASE_FEAT_STAT_PAGES_MSR
+ | GIM_HV_BASE_FEAT_PART_REF_TSC_MSR
+ //| GIM_HV_BASE_FEAT_GUEST_IDLE_STATE_MSR
+ | GIM_HV_BASE_FEAT_TIMER_FREQ_MSRS
+ //| GIM_HV_BASE_FEAT_DEBUG_MSRS
+ ;
+
+ /* Miscellaneous features. */
+ pHv->uMiscFeat = 0
+ //| GIM_HV_MISC_FEAT_GUEST_DEBUGGING
+ //| GIM_HV_MISC_FEAT_XMM_HYPERCALL_INPUT
+ | GIM_HV_MISC_FEAT_TIMER_FREQ
+ | GIM_HV_MISC_FEAT_GUEST_CRASH_MSRS
+ //| GIM_HV_MISC_FEAT_DEBUG_MSRS
+ ;
+
+ /* Hypervisor recommendations to the guest. */
+ pHv->uHyperHints = GIM_HV_HINT_MSR_FOR_SYS_RESET
+ | GIM_HV_HINT_RELAX_TIME_CHECKS
+ | GIM_HV_HINT_X2APIC_MSRS
+ ;
+
+ /* Partition features. */
+ pHv->uPartFlags |= GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS;
+
+ /* Expose more if we're posing as Microsoft. We can, if needed, force MSR-based Hv
+ debugging by not exposing these bits while exposing the VS interface. The better
+ way is what we do currently, via the GIM_HV_DEBUG_OPTIONS_USE_HYPERCALLS bit. */
+ if (pHv->fIsVendorMsHv)
+ {
+ pHv->uMiscFeat |= GIM_HV_MISC_FEAT_GUEST_DEBUGGING
+ | GIM_HV_MISC_FEAT_DEBUG_MSRS;
+
+ pHv->uPartFlags |= GIM_HV_PART_FLAGS_DEBUGGING;
+ }
+ }
+
+ /*
+ * Populate the required fields in MMIO2 region records for registering.
+ */
+ for (size_t i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
+ pHv->aMmio2Regions[i].hMmio2 = NIL_PGMMMIO2HANDLE;
+
+ AssertCompile(GIM_HV_PAGE_SIZE == GUEST_PAGE_SIZE);
+ PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+ pRegion->iRegion = GIM_HV_HYPERCALL_PAGE_REGION_IDX;
+ pRegion->fRCMapping = false;
+ pRegion->cbRegion = GIM_HV_PAGE_SIZE; /* Sanity checked in gimR3HvLoad(), gimR3HvEnableTscPage() & gimR3HvEnableHypercallPage() */
+ pRegion->GCPhysPage = NIL_RTGCPHYS;
+ RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V hypercall page");
+
+ pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+ pRegion->iRegion = GIM_HV_REF_TSC_PAGE_REGION_IDX;
+ pRegion->fRCMapping = false;
+ pRegion->cbRegion = GIM_HV_PAGE_SIZE; /* Sanity checked in gimR3HvLoad(), gimR3HvEnableTscPage() & gimR3HvEnableHypercallPage() */
+ pRegion->GCPhysPage = NIL_RTGCPHYS;
+ RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V TSC page");
+
+ /*
+ * Make sure the CPUID bits are in accordance with the Hyper-V
+ * requirements and other paranoia checks.
+ * See "Requirements for implementing the Microsoft hypervisor interface" spec.
+ */
+ Assert(!(pHv->uPartFlags & ( GIM_HV_PART_FLAGS_CREATE_PART
+ | GIM_HV_PART_FLAGS_ACCESS_MEMORY_POOL
+ | GIM_HV_PART_FLAGS_ACCESS_PART_ID
+ | GIM_HV_PART_FLAGS_ADJUST_MSG_BUFFERS
+ | GIM_HV_PART_FLAGS_CREATE_PORT
+ | GIM_HV_PART_FLAGS_ACCESS_STATS
+ | GIM_HV_PART_FLAGS_CPU_MGMT
+ | GIM_HV_PART_FLAGS_CPU_PROFILER)));
+ Assert((pHv->uBaseFeat & (GIM_HV_BASE_FEAT_HYPERCALL_MSRS | GIM_HV_BASE_FEAT_VP_ID_MSR))
+ == (GIM_HV_BASE_FEAT_HYPERCALL_MSRS | GIM_HV_BASE_FEAT_VP_ID_MSR));
+#ifdef VBOX_STRICT
+ for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
+ {
+ PCGIMMMIO2REGION pCur = &pHv->aMmio2Regions[i];
+ Assert(!pCur->fRCMapping);
+ Assert(!pCur->fMapped);
+ Assert(pCur->GCPhysPage == NIL_RTGCPHYS);
+ }
+#endif
+
+ /*
+ * Expose HVP (Hypervisor Present) bit to the guest.
+ */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
+
+ /*
+ * Modify the standard hypervisor leaves for Hyper-V.
+ */
+ CPUMCPUIDLEAF HyperLeaf;
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000000);
+ if ( pHv->fIsVendorMsHv
+ && pHv->fIsInterfaceVs)
+ HyperLeaf.uEax = UINT32_C(0x40000082); /* Since we expose 0x40000082 below for the Hyper-V PV-debugging case. */
+ else
+ HyperLeaf.uEax = UINT32_C(0x40000006); /* Minimum value for Hyper-V is 0x40000005; we expose one leaf more. */
+ /*
+ * Don't report vendor as 'Microsoft Hv'[1] by default, see @bugref{7270#c152}.
+ * [1]: ebx=0x7263694d ('rciM') ecx=0x666f736f ('foso') edx=0x76482074 ('vH t')
+ */
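+ /* Pack the 12-char vendor string little-endian into EBX:ECX:EDX; e.g. the
+    default 'VBoxVBoxVBox' yields ebx=ecx=edx=0x786f4256 ('xoBV'). */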
+ {
+ uint32_t uVendorEbx;
+ uint32_t uVendorEcx;
+ uint32_t uVendorEdx;
+ uVendorEbx = ((uint32_t)szVendor[ 3]) << 24 | ((uint32_t)szVendor[ 2]) << 16 | ((uint32_t)szVendor[1]) << 8
+ | (uint32_t)szVendor[ 0];
+ uVendorEcx = ((uint32_t)szVendor[ 7]) << 24 | ((uint32_t)szVendor[ 6]) << 16 | ((uint32_t)szVendor[5]) << 8
+ | (uint32_t)szVendor[ 4];
+ uVendorEdx = ((uint32_t)szVendor[11]) << 24 | ((uint32_t)szVendor[10]) << 16 | ((uint32_t)szVendor[9]) << 8
+ | (uint32_t)szVendor[ 8];
+ HyperLeaf.uEbx = uVendorEbx;
+ HyperLeaf.uEcx = uVendorEcx;
+ HyperLeaf.uEdx = uVendorEdx;
+ }
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ HyperLeaf.uLeaf = UINT32_C(0x40000001);
+ HyperLeaf.uEax = 0x31237648; /* 'Hv#1' */
+ HyperLeaf.uEbx = 0; /* Reserved */
+ HyperLeaf.uEcx = 0; /* Reserved */
+ HyperLeaf.uEdx = 0; /* Reserved */
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Add Hyper-V specific leaves.
+ */
+ HyperLeaf.uLeaf = UINT32_C(0x40000002); /* MBZ until MSR_GIM_HV_GUEST_OS_ID is set by the guest. */
+ HyperLeaf.uEax = 0;
+ HyperLeaf.uEbx = 0;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ HyperLeaf.uLeaf = UINT32_C(0x40000003);
+ HyperLeaf.uEax = pHv->uBaseFeat;
+ HyperLeaf.uEbx = pHv->uPartFlags;
+ HyperLeaf.uEcx = pHv->uPowMgmtFeat;
+ HyperLeaf.uEdx = pHv->uMiscFeat;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ HyperLeaf.uLeaf = UINT32_C(0x40000004);
+ HyperLeaf.uEax = pHv->uHyperHints;
+ HyperLeaf.uEbx = 0xffffffff;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000005);
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ /* Leaf 0x40000006 is inserted in gimR3HvInitCompleted(). */
+
+ if ( pHv->fIsVendorMsHv
+ && pHv->fIsInterfaceVs)
+ {
+ HyperLeaf.uLeaf = UINT32_C(0x40000080);
+ HyperLeaf.uEax = 0;
+ HyperLeaf.uEbx = 0x7263694d; /* 'rciM' */
+ HyperLeaf.uEcx = 0x666f736f; /* 'foso' */
+ HyperLeaf.uEdx = 0x53562074; /* 'SV t' */
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ HyperLeaf.uLeaf = UINT32_C(0x40000081);
+ HyperLeaf.uEax = 0x31235356; /* '1#SV' */
+ HyperLeaf.uEbx = 0;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ HyperLeaf.uLeaf = UINT32_C(0x40000082);
+ HyperLeaf.uEax = RT_BIT_32(1);
+ HyperLeaf.uEbx = 0;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Insert all MSR ranges of Hyper-V.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aMsrRanges_HyperV); i++)
+ {
+ int rc2 = CPUMR3MsrRangesInsert(pVM, &g_aMsrRanges_HyperV[i]);
+ AssertLogRelRCReturn(rc2, rc2);
+ }
+
+ /*
+ * Setup non-zero MSRs.
+ */
+ if (pHv->uMiscFeat & GIM_HV_MISC_FEAT_GUEST_CRASH_MSRS)
+ pHv->uCrashCtlMsr = MSR_GIM_HV_CRASH_CTL_NOTIFY;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMHVCPU pHvCpu = &pVM->apCpusR3[idCpu]->gim.s.u.HvCpu;
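+ /* All synthetic interrupt sources start out masked, matching their spec'ed reset value. */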
+ for (uint8_t idxSintMsr = 0; idxSintMsr < RT_ELEMENTS(pHvCpu->auSintMsrs); idxSintMsr++)
+ pHvCpu->auSintMsrs[idxSintMsr] = MSR_GIM_HV_SINT_MASKED;
+ }
+
+ /*
+ * Setup hypercall support.
+ */
+ rc = gimR3HvInitHypercallSupport(pVM);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Setup debug support.
+ */
+ rc = gimR3HvInitDebugSupport(pVM);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Set up the per-VCPU synthetic timers.
+ */
+ if ( (pHv->uBaseFeat & GIM_HV_BASE_FEAT_STIMER_MSRS)
+ || (pHv->uBaseFeat & GIM_HV_BASE_FEAT_BASIC_SYNIC_MSRS))
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
+
+ for (uint8_t idxStimer = 0; idxStimer < RT_ELEMENTS(pHvCpu->aStimers); idxStimer++)
+ {
+ PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
+
+ /* Associate the synthetic timer with its corresponding VCPU. */
+ pHvStimer->idCpu = pVCpu->idCpu;
+ pHvStimer->idxStimer = idxStimer;
+
+ /* Create the timer and associate the context pointers. */
+ char szName[32];
+ RTStrPrintf(szName, sizeof(szName), "Hyper-V[%u] Timer%u", pVCpu->idCpu, idxStimer);
+ rc = TMR3TimerCreate(pVM, TMCLOCK_VIRTUAL_SYNC, gimR3HvTimerCallback, pHvStimer /* pvUser */,
+ TMTIMER_FLAGS_RING0, szName, &pHvStimer->hTimer);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+ }
+
+ /*
+ * Register statistics.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
+
+ for (size_t idxStimer = 0; idxStimer < RT_ELEMENTS(pHvCpu->aStatStimerFired); idxStimer++)
+ {
+ int rc2 = STAMR3RegisterF(pVM, &pHvCpu->aStatStimerFired[idxStimer], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, "Number of times the synthetic timer fired.",
+ "/GIM/HyperV/%u/Stimer%u_Fired", idCpu, idxStimer);
+ AssertLogRelRCReturn(rc2, rc2);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes remaining bits of the Hyper-V provider.
+ *
+ * This is called after initializing HM and almost all other VMM components.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvInitCompleted(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ pHv->cTscTicksPerSecond = TMCpuTicksPerSecond(pVM);
+
+ /*
+ * Determine interface capabilities based on the version.
+ */
+ if (!pVM->gim.s.u32Version)
+ {
+ /* Hypervisor capabilities; features used by the hypervisor. */
+ pHv->uHyperCaps = HMIsNestedPagingActive(pVM) ? GIM_HV_HOST_FEAT_NESTED_PAGING : 0;
+ pHv->uHyperCaps |= HMIsMsrBitmapActive(pVM) ? GIM_HV_HOST_FEAT_MSR_BITMAP : 0;
+ }
+
+ CPUMCPUIDLEAF HyperLeaf;
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000006);
+ HyperLeaf.uEax = pHv->uHyperCaps;
+ HyperLeaf.uEbx = 0;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ int rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Inform APIC whether Hyper-V compatibility mode is enabled or not.
+ * Do this here rather than in gimR3HvInit() as this is called after the APIC
+ * has finished inserting/removing the x2APIC MSR range.
+ */
+ if (pHv->uHyperHints & GIM_HV_HINT_X2APIC_MSRS)
+ APICR3HvSetCompatMode(pVM, true);
+
+ return rc;
+}
+
+
+/**
+ * Terminates the Hyper-V GIM provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvTerm(PVM pVM)
+{
+ gimR3HvReset(pVM);
+ gimR3HvTermHypercallSupport(pVM);
+ gimR3HvTermDebugSupport(pVM);
+
+ PCGIMHV pHv = &pVM->gim.s.u.Hv;
+ if ( (pHv->uBaseFeat & GIM_HV_BASE_FEAT_STIMER_MSRS)
+ || (pHv->uBaseFeat & GIM_HV_BASE_FEAT_BASIC_SYNIC_MSRS))
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMHVCPU pHvCpu = &pVM->apCpusR3[idCpu]->gim.s.u.HvCpu;
+ for (uint8_t idxStimer = 0; idxStimer < RT_ELEMENTS(pHvCpu->aStimers); idxStimer++)
+ {
+ PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
+ TMR3TimerDestroy(pVM, pHvStimer->hTimer);
+ pHvStimer->hTimer = NIL_TMTIMERHANDLE;
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) gimR3HvRelocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ RT_NOREF(pVM, offDelta);
+}
+
+
+/**
+ * This resets Hyper-V provider MSRs and unmaps whatever Hyper-V regions
+ * the guest may have mapped.
+ *
+ * This is called when the VM is being reset.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(void) gimR3HvReset(PVM pVM)
+{
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Unmap MMIO2 pages that the guest may have set up.
+ */
+ LogRel(("GIM: HyperV: Resetting MMIO2 regions and MSRs\n"));
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
+ {
+ PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[i];
+#if 0
+ gimR3Mmio2Unmap(pVM, pRegion);
+#else
+ pRegion->fMapped = false;
+ pRegion->GCPhysPage = NIL_RTGCPHYS;
+#endif
+ }
+
+ /*
+ * Reset MSRs.
+ */
+ pHv->u64GuestOsIdMsr = 0;
+ pHv->u64HypercallMsr = 0;
+ pHv->u64TscPageMsr = 0;
+ pHv->uCrashP0Msr = 0;
+ pHv->uCrashP1Msr = 0;
+ pHv->uCrashP2Msr = 0;
+ pHv->uCrashP3Msr = 0;
+ pHv->uCrashP4Msr = 0;
+ pHv->uDbgStatusMsr = 0;
+ pHv->uDbgPendingBufferMsr = 0;
+ pHv->uDbgSendBufferMsr = 0;
+ pHv->uDbgRecvBufferMsr = 0;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMHVCPU pHvCpu = &pVM->apCpusR3[idCpu]->gim.s.u.HvCpu;
+ pHvCpu->uSControlMsr = 0;
+ pHvCpu->uSimpMsr = 0;
+ pHvCpu->uSiefpMsr = 0;
+ pHvCpu->uApicAssistPageMsr = 0;
+
+ for (uint8_t idxSint = 0; idxSint < RT_ELEMENTS(pHvCpu->auSintMsrs); idxSint++)
+ pHvCpu->auSintMsrs[idxSint] = MSR_GIM_HV_SINT_MASKED;
+
+ for (uint8_t idxStimer = 0; idxStimer < RT_ELEMENTS(pHvCpu->aStimers); idxStimer++)
+ {
+ PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
+ pHvStimer->uStimerConfigMsr = 0;
+ pHvStimer->uStimerCountMsr = 0;
+ }
+ }
+}
+
+
+/**
+ * Callback for when debug data is available over the debugger connection.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static DECLCALLBACK(void) gimR3HvDebugBufAvail(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ RTGCPHYS GCPhysPendingBuffer = pHv->uDbgPendingBufferMsr;
+ if ( GCPhysPendingBuffer
+ && PGMPhysIsGCPhysNormal(pVM, GCPhysPendingBuffer))
+ {
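+ /* Signal the guest that debug data is pending by setting the byte it polls in
+    the pending-buffer page it registered with us. */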
+ uint8_t bPendingData = 1;
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysPendingBuffer, &bPendingData, sizeof(bPendingData));
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(5, ("GIM: HyperV: Failed to set pending debug receive buffer at %#RGp, rc=%Rrc\n", GCPhysPendingBuffer,
+ rc));
+ }
+ }
+}
+
+
+/**
+ * Callback for when debug data has been read from the debugger connection.
+ *
+ * This is invoked before signalling that the next debug buffer can be read.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static DECLCALLBACK(void) gimR3HvDebugBufReadCompleted(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ RTGCPHYS GCPhysPendingBuffer = pHv->uDbgPendingBufferMsr;
+ if ( GCPhysPendingBuffer
+ && PGMPhysIsGCPhysNormal(pVM, GCPhysPendingBuffer))
+ {
+ uint8_t bPendingData = 0;
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysPendingBuffer, &bPendingData, sizeof(bPendingData));
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(5, ("GIM: HyperV: Failed to clear pending debug receive buffer at %#RGp, rc=%Rrc\n", GCPhysPendingBuffer,
+ rc));
+ }
+ }
+}
+
+
+/**
+ * Get Hyper-V debug setup parameters.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDbgSetup Where to store the debug setup details.
+ */
+VMMR3_INT_DECL(int) gimR3HvGetDebugSetup(PVM pVM, PGIMDEBUGSETUP pDbgSetup)
+{
+ Assert(pDbgSetup);
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ if (pHv->fDbgEnabled)
+ {
+ pDbgSetup->pfnDbgRecvBufAvail = gimR3HvDebugBufAvail;
+ pDbgSetup->cbDbgRecvBuf = GIM_HV_PAGE_SIZE;
+ return VINF_SUCCESS;
+ }
+ return VERR_GIM_NO_DEBUG_CONNECTION;
+}
+
+
+/**
+ * Hyper-V state-save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+VMMR3_INT_DECL(int) gimR3HvSave(PVM pVM, PSSMHANDLE pSSM)
+{
+ PCGIMHV pHv = &pVM->gim.s.u.Hv;
+
+ /*
+ * Save the Hyper-V SSM version.
+ */
+ SSMR3PutU32(pSSM, GIM_HV_SAVED_STATE_VERSION);
+
+ /*
+ * Save per-VM MSRs.
+ */
+ SSMR3PutU64(pSSM, pHv->u64GuestOsIdMsr);
+ SSMR3PutU64(pSSM, pHv->u64HypercallMsr);
+ SSMR3PutU64(pSSM, pHv->u64TscPageMsr);
+
+ /*
+ * Save Hyper-V features / capabilities.
+ */
+ SSMR3PutU32(pSSM, pHv->uBaseFeat);
+ SSMR3PutU32(pSSM, pHv->uPartFlags);
+ SSMR3PutU32(pSSM, pHv->uPowMgmtFeat);
+ SSMR3PutU32(pSSM, pHv->uMiscFeat);
+ SSMR3PutU32(pSSM, pHv->uHyperHints);
+ SSMR3PutU32(pSSM, pHv->uHyperCaps);
+
+ /*
+ * Save the Hypercall region.
+ */
+ PCGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+ SSMR3PutU8(pSSM, pRegion->iRegion);
+ SSMR3PutBool(pSSM, pRegion->fRCMapping);
+ SSMR3PutU32(pSSM, pRegion->cbRegion);
+ SSMR3PutGCPhys(pSSM, pRegion->GCPhysPage);
+ SSMR3PutStrZ(pSSM, pRegion->szDescription);
+
+ /*
+ * Save the reference TSC region.
+ */
+ pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+ SSMR3PutU8(pSSM, pRegion->iRegion);
+ SSMR3PutBool(pSSM, pRegion->fRCMapping);
+ SSMR3PutU32(pSSM, pRegion->cbRegion);
+ SSMR3PutGCPhys(pSSM, pRegion->GCPhysPage);
+ SSMR3PutStrZ(pSSM, pRegion->szDescription);
+ /* Save the TSC sequence so we can bump it on restore (as the CPU frequency/offset may change). */
+ uint32_t uTscSequence = 0;
+ if ( pRegion->fMapped
+ && MSR_GIM_HV_REF_TSC_IS_ENABLED(pHv->u64TscPageMsr))
+ {
+ PCGIMHVREFTSC pRefTsc = (PCGIMHVREFTSC)pRegion->pvPageR3;
+ uTscSequence = pRefTsc->u32TscSequence;
+ }
+ SSMR3PutU32(pSSM, uTscSequence);
+
+ /*
+ * Save debug support data.
+ */
+ SSMR3PutU64(pSSM, pHv->uDbgPendingBufferMsr);
+ SSMR3PutU64(pSSM, pHv->uDbgSendBufferMsr);
+ SSMR3PutU64(pSSM, pHv->uDbgRecvBufferMsr);
+ SSMR3PutU64(pSSM, pHv->uDbgStatusMsr);
+ SSMR3PutU32(pSSM, pHv->enmDbgReply);
+ SSMR3PutU32(pSSM, pHv->uDbgBootpXId);
+ SSMR3PutU32(pSSM, pHv->DbgGuestIp4Addr.u);
+ SSMR3PutU16(pSSM, pHv->uUdpGuestDstPort);
+ SSMR3PutU16(pSSM, pHv->uUdpGuestSrcPort);
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMHVCPU pHvCpu = &pVM->apCpusR3[idCpu]->gim.s.u.HvCpu;
+ SSMR3PutU64(pSSM, pHvCpu->uSimpMsr);
+ for (size_t idxSintMsr = 0; idxSintMsr < RT_ELEMENTS(pHvCpu->auSintMsrs); idxSintMsr++)
+ SSMR3PutU64(pSSM, pHvCpu->auSintMsrs[idxSintMsr]);
+ }
+
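+ /* Trailing sanity marker; gimR3HvLoad() reads this back as a delimiter. */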
+ return SSMR3PutU8(pSSM, UINT8_MAX);
+}
+
+
+/**
+ * Hyper-V state-load operation, final pass.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+VMMR3_INT_DECL(int) gimR3HvLoad(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Load the Hyper-V SSM version first.
+ */
+ uint32_t uHvSavedStateVersion;
+ int rc = SSMR3GetU32(pSSM, &uHvSavedStateVersion);
+ AssertRCReturn(rc, rc);
+ if ( uHvSavedStateVersion != GIM_HV_SAVED_STATE_VERSION
+ && uHvSavedStateVersion != GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG_UDP_PORTS
+ && uHvSavedStateVersion != GIM_HV_SAVED_STATE_VERSION_PRE_SYNIC
+ && uHvSavedStateVersion != GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG)
+ return SSMR3SetLoadError(pSSM, VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION, RT_SRC_POS,
+ N_("Unsupported Hyper-V saved-state version %u (current %u)!"),
+ uHvSavedStateVersion, GIM_HV_SAVED_STATE_VERSION);
+
+ /*
+ * Update the TSC frequency from TM.
+ */
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ pHv->cTscTicksPerSecond = TMCpuTicksPerSecond(pVM);
+
+ /*
+ * Load per-VM MSRs.
+ */
+ SSMR3GetU64(pSSM, &pHv->u64GuestOsIdMsr);
+ SSMR3GetU64(pSSM, &pHv->u64HypercallMsr);
+ SSMR3GetU64(pSSM, &pHv->u64TscPageMsr);
+
+ /*
+ * Load Hyper-V features / capabilities.
+ */
+ SSMR3GetU32(pSSM, &pHv->uBaseFeat);
+ SSMR3GetU32(pSSM, &pHv->uPartFlags);
+ SSMR3GetU32(pSSM, &pHv->uPowMgmtFeat);
+ SSMR3GetU32(pSSM, &pHv->uMiscFeat);
+ SSMR3GetU32(pSSM, &pHv->uHyperHints);
+ SSMR3GetU32(pSSM, &pHv->uHyperCaps);
+
+ /*
+ * Load and enable the Hypercall region.
+ */
+ PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+ SSMR3GetU8(pSSM, &pRegion->iRegion);
+ SSMR3GetBool(pSSM, &pRegion->fRCMapping);
+ SSMR3GetU32(pSSM, &pRegion->cbRegion);
+ SSMR3GetGCPhys(pSSM, &pRegion->GCPhysPage);
+ rc = SSMR3GetStrZ(pSSM, pRegion->szDescription, sizeof(pRegion->szDescription));
+ AssertRCReturn(rc, rc);
+
+ if (pRegion->cbRegion != GIM_HV_PAGE_SIZE)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Hypercall page region size %#x invalid, expected %#x"),
+ pRegion->cbRegion, GIM_HV_PAGE_SIZE);
+
+ if (MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(pHv->u64HypercallMsr))
+ {
+ Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
+ if (RT_LIKELY(pRegion->fRegistered))
+ {
+ rc = gimR3HvEnableHypercallPage(pVM, pRegion->GCPhysPage);
+ if (RT_FAILURE(rc))
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the hypercall page. GCPhys=%#RGp rc=%Rrc"),
+ pRegion->GCPhysPage, rc);
+ }
+ else
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Hypercall MMIO2 region not registered. Missing GIM device?!"));
+ }
+
+ /*
+ * Load and enable the reference TSC region.
+ */
+ uint32_t uTscSequence;
+ pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+ SSMR3GetU8(pSSM, &pRegion->iRegion);
+ SSMR3GetBool(pSSM, &pRegion->fRCMapping);
+ SSMR3GetU32(pSSM, &pRegion->cbRegion);
+ SSMR3GetGCPhys(pSSM, &pRegion->GCPhysPage);
+ SSMR3GetStrZ(pSSM, pRegion->szDescription, sizeof(pRegion->szDescription));
+ rc = SSMR3GetU32(pSSM, &uTscSequence);
+ AssertRCReturn(rc, rc);
+
+ if (pRegion->cbRegion != GIM_HV_PAGE_SIZE)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("TSC page region size %#x invalid, expected %#x"),
+ pRegion->cbRegion, GIM_HV_PAGE_SIZE);
+
+ if (MSR_GIM_HV_REF_TSC_IS_ENABLED(pHv->u64TscPageMsr))
+ {
+ Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
+ if (pRegion->fRegistered)
+ {
+ rc = gimR3HvEnableTscPage(pVM, pRegion->GCPhysPage, true /* fUseThisTscSeq */, uTscSequence);
+ if (RT_FAILURE(rc))
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the TSC page. GCPhys=%#RGp rc=%Rrc"),
+ pRegion->GCPhysPage, rc);
+ }
+ else
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("TSC-page MMIO2 region not registered. Missing GIM device?!"));
+ }
+
+ /*
+ * Load the debug support data.
+ */
+ if (uHvSavedStateVersion > GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG)
+ {
+ SSMR3GetU64(pSSM, &pHv->uDbgPendingBufferMsr);
+ SSMR3GetU64(pSSM, &pHv->uDbgSendBufferMsr);
+ SSMR3GetU64(pSSM, &pHv->uDbgRecvBufferMsr);
+ SSMR3GetU64(pSSM, &pHv->uDbgStatusMsr);
+ SSM_GET_ENUM32_RET(pSSM, pHv->enmDbgReply, GIMHVDEBUGREPLY);
+ SSMR3GetU32(pSSM, &pHv->uDbgBootpXId);
+ rc = SSMR3GetU32(pSSM, &pHv->DbgGuestIp4Addr.u);
+ AssertRCReturn(rc, rc);
+ if (uHvSavedStateVersion > GIM_HV_SAVED_STATE_VERSION_PRE_DEBUG_UDP_PORTS)
+ {
+ rc = SSMR3GetU16(pSSM, &pHv->uUdpGuestDstPort); AssertRCReturn(rc, rc);
+ rc = SSMR3GetU16(pSSM, &pHv->uUdpGuestSrcPort); AssertRCReturn(rc, rc);
+ }
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMHVCPU pHvCpu = &pVM->apCpusR3[idCpu]->gim.s.u.HvCpu;
+ SSMR3GetU64(pSSM, &pHvCpu->uSimpMsr);
+ if (uHvSavedStateVersion <= GIM_HV_SAVED_STATE_VERSION_PRE_SYNIC)
+ SSMR3GetU64(pSSM, &pHvCpu->auSintMsrs[GIM_HV_VMBUS_MSG_SINT]);
+ else
+ {
+ for (uint8_t idxSintMsr = 0; idxSintMsr < RT_ELEMENTS(pHvCpu->auSintMsrs); idxSintMsr++)
+ SSMR3GetU64(pSSM, &pHvCpu->auSintMsrs[idxSintMsr]);
+ }
+ }
+
+ uint8_t bDelim;
+ rc = SSMR3GetU8(pSSM, &bDelim);
+ }
+ else
+ rc = VINF_SUCCESS;
+
+ return rc;
+}
+
+
+/**
+ * Hyper-V load-done callback.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+VMMR3_INT_DECL(int) gimR3HvLoadDone(PVM pVM, PSSMHANDLE pSSM)
+{
+ if (RT_SUCCESS(SSMR3HandleGetStatus(pSSM)))
+ {
+ /*
+ * Update EM on whether MSR_GIM_HV_GUEST_OS_ID allows hypercall instructions.
+ */
+ if (pVM->gim.s.u.Hv.u64GuestOsIdMsr)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ EMSetHypercallInstructionsEnabled(pVM->apCpusR3[idCpu], true);
+ else
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ EMSetHypercallInstructionsEnabled(pVM->apCpusR3[idCpu], false);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enables the Hyper-V APIC-assist page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPhysApicAssistPage Where to map the APIC-assist page.
+ */
+VMMR3_INT_DECL(int) gimR3HvEnableApicAssistPage(PVMCPU pVCpu, RTGCPHYS GCPhysApicAssistPage)
+{
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PPDMDEVINSR3 pDevIns = pVM->gim.s.pDevInsR3;
+ AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+ /*
+ * Map the APIC-assist-page at the specified address.
+ */
+ /** @todo this is buggy when large pages are used due to a PGM limitation, see
+ * @bugref{7532}. Instead of the overlay style mapping, we just
+ * rewrite guest memory directly. */
+ AssertCompile(sizeof(g_abRTZero64K) >= GUEST_PAGE_SIZE);
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysApicAssistPage, g_abRTZero64K, GUEST_PAGE_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ /** @todo Inform APIC. */
+ LogRel(("GIM%u: HyperV: Enabled APIC-assist page at %#RGp\n", pVCpu->idCpu, GCPhysApicAssistPage));
+ }
+ else
+ {
+ LogRelFunc(("GIM%u: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", pVCpu->idCpu, rc));
+ rc = VERR_GIM_OPERATION_FAILED;
+ }
+ return rc;
+}
+
+
+/**
+ * Disables the Hyper-V APIC-assist page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvDisableApicAssistPage(PVMCPU pVCpu)
+{
+ LogRel(("GIM%u: HyperV: Disabled APIC-assist page\n", pVCpu->idCpu));
+ /** @todo inform APIC */
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNTMTIMERINT, Hyper-V synthetic timer callback.}
+ */
+static DECLCALLBACK(void) gimR3HvTimerCallback(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ PGIMHVSTIMER pHvStimer = (PGIMHVSTIMER)pvUser;
+ Assert(pHvStimer);
+ Assert(TMTimerIsLockOwner(pVM, hTimer));
+ Assert(pHvStimer->idCpu < pVM->cCpus);
+ Assert(pHvStimer->hTimer == hTimer);
+ RT_NOREF(hTimer);
+
+ PVMCPU pVCpu = pVM->apCpusR3[pHvStimer->idCpu];
+ PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
+ Assert(pHvStimer->idxStimer < RT_ELEMENTS(pHvCpu->aStatStimerFired));
+
+ STAM_COUNTER_INC(&pHvCpu->aStatStimerFired[pHvStimer->idxStimer]);
+
+ uint64_t const uStimerConfig = pHvStimer->uStimerConfigMsr;
+ uint16_t const idxSint = MSR_GIM_HV_STIMER_GET_SINTX(uStimerConfig);
+ if (RT_LIKELY(idxSint < RT_ELEMENTS(pHvCpu->auSintMsrs)))
+ {
+ uint64_t const uSint = pHvCpu->auSintMsrs[idxSint];
+ if (!MSR_GIM_HV_SINT_IS_MASKED(uSint))
+ {
+ uint8_t const uVector = MSR_GIM_HV_SINT_GET_VECTOR(uSint);
+ bool const fAutoEoi = MSR_GIM_HV_SINT_IS_AUTOEOI(uSint);
+ APICHvSendInterrupt(pVCpu, uVector, fAutoEoi, XAPICTRIGGERMODE_EDGE);
+ }
+ }
+
+ /* Re-arm the timer if it's periodic. */
+ if (MSR_GIM_HV_STIMER_IS_PERIODIC(uStimerConfig))
+ gimHvStartStimer(pVCpu, pHvStimer);
+}
+
+
+/**
+ * Enables the Hyper-V SIEF page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPhysSiefPage Where to map the SIEF page.
+ */
+VMMR3_INT_DECL(int) gimR3HvEnableSiefPage(PVMCPU pVCpu, RTGCPHYS GCPhysSiefPage)
+{
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PPDMDEVINSR3 pDevIns = pVM->gim.s.pDevInsR3;
+ AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+ /*
+ * Map the SIEF page at the specified address.
+ */
+ /** @todo this is buggy when large pages are used due to a PGM limitation, see
+ * @bugref{7532}. Instead of the overlay style mapping, we just
+ * rewrite guest memory directly. */
+ AssertCompile(sizeof(g_abRTZero64K) >= GUEST_PAGE_SIZE);
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSiefPage, g_abRTZero64K, GUEST_PAGE_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ /** @todo SIEF setup. */
+ LogRel(("GIM%u: HyperV: Enabled SIEF page at %#RGp\n", pVCpu->idCpu, GCPhysSiefPage));
+ }
+ else
+ {
+ LogRelFunc(("GIM%u: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", pVCpu->idCpu, rc));
+ rc = VERR_GIM_OPERATION_FAILED;
+ }
+ return rc;
+}
+
+
+/**
+ * Disables the Hyper-V SIEF page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvDisableSiefPage(PVMCPU pVCpu)
+{
+ LogRel(("GIM%u: HyperV: Disabled APIC-assist page\n", pVCpu->idCpu));
+ /** @todo SIEF teardown. */
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enables the Hyper-V TSC page.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhysTscPage Where to map the TSC page.
+ * @param fUseThisTscSeq Whether to set the TSC sequence number to the one
+ * specified in @a uTscSeq.
+ * @param uTscSeq The TSC sequence value to use. Ignored if
+ * @a fUseThisTscSeq is false.
+ */
+VMMR3_INT_DECL(int) gimR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage, bool fUseThisTscSeq, uint32_t uTscSeq)
+{
+ PPDMDEVINSR3 pDevIns = pVM->gim.s.pDevInsR3;
+ PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+ AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+ int rc;
+ if (pRegion->fMapped)
+ {
+ /*
+ * Is it already enabled at the given guest-address?
+ */
+ if (pRegion->GCPhysPage == GCPhysTscPage)
+ return VINF_SUCCESS;
+
+ /*
+ * If it's mapped at a different address, unmap the previous address.
+ */
+ rc = gimR3HvDisableTscPage(pVM);
+ AssertRC(rc);
+ }
+
+ /*
+ * Map the TSC-page at the specified address.
+ */
+ Assert(!pRegion->fMapped);
+
+ /** @todo this is buggy when large pages are used due to a PGM limitation, see
+ * @bugref{7532}. Instead of the overlay style mapping, we just
+ * rewrite guest memory directly. */
+#if 0
+ rc = gimR3Mmio2Map(pVM, pRegion, GCPhysTscPage);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(pRegion->GCPhysPage == GCPhysTscPage);
+
+ /*
+ * Update the TSC scale. Windows guests expect a non-zero TSC sequence, otherwise
+ * they fall back to using the reference count MSR which is not ideal in terms of VM-exits.
+ *
+ * Also, Hyper-V normalizes the time to a 10 MHz reference, see:
+ * http://technet.microsoft.com/it-it/sysinternals/dn553408%28v=vs.110%29
+ */
+ PGIMHVREFTSC pRefTsc = (PGIMHVREFTSC)pRegion->pvPageR3;
+ Assert(pRefTsc);
+
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ uint64_t const u64TscKHz = pHv->cTscTicksPerSecond / UINT64_C(1000);
+ uint32_t u32TscSeq = 1;
+ if ( fUseThisTscSeq
+ && uTscSeq < UINT32_C(0xfffffffe))
+ u32TscSeq = uTscSeq + 1;
+ pRefTsc->u32TscSequence = u32TscSeq;
+ pRefTsc->u64TscScale = ((INT64_C(10000) << 32) / u64TscKHz) << 32;
+ pRefTsc->i64TscOffset = 0;
+
+ LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64) Seq=%#RU32\n",
+ GCPhysTscPage, pRefTsc->u64TscScale, u64TscKHz, u64TscKHz, pRefTsc->u32TscSequence));
+
+ TMR3CpuTickParavirtEnable(pVM);
+ return VINF_SUCCESS;
+ }
+ else
+ LogRelFunc(("gimR3Mmio2Map failed. rc=%Rrc\n", rc));
+ return VERR_GIM_OPERATION_FAILED;
+#else
+ AssertReturn(pRegion->cbRegion == GUEST_PAGE_SIZE, VERR_GIM_IPE_2);
+ PGIMHVREFTSC pRefTsc = (PGIMHVREFTSC)RTMemAllocZ(GUEST_PAGE_SIZE);
+ if (RT_UNLIKELY(!pRefTsc))
+ {
+ LogRelFunc(("Failed to alloc %#x bytes\n", GUEST_PAGE_SIZE));
+ return VERR_NO_MEMORY;
+ }
+
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ uint64_t const u64TscKHz = pHv->cTscTicksPerSecond / UINT64_C(1000);
+ uint32_t u32TscSeq = 1;
+ if ( fUseThisTscSeq
+ && uTscSeq < UINT32_C(0xfffffffe))
+ u32TscSeq = uTscSeq + 1;
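+ /* Per the Hyper-V TLFS, the guest computes the reference time (100ns units) as
+    ((TSC * u64TscScale) >> 64) + i64TscOffset; the scale is therefore
+    2^64 * 10^7 / TscFreqHz, computed as ((10000 << 32) / TscKHz) << 32 in two
+    steps to stay within 64-bit arithmetic. */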
+ pRefTsc->u32TscSequence = u32TscSeq;
+ pRefTsc->u64TscScale = ((INT64_C(10000) << 32) / u64TscKHz) << 32;
+ pRefTsc->i64TscOffset = 0;
+
+ rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysTscPage, pRefTsc, sizeof(*pRefTsc));
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64) Seq=%#RU32\n",
+ GCPhysTscPage, pRefTsc->u64TscScale, u64TscKHz, u64TscKHz, pRefTsc->u32TscSequence));
+
+ pRegion->GCPhysPage = GCPhysTscPage;
+ pRegion->fMapped = true;
+ TMR3CpuTickParavirtEnable(pVM);
+ }
+ else
+ {
+ LogRelFunc(("GIM: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", rc));
+ rc = VERR_GIM_OPERATION_FAILED;
+ }
+ RTMemFree(pRefTsc);
+ return rc;
+#endif
+}
+
+
+/**
+ * Enables the Hyper-V SIM page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPhysSimPage Where to map the SIM page.
+ */
+VMMR3_INT_DECL(int) gimR3HvEnableSimPage(PVMCPU pVCpu, RTGCPHYS GCPhysSimPage)
+{
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PPDMDEVINSR3 pDevIns = pVM->gim.s.pDevInsR3;
+ AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+ /*
+ * Map the SIMP page at the specified address.
+ */
+ /** @todo this is buggy when large pages are used due to a PGM limitation, see
+ * @bugref{7532}. Instead of the overlay style mapping, we just
+ * rewrite guest memory directly. */
+ AssertCompile(sizeof(g_abRTZero64K) >= GUEST_PAGE_SIZE);
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSimPage, g_abRTZero64K, GUEST_PAGE_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ /** @todo SIM setup. */
+ LogRel(("GIM%u: HyperV: Enabled SIM page at %#RGp\n", pVCpu->idCpu, GCPhysSimPage));
+ }
+ else
+ {
+ LogRelFunc(("GIM%u: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", pVCpu->idCpu, rc));
+ rc = VERR_GIM_OPERATION_FAILED;
+ }
+ return rc;
+}
+
+
+/**
+ * Disables the Hyper-V SIM page.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvDisableSimPage(PVMCPU pVCpu)
+{
+ LogRel(("GIM%u: HyperV: Disabled SIM page\n", pVCpu->idCpu));
+ /** @todo SIM teardown. */
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Disables the Hyper-V TSC page.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvDisableTscPage(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+ if (pRegion->fMapped)
+ {
+#if 0
+ gimR3Mmio2Unmap(pVM, pRegion);
+ Assert(!pRegion->fMapped);
+#else
+ pRegion->fMapped = false;
+#endif
+ LogRel(("GIM: HyperV: Disabled TSC page\n"));
+
+ TMR3CpuTickParavirtDisable(pVM);
+ return VINF_SUCCESS;
+ }
+ return VERR_GIM_PVTSC_NOT_ENABLED;
+}
+
+
+/**
+ * Disables the Hyper-V Hypercall page.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3HvDisableHypercallPage(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+ if (pRegion->fMapped)
+ {
+#if 0
+ gimR3Mmio2Unmap(pVM, pRegion);
+ Assert(!pRegion->fMapped);
+#else
+ pRegion->fMapped = false;
+#endif
+ LogRel(("GIM: HyperV: Disabled Hypercall-page\n"));
+ return VINF_SUCCESS;
+ }
+ return VERR_GIM_HYPERCALLS_NOT_ENABLED;
+}
+
+
+/**
+ * Enables the Hyper-V Hypercall page.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhysHypercallPage Where to map the hypercall page.
+ */
+VMMR3_INT_DECL(int) gimR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage)
+{
+ PPDMDEVINSR3 pDevIns = pVM->gim.s.pDevInsR3;
+ PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+ AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+ if (pRegion->fMapped)
+ {
+ /*
+ * Is it already enabled at the given guest-address?
+ */
+ if (pRegion->GCPhysPage == GCPhysHypercallPage)
+ return VINF_SUCCESS;
+
+ /*
+ * If it's mapped at a different address, unmap the previous address.
+ */
+ int rc2 = gimR3HvDisableHypercallPage(pVM);
+ AssertRC(rc2);
+ }
+
+ /*
+ * Map the hypercall-page at the specified address.
+ */
+ Assert(!pRegion->fMapped);
+
+ /** @todo this is buggy when large pages are used due to a PGM limitation, see
+ * @bugref{7532}. Instead of the overlay style mapping, we just
+ * rewrite guest memory directly. */
+#if 0
+ int rc = gimR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(pRegion->GCPhysPage == GCPhysHypercallPage);
+
+ /*
+ * Patch the hypercall-page.
+ */
+ size_t cbWritten = 0;
+ rc = VMMPatchHypercall(pVM, pRegion->pvPageR3, GUEST_PAGE_SIZE, &cbWritten);
+ if ( RT_SUCCESS(rc)
+ && cbWritten < GUEST_PAGE_SIZE)
+ {
+ uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
+ *pbLast = 0xc3; /* RET */
+
+ /*
+ * Notify VMM that hypercalls are now enabled for all VCPUs.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ VMMHypercallsEnable(pVM->apCpusR3[idCpu]);
+
+ LogRel(("GIM: HyperV: Enabled hypercall page at %#RGp\n", GCPhysHypercallPage));
+ return VINF_SUCCESS;
+ }
+ if (rc == VINF_SUCCESS)
+ rc = VERR_GIM_OPERATION_FAILED;
+ LogRel(("GIM: HyperV: VMMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, cbWritten));
+
+ gimR3Mmio2Unmap(pVM, pRegion);
+ }
+
+ LogRel(("GIM: HyperV: gimR3Mmio2Map failed. rc=%Rrc\n", rc));
+ return rc;
+#else
+ AssertReturn(pRegion->cbRegion == GUEST_PAGE_SIZE, VERR_GIM_IPE_3);
+ void *pvHypercallPage = RTMemAllocZ(GUEST_PAGE_SIZE);
+ if (RT_UNLIKELY(!pvHypercallPage))
+ {
+ LogRelFunc(("Failed to alloc %#x bytes\n", GUEST_PAGE_SIZE));
+ return VERR_NO_MEMORY;
+ }
+
+ /*
+ * Patch the hypercall-page.
+ */
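+ /* GIMQueryHypercallOpcodeBytes writes the hypercall instruction suitable for the
+    host CPU (VMCALL/VMMCALL); the RET appended below lets the guest CALL into the page. */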
+ size_t cbHypercall = 0;
+ int rc = GIMQueryHypercallOpcodeBytes(pVM, pvHypercallPage, GUEST_PAGE_SIZE, &cbHypercall, NULL /*puDisOpcode*/);
+ if ( RT_SUCCESS(rc)
+ && cbHypercall < GUEST_PAGE_SIZE)
+ {
+ uint8_t *pbLast = (uint8_t *)pvHypercallPage + cbHypercall;
+ *pbLast = 0xc3; /* RET */
+
+ rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysHypercallPage, pvHypercallPage, GUEST_PAGE_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ pRegion->GCPhysPage = GCPhysHypercallPage;
+ pRegion->fMapped = true;
+ LogRel(("GIM: HyperV: Enabled hypercall page at %#RGp\n", GCPhysHypercallPage));
+ }
+ else
+ LogRel(("GIM: HyperV: PGMPhysSimpleWriteGCPhys failed during hypercall page setup. rc=%Rrc\n", rc));
+ }
+ else
+ {
+ if (rc == VINF_SUCCESS)
+ rc = VERR_GIM_OPERATION_FAILED;
+ LogRel(("GIM: HyperV: VMMPatchHypercall failed. rc=%Rrc cbHypercall=%u\n", rc, cbHypercall));
+ }
+
+ RTMemFree(pvHypercallPage);
+ return rc;
+#endif
+}
+
+
+/**
+ * Initializes Hyper-V guest hypercall support.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int gimR3HvInitHypercallSupport(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ pHv->pbHypercallIn = (uint8_t *)RTMemAllocZ(GIM_HV_PAGE_SIZE);
+ if (RT_LIKELY(pHv->pbHypercallIn))
+ {
+ pHv->pbHypercallOut = (uint8_t *)RTMemAllocZ(GIM_HV_PAGE_SIZE);
+ if (RT_LIKELY(pHv->pbHypercallOut))
+ return VINF_SUCCESS;
+ RTMemFree(pHv->pbHypercallIn);
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Terminates Hyper-V guest hypercall support.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void gimR3HvTermHypercallSupport(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ RTMemFree(pHv->pbHypercallIn);
+ pHv->pbHypercallIn = NULL;
+
+ RTMemFree(pHv->pbHypercallOut);
+ pHv->pbHypercallOut = NULL;
+}
+
+
+/**
+ * Initializes Hyper-V guest debug support.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int gimR3HvInitDebugSupport(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ if ( (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
+ || pHv->fIsInterfaceVs)
+ {
+ pHv->fDbgEnabled = true;
+ pHv->pvDbgBuffer = RTMemAllocZ(GIM_HV_PAGE_SIZE);
+ if (!pHv->pvDbgBuffer)
+ return VERR_NO_MEMORY;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates Hyper-V guest debug support.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void gimR3HvTermDebugSupport(PVM pVM)
+{
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ if (pHv->pvDbgBuffer)
+ {
+ RTMemFree(pHv->pvDbgBuffer);
+ pHv->pvDbgBuffer = NULL;
+ }
+}
+
+
+/**
+ * Reads data from a debugger connection, asynchronous.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvBuf Where to read the data.
+ * @param cbBuf Size of the read buffer @a pvBuf, must be >= @a cbRead.
+ * @param cbRead Number of bytes to read.
+ * @param pcbRead Where to store how many bytes were really read.
+ * @param cMsTimeout Timeout of the read operation in milliseconds.
+ * @param fUdpPkt Whether the debug data returned in @a pvBuf needs to be
+ * encapsulated in a UDP frame.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvDebugRead(PVM pVM, void *pvBuf, uint32_t cbBuf, uint32_t cbRead, uint32_t *pcbRead,
+ uint32_t cMsTimeout, bool fUdpPkt)
+{
+ NOREF(cMsTimeout); /** @todo implement timeout. */
+ AssertCompile(sizeof(size_t) >= sizeof(uint32_t));
+ AssertReturn(cbBuf >= cbRead, VERR_INVALID_PARAMETER);
+
+ int rc;
+ if (!fUdpPkt)
+ {
+ /*
+ * Read the raw debug data.
+ */
+ size_t cbReallyRead = cbRead;
+ rc = gimR3DebugRead(pVM, pvBuf, &cbReallyRead, gimR3HvDebugBufReadCompleted);
+ *pcbRead = (uint32_t)cbReallyRead;
+ }
+ else
+ {
+ /*
+ * Guest requires UDP encapsulated frames.
+ */
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ rc = VERR_GIM_IPE_1;
+ switch (pHv->enmDbgReply)
+ {
+ case GIMHVDEBUGREPLY_UDP:
+ {
+ size_t cbReallyRead = cbRead;
+ rc = gimR3DebugRead(pVM, pvBuf, &cbReallyRead, gimR3HvDebugBufReadCompleted);
+ if ( RT_SUCCESS(rc)
+ && cbReallyRead > 0)
+ {
+ uint8_t abFrame[sizeof(RTNETETHERHDR) + RTNETIPV4_MIN_LEN + sizeof(RTNETUDP)];
+ if (cbReallyRead + sizeof(abFrame) <= cbBuf)
+ {
+ /*
+ * Windows guests pump ethernet frames over the Hyper-V debug connection as
+ * explained in gimR3HvHypercallPostDebugData(). Here, we reconstruct the packet
+ * with the guest's self-chosen IP address that we saved in pHv->DbgGuestIp4Addr.
+ *
+ * Note! We really need to pass the minimum IPv4 header length. The Windows 10 guest
+ * is -not- happy if we include the IPv4 options field, i.e. using sizeof(RTNETIPV4)
+ * instead of RTNETIPV4_MIN_LEN.
+ */
+ RT_ZERO(abFrame);
+ PRTNETETHERHDR pEthHdr = (PRTNETETHERHDR)&abFrame[0];
+ PRTNETIPV4 pIpHdr = (PRTNETIPV4) (pEthHdr + 1);
+ PRTNETUDP pUdpHdr = (PRTNETUDP) ((uint8_t *)pIpHdr + RTNETIPV4_MIN_LEN);
+
+ /* Ethernet */
+ pEthHdr->EtherType = RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4);
+ /* IPv4 */
+ pIpHdr->ip_v = 4;
+ pIpHdr->ip_hl = RTNETIPV4_MIN_LEN / sizeof(uint32_t);
+ pIpHdr->ip_tos = 0;
+ pIpHdr->ip_len = RT_H2N_U16((uint16_t)cbReallyRead + sizeof(RTNETUDP) + RTNETIPV4_MIN_LEN);
+ pIpHdr->ip_id = 0;
+ pIpHdr->ip_off = 0;
+ pIpHdr->ip_ttl = 255;
+ pIpHdr->ip_p = RTNETIPV4_PROT_UDP;
+ pIpHdr->ip_sum = 0;
+ pIpHdr->ip_src.u = 0;
+ pIpHdr->ip_dst.u = pHv->DbgGuestIp4Addr.u;
+ pIpHdr->ip_sum = RTNetIPv4HdrChecksum(pIpHdr);
+ /* UDP */
+ pUdpHdr->uh_dport = pHv->uUdpGuestSrcPort;
+ pUdpHdr->uh_sport = pHv->uUdpGuestDstPort;
+ pUdpHdr->uh_ulen = RT_H2N_U16_C((uint16_t)cbReallyRead + sizeof(*pUdpHdr));
+
+ /* Make room by moving the payload and prepending the headers. */
+ uint8_t *pbData = (uint8_t *)pvBuf;
+ memmove(pbData + sizeof(abFrame), pbData, cbReallyRead);
+ memcpy(pbData, &abFrame[0], sizeof(abFrame));
+
+ /* Update the adjusted sizes. */
+ cbReallyRead += sizeof(abFrame);
+ }
+ else
+ rc = VERR_BUFFER_UNDERFLOW;
+ }
+ *pcbRead = (uint32_t)cbReallyRead;
+ break;
+ }
+
+ case GIMHVDEBUGREPLY_ARP_REPLY:
+ {
+ uint32_t const cbArpReplyPkt = sizeof(g_abArpReply);
+ if (cbBuf >= cbArpReplyPkt)
+ {
+ memcpy(pvBuf, g_abArpReply, cbArpReplyPkt);
+ rc = VINF_SUCCESS;
+ *pcbRead = cbArpReplyPkt;
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_ARP_REPLY_SENT;
+ }
+ else
+ {
+ rc = VERR_BUFFER_UNDERFLOW;
+ *pcbRead = 0;
+ }
+ break;
+ }
+
+ case GIMHVDEBUGREPLY_DHCP_OFFER:
+ {
+ uint32_t const cbDhcpOfferPkt = sizeof(g_abDhcpOffer);
+ if (cbBuf >= cbDhcpOfferPkt)
+ {
+ memcpy(pvBuf, g_abDhcpOffer, cbDhcpOfferPkt);
+ PRTNETETHERHDR pEthHdr = (PRTNETETHERHDR)pvBuf;
+ PRTNETIPV4 pIpHdr = (PRTNETIPV4) (pEthHdr + 1);
+ PRTNETUDP pUdpHdr = (PRTNETUDP) ((uint8_t *)pIpHdr + RTNETIPV4_MIN_LEN);
+ PRTNETBOOTP pBootpHdr = (PRTNETBOOTP) (pUdpHdr + 1);
+ pBootpHdr->bp_xid = pHv->uDbgBootpXId;
+
+ rc = VINF_SUCCESS;
+ *pcbRead = cbDhcpOfferPkt;
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_DHCP_OFFER_SENT;
+ LogRel(("GIM: HyperV: Debug DHCP offered IP address %RTnaipv4, transaction Id %#x\n", pBootpHdr->bp_yiaddr,
+ RT_N2H_U32(pHv->uDbgBootpXId)));
+ }
+ else
+ {
+ rc = VERR_BUFFER_UNDERFLOW;
+ *pcbRead = 0;
+ }
+ break;
+ }
+
+ case GIMHVDEBUGREPLY_DHCP_ACK:
+ {
+ uint32_t const cbDhcpAckPkt = sizeof(g_abDhcpAck);
+ if (cbBuf >= cbDhcpAckPkt)
+ {
+ memcpy(pvBuf, g_abDhcpAck, cbDhcpAckPkt);
+ PRTNETETHERHDR pEthHdr = (PRTNETETHERHDR)pvBuf;
+ PRTNETIPV4 pIpHdr = (PRTNETIPV4) (pEthHdr + 1);
+ PRTNETUDP pUdpHdr = (PRTNETUDP) ((uint8_t *)pIpHdr + RTNETIPV4_MIN_LEN);
+ PRTNETBOOTP pBootpHdr = (PRTNETBOOTP) (pUdpHdr + 1);
+ pBootpHdr->bp_xid = pHv->uDbgBootpXId;
+
+ rc = VINF_SUCCESS;
+ *pcbRead = cbDhcpAckPkt;
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_DHCP_ACK_SENT;
+ LogRel(("GIM: HyperV: Debug DHCP acknowledged IP address %RTnaipv4, transaction Id %#x\n",
+ pBootpHdr->bp_yiaddr, RT_N2H_U32(pHv->uDbgBootpXId)));
+ }
+ else
+ {
+ rc = VERR_BUFFER_UNDERFLOW;
+ *pcbRead = 0;
+ }
+ break;
+ }
+
+ case GIMHVDEBUGREPLY_ARP_REPLY_SENT:
+ case GIMHVDEBUGREPLY_DHCP_OFFER_SENT:
+ case GIMHVDEBUGREPLY_DHCP_ACK_SENT:
+ {
+ rc = VINF_SUCCESS;
+ *pcbRead = 0;
+ break;
+ }
+
+ default:
+ {
+ AssertMsgFailed(("GIM: HyperV: Invalid/unimplemented debug reply type %u\n", pHv->enmDbgReply));
+ rc = VERR_INTERNAL_ERROR_2;
+ }
+ }
+ Assert(rc != VERR_GIM_IPE_1);
+
+#ifdef DEBUG_ramshankar
+ if ( rc == VINF_SUCCESS
+ && *pcbRead > 0)
+ {
+ RTSOCKET hSocket;
+ int rc2 = RTUdpCreateClientSocket("localhost", 52000, NULL, &hSocket);
+ if (RT_SUCCESS(rc2))
+ {
+ size_t cbTmpWrite = *pcbRead;
+ RTSocketWriteNB(hSocket, pvBuf, *pcbRead, &cbTmpWrite); NOREF(cbTmpWrite);
+ RTSocketClose(hSocket);
+ }
+ }
+#endif
+ }
+
+ return rc;
+}
+
+
+/**
+ * Writes data to the debugger connection, asynchronous.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvData Pointer to the data to be written.
+ * @param cbWrite Size of the write buffer @a pvData.
+ * @param pcbWritten Where to store the number of bytes written.
+ * @param fUdpPkt Whether the debug data in @a pvData is encapsulated in a
+ * UDP frame.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvDebugWrite(PVM pVM, void *pvData, uint32_t cbWrite, uint32_t *pcbWritten, bool fUdpPkt)
+{
+ Assert(cbWrite > 0);
+
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ bool fIgnorePkt = false;
+ uint8_t *pbData = (uint8_t *)pvData;
+ if (fUdpPkt)
+ {
+#ifdef DEBUG_ramshankar
+ RTSOCKET hSocket;
+ int rc2 = RTUdpCreateClientSocket("localhost", 52000, NULL, &hSocket);
+ if (RT_SUCCESS(rc2))
+ {
+ size_t cbTmpWrite = cbWrite;
+ RTSocketWriteNB(hSocket, pbData, cbWrite, &cbTmpWrite); NOREF(cbTmpWrite);
+ RTSocketClose(hSocket);
+ }
+#endif
+ /*
+ * Windows guests send us ethernet frames over the Hyper-V debug connection.
+ * They send DHCP/ARP queries with zeroed-out MAC addresses and require fudging up the
+ * packets somewhere.
+ *
+ * The Microsoft WinDbg debugger talks UDP and thus only expects the actual debug
+ * protocol payload.
+ *
+ * If the guest is configured with the "nodhcp" option it sends ARP queries with
+ * a self-chosen IP and after a couple of attempts of receiving no replies, the guest
+ * picks its own IP address. After this, the guest starts sending the UDP packets
+ * we require. We thus ignore the initial ARP packets until the guest eventually
+ * starts talking UDP. Then we can finally feed the UDP payload over the debug
+ * connection.
+ *
+ * When 'kdvm.dll' is the debug transport in the guest (Windows 7), it doesn't bother
+ * with this DHCP/ARP phase. It starts sending debug data in a UDP frame right away.
+ */
+ if (cbWrite > sizeof(RTNETETHERHDR))
+ {
+ PCRTNETETHERHDR pEtherHdr = (PCRTNETETHERHDR)pbData;
+ if (pEtherHdr->EtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4))
+ {
+ if (cbWrite > sizeof(RTNETETHERHDR) + RTNETIPV4_MIN_LEN + RTNETUDP_MIN_LEN)
+ {
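+ /* Upper bounds for the IPv4 header/packet: leave room for the UDP header and
+    at least one byte of payload. */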
+ size_t const cbMaxIpHdr = cbWrite - sizeof(RTNETETHERHDR) - sizeof(RTNETUDP) - 1;
+ size_t const cbMaxIpPkt = cbWrite - sizeof(RTNETETHERHDR);
+ PCRTNETIPV4 pIp4Hdr = (PCRTNETIPV4)(pbData + sizeof(RTNETETHERHDR));
+ bool const fValidIp4 = RTNetIPv4IsHdrValid(pIp4Hdr, cbMaxIpHdr, cbMaxIpPkt, false /*fChecksum*/);
+ if ( fValidIp4
+ && pIp4Hdr->ip_p == RTNETIPV4_PROT_UDP)
+ {
+ uint32_t const cbIpHdr = pIp4Hdr->ip_hl * 4;
+ uint32_t const cbMaxUdpPkt = cbWrite - sizeof(RTNETETHERHDR) - cbIpHdr;
+ PCRTNETUDP pUdpHdr = (PCRTNETUDP)((uint8_t *)pIp4Hdr + cbIpHdr);
+ if ( pUdpHdr->uh_ulen > RT_H2N_U16(sizeof(RTNETUDP))
+ && pUdpHdr->uh_ulen <= RT_H2N_U16((uint16_t)cbMaxUdpPkt))
+ {
+ /*
+ * Check for DHCP.
+ */
+ bool fBuggyPkt = false;
+ size_t const cbUdpPkt = cbMaxIpPkt - cbIpHdr;
+ if ( pUdpHdr->uh_dport == RT_N2H_U16_C(RTNETIPV4_PORT_BOOTPS)
+ && pUdpHdr->uh_sport == RT_N2H_U16_C(RTNETIPV4_PORT_BOOTPC))
+ {
+ PCRTNETBOOTP pDhcpPkt = (PCRTNETBOOTP)(pUdpHdr + 1);
+ uint8_t bMsgType;
+ if ( cbMaxIpPkt >= cbIpHdr + RTNETUDP_MIN_LEN + RTNETBOOTP_DHCP_MIN_LEN
+ && RTNetIPv4IsDHCPValid(pUdpHdr, pDhcpPkt, cbUdpPkt - sizeof(*pUdpHdr), &bMsgType))
+ {
+ switch (bMsgType)
+ {
+ case RTNET_DHCP_MT_DISCOVER:
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_DHCP_OFFER;
+ pHv->uDbgBootpXId = pDhcpPkt->bp_xid;
+ break;
+ case RTNET_DHCP_MT_REQUEST:
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_DHCP_ACK;
+ pHv->uDbgBootpXId = pDhcpPkt->bp_xid;
+ break;
+ default:
+ LogRelMax(5, ("GIM: HyperV: Debug DHCP MsgType %#x not implemented! Packet dropped\n",
+ bMsgType));
+ break;
+ }
+ fIgnorePkt = true;
+ }
+ else if ( pIp4Hdr->ip_src.u == GIMHV_DEBUGCLIENT_IPV4
+ && pIp4Hdr->ip_dst.u == 0)
+ {
+ /*
+ * Windows 8.1 seems to be sending malformed BOOTP packets at the final stage of the
+ * debugger sequence. It appears that a previously sent DHCP request buffer wasn't cleared
+ * in the guest and it is re-used instead of sending the expected packet with
+ * zero destination and source ports (see below).
+ *
+ * We work around Microsoft's bug here, or at least, I'm classifying it as a bug to
+ * preserve my own sanity, see @bugref{8006#c54}.
+ */
+ fBuggyPkt = true;
+ }
+ }
+
+ if ( ( !pUdpHdr->uh_dport
+ && !pUdpHdr->uh_sport)
+ || fBuggyPkt)
+ {
+ /*
+ * Extract the UDP payload and pass it to the debugger and record the guest IP address.
+ *
+ * Hyper-V sends UDP debugger packets with source and destination port as 0 except in the
+ * aforementioned buggy case. The buggy packet case requires us to remember the ports and
+ * reply to them, otherwise the guest won't receive the replies we sent with port 0.
+ */
+ uint32_t const cbFrameHdr = sizeof(RTNETETHERHDR) + cbIpHdr + sizeof(RTNETUDP);
+ pbData += cbFrameHdr;
+ cbWrite -= cbFrameHdr;
+ pHv->DbgGuestIp4Addr.u = pIp4Hdr->ip_src.u;
+ pHv->uUdpGuestDstPort = pUdpHdr->uh_dport;
+ pHv->uUdpGuestSrcPort = pUdpHdr->uh_sport;
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_UDP;
+ }
+ else
+ {
+ LogFlow(("GIM: HyperV: Ignoring UDP packet SourcePort=%u DstPort=%u\n", pUdpHdr->uh_sport,
+ pUdpHdr->uh_dport));
+ fIgnorePkt = true;
+ }
+ }
+ else
+ {
+ LogFlow(("GIM: HyperV: Ignoring malformed UDP packet. cbMaxUdpPkt=%u UdpPkt.len=%u\n", cbMaxUdpPkt,
+ RT_N2H_U16(pUdpHdr->uh_ulen)));
+ fIgnorePkt = true;
+ }
+ }
+ else
+ {
+ LogFlow(("GIM: HyperV: Ignoring non-IP / non-UDP packet. fValidIp4=%RTbool Proto=%u\n", fValidIp4,
+ pIp4Hdr->ip_p));
+ fIgnorePkt = true;
+ }
+ }
+ else
+ {
+ LogFlow(("GIM: HyperV: Ignoring IPv4 packet; too short to be valid UDP. cbWrite=%u\n", cbWrite));
+ fIgnorePkt = true;
+ }
+ }
+ else if (pEtherHdr->EtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_ARP))
+ {
+ /*
+ * Check for a targeted ARP query.
+ */
+ PCRTNETARPHDR pArpHdr = (PCRTNETARPHDR)(pbData + sizeof(RTNETETHERHDR));
+ if ( pArpHdr->ar_hlen == sizeof(RTMAC)
+ && pArpHdr->ar_plen == sizeof(RTNETADDRIPV4)
+ && pArpHdr->ar_htype == RT_H2N_U16(RTNET_ARP_ETHER)
+ && pArpHdr->ar_ptype == RT_H2N_U16(RTNET_ETHERTYPE_IPV4))
+ {
+ uint16_t uArpOp = pArpHdr->ar_oper;
+ if (uArpOp == RT_H2N_U16_C(RTNET_ARPOP_REQUEST))
+ {
+ PCRTNETARPIPV4 pArpPkt = (PCRTNETARPIPV4)pArpHdr;
+ bool fGratuitous = pArpPkt->ar_spa.u == pArpPkt->ar_tpa.u;
+ if ( !fGratuitous
+ && pArpPkt->ar_spa.u == GIMHV_DEBUGCLIENT_IPV4
+ && pArpPkt->ar_tpa.u == GIMHV_DEBUGSERVER_IPV4)
+ {
+ pHv->enmDbgReply = GIMHVDEBUGREPLY_ARP_REPLY;
+ }
+ }
+ }
+ fIgnorePkt = true;
+ }
+ else
+ {
+ LogFlow(("GIM: HyperV: Ignoring non-IP packet. Ethertype=%#x\n", RT_N2H_U16(pEtherHdr->EtherType)));
+ fIgnorePkt = true;
+ }
+ }
+ }
+
+ if (!fIgnorePkt)
+ {
+ AssertCompile(sizeof(size_t) >= sizeof(uint32_t));
+ size_t cbWriteBuf = cbWrite;
+ int rc = gimR3DebugWrite(pVM, pbData, &cbWriteBuf);
+ if ( RT_SUCCESS(rc)
+ && cbWriteBuf == cbWrite)
+ *pcbWritten = (uint32_t)cbWriteBuf;
+ else
+ *pcbWritten = 0;
+ }
+ else
+ *pcbWritten = cbWrite;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Performs the HvPostDebugData hypercall.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param prcHv Where to store the result of the hypercall operation.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvHypercallPostDebugData(PVM pVM, int *prcHv)
+{
+ AssertPtr(pVM);
+ AssertPtr(prcHv);
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ int rcHv = GIM_HV_STATUS_OPERATION_DENIED;
+
+ /*
+ * Grab the parameters.
+ */
+ PGIMHVDEBUGPOSTIN pIn = (PGIMHVDEBUGPOSTIN)pHv->pbHypercallIn;
+ AssertPtrReturn(pIn, VERR_GIM_IPE_1);
+ uint32_t cbWrite = pIn->cbWrite;
+ uint32_t fFlags = pIn->fFlags;
+ uint8_t *pbData = ((uint8_t *)pIn) + sizeof(GIMHVDEBUGPOSTIN); /* Payload follows the input header; not sizeof(PGIMHVDEBUGPOSTIN). */
+
+ PGIMHVDEBUGPOSTOUT pOut = (PGIMHVDEBUGPOSTOUT)pHv->pbHypercallOut;
+
+ /*
+ * Perform the hypercall.
+ */
+#if 0
+ /* Currently disabled as Windows 10 guest passes us undocumented flags. */
+ if (fFlags & ~GIM_HV_DEBUG_POST_OPTIONS_MASK)
+ rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
+#else
+ RT_NOREF1(fFlags);
+#endif
+ if (cbWrite > GIM_HV_DEBUG_MAX_DATA_SIZE)
+ rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
+ else if (!cbWrite)
+ {
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ pOut->cbPending = 0;
+ }
+ else if (cbWrite > 0)
+ {
+ uint32_t cbWritten = 0;
+ int rc2 = gimR3HvDebugWrite(pVM, pbData, cbWrite, &cbWritten, pHv->fIsVendorMsHv /*fUdpPkt*/);
+ if ( RT_SUCCESS(rc2)
+ && cbWritten == cbWrite)
+ {
+ pOut->cbPending = 0;
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ }
+ else
+ rcHv = GIM_HV_STATUS_INSUFFICIENT_BUFFER;
+ }
+
+ /*
+ * Update the guest memory with result.
+ */
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, pHv->GCPhysHypercallOut, pHv->pbHypercallOut, sizeof(GIMHVDEBUGPOSTOUT));
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(10, ("GIM: HyperV: HvPostDebugData failed to update guest memory. rc=%Rrc\n", rc));
+ rc = VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED;
+ }
+ else
+ Assert(rc == VINF_SUCCESS);
+
+ *prcHv = rcHv;
+ return rc;
+}
+
+
+/**
+ * Performs the HvRetrieveDebugData hypercall.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param prcHv Where to store the result of the hypercall operation.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvHypercallRetrieveDebugData(PVM pVM, int *prcHv)
+{
+ AssertPtr(pVM);
+ AssertPtr(prcHv);
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+ int rcHv = GIM_HV_STATUS_OPERATION_DENIED;
+
+ /*
+ * Grab the parameters.
+ */
+ PGIMHVDEBUGRETRIEVEIN pIn = (PGIMHVDEBUGRETRIEVEIN)pHv->pbHypercallIn;
+ AssertPtrReturn(pIn, VERR_GIM_IPE_1);
+ uint32_t cbRead = pIn->cbRead;
+ uint32_t fFlags = pIn->fFlags;
+ uint64_t uTimeout = pIn->u64Timeout;
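+ /* The guest's timeout is presumably in 100ns units (the Hyper-V TLFS time unit),
+    hence the multiply by 100 before converting nanoseconds to milliseconds. */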
+ uint32_t cMsTimeout = (fFlags & GIM_HV_DEBUG_RETREIVE_LOOP) ? (uTimeout * 100) / RT_NS_1MS_64 : 0;
+
+ PGIMHVDEBUGRETRIEVEOUT pOut = (PGIMHVDEBUGRETRIEVEOUT)pHv->pbHypercallOut;
+ AssertPtrReturn(pOut, VERR_GIM_IPE_2);
+ uint32_t *pcbReallyRead = &pOut->cbRead;
+ uint32_t *pcbRemainingRead = &pOut->cbRemaining;
+ void *pvData = ((uint8_t *)pOut) + sizeof(GIMHVDEBUGRETRIEVEOUT);
+
+ /*
+ * Perform the hypercall.
+ */
+ *pcbReallyRead = 0;
+ *pcbRemainingRead = cbRead;
+#if 0
+ /* Currently disabled as Windows 10 guest passes us undocumented flags. */
+ if (fFlags & ~GIM_HV_DEBUG_RETREIVE_OPTIONS_MASK)
+ rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
+#endif
+ if (cbRead > GIM_HV_DEBUG_MAX_DATA_SIZE)
+ rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
+ else if (fFlags & GIM_HV_DEBUG_RETREIVE_TEST_ACTIVITY)
+ rcHv = GIM_HV_STATUS_SUCCESS; /** @todo implement this. */
+ else if (!cbRead)
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ else if (cbRead > 0)
+ {
+ int rc2 = gimR3HvDebugRead(pVM, pvData, GIM_HV_PAGE_SIZE, cbRead, pcbReallyRead, cMsTimeout,
+ pHv->fIsVendorMsHv /*fUdpPkt*/);
+ Assert(*pcbReallyRead <= cbRead);
+ if ( RT_SUCCESS(rc2)
+ && *pcbReallyRead > 0)
+ {
+ *pcbRemainingRead = cbRead - *pcbReallyRead;
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ }
+ else
+ rcHv = GIM_HV_STATUS_NO_DATA;
+ }
+
+ /*
+ * Update the guest memory with result.
+ */
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, pHv->GCPhysHypercallOut, pHv->pbHypercallOut,
+ sizeof(GIMHVDEBUGRETRIEVEOUT) + *pcbReallyRead);
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(10, ("GIM: HyperV: HvRetrieveDebugData failed to update guest memory. rc=%Rrc\n", rc));
+ rc = VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED;
+ }
+ else
+ Assert(rc == VINF_SUCCESS);
+
+ *prcHv = rcHv;
+ return rc;
+}
+
+
+/**
+ * Performs the HvExtCallQueryCapabilities extended hypercall.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param prcHv Where to store the result of the hypercall operation.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvHypercallExtQueryCap(PVM pVM, int *prcHv)
+{
+ AssertPtr(pVM);
+ AssertPtr(prcHv);
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+
+ /*
+ * Grab the parameters.
+ */
+ PGIMHVEXTQUERYCAP pOut = (PGIMHVEXTQUERYCAP)pHv->pbHypercallOut;
+
+ /*
+ * Perform the hypercall.
+ */
+ pOut->fCapabilities = GIM_HV_EXT_HYPERCALL_CAP_ZERO_MEM;
+
+ /*
+ * Update the guest memory with result.
+ */
+ int rcHv;
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, pHv->GCPhysHypercallOut, pHv->pbHypercallOut, sizeof(GIMHVEXTQUERYCAP));
+ if (RT_SUCCESS(rc))
+ {
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ LogRel(("GIM: HyperV: Queried extended hypercall capabilities %#RX64 at %#RGp\n", pOut->fCapabilities,
+ pHv->GCPhysHypercallOut));
+ }
+ else
+ {
+ rcHv = GIM_HV_STATUS_OPERATION_DENIED;
+ LogRelMax(10, ("GIM: HyperV: HvHypercallExtQueryCap failed to update guest memory. rc=%Rrc\n", rc));
+ rc = VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED;
+ }
+
+ *prcHv = rcHv;
+ return rc;
+}
+
+
+/**
+ * Performs the HvExtCallGetBootZeroedMemory extended hypercall.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param prcHv Where to store the result of the hypercall operation.
+ *
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) gimR3HvHypercallExtGetBootZeroedMem(PVM pVM, int *prcHv)
+{
+ AssertPtr(pVM);
+ AssertPtr(prcHv);
+ PGIMHV pHv = &pVM->gim.s.u.Hv;
+
+ /*
+ * Grab the parameters.
+ */
+ PGIMHVEXTGETBOOTZEROMEM pOut = (PGIMHVEXTGETBOOTZEROMEM)pHv->pbHypercallOut;
+
+ /*
+ * Perform the hypercall.
+ */
+ uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
+ pOut->cPages = 0;
+ for (uint32_t iRange = 0; iRange < cRanges; iRange++)
+ {
+ RTGCPHYS GCPhysStart;
+ RTGCPHYS GCPhysEnd;
+ int rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysEnd, NULL /* pszDesc */, NULL /* fIsMmio */);
+ if (RT_FAILURE(rc))
+ {
+ LogRelMax(10, ("GIM: HyperV: HvHypercallExtGetBootZeroedMem: PGMR3PhysGetRange failed for iRange(%u) rc=%Rrc\n",
+ iRange, rc));
+ *prcHv = GIM_HV_STATUS_OPERATION_DENIED;
+ return rc;
+ }
+
+ RTGCPHYS const cbRange = RT_ALIGN(GCPhysEnd - GCPhysStart + 1, GUEST_PAGE_SIZE);
+ pOut->cPages += cbRange >> GIM_HV_PAGE_SHIFT;
+ if (iRange == 0)
+ pOut->GCPhysStart = GCPhysStart;
+ }
+
+ /*
+ * Update the guest memory with result.
+ */
+ int rcHv;
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, pHv->GCPhysHypercallOut, pHv->pbHypercallOut, sizeof(GIMHVEXTGETBOOTZEROMEM));
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("GIM: HyperV: Queried boot zeroed guest memory range (starting at %#RGp spanning %u pages) at %#RGp\n",
+ pOut->GCPhysStart, pOut->cPages, pHv->GCPhysHypercallOut));
+ rcHv = GIM_HV_STATUS_SUCCESS;
+ }
+ else
+ {
+ rcHv = GIM_HV_STATUS_OPERATION_DENIED;
+ LogRelMax(10, ("GIM: HyperV: HvHypercallExtGetBootZeroedMem failed to update guest memory. rc=%Rrc\n", rc));
+ rc = VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED;
+ }
+
+ *prcHv = rcHv;
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/GIMKvm.cpp b/src/VBox/VMM/VMMR3/GIMKvm.cpp
new file mode 100644
index 00000000..865e7268
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GIMKvm.cpp
@@ -0,0 +1,630 @@
+/* $Id: GIMKvm.cpp $ */
+/** @file
+ * GIM - Guest Interface Manager, KVM implementation.
+ */
+
+/*
+ * Copyright (C) 2015-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GIM
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/em.h>
+#include "GIMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/disopcode.h>
+#include <VBox/err.h>
+#include <VBox/version.h>
+
+#include <iprt/asm-math.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <iprt/mem.h>
+
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+/**
+ * GIM KVM saved-state version.
+ */
+#define GIM_KVM_SAVED_STATE_VERSION UINT32_C(1)
+
+/**
+ * VBox internal struct. to pass to the EMT rendezvous callback while enabling
+ * the KVM wall-clock.
+ */
+typedef struct KVMWALLCLOCKINFO
+{
+ /** Guest physical address of the wall-clock struct. */
+ RTGCPHYS GCPhysWallClock;
+} KVMWALLCLOCKINFO;
+/** Pointer to the wall-clock info. struct. */
+typedef KVMWALLCLOCKINFO *PKVMWALLCLOCKINFO;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_STATISTICS
+# define GIMKVM_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Gim, kCpumMsrWrFn_Gim, 0, 0, 0, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
+#else
+# define GIMKVM_MSRRANGE(a_uFirst, a_uLast, a_szName) \
+ { (a_uFirst), (a_uLast), kCpumMsrRdFn_Gim, kCpumMsrWrFn_Gim, 0, 0, 0, 0, 0, a_szName }
+#endif
+
+/**
+ * Array of MSR ranges supported by KVM.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Kvm[] =
+{
+ GIMKVM_MSRRANGE(MSR_GIM_KVM_RANGE0_FIRST, MSR_GIM_KVM_RANGE0_LAST, "KVM range 0"),
+ GIMKVM_MSRRANGE(MSR_GIM_KVM_RANGE1_FIRST, MSR_GIM_KVM_RANGE1_LAST, "KVM range 1")
+};
+#undef GIMKVM_MSRRANGE
+
+
+/**
+ * Updates the KVM VCPU system-time structure in guest memory.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ *
+ * @remarks This must be called after the system time MSR value has been updated.
+ */
+static int gimR3KvmUpdateSystemTime(PVM pVM, PVMCPU pVCpu)
+{
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+ PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;
+
+ /*
+ * Validate the MSR has the enable bit and the guest's system time struct. address.
+ */
+ Assert(MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr));
+ if (!PGMPhysIsGCPhysNormal(pVM, pKvmCpu->GCPhysSystemTime))
+ {
+ LogRel(("GIM: KVM: VCPU%3d: Invalid physical addr requested for mapping system-time struct. GCPhysSystemTime=%#RGp\n",
+ pVCpu->idCpu, pKvmCpu->GCPhysSystemTime));
+ return VERR_GIM_OPERATION_FAILED;
+ }
+
+ VMSTATE const enmVMState = pVM->enmVMState;
+ bool const fRunning = VMSTATE_IS_RUNNING(enmVMState);
+ Assert(!(pKvmCpu->u32SystemTimeVersion & UINT32_C(1)));
+
+ /*
+ * Construct a system-time struct.
+ */
+ GIMKVMSYSTEMTIME SystemTime;
+ RT_ZERO(SystemTime);
+ SystemTime.u32Version = pKvmCpu->u32SystemTimeVersion + !!fRunning;
+ SystemTime.u64NanoTS = pKvmCpu->uVirtNanoTS;
+ SystemTime.u64Tsc = pKvmCpu->uTsc;
+ SystemTime.fFlags = pKvmCpu->fSystemTimeFlags | GIM_KVM_SYSTEM_TIME_FLAGS_TSC_STABLE;
+
+ /*
+ * How the guest calculates the system time (nanoseconds):
+ *
+ * tsc = rdtsc - SysTime.u64Tsc
+ * if (SysTime.i8TscShift >= 0)
+ * tsc <<= i8TscShift;
+ * else
+ * tsc >>= -i8TscShift;
+ * time = ((tsc * SysTime.u32TscScale) >> 32) + SysTime.u64NanoTS
+ */
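+ /*
+ * Normalize the TSC frequency into the (1..2] GHz range so that the computed
+ * 32-bit scale factor, (RT_NS_1SEC << 32) / freq, lands in [2^31, 2^32).
+ */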
+ uint64_t u64TscFreq = pKvm->cTscTicksPerSecond;
+ SystemTime.i8TscShift = 0;
+ while (u64TscFreq > 2 * RT_NS_1SEC_64)
+ {
+ u64TscFreq >>= 1;
+ SystemTime.i8TscShift--;
+ }
+ uint32_t uTscFreqLo = (uint32_t)u64TscFreq;
+ while (uTscFreqLo <= RT_NS_1SEC)
+ {
+ uTscFreqLo <<= 1;
+ SystemTime.i8TscShift++;
+ }
+ SystemTime.u32TscScale = ASMDivU64ByU32RetU32(RT_NS_1SEC_64 << 32, uTscFreqLo);
+
+ /*
+ * For informational purposes, back-calculate the exact TSC frequency the guest will see.
+ * Note that the frequency is in kHz, not Hz, since that's what Linux uses.
+ */
+ uint64_t uTscKHz = (RT_NS_1MS_64 << 32) / SystemTime.u32TscScale;
+ if (SystemTime.i8TscShift < 0)
+ uTscKHz <<= -SystemTime.i8TscShift;
+ else
+ uTscKHz >>= SystemTime.i8TscShift;
+
+ /*
+ * Update guest memory with the system-time struct.
+ *
+ * We update the struct with an incremented, odd version field to indicate to the guest
+ * that the memory is being updated concurrently by the host; the guest should discard
+ * any data from this struct when it reads an odd version.
+ *
+ * When the VM is not running there are no concurrent readers, so we skip this
+ * two-step update and write the final (even) version directly.
+ */
+ if (fRunning)
+ Assert(SystemTime.u32Version & UINT32_C(1));
+ else
+ Assert(!(SystemTime.u32Version & UINT32_C(1)));
+
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, pKvmCpu->GCPhysSystemTime, &SystemTime, sizeof(GIMKVMSYSTEMTIME));
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("GIM: KVM: VCPU%3d: Enabled system-time struct. at %#RGp - u32TscScale=%#RX32 i8TscShift=%d uVersion=%#RU32 "
+ "fFlags=%#x uTsc=%#RX64 uVirtNanoTS=%#RX64 TscKHz=%RU64\n", pVCpu->idCpu, pKvmCpu->GCPhysSystemTime,
+ SystemTime.u32TscScale, SystemTime.i8TscShift, SystemTime.u32Version + !!fRunning, SystemTime.fFlags,
+ pKvmCpu->uTsc, pKvmCpu->uVirtNanoTS, uTscKHz));
+ TMR3CpuTickParavirtEnable(pVM);
+ }
+ else
+ {
+ LogRel(("GIM: KVM: VCPU%3d: Failed to write system-time struct. at %#RGp. rc=%Rrc\n", pVCpu->idCpu,
+ pKvmCpu->GCPhysSystemTime, rc));
+ }
+
+ if (fRunning)
+ {
+ ++SystemTime.u32Version;
+ Assert(!(SystemTime.u32Version & UINT32_C(1)));
+ rc = PGMPhysSimpleWriteGCPhys(pVM, pKvmCpu->GCPhysSystemTime + RT_UOFFSETOF(GIMKVMSYSTEMTIME, u32Version),
+ &SystemTime.u32Version, sizeof(SystemTime.u32Version));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("GIM: KVM: VCPU%3d: Failed to write system-time struct. while updating version field at %#RGp. rc=%Rrc\n",
+ pVCpu->idCpu, pKvmCpu->GCPhysSystemTime, rc));
+ return rc;
+ }
+
+ /* Update the version so our next write will start with an even value. */
+ pKvmCpu->u32SystemTimeVersion += 2;
+ }
+
+ return rc;
+}
+
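+/*
+ * Illustrative sketch (not part of this file): a guest-side reader of the
+ * system-time struct pairs the version (seqlock) check with the scale/shift
+ * math documented in gimR3KvmUpdateSystemTime above.  The SysTime mapping and
+ * the Rdtsc() helper are assumptions for the example; real guests use a wider
+ * multiply for the scaling step.
+ *
+ *     uint64_t GuestReadNanoTS(void)
+ *     {
+ *         uint32_t uVersion;
+ *         uint64_t uNanoTS;
+ *         do
+ *         {
+ *             uVersion = SysTime.u32Version;           /* Odd => host update in progress. */
+ *             uint64_t uTsc = Rdtsc() - SysTime.u64Tsc;
+ *             if (SysTime.i8TscShift >= 0)
+ *                 uTsc <<= SysTime.i8TscShift;
+ *             else
+ *                 uTsc >>= -SysTime.i8TscShift;
+ *             uNanoTS = ((uTsc * SysTime.u32TscScale) >> 32) + SysTime.u64NanoTS;
+ *         } while ((uVersion & 1) || uVersion != SysTime.u32Version);
+ *         return uNanoTS;
+ *     }
+ */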
+
+/**
+ * Initializes the KVM GIM provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3KvmInit(PVM pVM)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_KVM, VERR_INTERNAL_ERROR_5);
+
+ int rc;
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+
+ /*
+ * Determine interface capabilities based on the version.
+ */
+ if (!pVM->gim.s.u32Version)
+ {
+ /* Basic features. */
+ pKvm->uBaseFeat = 0
+ | GIM_KVM_BASE_FEAT_CLOCK_OLD
+ //| GIM_KVM_BASE_FEAT_NOP_IO_DELAY
+ //| GIM_KVM_BASE_FEAT_MMU_OP
+ | GIM_KVM_BASE_FEAT_CLOCK
+ //| GIM_KVM_BASE_FEAT_ASYNC_PF
+ //| GIM_KVM_BASE_FEAT_STEAL_TIME
+ //| GIM_KVM_BASE_FEAT_PV_EOI
+ | GIM_KVM_BASE_FEAT_PV_UNHALT
+ ;
+ /* Rest of the features are determined in gimR3KvmInitCompleted(). */
+ }
+
+ /*
+ * Expose HVP (Hypervisor Present) bit to the guest.
+ */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
+
+ /*
+ * Modify the standard hypervisor leaves for KVM.
+ */
+ CPUMCPUIDLEAF HyperLeaf;
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000000);
+ HyperLeaf.uEax = UINT32_C(0x40000001); /* Minimum value for KVM is 0x40000001. */
+ HyperLeaf.uEbx = 0x4B4D564B; /* 'KVMK' */
+ HyperLeaf.uEcx = 0x564B4D56; /* 'VMKV' */
+ HyperLeaf.uEdx = 0x0000004D; /* 'M\0\0\0' - EBX,ECX,EDX spell out "KVMKVMKVM\0\0\0". */
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Add KVM specific leaves.
+ */
+ HyperLeaf.uLeaf = UINT32_C(0x40000001);
+ HyperLeaf.uEax = pKvm->uBaseFeat;
+ HyperLeaf.uEbx = 0; /* Reserved */
+ HyperLeaf.uEcx = 0; /* Reserved */
+ HyperLeaf.uEdx = 0; /* Reserved */
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Insert all MSR ranges of KVM.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aMsrRanges_Kvm); i++)
+ {
+ rc = CPUMR3MsrRangesInsert(pVM, &g_aMsrRanges_Kvm[i]);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Set up hypercall and #UD handling.
+ * Note! We always need to trap VMCALL/VMMCALL hypercalls using #UDs for raw-mode VMs.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ EMSetHypercallInstructionsEnabled(pVM->apCpusR3[idCpu], true);
+
+ size_t cbHypercall = 0;
+ rc = GIMQueryHypercallOpcodeBytes(pVM, pKvm->abOpcodeNative, sizeof(pKvm->abOpcodeNative), &cbHypercall, &pKvm->uOpcodeNative);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelReturn(cbHypercall == sizeof(pKvm->abOpcodeNative), VERR_GIM_IPE_1);
+ pKvm->fTrapXcptUD = pKvm->uOpcodeNative != OP_VMCALL;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes remaining bits of the KVM provider.
+ *
+ * This is called after initializing HM and almost all other VMM components.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3KvmInitCompleted(PVM pVM)
+{
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+ pKvm->cTscTicksPerSecond = TMCpuTicksPerSecond(pVM);
+
+ if (TMR3CpuTickIsFixedRateMonotonic(pVM, true /* fWithParavirtEnabled */))
+ {
+ /** @todo We might want to consider just enabling this bit *always*. As far
+ * as I can see in the Linux guest, the "TSC_STABLE" bit is only
+ * translated as a "monotonic" bit, and even on async systems we
+ * -should- be reporting a strictly monotonic TSC to the guest. */
+ pKvm->uBaseFeat |= GIM_KVM_BASE_FEAT_TSC_STABLE;
+
+ CPUMCPUIDLEAF HyperLeaf;
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000001);
+ HyperLeaf.uEax = pKvm->uBaseFeat;
+ HyperLeaf.uEbx = 0;
+ HyperLeaf.uEcx = 0;
+ HyperLeaf.uEdx = 0;
+ int rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the KVM GIM provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3KvmTerm(PVM pVM)
+{
+ gimR3KvmReset(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This resets KVM provider MSRs and unmaps whatever KVM regions that
+ * the guest may have mapped.
+ *
+ * This is called when the VM is being reset.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(void) gimR3KvmReset(PVM pVM)
+{
+ VM_ASSERT_EMT0(pVM);
+ LogRel(("GIM: KVM: Resetting MSRs\n"));
+
+ /*
+ * Reset MSRs.
+ */
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+ pKvm->u64WallClockMsr = 0;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PGIMKVMCPU pKvmCpu = &pVM->apCpusR3[idCpu]->gim.s.u.KvmCpu;
+ pKvmCpu->u64SystemTimeMsr = 0;
+ pKvmCpu->u32SystemTimeVersion = 0;
+ pKvmCpu->fSystemTimeFlags = 0;
+ pKvmCpu->GCPhysSystemTime = 0;
+ pKvmCpu->uTsc = 0;
+ pKvmCpu->uVirtNanoTS = 0;
+ }
+}
+
+
+/**
+ * KVM state-save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+VMMR3_INT_DECL(int) gimR3KvmSave(PVM pVM, PSSMHANDLE pSSM)
+{
+ PCGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+
+ /*
+ * Save the KVM SSM version.
+ */
+ SSMR3PutU32(pSSM, GIM_KVM_SAVED_STATE_VERSION);
+
+ /*
+ * Save per-VCPU data.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PCGIMKVMCPU pKvmCpu = &pVM->apCpusR3[idCpu]->gim.s.u.KvmCpu;
+ SSMR3PutU64(pSSM, pKvmCpu->u64SystemTimeMsr);
+ SSMR3PutU64(pSSM, pKvmCpu->uTsc);
+ SSMR3PutU64(pSSM, pKvmCpu->uVirtNanoTS);
+ SSMR3PutGCPhys(pSSM, pKvmCpu->GCPhysSystemTime);
+ SSMR3PutU32(pSSM, pKvmCpu->u32SystemTimeVersion);
+ SSMR3PutU8(pSSM, pKvmCpu->fSystemTimeFlags);
+ }
+
+ /*
+ * Save per-VM data.
+ */
+ SSMR3PutU64(pSSM, pKvm->u64WallClockMsr);
+ return SSMR3PutU32(pSSM, pKvm->uBaseFeat);
+}
+
+
+/**
+ * KVM state-load operation, final pass.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+VMMR3_INT_DECL(int) gimR3KvmLoad(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Load the KVM SSM version first.
+ */
+ uint32_t uKvmSavedStateVersion;
+ int rc = SSMR3GetU32(pSSM, &uKvmSavedStateVersion);
+ AssertRCReturn(rc, rc);
+ if (uKvmSavedStateVersion != GIM_KVM_SAVED_STATE_VERSION)
+ return SSMR3SetLoadError(pSSM, VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION, RT_SRC_POS,
+ N_("Unsupported KVM saved-state version %u (expected %u)."),
+ uKvmSavedStateVersion, GIM_KVM_SAVED_STATE_VERSION);
+
+ /*
+ * Update the TSC frequency from TM.
+ */
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+ pKvm->cTscTicksPerSecond = TMCpuTicksPerSecond(pVM);
+
+ /*
+ * Load per-VCPU data.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;
+
+ SSMR3GetU64(pSSM, &pKvmCpu->u64SystemTimeMsr);
+ SSMR3GetU64(pSSM, &pKvmCpu->uTsc);
+ SSMR3GetU64(pSSM, &pKvmCpu->uVirtNanoTS);
+ SSMR3GetGCPhys(pSSM, &pKvmCpu->GCPhysSystemTime);
+ SSMR3GetU32(pSSM, &pKvmCpu->u32SystemTimeVersion);
+ rc = SSMR3GetU8(pSSM, &pKvmCpu->fSystemTimeFlags);
+ AssertRCReturn(rc, rc);
+
+ /* Enable the system-time struct. if necessary. */
+ /** @todo update guest struct only if cTscTicksPerSecond doesn't match host
+ * anymore. */
+ if (MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr))
+ {
+ Assert(!TMVirtualIsTicking(pVM)); /* paranoia. */
+ Assert(!TMCpuTickIsTicking(pVCpu));
+ gimR3KvmUpdateSystemTime(pVM, pVCpu);
+ }
+ }
+
+ /*
+ * Load per-VM data.
+ */
+ SSMR3GetU64(pSSM, &pKvm->u64WallClockMsr);
+ rc = SSMR3GetU32(pSSM, &pKvm->uBaseFeat);
+ AssertRCReturn(rc, rc);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Disables the KVM system-time struct.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3KvmDisableSystemTime(PVM pVM)
+{
+ TMR3CpuTickParavirtDisable(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ * Worker for gimR3KvmEnableWallClock}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) gimR3KvmEnableWallClockCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ PKVMWALLCLOCKINFO pWallClockInfo = (PKVMWALLCLOCKINFO)pvUser; AssertPtr(pWallClockInfo);
+ RTGCPHYS GCPhysWallClock = pWallClockInfo->GCPhysWallClock;
+ RT_NOREF1(pVCpu);
+
+ /*
+ * Read the wall-clock version (sequence) from the guest.
+ */
+ uint32_t uVersion;
+ Assert(PGMPhysIsGCPhysNormal(pVM, GCPhysWallClock));
+ int rc = PGMPhysSimpleReadGCPhys(pVM, &uVersion, GCPhysWallClock, sizeof(uVersion));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("GIM: KVM: Failed to read wall-clock struct. version at %#RGp. rc=%Rrc\n", GCPhysWallClock, rc));
+ return rc;
+ }
+
+ /*
+ * Increment the version, ensuring the result is even.
+ */
+ /* faster: uVersion = (uVersion | 1) + 1; */
+ if (!(uVersion & 1))
+ ++uVersion;
+ ++uVersion;
+
+ /*
+ * Update wall-clock guest struct. with UTC information.
+ */
+ RTTIMESPEC TimeSpec;
+ int32_t iSec;
+ int32_t iNano;
+ TMR3UtcNow(pVM, &TimeSpec);
+ RTTimeSpecGetSecondsAndNano(&TimeSpec, &iSec, &iNano);
+
+ GIMKVMWALLCLOCK WallClock;
+ RT_ZERO(WallClock);
+ AssertCompile(sizeof(uVersion) == sizeof(WallClock.u32Version));
+ WallClock.u32Version = uVersion;
+ WallClock.u32Sec = iSec;
+ WallClock.u32Nano = iNano;
+
+ /*
+ * Write out the wall-clock struct. to guest memory.
+ */
+ Assert(!(WallClock.u32Version & 1));
+ rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysWallClock, &WallClock, sizeof(GIMKVMWALLCLOCK));
+ if (RT_SUCCESS(rc))
+ LogRel(("GIM: KVM: Enabled wall-clock struct. at %#RGp - u32Sec=%u u32Nano=%u uVersion=%#RU32\n", GCPhysWallClock,
+ WallClock.u32Sec, WallClock.u32Nano, WallClock.u32Version));
+ else
+ LogRel(("GIM: KVM: Failed to write wall-clock struct. at %#RGp. rc=%Rrc\n", GCPhysWallClock, rc));
+ return rc;
+}
+
+
+/**
+ * Enables the KVM wall-clock structure.
+ *
+ * Since the wall-clock can be read by any VCPU and is a global struct. in
+ * guest memory, we do an EMT rendezvous here to be on the safe side. The
+ * alternative is to use an MMIO2 region and use the WallClock.u32Version field
+ * for transactional updates. However, since this MSR is rarely written to
+ * (typically once during bootup), it's currently not a performance issue,
+ * especially since we're already in ring-3. If we really wanted better
+ * performance in this code path, we should do it in ring-0 with a
+ * transactional update while making sure there is only one writer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhysWallClock Where the guest wall-clock structure is located.
+ *
+ * @remarks Don't do any release assertions here, these can be triggered by
+ * guest R0 code.
+ */
+VMMR3_INT_DECL(int) gimR3KvmEnableWallClock(PVM pVM, RTGCPHYS GCPhysWallClock)
+{
+ KVMWALLCLOCKINFO WallClockInfo;
+ WallClockInfo.GCPhysWallClock = GCPhysWallClock;
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, gimR3KvmEnableWallClockCallback, &WallClockInfo);
+}
+
+
+/**
+ * Enables the KVM system time structure.
+ *
+ * This can be done concurrently because the guest memory being updated is per-VCPU
+ * and the struct has a "version" field which is incremented before/after altering
+ * guest memory so the guest can detect concurrent updates from the host. Hence
+ * this is not done in an EMT rendezvous. It -is- done in ring-3 since we end up
+ * calling into ring-3-only TM code.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param uMsrSystemTime The system time MSR value being written.
+ */
+VMMR3_INT_DECL(int) gimR3KvmEnableSystemTime(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uMsrSystemTime)
+{
+ Assert(uMsrSystemTime & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
+ PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
+ PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;
+
+ /*
+ * Update the system-time struct.
+ * The system-time structs are usually placed at a different guest address for each VCPU.
+ */
+ pKvmCpu->uTsc = TMCpuTickGetNoCheck(pVCpu);
+ pKvmCpu->uVirtNanoTS = ASMMultU64ByU32DivByU32(pKvmCpu->uTsc, RT_NS_1SEC, pKvm->cTscTicksPerSecond);
+ pKvmCpu->u64SystemTimeMsr = uMsrSystemTime;
+ pKvmCpu->GCPhysSystemTime = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uMsrSystemTime);
+
+ int rc = gimR3KvmUpdateSystemTime(pVM, pVCpu);
+ if (RT_FAILURE(rc))
+ {
+ pKvmCpu->u64SystemTimeMsr = 0;
+ /* We shouldn't throw a #GP(0) here for buggy guests (neither does KVM apparently), see @bugref{8627}. */
+ }
+
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/GIMMinimal.cpp b/src/VBox/VMM/VMMR3/GIMMinimal.cpp
new file mode 100644
index 00000000..48564eca
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GIMMinimal.cpp
@@ -0,0 +1,140 @@
+/* $Id: GIMMinimal.cpp $ */
+/** @file
+ * GIM - Guest Interface Manager, Minimal implementation.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GIM
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/apic.h>
+#include "GIMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+
+/**
+ * Initializes the Minimal provider.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3MinimalInit(PVM pVM)
+{
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_MINIMAL, VERR_INTERNAL_ERROR_5);
+
+ /*
+ * Expose HVP (Hypervisor Present) bit to the guest.
+ */
+ CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
+
+ /*
+ * Insert the hypervisor leaf range.
+ */
+ CPUMCPUIDLEAF HyperLeaf;
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000000);
+ HyperLeaf.uEax = UINT32_C(0x40000010); /* Maximum leaf we implement. */
+ int rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Insert missing zero leaves (you never know what missing leaves are
+ * going to return when read).
+ */
+ RT_ZERO(HyperLeaf);
+ for (uint32_t uLeaf = UINT32_C(0x40000001); uLeaf <= UINT32_C(0x40000010); uLeaf++)
+ {
+ HyperLeaf.uLeaf = uLeaf;
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+ else
+ LogRel(("GIM: Minimal: Failed to insert hypervisor leaf %#RX32. rc=%Rrc\n", HyperLeaf.uLeaf, rc));
+
+ return rc;
+}
+
+
+/**
+ * Initializes remaining bits of the Minimal provider.
+ * This is called after initializing HM and almost all other VMM components.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) gimR3MinimalInitCompleted(PVM pVM)
+{
+ /*
+ * Expose a generic hypervisor-agnostic leaf (originally defined by VMware).
+ * The leaves range from 0x40000010 to 0x400000FF.
+ *
+ * This is done in the init. completed routine as we need PDM to be
+ * initialized (otherwise APICGetTimerFreq() would fail).
+ */
+ CPUMCPUIDLEAF HyperLeaf;
+ int rc = CPUMR3CpuIdGetLeaf(pVM, &HyperLeaf, 0x40000000, 0 /* uSubLeaf */);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(HyperLeaf.uEax >= 0x40000010);
+
+ /*
+ * Add the timing information hypervisor leaf.
+ * Mac OS X uses this to determine the TSC and bus frequencies. See @bugref{7270}.
+ *
+ * EAX - TSC frequency in kHz.
+ * EBX - APIC frequency in kHz.
+ * ECX, EDX - Reserved.
+ */
+ uint64_t uApicFreq;
+ rc = APICGetTimerFreq(pVM, &uApicFreq);
+ AssertLogRelRCReturn(rc, rc);
+
+ RT_ZERO(HyperLeaf);
+ HyperLeaf.uLeaf = UINT32_C(0x40000010);
+ HyperLeaf.uEax = TMCpuTicksPerSecond(pVM) / UINT64_C(1000);
+ HyperLeaf.uEbx = (uApicFreq + 500) / UINT64_C(1000);
+ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ else
+ LogRel(("GIM: Minimal: failed to get hypervisor leaf 0x40000000. rc=%Rrc\n", rc));
+
+ return VINF_SUCCESS;
+}
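+
+/*
+ * Illustrative sketch: a guest could read the timing leaf exposed above with a
+ * plain CPUID; the cpuid() helper here is an assumption, not part of this file.
+ *
+ *     uint32_t uTscKHz, uApicKHz, uEcx, uEdx;
+ *     cpuid(UINT32_C(0x40000010), &uTscKHz, &uApicKHz, &uEcx, &uEdx);
+ *     // uTscKHz = TSC frequency in kHz, uApicKHz = APIC timer frequency in kHz.
+ */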
+
diff --git a/src/VBox/VMM/VMMR3/GMM.cpp b/src/VBox/VMM/VMMR3/GMM.cpp
new file mode 100644
index 00000000..9a8c7f12
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GMM.cpp
@@ -0,0 +1,453 @@
+/* $Id: GMM.cpp $ */
+/** @file
+ * GMM - Global Memory Manager, ring-3 request wrappers.
+ */
+
+/*
+ * Copyright (C) 2008-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GMM
+#include <VBox/vmm/gmm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vmcc.h>
+#include <VBox/sup.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+
+/**
+ * @see GMMR0InitialReservation
+ */
+GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
+ GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
+{
+ if (!SUPR3IsDriverless())
+ {
+ GMMINITIALRESERVATIONREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.cBasePages = cBasePages;
+ Req.cShadowPages = cShadowPages;
+ Req.cFixedPages = cFixedPages;
+ Req.enmPolicy = enmPolicy;
+ Req.enmPriority = enmPriority;
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @see GMMR0UpdateReservation
+ */
+GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
+{
+ if (!SUPR3IsDriverless())
+ {
+ GMMUPDATERESERVATIONREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.cBasePages = cBasePages;
+ Req.cShadowPages = cShadowPages;
+ Req.cFixedPages = cFixedPages;
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Prepares a GMMR0AllocatePages request.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
+ * @param pVM The cross context VM structure.
+ * @param[out] ppReq Where to store the pointer to the request packet.
+ * @param cPages The number of pages to allocate.
+ * @param enmAccount The account to charge.
+ */
+GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
+{
+ uint32_t cb = RT_UOFFSETOF_DYN(GMMALLOCATEPAGESREQ, aPages[cPages]);
+ PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
+ if (!pReq)
+ return VERR_NO_TMP_MEMORY;
+
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = cb;
+ pReq->enmAccount = enmAccount;
+ pReq->cPages = cPages;
+ NOREF(pVM);
+ *ppReq = pReq;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Performs a GMMR0AllocatePages request.
+ *
+ * This will call VMSetError on failure.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
+ */
+GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
+{
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
+ if (RT_SUCCESS(rc))
+ {
+#ifdef LOG_ENABLED
+ for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
+ Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp fZeroed=%d\n",
+ pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys, pReq->aPages[iPage].fZeroed));
+#endif
+ return rc;
+ }
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
+}
+
+
+/**
+ * Cleans up a GMMR0AllocatePages request.
+ * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
+ */
+GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
+{
+ RTMemTmpFree(pReq);
+}
+
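+/*
+ * Illustrative sketch of the intended prepare/perform/cleanup sequence for the
+ * allocation wrappers above.  The caller (PGM in practice) fills in the page
+ * descriptors; error handling and descriptor details are elided here.
+ *
+ *     PGMMALLOCATEPAGESREQ pReq;
+ *     int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ *             pReq->aPages[iPage].idPage = NIL_GMM_PAGEID; // and the physical address hints
+ *         rc = GMMR3AllocatePagesPerform(pVM, pReq);
+ *         GMMR3AllocatePagesCleanup(pReq);
+ *     }
+ */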
+
+/**
+ * Prepares a GMMR0FreePages request.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
+ * @param pVM The cross context VM structure.
+ * @param[out] ppReq Where to store the pointer to the request packet.
+ * @param cPages The number of pages to free.
+ * @param enmAccount The account to charge.
+ */
+GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
+{
+ uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
+ PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
+ if (!pReq)
+ return VERR_NO_TMP_MEMORY;
+
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = cb;
+ pReq->enmAccount = enmAccount;
+ pReq->cPages = cPages;
+ NOREF(pVM);
+ *ppReq = pReq;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Re-prepares a GMMR0FreePages request.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pReq A request buffer previously returned by
+ * GMMR3FreePagesPrepare().
+ * @param cPages The number of pages originally passed to
+ * GMMR3FreePagesPrepare().
+ * @param enmAccount The account to charge.
+ */
+GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
+{
+ Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
+ pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
+ pReq->enmAccount = enmAccount;
+ pReq->cPages = cPages;
+ NOREF(pVM);
+}
+
+
+/**
+ * Performs a GMMR0FreePages request.
+ * This will call VMSetError on failure.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
+ * @param cActualPages The number of pages actually freed.
+ */
+GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
+{
+ /*
+ * Adjust the request if we ended up with fewer pages than anticipated.
+ */
+ if (cActualPages != pReq->cPages)
+ {
+ AssertReturn(cActualPages < pReq->cPages, VERR_GMM_ACTUAL_PAGES_IPE);
+ if (!cActualPages)
+ return VINF_SUCCESS;
+ pReq->cPages = cActualPages;
+ pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cActualPages]);
+ }
+
+ /*
+ * Do the job.
+ */
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
+ if (RT_SUCCESS(rc))
+ return rc;
+ AssertRC(rc);
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("GMMR0FreePages failed to free %u pages"),
+ pReq->cPages);
+}
+
+
+/**
+ * Cleans up a GMMR0FreePages request.
+ * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
+ */
+GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
+{
+ RTMemTmpFree(pReq);
+}
+
+
+/**
+ * Frees allocated pages, for bailing out on failure.
+ *
+ * This will not call VMSetError on failure but will use AssertLogRel instead.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pAllocReq The allocation request to undo.
+ */
+GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
+{
+ uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
+ PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
+ AssertLogRelReturnVoid(pReq);
+
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = cb;
+ pReq->enmAccount = pAllocReq->enmAccount;
+ pReq->cPages = pAllocReq->cPages;
+ uint32_t iPage = pAllocReq->cPages;
+ while (iPage-- > 0)
+ {
+ Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
+ pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
+ }
+
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
+ AssertLogRelRC(rc);
+
+ RTMemTmpFree(pReq);
+}
+
+
+/**
+ * @see GMMR0BalloonedPages
+ */
+GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
+{
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ GMMBALLOONEDPAGESREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.enmAction = enmAction;
+ Req.cBalloonedPages = cBalloonedPages;
+
+ rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
+ }
+ /*
+ * Ignore reset and fail all other requests.
+ */
+ else if (enmAction == GMMBALLOONACTION_RESET && cBalloonedPages == 0)
+ rc = VINF_SUCCESS;
+ else
+ rc = VERR_SUP_DRIVERLESS;
+ return rc;
+}
+
+
+/**
+ * @note Caller does the driverless check.
+ * @see GMMR0QueryVMMMemoryStatsReq
+ */
+GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize)
+{
+ GMMMEMSTATSREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.cAllocPages = 0;
+ Req.cFreePages = 0;
+ Req.cBalloonedPages = 0;
+ Req.cSharedPages = 0;
+
+ *pcTotalAllocPages = 0;
+ *pcTotalFreePages = 0;
+ *pcTotalBalloonPages = 0;
+ *puTotalBalloonSize = 0;
+
+ /* Must be callable from any thread, so can't use VMMR3CallR0. */
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS, 0, &Req.Hdr);
+ if (rc == VINF_SUCCESS)
+ {
+ *pcTotalAllocPages = Req.cAllocPages;
+ *pcTotalFreePages = Req.cFreePages;
+ *pcTotalBalloonPages = Req.cBalloonedPages;
+ *puTotalBalloonSize = Req.cSharedPages;
+ }
+ return rc;
+}
+
+
+/**
+ * @see GMMR0QueryMemoryStatsReq
+ */
+GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages)
+{
+ GMMMEMSTATSREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.cAllocPages = 0;
+ Req.cFreePages = 0;
+ Req.cBalloonedPages = 0;
+
+ *pcAllocPages = 0;
+ *pcMaxPages = 0;
+ *pcBalloonPages = 0;
+
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_QUERY_MEM_STATS, 0, &Req.Hdr);
+ if (rc == VINF_SUCCESS)
+ {
+ *pcAllocPages = Req.cAllocPages;
+ *pcMaxPages = Req.cMaxPages;
+ *pcBalloonPages = Req.cBalloonedPages;
+ }
+ return rc;
+}
+
+
+/**
+ * @see GMMR0MapUnmapChunk
+ */
+GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
+{
+ GMMMAPUNMAPCHUNKREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idChunkMap = idChunkMap;
+ Req.idChunkUnmap = idChunkUnmap;
+ Req.pvR3 = NULL;
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
+ if (RT_SUCCESS(rc) && ppvR3)
+ *ppvR3 = Req.pvR3;
+ return rc;
+}
+
+
+/**
+ * @see GMMR0FreeLargePage
+ */
+GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
+{
+ GMMFREELARGEPAGEREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idPage = idPage;
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
+}
+
+
+/**
+ * @see GMMR0RegisterSharedModule
+ */
+GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
+{
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
+ if (rc == VINF_SUCCESS)
+ rc = pReq->rc;
+ return rc;
+}
+
+
+/**
+ * @see GMMR0UnregisterSharedModule
+ */
+GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq)
+{
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = sizeof(*pReq);
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &pReq->Hdr);
+}
+
+
+/**
+ * @see GMMR0ResetSharedModules
+ */
+GMMR3DECL(int) GMMR3ResetSharedModules(PVM pVM)
+{
+ if (!SUPR3IsDriverless())
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_RESET_SHARED_MODULES, 0, NULL);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @see GMMR0CheckSharedModules
+ */
+GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
+{
+ return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
+}
+
+
+#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
+/**
+ * @see GMMR0FindDuplicatePage
+ */
+GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage)
+{
+ GMMFINDDUPLICATEPAGEREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idPage = idPage;
+ Req.fDuplicate = false;
+
+ /* Must be callable from any thread, so can't use VMMR3CallR0. */
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_FIND_DUPLICATE_PAGE, 0, &Req.Hdr);
+ if (rc == VINF_SUCCESS)
+ return Req.fDuplicate;
+ return false;
+}
+#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
+
diff --git a/src/VBox/VMM/VMMR3/GVMMR3.cpp b/src/VBox/VMM/VMMR3/GVMMR3.cpp
new file mode 100644
index 00000000..81d72493
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/GVMMR3.cpp
@@ -0,0 +1,227 @@
+/* $Id: GVMMR3.cpp $ */
+/** @file
+ * GVMM - Global VM Manager, ring-3 request wrappers.
+ */
+
+/*
+ * Copyright (C) 2021-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_GVMM
+#include <VBox/vmm/gvmm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vmcc.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/sup.h>
+#include <VBox/err.h>
+
+#include <iprt/mem.h>
+
+
+/**
+ * Driverless: VMMR0_DO_GVMM_CREATE_VM
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param cCpus The number of CPUs to create the VM for.
+ * @param pSession The support driver session handle.
+ * @param ppVM Where to return the pointer to the VM structure.
+ * @param ppVMR0 Where to return the ring-0 address of the VM structure
+ * for use in VMMR0 calls.
+ */
+VMMR3_INT_DECL(int) GVMMR3CreateVM(PUVM pUVM, uint32_t cCpus, PSUPDRVSESSION pSession, PVM *ppVM, PRTR0PTR ppVMR0)
+{
+ AssertReturn(cCpus >= VMM_MIN_CPU_COUNT && cCpus <= VMM_MAX_CPU_COUNT, VERR_INVALID_PARAMETER);
+ AssertCompile((sizeof(VM) & HOST_PAGE_OFFSET_MASK) == 0);
+ AssertCompile((sizeof(VMCPU) & HOST_PAGE_OFFSET_MASK) == 0);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ GVMMCREATEVMREQ CreateVMReq;
+ CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
+ CreateVMReq.pSession = pSession;
+ CreateVMReq.pVMR0 = NIL_RTR0PTR;
+ CreateVMReq.pVMR3 = NULL;
+ CreateVMReq.cCpus = cCpus;
+ rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
+ if (RT_SUCCESS(rc))
+ {
+ *ppVM = CreateVMReq.pVMR3;
+ *ppVMR0 = CreateVMReq.pVMR0;
+ }
+ }
+ else
+ {
+ /*
+ * Driverless.
+ */
+ /* Allocate the VM structure: */
+ size_t const cbVM = sizeof(VM) + sizeof(VMCPU) * cCpus;
+ PVM pVM = (PVM)RTMemPageAlloc(cbVM + HOST_PAGE_SIZE * (1 + 2 * cCpus));
+ if (!pVM)
+ return VERR_NO_PAGE_MEMORY;
+
+ /* Set up guard pages: */
+ RTMemProtect(pVM, HOST_PAGE_SIZE, RTMEM_PROT_NONE);
+ pVM = (PVM)((uintptr_t)pVM + HOST_PAGE_SIZE);
+ RTMemProtect(pVM + 1, HOST_PAGE_SIZE, RTMEM_PROT_NONE);
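+ /*
+ * Resulting layout, sizeof(VM) and sizeof(VMCPU) both being multiples of
+ * the page size (see the AssertCompiles above):
+ * [guard] [VM] [guard] [VMCPU 0] [guard] ... [VMCPU cCpus-1] [guard]
+ */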
+
+ /* VM: */
+ pVM->enmVMState = VMSTATE_CREATING;
+ pVM->pVMR3 = pVM;
+ pVM->hSelf = _1M;
+ pVM->pSession = pSession;
+ pVM->cCpus = cCpus;
+ pVM->uCpuExecutionCap = 100;
+ pVM->cbSelf = sizeof(VM);
+ pVM->cbVCpu = sizeof(VMCPU);
+ pVM->uStructVersion = 1;
+
+ /* CPUs: */
+ PVMCPU pVCpu = (PVMCPU)((uintptr_t)pVM + sizeof(VM) + HOST_PAGE_SIZE);
+ for (VMCPUID idxCpu = 0; idxCpu < cCpus; idxCpu++)
+ {
+ pVM->apCpusR3[idxCpu] = pVCpu;
+
+ pVCpu->enmState = VMCPUSTATE_STOPPED;
+ pVCpu->pVMR3 = pVM;
+ pVCpu->hNativeThread = NIL_RTNATIVETHREAD;
+ pVCpu->hNativeThreadR0 = NIL_RTNATIVETHREAD;
+ pVCpu->hThread = NIL_RTTHREAD;
+ pVCpu->idCpu = idxCpu;
+
+ RTMemProtect(pVCpu + 1, HOST_PAGE_SIZE, RTMEM_PROT_NONE);
+ pVCpu = (PVMCPU)((uintptr_t)pVCpu + sizeof(VMCPU) + HOST_PAGE_SIZE);
+ }
+
+ *ppVM = pVM;
+ *ppVMR0 = NIL_RTR0PTR;
+ }
+ RT_NOREF(pUVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Driverless: VMMR0_DO_GVMM_DESTROY_VM
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) GVMMR3DestroyVM(PUVM pUVM, PVM pVM)
+{
+ AssertPtrReturn(pVM, VERR_INVALID_VM_HANDLE);
+ Assert(pUVM->cCpus == pVM->cCpus);
+ RT_NOREF(pUVM);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ rc = SUPR3CallVMMR0Ex(pVM->pVMR0ForCall, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
+ else
+ {
+ RTMemPageFree((uint8_t *)pVM - HOST_PAGE_SIZE,
+ sizeof(VM) + sizeof(VMCPU) * pVM->cCpus + HOST_PAGE_SIZE * (1 + 2 * pVM->cCpus));
+ rc = VINF_SUCCESS;
+ }
+ return rc;
+}
+
+
+/**
+ * Register the calling EMT with GVM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The Virtual CPU ID.
+ * @thread EMT(idCpu)
+ * @see GVMMR0RegisterVCpu
+ */
+VMMR3_INT_DECL(int) GVMMR3RegisterVCpu(PVM pVM, VMCPUID idCpu)
+{
+ Assert(VMMGetCpuId(pVM) == idCpu);
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
+ if (RT_FAILURE(rc))
+ LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
+ }
+ else
+ rc = VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * Deregister the calling EMT from GVM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The Virtual CPU ID.
+ * @thread EMT(idCpu)
+ * @see GVMMR0DeregisterVCpu
+ */
+VMMR3_INT_DECL(int) GVMMR3DeregisterVCpu(PVM pVM, VMCPUID idCpu)
+{
+ Assert(VMMGetCpuId(pVM) == idCpu);
+ int rc;
+ if (!SUPR3IsDriverless())
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), idCpu, VMMR0_DO_GVMM_DEREGISTER_VMCPU, 0, NULL);
+ else
+ rc = VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * @see GVMMR0RegisterWorkerThread
+ */
+VMMR3_INT_DECL(int) GVMMR3RegisterWorkerThread(PVM pVM, GVMMWORKERTHREAD enmWorker)
+{
+ if (SUPR3IsDriverless())
+ return VINF_SUCCESS;
+ GVMMREGISTERWORKERTHREADREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.hNativeThreadR3 = RTThreadNativeSelf();
+ return SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID,
+ VMMR0_DO_GVMM_REGISTER_WORKER_THREAD, (unsigned)enmWorker, &Req.Hdr);
+}
+
+
+/**
+ * @see GVMMR0DeregisterWorkerThread
+ */
+VMMR3_INT_DECL(int) GVMMR3DeregisterWorkerThread(PVM pVM, GVMMWORKERTHREAD enmWorker)
+{
+ if (SUPR3IsDriverless())
+ return VINF_SUCCESS;
+ return SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID,
+ VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD, (unsigned)enmWorker, NULL);
+}
+
diff --git a/src/VBox/VMM/VMMR3/HM.cpp b/src/VBox/VMM/VMMR3/HM.cpp
new file mode 100644
index 00000000..898b5589
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/HM.cpp
@@ -0,0 +1,3517 @@
+/* $Id: HM.cpp $ */
+/** @file
+ * HM - Intel/AMD VM Hardware Support Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_hm HM - Hardware Assisted Virtualization Manager
+ *
+ * The HM manages guest execution using the VT-x and AMD-V CPU hardware
+ * extensions.
+ *
+ * {summary of what HM does}
+ *
+ * The hardware-assisted virtualization manager was originally abbreviated
+ * HWACCM; however, that was cumbersome to write and parse for such a central
+ * component, so it was shortened to HM when refactoring the code in the 4.3
+ * development cycle.
+ *
+ * {add sections with more details}
+ *
+ * @sa @ref grp_hm
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/gcm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/hm_svm.h>
+#include "HMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/env.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @def HMVMX_REPORT_FEAT
+ * Reports VT-x feature to the release log.
+ *
+ * @param a_uAllowed1 Mask of allowed-1 feature bits.
+ * @param a_uAllowed0 Mask of allowed-0 feature bits.
+ * @param a_StrDesc The description string to report.
+ * @param a_Featflag Mask of the feature to report.
+ */
+#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
+ do { \
+ if ((a_uAllowed1) & (a_Featflag)) \
+ { \
+ if ((a_uAllowed0) & (a_Featflag)) \
+ LogRel(("HM: " a_StrDesc " (must be set)\n")); \
+ else \
+ LogRel(("HM: " a_StrDesc "\n")); \
+ } \
+ else \
+ LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
+ } while (0)
+
+/** @def HMVMX_REPORT_ALLOWED_FEAT
+ * Reports an allowed VT-x feature to the release log.
+ *
+ * @param a_uAllowed1 Mask of allowed-1 feature bits.
+ * @param a_StrDesc The description string to report.
+ * @param a_FeatFlag Mask of the feature to report.
+ */
+#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
+ do { \
+ if ((a_uAllowed1) & (a_FeatFlag)) \
+ LogRel(("HM: " a_StrDesc "\n")); \
+ else \
+ LogRel(("HM: " a_StrDesc " not supported\n")); \
+ } while (0)
+
+/** @def HMVMX_REPORT_MSR_CAP
+ * Reports MSR feature capability.
+ *
+ * @param a_MsrCaps Mask of MSR feature bits.
+ * @param a_StrDesc The description string to report.
+ * @param a_fCap Mask of the feature to report.
+ */
+#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
+ do { \
+ if ((a_MsrCaps) & (a_fCap)) \
+ LogRel(("HM: " a_StrDesc "\n")); \
+ } while (0)
+
+/** @def HMVMX_LOGREL_FEAT
+ * Dumps a feature flag from a bitmap of features to the release log.
+ *
+ * @param a_fVal The value of all the features.
+ * @param a_fMask The specific bitmask of the feature.
+ */
+#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
+ do { \
+ if ((a_fVal) & (a_fMask)) \
+ LogRel(("HM: %s\n", #a_fMask)); \
+ } while (0)
+
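+/*
+ * Illustrative use of the reporting macros above; the variable names and
+ * feature bits are stand-ins for this example, not excerpts from the
+ * capability-reporting code further down.
+ *
+ *     HMVMX_REPORT_FEAT(uAllowed1, uAllowed0, "VIRT_APIC_ACCESS", fSomeFeatureBit);
+ *     HMVMX_REPORT_ALLOWED_FEAT(uAllowed1, "EPT", fSomeEptBit);
+ */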
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static int hmR3InitFinalizeR3(PVM pVM);
+static int hmR3InitFinalizeR0(PVM pVM);
+static int hmR3InitFinalizeR0Intel(PVM pVM);
+static int hmR3InitFinalizeR0Amd(PVM pVM);
+static int hmR3TermCPU(PVM pVM);
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * Returns the name of the hardware exception.
+ *
+ * @returns The name of the hardware exception.
+ * @param uVector The exception vector.
+ */
+static const char *hmR3GetXcptName(uint8_t uVector)
+{
+ switch (uVector)
+ {
+ case X86_XCPT_DE: return "#DE";
+ case X86_XCPT_DB: return "#DB";
+ case X86_XCPT_NMI: return "#NMI";
+ case X86_XCPT_BP: return "#BP";
+ case X86_XCPT_OF: return "#OF";
+ case X86_XCPT_BR: return "#BR";
+ case X86_XCPT_UD: return "#UD";
+ case X86_XCPT_NM: return "#NM";
+ case X86_XCPT_DF: return "#DF";
+ case X86_XCPT_CO_SEG_OVERRUN: return "#CO_SEG_OVERRUN";
+ case X86_XCPT_TS: return "#TS";
+ case X86_XCPT_NP: return "#NP";
+ case X86_XCPT_SS: return "#SS";
+ case X86_XCPT_GP: return "#GP";
+ case X86_XCPT_PF: return "#PF";
+ case X86_XCPT_MF: return "#MF";
+ case X86_XCPT_AC: return "#AC";
+ case X86_XCPT_MC: return "#MC";
+ case X86_XCPT_XF: return "#XF";
+ case X86_XCPT_VE: return "#VE";
+ case X86_XCPT_CP: return "#CP";
+ case X86_XCPT_VC: return "#VC";
+ case X86_XCPT_SX: return "#SX";
+ }
+ return "Reserved";
+}
+#endif /* VBOX_WITH_STATISTICS */
+
+
+/**
+ * Initializes the HM.
+ *
+ * This is the very first component to really do init after CFGM so that we can
+ * establish the predominant execution engine for the VM prior to initializing
+ * other modules. It takes care of NEM initialization if needed (HM disabled or
+ * not available in HW).
+ *
+ * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a
+ * native hypervisor API via NEM, and then on raw-mode if that isn't available
+ * either. The fallback to raw-mode will not happen if /HM/HMForced is set
+ * (as it is for guests using SMP or 64-bit, as well as for complicated guests
+ * like OS X, OS/2 and others).
+ *
+ * Note that a lot of the setup work is done in ring-0 and thus postponed till
+ * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ *
+ * @remarks Be careful with what we call here, since most of the VMM components
+ * are uninitialized.
+ */
+VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
+{
+ LogFlowFunc(("\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, hm.s, 32);
+ AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
+
+ /*
+ * Register the saved state data unit.
+ */
+ int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
+ NULL, NULL, NULL,
+ NULL, hmR3Save, NULL,
+ NULL, hmR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
+
+ /*
+ * Validate the HM settings.
+ */
+ rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
+ "HMForced" /* implied 'true' these days */
+ "|UseNEMInstead"
+ "|FallbackToNEM"
+ "|FallbackToIEM"
+ "|EnableNestedPaging"
+ "|EnableUX"
+ "|EnableLargePages"
+ "|EnableVPID"
+ "|IBPBOnVMExit"
+ "|IBPBOnVMEntry"
+ "|SpecCtrlByHost"
+ "|L1DFlushOnSched"
+ "|L1DFlushOnVMEntry"
+ "|MDSClearOnSched"
+ "|MDSClearOnVMEntry"
+ "|TPRPatchingEnabled"
+ "|64bitEnabled"
+ "|Exclusive"
+ "|MaxResumeLoops"
+ "|VmxPleGap"
+ "|VmxPleWindow"
+ "|VmxLbr"
+ "|UseVmxPreemptTimer"
+ "|SvmPauseFilter"
+ "|SvmPauseFilterThreshold"
+ "|SvmVirtVmsaveVmload"
+ "|SvmVGif"
+ "|LovelyMesaDrvWorkaround"
+ "|MissingOS2TlbFlushWorkaround"
+ "|AlwaysInterceptVmxMovDRx"
+ , "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /** @cfgm{/HM/HMForced, bool, false}
+ * Forces hardware virtualization, no falling back on raw-mode. HM must be
+ * enabled, i.e. /HMEnabled must be true. */
+ bool const fHMForced = true;
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ AssertRelease(pVM->fHMEnabled);
+#else
+ AssertRelease(!pVM->fHMEnabled);
+#endif
+
+ /** @cfgm{/HM/UseNEMInstead, bool, false}
+ * Don't use HM, use NEM instead. */
+ bool fUseNEMInstead = false;
+ rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
+ AssertRCReturn(rc, rc);
+ if (fUseNEMInstead && pVM->fHMEnabled)
+ {
+ LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
+ pVM->fHMEnabled = false;
+ }
+
+ /** @cfgm{/HM/FallbackToNEM, bool, true}
+ * Enables fallback on NEM. */
+ bool fFallbackToNEM = true;
+ rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/FallbackToIEM, bool, false on AMD64 else true}
+ * Enables fallback on IEM. */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ bool fFallbackToIEM = false;
+#else
+ bool fFallbackToIEM = true;
+#endif
+ rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToIEM", &fFallbackToIEM, fFallbackToIEM);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableNestedPaging, bool, false}
+ * Enables nested paging (aka extended page tables). */
+ bool fAllowNestedPaging = false;
+ rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &fAllowNestedPaging, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableUX, bool, true}
+ * Enables the VT-x unrestricted execution feature. */
+ bool fAllowUnrestricted = true;
+ rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &fAllowUnrestricted, true);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableLargePages, bool, false}
+ * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
+ * page table walking and possibly improving the TLB hit rate in some cases. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableVPID, bool, false}
+ * Enables the VT-x VPID feature. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
+ * Enables TPR patching for 32-bit Windows guests with an IO-APIC. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
+ * Enables AMD64 CPU features.
+ * On 32-bit hosts this isn't the default and requires host CPU support; 64-bit
+ * hosts already have the support. */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuestsCfg, HC_ARCH_BITS == 64);
+ AssertLogRelRCReturn(rc, rc);
+#else
+ pVM->hm.s.fAllow64BitGuestsCfg = false;
+#endif
+
+ /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
+ * The pause-filter exiting gap in TSC ticks. When the number of ticks between
+ * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
+ * latest PAUSE instruction to be the start of a new PAUSE loop.
+ */
+ rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
+ * The pause-filter exiting window in TSC ticks. When the number of ticks
+ * between the current PAUSE instruction and first PAUSE of a loop exceeds
+ * VmxPleWindow, a VM-exit is triggered.
+ *
+ * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
+ */
+ rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
+ AssertRCReturn(rc, rc);
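+ /* Worked example (values are illustrative, not recommendations): with
+    VmxPleGap=128 and VmxPleWindow=4096, PAUSE instructions arriving fewer
+    than 128 TSC ticks apart are treated as part of the same spin loop, and
+    once more than 4096 ticks have elapsed since that loop's first PAUSE the
+    CPU forces a VM-exit. */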
+
+ /** @cfgm{/HM/VmxLbr, bool, false}
+ * Whether to enable LBR for the guest. This is disabled by default as it's only
+ * useful while debugging and enabling it causes a noticeable performance hit. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "VmxLbr", &pVM->hm.s.vmx.fLbrCfg, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/SvmPauseFilter, uint16_t, 0}
+ * A counter that is decremented each time a PAUSE instruction is executed by
+ * the guest. When the counter is 0, a \#VMEXIT is triggered.
+ *
+ * Setting SvmPauseFilter to 0 disables pause-filter exiting.
+ */
+ rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
+ * The pause filter threshold in ticks. When the elapsed time (in ticks) between
+ * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
+ * PauseFilter count is reset to its initial value. However, if PAUSE is
+ * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
+ * be triggered.
+ *
+ * Requires SvmPauseFilter to be non-zero for the pause-filter threshold to be
+ * activated.
+ */
+ rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
+ AssertRCReturn(rc, rc);
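+ /* Worked example (values are illustrative, not recommendations): with
+    SvmPauseFilter=64 and SvmPauseFilterThreshold=512, each guest PAUSE
+    decrements the counter, while a PAUSE arriving more than 512 ticks after
+    the previous one resets it to 64; 64 rapid-fire PAUSEs thus trigger a
+    #VMEXIT. */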
+
+ /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
+ * Whether to make use of virtualized VMSAVE/VMLOAD feature of the CPU if it's
+ * available. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/SvmVGif, bool, true}
+ * Whether to make use of Virtual GIF (Global Interrupt Flag) feature of the CPU
+ * if it's available. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/SvmLbrVirt, bool, false}
+ * Whether to make use of the LBR virtualization feature of the CPU if it's
+ * available. This is disabled by default as it's only useful while debugging
+ * and enabling it causes a small hit to performance. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "SvmLbrVirt", &pVM->hm.s.svm.fLbrVirt, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/Exclusive, bool}
+ * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
+ * one-time global init of all host CPUs. If false, we do local init each time
+ * we wish to execute guest code.
+ *
+ * On Windows, default is false due to the higher risk of conflicts with other
+ * hypervisors.
+ *
+ * On Mac OS X, this setting is ignored since the code does not handle local
+ * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
+ */
+#if defined(RT_OS_DARWIN)
+ pVM->hm.s.fGlobalInit = true;
+#else
+ rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
+# if defined(RT_OS_WINDOWS)
+ false
+# else
+ true
+# endif
+ );
+ AssertLogRelRCReturn(rc, rc);
+#endif
+
+ /** @cfgm{/HM/MaxResumeLoops, uint32_t}
+ * The number of times to resume guest execution before we forcibly return to
+ * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
+ * determines the default value. */
+ rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoopsCfg, 0 /* set by R0 later */);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/UseVmxPreemptTimer, bool}
+ * Whether to make use of the VMX-preemption timer feature of the CPU if it's
+ * available. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimerCfg, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/IBPBOnVMExit, bool}
+ * Costly paranoia setting. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/IBPBOnVMEntry, bool}
+ * Costly paranoia setting. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/L1DFlushOnSched, bool, true}
+ * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
+ * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
+ if (pVM->hm.s.fL1dFlushOnVmEntry)
+ pVM->hm.s.fL1dFlushOnSched = false;
+
+ /** @cfgm{/HM/SpecCtrlByHost, bool}
+ * Another expensive paranoia setting. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/MDSClearOnSched, bool, true}
+ * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+ * ignored on CPUs that aren't affected. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/MDSClearOnVMEntry, bool, false}
+ * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+ * ignored on CPUs that aren't affected. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVMEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /* Disable MDSClearOnSched if MDSClearOnVMEntry is enabled. */
+ if (pVM->hm.s.fMdsClearOnVmEntry)
+ pVM->hm.s.fMdsClearOnSched = false;
+
+ /** @cfgm{/HM/LovelyMesaDrvWorkaround, bool}
+ * Workaround for the Mesa vmsvga 3D driver making incorrect assumptions about
+ * the hypervisor it is running under. */
+ bool fMesaWorkaround;
+ rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &fMesaWorkaround, false);
+ AssertLogRelRCReturn(rc, rc);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv = fMesaWorkaround;
+ }
+
+ /** @cfgm{/HM/MissingOS2TlbFlushWorkaround, bool}
+ * Workaround for OS/2 not flushing the TLB after page directory and page table
+ * modifications when returning to protected mode from a real mode call
+ * (TESTCFG.SYS typically crashes). See ticketref:20625 for details. */
+ rc = CFGMR3QueryBoolDef(pCfgHm, "MissingOS2TlbFlushWorkaround", &pVM->hm.s.fMissingOS2TlbFlushWorkaround, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/AlwaysInterceptVmxMovDRx, int8_t, 0}
+ * Whether to always intercept MOV DRx when using VMX.
+ * The value is a tristate: 1 for always intercepting, -1 for lazy intercept,
+ * and 0 for default. The default means that it's always intercepted when the
+ * host DR6 contains bits not known to the guest.
+ *
+ * With the introduction of the transactional synchronization extensions
+ * (TSX-NI, aka RTM) instructions, bit 16 in DR6 is cleared to indicate that a
+ * \#DB was related to a transaction. The bit is also cleared when writing
+ * zero to it, so a guest lazily resetting DR6 by writing 0 to it ends up with
+ * an unexpected value. Similarly, bit 11 in DR7 is used to enable RTM
+ * debugging support and is therefore writable by the guest.
+ *
+ * Out of caution/paranoia, we will by default intercept DRx moves when setting
+ * DR6 to zero (on the host) doesn't result in 0xffff0ff0 (X86_DR6_RA1_MASK).
+ * Note that it seems DR6.RTM remains writable even after the microcode updates
+ * disabling TSX. */
+ rc = CFGMR3QueryS8Def(pCfgHm, "AlwaysInterceptVmxMovDRx", &pVM->hm.s.vmx.fAlwaysInterceptMovDRxCfg, 0);
+ AssertLogRelRCReturn(rc, rc);
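+ /* A sketch of the DR6 probe described above (assumed, not necessarily the
+    exact ring-0 code; ASMSetDR6/ASMGetDR6 come from iprt/asm-amd64-x86.h):
+        ASMSetDR6(0);
+        bool const fInterceptMovDRx = ASMGetDR6() != X86_DR6_RA1_MASK;
+    On TSX-capable hosts DR6.RTM (bit 16) is writable, so the read-back yields
+    0xfffe0ff0 rather than 0xffff0ff0 and MOV DRx gets intercepted. */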
+
+ /*
+ * Check for VT-x or AMD-V support according to the user's wishes.
+ */
+ /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
+ * VERR_SVM_IN_USE. */
+ if (pVM->fHMEnabled)
+ {
+ uint32_t fCaps;
+ rc = SUPR3QueryVTCaps(&fCaps);
+ if (RT_SUCCESS(rc))
+ {
+ if (fCaps & SUPVTCAPS_AMD_V)
+ {
+ pVM->hm.s.svm.fSupported = true;
+ LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
+ }
+ else if (fCaps & SUPVTCAPS_VT_X)
+ {
+ const char *pszWhy;
+ rc = SUPR3QueryVTxSupported(&pszWhy);
+ if (RT_SUCCESS(rc))
+ {
+ pVM->hm.s.vmx.fSupported = true;
+ LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
+ fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
+ fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
+ (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
+ }
+ else
+ {
+ /*
+ * Before failing, try fallback to NEM if we're allowed to do that.
+ */
+ pVM->fHMEnabled = false;
+ Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
+ if (fFallbackToNEM)
+ {
+ LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
+ int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
+
+ ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
+ if ( RT_SUCCESS(rc2)
+ && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
+ rc = VINF_SUCCESS;
+ }
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
+ }
+ }
+ else
+ AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
+ VERR_INTERNAL_ERROR_5);
+
+ /*
+ * Settle the nested paging and unrestricted guest execution configuration
+ * now so that CPUM can make decisions based on it.
+ */
+ if ( fAllowNestedPaging
+ && (fCaps & SUPVTCAPS_NESTED_PAGING))
+ {
+ pVM->hm.s.fNestedPagingCfg = true;
+ if (fCaps & SUPVTCAPS_VT_X)
+ {
+ if ( fAllowUnrestricted
+ && (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST))
+ pVM->hm.s.vmx.fUnrestrictedGuestCfg = true;
+ else
+ Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
+ }
+ }
+ else
+ Assert(!pVM->hm.s.fNestedPagingCfg);
+ }
+ else
+ {
+ const char *pszMsg;
+ switch (rc)
+ {
+ case VERR_UNSUPPORTED_CPU: pszMsg = "Unknown CPU, VT-x or AMD-V features cannot be ascertained"; break;
+ case VERR_VMX_NO_VMX: pszMsg = "VT-x is not available"; break;
+ case VERR_VMX_MSR_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS"; break;
+ case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
+ case VERR_VMX_MSR_LOCKING_FAILED: pszMsg = "Failed to enable and lock VT-x features"; break;
+ case VERR_SVM_NO_SVM: pszMsg = "AMD-V is not available"; break;
+ case VERR_SVM_DISABLED: pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
+ case VERR_SUP_DRIVERLESS: pszMsg = "Driverless mode"; break;
+ default:
+ return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
+ }
+
+ /*
+ * Before failing, try fallback to NEM if we're allowed to do that.
+ */
+ pVM->fHMEnabled = false;
+ if (fFallbackToNEM)
+ {
+ LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
+ int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
+ ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
+ if ( RT_SUCCESS(rc2)
+ && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
+ {
+ rc = VINF_SUCCESS;
+
+ /* For some reason, HM is in charge of large pages. Make sure to enable them: */
+ PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
+ }
+ }
+
+ /*
+ * Then try fall back on IEM if NEM isn't available and we're allowed to.
+ */
+ if (RT_FAILURE(rc))
+ {
+ if ( fFallbackToIEM
+ && (!fFallbackToNEM || rc == VERR_NEM_NOT_AVAILABLE || rc == VERR_SUP_DRIVERLESS))
+ {
+ LogRel(("HM: HMR3Init: Falling back on IEM: %s\n", !fFallbackToNEM ? pszMsg : "NEM not available"));
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_IEM);
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ PGMR3EnableNemMode(pVM);
+#endif
+ }
+ else
+ return VM_SET_ERROR(pVM, rc, pszMsg);
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Disabled HM means raw-mode, unless NEM is supposed to be used.
+ */
+ rc = VERR_NEM_NOT_AVAILABLE;
+ if (fUseNEMInstead)
+ {
+ rc = NEMR3Init(pVM, false /*fFallback*/, true);
+ ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
+ if (RT_SUCCESS(rc))
+ {
+ /* For some reason, HM is in charge of large pages. Make sure to enable them: */
+ PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
+ }
+ else if (!fFallbackToIEM || rc != VERR_NEM_NOT_AVAILABLE)
+ return rc;
+ }
+
+ if (fFallbackToIEM && rc == VERR_NEM_NOT_AVAILABLE)
+ {
+ LogRel(("HM: HMR3Init: Falling back on IEM%s\n", fUseNEMInstead ? ": NEM not available" : ""));
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_IEM);
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ PGMR3EnableNemMode(pVM);
+#endif
+ }
+
+ if ( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET
+ || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT /* paranoia */)
+ return VM_SET_ERROR(pVM, rc, "Misconfigured VM: No guest execution engine available!");
+ }
+
+ if (pVM->fHMEnabled)
+ {
+ /*
+ * Register info handlers now that HM is used for sure.
+ */
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
+ DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
+ hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the HM LBR info.", hmR3InfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+ }
+
+ Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes HM components after the ring-3 phase has been fully initialized.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR3(PVM pVM)
+{
+ LogFlowFunc(("\n"));
+
+ if (!HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->hm.s.fActive = false;
+ pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* Safe to call now that GIMR3Init() has completed. */
+ pVCpu->hm.s.fGCMTrapXcptDE = GCMShouldTrapXcptDE(pVCpu); /* Safe to call now that GCMR3Init() has completed. */
+ }
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /*
+ * Check if L1D flush is needed/possible.
+ */
+ if ( !g_CpumHostFeatures.s.fFlushCmd
+ || g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
+ || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
+ || g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d
+ || g_CpumHostFeatures.s.fArchRdclNo)
+ pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;
+
+ /*
+ * Check if MDS flush is needed/possible.
+ * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
+ */
+ if ( !g_CpumHostFeatures.s.fMdsClear
+ || g_CpumHostFeatures.s.fArchMdsNo)
+ pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
+ else if ( ( g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
+ && g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Atom_End)
+ || ( g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
+ && g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Phi_End))
+ {
+ if (!pVM->hm.s.fMdsClearOnSched)
+ pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
+ pVM->hm.s.fMdsClearOnVmEntry = false;
+ }
+ else if ( g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
+ || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
+ pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
+#endif
+
+ /*
+ * Statistics.
+ */
+#ifdef VBOX_WITH_STATISTICS
+ STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
+#endif
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
+#else
+ bool const fCpuSupportsVmx = false;
+#endif
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PHMCPU pHmCpu = &pVCpu->hm.s;
+ int rc;
+
+# define HM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
+ rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
+ AssertRC(rc); \
+ } while (0)
+# define HM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
+ HM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
+
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_PROFILE(&pHmCpu->StatPoke, "/PROF/CPU%u/HM/Poke", "Profiling of RTMpPokeCpu.");
+ HM_REG_PROFILE(&pHmCpu->StatSpinPoke, "/PROF/CPU%u/HM/PokeWait", "Profiling of poke wait.");
+ HM_REG_PROFILE(&pHmCpu->StatSpinPokeFailed, "/PROF/CPU%u/HM/PokeWaitFailed", "Profiling of poke wait when RTMpPokeCpu fails.");
+ HM_REG_PROFILE(&pHmCpu->StatEntry, "/PROF/CPU%u/HM/Entry", "Profiling of entry until entering GC.");
+ HM_REG_PROFILE(&pHmCpu->StatPreExit, "/PROF/CPU%u/HM/SwitchFromGC_1", "Profiling of pre-exit processing after returning from GC.");
+ HM_REG_PROFILE(&pHmCpu->StatExitHandling, "/PROF/CPU%u/HM/SwitchFromGC_2", "Profiling of exit handling (longjmps not included!)");
+ HM_REG_PROFILE(&pHmCpu->StatExitIO, "/PROF/CPU%u/HM/SwitchFromGC_2/IO", "I/O.");
+ HM_REG_PROFILE(&pHmCpu->StatExitMovCRx, "/PROF/CPU%u/HM/SwitchFromGC_2/MovCRx", "MOV CRx.");
+ HM_REG_PROFILE(&pHmCpu->StatExitXcptNmi, "/PROF/CPU%u/HM/SwitchFromGC_2/XcptNmi", "Exceptions, NMIs.");
+ HM_REG_PROFILE(&pHmCpu->StatExitVmentry, "/PROF/CPU%u/HM/SwitchFromGC_2/Vmentry", "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.");
+ HM_REG_PROFILE(&pHmCpu->StatImportGuestState, "/PROF/CPU%u/HM/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
+ HM_REG_PROFILE(&pHmCpu->StatExportGuestState, "/PROF/CPU%u/HM/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
+ HM_REG_PROFILE(&pHmCpu->StatLoadGuestFpuState, "/PROF/CPU%u/HM/LoadGuestFpuState", "Profiling of CPUMR0LoadGuestFPU.");
+ HM_REG_PROFILE(&pHmCpu->StatInGC, "/PROF/CPU%u/HM/InGC", "Profiling of execution of guest-code in hardware.");
+# ifdef HM_PROFILE_EXIT_DISPATCH
+ HM_REG_STAT(&pHmCpu->StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "/PROF/CPU%u/HM/ExitDispatch", "Profiling the dispatching of exit handlers.");
+# endif
+#endif
+# define HM_REG_COUNTER(a, b, desc) HM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
+
+ HM_REG_COUNTER(&pHmCpu->StatImportGuestStateFallback, "/HM/CPU%u/ImportGuestStateFallback", "Times vmxHCImportGuestState took the fallback code path.");
+ HM_REG_COUNTER(&pHmCpu->StatReadToTransientFallback, "/HM/CPU%u/ReadToTransientFallback", "Times vmxHCReadToTransient took the fallback code path.");
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pHmCpu->StatExitAll, "/HM/CPU%u/Exit/All", "Total exits (excludes nested-guest and debug loops exits).");
+ HM_REG_COUNTER(&pHmCpu->StatDebugExitAll, "/HM/CPU%u/Exit/DebugAll", "Total debug-loop exits.");
+ HM_REG_COUNTER(&pHmCpu->StatNestedExitAll, "/HM/CPU%u/Exit/NestedGuest/All", "Total nested-guest exits.");
+ HM_REG_COUNTER(&pHmCpu->StatExitShadowNM, "/HM/CPU%u/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestNM, "/HM/CPU%u/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitShadowPF, "/HM/CPU%u/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitShadowPFEM, "/HM/CPU%u/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestPF, "/HM/CPU%u/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestUD, "/HM/CPU%u/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestSS, "/HM/CPU%u/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestNP, "/HM/CPU%u/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestTS, "/HM/CPU%u/Exit/Trap/Gst/#TS", "Guest #TS (task switch) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestOF, "/HM/CPU%u/Exit/Trap/Gst/#OF", "Guest #OF (overflow) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestGP, "/HM/CPU%u/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestDE, "/HM/CPU%u/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestDF, "/HM/CPU%u/Exit/Trap/Gst/#DF", "Guest #DF (double fault) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestBR, "/HM/CPU%u/Exit/Trap/Gst/#BR", "Guest #BR (boundary range exceeded) exception.");
+#endif
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestAC, "/HM/CPU%u/Exit/Trap/Gst/#AC", "Guest #AC (alignment check) exception.");
+ if (fCpuSupportsVmx)
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestACSplitLock, "/HM/CPU%u/Exit/Trap/Gst/#AC-split-lock", "Guest triggered #AC due to split-lock being enabled on the host (interpreted).");
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestDB, "/HM/CPU%u/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestMF, "/HM/CPU%u/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestBP, "/HM/CPU%u/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestXF, "/HM/CPU%u/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
+ HM_REG_COUNTER(&pHmCpu->StatExitGuestXcpUnk, "/HM/CPU%u/Exit/Trap/Gst/Other", "Other guest exceptions.");
+ HM_REG_COUNTER(&pHmCpu->StatExitRdmsr, "/HM/CPU%u/Exit/Instr/Rdmsr", "MSR read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitWrmsr, "/HM/CPU%u/Exit/Instr/Wrmsr", "MSR write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitDRxWrite, "/HM/CPU%u/Exit/Instr/DR-Write", "Debug register write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitDRxRead, "/HM/CPU%u/Exit/Instr/DR-Read", "Debug register read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR0Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR2Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR3Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR4Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR8Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR0Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR2Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR3Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR4Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitCR8Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitClts, "/HM/CPU%u/Exit/Instr/CLTS", "CLTS instruction.");
+ HM_REG_COUNTER(&pHmCpu->StatExitLmsw, "/HM/CPU%u/Exit/Instr/LMSW", "LMSW instruction.");
+ HM_REG_COUNTER(&pHmCpu->StatExitXdtrAccess, "/HM/CPU%u/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
+ HM_REG_COUNTER(&pHmCpu->StatExitIOWrite, "/HM/CPU%u/Exit/Instr/IO/Write", "I/O write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitIORead, "/HM/CPU%u/Exit/Instr/IO/Read", "I/O read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitIOStringWrite, "/HM/CPU%u/Exit/Instr/IO/WriteString", "String I/O write.");
+ HM_REG_COUNTER(&pHmCpu->StatExitIOStringRead, "/HM/CPU%u/Exit/Instr/IO/ReadString", "String I/O read.");
+ HM_REG_COUNTER(&pHmCpu->StatExitIntWindow, "/HM/CPU%u/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
+ HM_REG_COUNTER(&pHmCpu->StatExitExtInt, "/HM/CPU%u/Exit/ExtInt", "Physical maskable interrupt (host).");
+#endif
+ HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC, "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
+ HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
+ HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
+ HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch, "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
+ HM_REG_COUNTER(&pHmCpu->StatExitApicAccess, "/HM/CPU%u/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
+
+ HM_REG_COUNTER(&pHmCpu->StatSwitchTprMaskedIrq, "/HM/CPU%u/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchGuestIrq, "/HM/CPU%u/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchPendingHostIrq, "/HM/CPU%u/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchHmToR3FF, "/HM/CPU%u/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchVmReq, "/HM/CPU%u/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchPgmPoolFlush, "/HM/CPU%u/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchDma, "/HM/CPU%u/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchExitToR3, "/HM/CPU%u/Switch/ExitToR3", "Exit to ring-3 (total).");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchLongJmpToR3, "/HM/CPU%u/Switch/LongJmpToR3", "Longjump to ring-3.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchMaxResumeLoops, "/HM/CPU%u/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchHltToR3, "/HM/CPU%u/Switch/HltToR3", "HLT causing us to go to ring-3.");
+ HM_REG_COUNTER(&pHmCpu->StatSwitchApicAccessToR3, "/HM/CPU%u/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
+#endif
+ HM_REG_COUNTER(&pHmCpu->StatSwitchPreempt, "/HM/CPU%u/Switch/Preempting", "EMT has been preempted while in HM context.");
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pHmCpu->StatSwitchNstGstVmexit, "/HM/CPU%u/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");
+
+ HM_REG_COUNTER(&pHmCpu->StatInjectInterrupt, "/HM/CPU%u/EventInject/Interrupt", "Injected an external interrupt into the guest.");
+ HM_REG_COUNTER(&pHmCpu->StatInjectXcpt, "/HM/CPU%u/EventInject/Trap", "Injected an exception into the guest.");
+ HM_REG_COUNTER(&pHmCpu->StatInjectReflect, "/HM/CPU%u/EventInject/Reflect", "Reflecting an exception caused due to event injection.");
+ HM_REG_COUNTER(&pHmCpu->StatInjectConvertDF, "/HM/CPU%u/EventInject/ReflectDF", "Injected a converted #DF caused due to event injection.");
+ HM_REG_COUNTER(&pHmCpu->StatInjectInterpret, "/HM/CPU%u/EventInject/Interpret", "Falling back to interpreter for handling exception caused due to event injection.");
+ HM_REG_COUNTER(&pHmCpu->StatInjectReflectNPF, "/HM/CPU%u/EventInject/ReflectNPF", "Reflecting event that caused an EPT violation / nested #PF.");
+
+ HM_REG_COUNTER(&pHmCpu->StatFlushPage, "/HM/CPU%u/Flush/Page", "Invalidating a guest page on all guest CPUs.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushPageManual, "/HM/CPU%u/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushPhysPageManual, "/HM/CPU%u/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlb, "/HM/CPU%u/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlbManual, "/HM/CPU%u/Flush/TLB/Manual", "Request a full guest-TLB flush.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlbNstGst, "/HM/CPU%u/Flush/TLB/NestedGuest", "Request a nested-guest-TLB flush.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
+ HM_REG_COUNTER(&pHmCpu->StatNoFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/Skipped", "No TLB flushing required.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushEntire, "/HM/CPU%u/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
+ HM_REG_COUNTER(&pHmCpu->StatFlushAsid, "/HM/CPU%u/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushNestedPaging, "/HM/CPU%u/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgVirt, "/HM/CPU%u/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
+ HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgPhys, "/HM/CPU%u/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
+ HM_REG_COUNTER(&pHmCpu->StatTlbShootdown, "/HM/CPU%u/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
+ HM_REG_COUNTER(&pHmCpu->StatTlbShootdownFlush, "/HM/CPU%u/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
+
+ HM_REG_COUNTER(&pHmCpu->StatTscParavirt, "/HM/CPU%u/TSC/Paravirt", "Paravirtualized TSC in effect.");
+ HM_REG_COUNTER(&pHmCpu->StatTscOffset, "/HM/CPU%u/TSC/Offset", "TSC offsetting is in effect.");
+ HM_REG_COUNTER(&pHmCpu->StatTscIntercept, "/HM/CPU%u/TSC/Intercept", "Intercept TSC accesses.");
+
+ HM_REG_COUNTER(&pHmCpu->StatDRxArmed, "/HM/CPU%u/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
+ HM_REG_COUNTER(&pHmCpu->StatDRxContextSwitch, "/HM/CPU%u/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
+ HM_REG_COUNTER(&pHmCpu->StatDRxIoCheck, "/HM/CPU%u/Debug/IOCheck", "Checking for I/O breakpoint.");
+
+ HM_REG_COUNTER(&pHmCpu->StatExportMinimal, "/HM/CPU%u/Export/Minimal", "VM-entry exporting minimal guest-state.");
+ HM_REG_COUNTER(&pHmCpu->StatExportFull, "/HM/CPU%u/Export/Full", "VM-entry exporting the full guest-state.");
+ HM_REG_COUNTER(&pHmCpu->StatLoadGuestFpu, "/HM/CPU%u/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
+ HM_REG_COUNTER(&pHmCpu->StatExportHostState, "/HM/CPU%u/Export/HostState", "VM-entry exporting host-state.");
+
+ if (fCpuSupportsVmx)
+ {
+ HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRip, "/HM/CPU%u/WriteHostRIP", "Number of VMX_VMCS_HOST_RIP instructions.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRsp, "/HM/CPU%u/WriteHostRSP", "Number of VMX_VMCS_HOST_RSP instructions.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxVmLaunch, "/HM/CPU%u/VMLaunch", "Number of VM-entries using VMLAUNCH.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxVmResume, "/HM/CPU%u/VMResume", "Number of VM-entries using VMRESUME.");
+ }
+
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelBase, "/HM/CPU%u/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelLimit, "/HM/CPU%u/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelAttr, "/HM/CPU%u/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");
+
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelBase, "/HM/CPU%u/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelLimit, "/HM/CPU%u/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelAttr, "/HM/CPU%u/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");
+
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckRmOk, "/HM/CPU%u/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadSel, "/HM/CPU%u/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl, "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk, "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
+#endif
+ if (fCpuSupportsVmx)
+ {
+ HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/PreemptTimer", "VMX-preemption timer fired.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline, "/HM/CPU%u/PreemptTimer/ReusingDeadline", "VMX-preemption timer arming logic using previously calculated deadline.");
+ HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired, "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired", "VMX-preemption timer arming logic found previous deadline already expired (ignored).");
+ HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline, "/HM/CPU%u/PreemptTimer/RecalcingDeadline", "VMX-preemption timer arming logic recalculating the deadline (slightly expensive).");
+ HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored).");
+ }
+#ifdef VBOX_WITH_STATISTICS
+ /*
+ * Guest Exit reason stats.
+ */
+ if (fCpuSupportsVmx)
+ {
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ const char *pszExitName = HMGetVmxExitName(j);
+ if (pszExitName)
+ {
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
+ AssertRCReturn(rc, rc);
+ }
+ }
+ }
+ else
+ {
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ const char *pszExitName = HMGetSvmExitName(j);
+ if (pszExitName)
+ {
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
+ AssertRC(rc);
+ }
+ }
+ }
+ HM_REG_COUNTER(&pHmCpu->StatExitReasonNpf, "/HM/CPU%u/Exit/Reason/#NPF", "Nested page faults");
+
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+ /*
+ * Nested-guest VM-exit reason stats.
+ */
+ if (fCpuSupportsVmx)
+ {
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ const char *pszExitName = HMGetVmxExitName(j);
+ if (pszExitName)
+ {
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
+ AssertRC(rc);
+ }
+ }
+ }
+ else
+ {
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ const char *pszExitName = HMGetSvmExitName(j);
+ if (pszExitName)
+ {
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
+ AssertRC(rc);
+ }
+ }
+ }
+ HM_REG_COUNTER(&pHmCpu->StatNestedExitReasonNpf, "/HM/CPU%u/Exit/NestedGuest/Reason/#NPF", "Nested page faults");
+#endif
+
+ /*
+ * Injected interrupts stats.
+ */
+ char szDesc[64];
+ for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedIrqs); j++)
+ {
+ RTStrPrintf(&szDesc[0], sizeof(szDesc), "Interrupt %u", j);
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectIntr/%02X", idCpu, j);
+ AssertRC(rc);
+ }
+
+ /*
+ * Injected exception stats.
+ */
+ for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedXcpts); j++)
+ {
+ RTStrPrintf(&szDesc[0], sizeof(szDesc), "%s exception", hmR3GetXcptName(j));
+ rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedXcpts[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectXcpt/%02X", idCpu, j);
+ AssertRC(rc);
+ }
+
+#endif /* VBOX_WITH_STATISTICS */
+#undef HM_REG_COUNTER
+#undef HM_REG_PROFILE
+#undef HM_REG_STAT
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when an init phase has completed.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat The phase that completed.
+ */
+VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ switch (enmWhat)
+ {
+ case VMINITCOMPLETED_RING3:
+ return hmR3InitFinalizeR3(pVM);
+ case VMINITCOMPLETED_RING0:
+ return hmR3InitFinalizeR0(pVM);
+ default:
+ return VINF_SUCCESS;
+ }
+}
+
+
+/**
+ * Turns off normal raw mode features.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void hmR3DisableRawMode(PVM pVM)
+{
+/** @todo r=bird: HM shouldn't be doing this crap. */
+ /* Reinit the paging mode to force the new shadow mode. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
+ }
+}
+
+
+/**
+ * Initialize VT-x or AMD-V.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR0(PVM pVM)
+{
+ int rc;
+
+ /*
+ * Since HM is in charge of large pages, if large pages aren't supported on Intel CPUs,
+ * we must disable them here. Doing it here rather than in hmR3InitFinalizeR0Intel covers
+ * the case of informing PGM even when NEM is the execution engine.
+ */
+ if ( pVM->hm.s.fLargePages
+ && pVM->hm.s.vmx.fSupported
+ && !(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M))
+ {
+ pVM->hm.s.fLargePages = false;
+ PGMSetLargePageUsage(pVM, false);
+ LogRel(("HM: Disabled large page support as the CPU doesn't allow EPT PDEs to map 2MB pages\n"));
+ }
+
+ if (!HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
+ /*
+ * Hack to allow users to work around broken BIOSes that incorrectly set
+ * EFER.SVME, which makes us believe somebody else is already using AMD-V.
+ */
+ if ( !pVM->hm.s.vmx.fSupported
+ && !pVM->hm.s.svm.fSupported
+ && pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
+ && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
+ {
+ LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
+ pVM->hm.s.svm.fSupported = true;
+ pVM->hm.s.svm.fIgnoreInUseError = true;
+ pVM->hm.s.ForR3.rcInit = VINF_SUCCESS;
+ }
+
+ /*
+ * Report ring-0 init errors.
+ */
+ if ( !pVM->hm.s.vmx.fSupported
+ && !pVM->hm.s.svm.fSupported)
+ {
+ LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
+ LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.u64HostFeatCtrl));
+ switch (pVM->hm.s.ForR3.rcInit)
+ {
+ case VERR_VMX_IN_VMX_ROOT_MODE:
+ return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
+ case VERR_VMX_NO_VMX:
+ return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
+ case VERR_VMX_MSR_VMX_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
+ case VERR_VMX_MSR_ALL_VMX_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
+ case VERR_VMX_MSR_LOCKING_FAILED:
+ return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
+ case VERR_VMX_MSR_VMX_ENABLE_FAILED:
+ return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
+ case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
+ return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");
+
+ case VERR_SVM_IN_USE:
+ return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
+ case VERR_SVM_NO_SVM:
+ return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
+ case VERR_SVM_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
+ }
+ return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
+ }
+
+ /*
+ * Enable VT-x or AMD-V on all host CPUs.
+ */
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("HM: Failed to enable, error %Rrc\n", rc));
+ HMR3CheckError(pVM, rc);
+ return rc;
+ }
+
+ /*
+ * No TPR patching is required when the IO-APIC is not enabled for this VM.
+ * (Main should have taken care of this already)
+ */
+ if (!PDMHasIoApic(pVM))
+ {
+ Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
+ pVM->hm.s.fTprPatchingAllowed = false;
+ }
+
+ LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
+ pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
+ pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
+
+ /*
+ * Do the vendor specific initialization.
+ *
+ * Note! We enable release log buffering here since we're doing a relatively
+ * large amount of logging and don't want to hit the disk with each
+ * LogRel statement.
+ */
+ AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ if (pVM->hm.s.vmx.fSupported)
+ rc = hmR3InitFinalizeR0Intel(pVM);
+ else
+ rc = hmR3InitFinalizeR0Amd(pVM);
+ LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
+ : "HM: VT-x/AMD-V init method: Local\n"));
+ RTLogRelSetBuffering(fOldBuffered);
+ pVM->hm.s.fInitialized = true;
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
+ */
+static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
+{
+ NOREF(pVM);
+ NOREF(pvAllocation);
+ NOREF(GCPhysAllocation);
+}
+
+
+/**
+ * Returns a description of the VMCS (and associated regions') memory type given the
+ * IA32_VMX_BASIC MSR.
+ *
+ * @returns The descriptive memory type.
+ * @param uMsrVmxBasic IA32_VMX_BASIC MSR value.
+ */
+static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
+{
+ uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
+ switch (uMemType)
+ {
+ case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
+ case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
+ }
+ return "Unknown";
+}
+
+
+/**
+ * Returns a single-line description of all the activity-states supported by the CPU
+ * given the IA32_VMX_MISC MSR.
+ *
+ * @returns All supported activity states.
+ * @param uMsrMisc IA32_VMX_MISC MSR value.
+ */
+static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
+{
+ static const char * const s_apszActStates[] =
+ {
+ "",
+ " ( HLT )",
+ " ( SHUTDOWN )",
+ " ( HLT SHUTDOWN )",
+ " ( SIPI_WAIT )",
+ " ( HLT SIPI_WAIT )",
+ " ( SHUTDOWN SIPI_WAIT )",
+ " ( HLT SHUTDOWN SIPI_WAIT )"
+ };
+ uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
+ Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
+ return s_apszActStates[idxActStates];
+}
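+
+/* Decoding example: an activity-states field value of 6 (SHUTDOWN and
+   SIPI_WAIT bits set, HLT clear) indexes the table above and yields
+   " ( SHUTDOWN SIPI_WAIT )". */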
+
+
+/**
+ * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
+ *
+ * @param fFeatMsr The feature control MSR value.
+ */
+static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
+{
+ uint64_t const val = fFeatMsr;
+ LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
+ HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
+ HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
+ HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
+ HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
+ HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
+ HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
+ HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
+ if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
+ LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_BASIC MSR to the log.
+ *
+ * @param uBasicMsr The VMX basic MSR value.
+ */
+static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
+{
+ LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
+ LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
+ LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
+ LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
+ "< 4 GB" : "None"));
+ LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
+ LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
+ LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
+ LogRel(("HM: Supports true-capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
+ LogRel(("HM: VM-entry Xcpt error-code optional = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_XCPT_ERRCODE)));
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_PINBASED_CTLS MSR to the log.
+ *
+ * @param pVmxMsr Pointer to the VMX MSR.
+ */
+static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
+{
+ uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
+ uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
+ LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
+ *
+ * @param pVmxMsr Pointer to the VMX MSR.
+ */
+static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
+{
+ uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
+ uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
+ LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TERTIARY_CTLS", VMX_PROC_CTLS_USE_TERTIARY_CTLS);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
+ *
+ * @param pVmxMsr Pointer to the VMX MSR.
+ */
+static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
+{
+ uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
+ uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
+ LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_XCPT_VE", VMX_PROC_CTLS2_EPT_XCPT_VE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPP_EPT", VMX_PROC_CTLS2_SPP_EPT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_PROCBASED_CTLS3 MSR to the log.
+ *
+ * @param uProcCtls3 The tertiary processor-based VM-execution control MSR.
+ */
+static void hmR3VmxReportProcBasedCtls3Msr(uint64_t uProcCtls3)
+{
+ LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS3 = %#RX64\n", uProcCtls3));
+ LogRel(("HM: LOADIWKEY_EXIT = %RTbool\n", RT_BOOL(uProcCtls3 & VMX_PROC_CTLS3_LOADIWKEY_EXIT)));
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
+ *
+ * @param pVmxMsr Pointer to the VMX MSR.
+ */
+static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
+{
+ uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
+ uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
+ LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_ENTRY_CTLS_LOAD_CET_STATE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_ENTRY_CTLS_LOAD_PKRS_MSR);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
+ *
+ * @param pVmxMsr Pointer to the VMX MSR.
+ */
+static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
+{
+ uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
+ uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
+ LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_EXIT_CTLS_LOAD_CET_STATE);
+ HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_EXIT_CTLS_LOAD_PKRS_MSR);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
+ *
+ * @param fCaps The VMX EPT/VPID capability MSR value.
+ */
+static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
+{
+ LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
+ HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
+ HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
+ HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_5", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_5);
+ HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_UC", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_UC);
+ HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_WB", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB);
+ HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
+ HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
+ HMVMX_REPORT_MSR_CAP(fCaps, "ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
+ HMVMX_REPORT_MSR_CAP(fCaps, "ADVEXITINFO_EPT_VIOLATION", MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION);
+ HMVMX_REPORT_MSR_CAP(fCaps, "SUPER_SHW_STACK", MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
+ HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_MISC MSR to the log.
+ *
+ * @param pVM Pointer to the VM.
+ * @param fMisc The VMX misc. MSR value.
+ */
+static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
+{
+ LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
+ uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
+ if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
+ LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
+ else
+ {
+ LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
+ pVM->hm.s.vmx.cPreemptTimerShift));
+ }
+ LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
+ LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
+ hmR3VmxGetActivityStateAllDesc(fMisc)));
+ LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
+ LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
+ LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
+ LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
+ VMX_MISC_MAX_MSRS(fMisc)));
+ LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
+ LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
+ LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
+ LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
+}
+
+
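+/*
+ * Illustrative sketch (disabled code; assumption-level): per the Intel SDM
+ * the MAX_MSR field of IA32_VMX_MISC encodes a value N such that the
+ * recommended maximum number of entries in the VM-entry/VM-exit MSR lists is
+ * 512 * (N + 1); this is presumably what the VMX_MISC_MAX_MSRS macro used in
+ * the reporting above computes.
+ */
+#if 0
+static uint32_t hmR3SketchMaxMsrListCount(uint64_t fMisc)
+{
+ uint32_t const uEncoded = RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS); /* N */
+ return 512 * (uEncoded + 1); /* Recommended maximum MSR-list entry count. */
+}
+#endif
+
+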
+/**
+ * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
+ *
+ * @param uVmcsEnum The VMX VMCS enum MSR value.
+ */
+static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
+{
+ LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
+ LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
+}
+
+
+/**
+ * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
+ *
+ * @param uVmFunc The VMX VMFUNC MSR value.
+ */
+static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
+{
+ LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
+ HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
+}
+
+
+/**
+ * Reports VMX CR0, CR4 fixed MSRs.
+ *
+ * @param pMsrs Pointer to the VMX MSRs.
+ */
+static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
+{
+ LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
+ LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
+ LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
+ LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
+}
+
+
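+/*
+ * Illustrative sketch (disabled code; not used by VirtualBox): the
+ * FIXED0/FIXED1 MSRs above constrain CR0/CR4 while in VMX operation - a bit
+ * set in FIXED0 must be 1 and a bit clear in FIXED1 must be 0. A candidate
+ * CR0 value is therefore valid exactly when forcing those constraints onto
+ * it leaves it unchanged.
+ */
+#if 0
+static bool hmR3SketchIsCr0ValidForVmx(uint64_t uCr0, PCVMXMSRS pMsrs)
+{
+ uint64_t const fMustBeOne = pMsrs->u64Cr0Fixed0; /* Bits that must be set. */
+ uint64_t const fMayBeOne = pMsrs->u64Cr0Fixed1; /* Bits that may be set. */
+ return ((uCr0 | fMustBeOne) & fMayBeOne) == uCr0;
+}
+#endif
+
+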
+/**
+ * Finish VT-x initialization (after ring-0 init).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR0Intel(PVM pVM)
+{
+ int rc;
+
+ LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
+ AssertLogRelReturn(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl != 0, VERR_HM_IPE_4);
+
+ LogRel(("HM: Using VT-x implementation 3.0\n"));
+ LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
+ LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
+ LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
+ LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
+ LogRel(("HM: Host DR6 zero'ed = %#RX64%s\n", pVM->hm.s.ForR3.vmx.u64HostDr6Zeroed,
+ pVM->hm.s.ForR3.vmx.fAlwaysInterceptMovDRx ? " - always intercept MOV DRx" : ""));
+
+ hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl);
+ hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
+
+ hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
+ hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
+ if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+ hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
+ if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
+ hmR3VmxReportProcBasedCtls3Msr(pVM->hm.s.ForR3.vmx.Msrs.u64ProcCtls3);
+
+ hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
+ hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
+
+ if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
+ {
+ /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
+ LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
+ LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
+ LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
+ LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
+ }
+
+ hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
+ hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
+ if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
+ hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
+ if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
+ hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
+ hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);
+
+#ifdef TODO_9217_VMCSINFO
+ LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PCVMXVMCSINFOSHARED pVmcsInfo = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfo;
+ LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
+ LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysVmcs));
+ }
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if (pVM->cpum.ro.GuestFeatures.fVmx)
+ {
+ LogRel(("HM: Nested-guest:\n"));
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PCVMXVMCSINFOSHARED pVmcsInfoNstGst = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfoNstGst;
+ LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysMsrBitmap));
+ LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysVmcs));
+ }
+ }
+#endif
+#endif /* TODO_9217_VMCSINFO */
+
+ /*
+ * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
+ */
+ AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
+ || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
+ VERR_HM_IPE_1);
+ AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuestCfg
+ || ( (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
+ && pVM->hm.s.fNestedPagingCfg),
+ VERR_HM_IPE_1);
+
+ /*
+ * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
+ * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
+ * in Nehalem and secondary VM-execution controls should be supported by all of them, but nonetheless it's Intel...
+ */
+ if ( !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+ && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
+ {
+ CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
+ LogRel(("HM: Disabled RDTSCP\n"));
+ }
+
+ if (!pVM->hm.s.vmx.fUnrestrictedGuestCfg)
+ {
+ /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
+ rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
+ if (RT_SUCCESS(rc))
+ {
+ /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
+ Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
+ esp. Figure 20-5. */
+ ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
+ pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
+
+ /* Bit set to 0 means software interrupts are redirected to the
+ 8086 program interrupt handler rather than switching to
+ protected-mode handler. */
+ memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
+
+ /* Allow all port IO, so that port IO instructions do not cause
+ exceptions and would instead cause a VM-exit (based on VT-x's
+ IO bitmap which we currently configure to always cause an exit). */
+ memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, X86_PAGE_SIZE * 2);
+ *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
+
+ /*
+ * Construct a 1024 element page directory with 4 MB pages for the identity mapped
+ * page table used in real and protected mode without paging with EPT.
+ */
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + X86_PAGE_SIZE * 3);
+ for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
+ {
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
+ | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
+ | X86_PDE4M_G;
+ }
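+ /* Worked example for illustration: with the flags above, entry 1 maps
+ GCPhys 0x00400000 and - assuming the conventional x86 4M-PDE flag
+ values (P=1, RW=2, US=4, A=0x20, D=0x40, PS=0x80, G=0x100) - reads
+ 0x00400000 | 0x1e7 = 0x004001e7. */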
+
+ /* We convert it here every time as PCI regions could be reconfigured. */
+ if (PDMVmmDevHeapIsEnabled(pVM))
+ {
+ RTGCPHYS GCPhys;
+ rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
+ AssertRCReturn(rc, rc);
+ LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
+
+ rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
+ AssertRCReturn(rc, rc);
+ LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
+ }
+ }
+ else
+ {
+ LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
+ pVM->hm.s.vmx.pRealModeTSS = NULL;
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
+ }
+ }
+
+ LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
+ : "HM: Guest support: 32-bit only\n"));
+
+ /*
+ * Call ring-0 to set up the VM.
+ */
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
+ if (rc != VINF_SUCCESS)
+ {
+ LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ LogRel(("HM: CPU[%u] Last instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
+ LogRel(("HM: CPU[%u] HM error %#x (%u)\n", idCpu, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
+ }
+ HMR3CheckError(pVM, rc);
+ return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
+ }
+
+ LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
+ LogRel(("HM: Enabled VMX\n"));
+ pVM->hm.s.vmx.fEnabled = true;
+
+ hmR3DisableRawMode(pVM); /** @todo make this go away! */
+
+ /*
+ * Log configuration details.
+ */
+ if (pVM->hm.s.fNestedPagingCfg)
+ {
+ LogRel(("HM: Enabled nested paging\n"));
+ if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
+ LogRel(("HM: EPT flush type = Single context\n"));
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
+ LogRel(("HM: EPT flush type = All contexts\n"));
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
+ LogRel(("HM: EPT flush type = Not supported\n"));
+ else
+ LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));
+
+ if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
+ LogRel(("HM: Enabled unrestricted guest execution\n"));
+
+ if (pVM->hm.s.fLargePages)
+ {
+ /* Use large (2 MB) pages for our EPT PDEs where possible. */
+ PGMSetLargePageUsage(pVM, true);
+ LogRel(("HM: Enabled large page support\n"));
+ }
+ }
+ else
+ Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
+
+ if (pVM->hm.s.ForR3.vmx.fVpid)
+ {
+ LogRel(("HM: Enabled VPID\n"));
+ if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
+ LogRel(("HM: VPID flush type = Individual addresses\n"));
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
+ LogRel(("HM: VPID flush type = Single context\n"));
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
+ LogRel(("HM: VPID flush type = All contexts\n"));
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
+ LogRel(("HM: VPID flush type = Single context retain globals\n"));
+ else
+ LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
+ }
+ else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
+ LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
+
+ if (pVM->hm.s.vmx.fUsePreemptTimerCfg)
+ LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
+ else
+ LogRel(("HM: Disabled VMX-preemption timer\n"));
+
+ if (pVM->hm.s.fVirtApicRegs)
+ LogRel(("HM: Enabled APIC-register virtualization support\n"));
+
+ if (pVM->hm.s.fPostedIntrs)
+ LogRel(("HM: Enabled posted-interrupt processing support\n"));
+
+ if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
+ {
+ bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
+ LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
+ }
+
+ return VINF_SUCCESS;
+}
+
+
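+/*
+ * Illustrative sketch (disabled code; assumption-level): when the TRUE_CTLS
+ * bit of IA32_VMX_BASIC is set, the TRUE_* capability MSRs dumped above
+ * exist and may permit clearing some default-1 control bits that the legacy
+ * MSRs report as fixed; a VMM that consumed them would pick its capability
+ * source roughly like this.
+ */
+#if 0
+static uint64_t hmR3SketchPickPinCtlsMsr(PCVMXMSRS pMsrs)
+{
+ return RT_BF_GET(pMsrs->u64Basic, VMX_BF_BASIC_TRUE_CTLS)
+ ? pMsrs->TruePinCtls.u : pMsrs->PinCtls.u;
+}
+#endif
+
+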
+/**
+ * Finish AMD-V initialization (after ring-0 init).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR0Amd(PVM pVM)
+{
+ LogFunc(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
+
+ LogRel(("HM: Using AMD-V implementation 2.0\n"));
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ uint32_t u32Family;
+ uint32_t u32Model;
+ uint32_t u32Stepping;
+ if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
+ LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
+#endif
+ LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
+ LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
+ LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.ForR3.svm.u32Rev));
+ LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.ForR3.uMaxAsid));
+ LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.ForR3.svm.fFeatures));
+
+ /*
+ * Enumerate AMD-V features.
+ */
+ static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
+ {
+#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
+ HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+ HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
+ HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
+ HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
+ HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
+ HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
+ HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
+ HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
+ HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
+ HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
+ HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
+ HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
+ HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
+ HMSVM_REPORT_FEATURE("GMET", X86_CPUID_SVM_FEATURE_EDX_GMET),
+ HMSVM_REPORT_FEATURE("SSSCHECK", X86_CPUID_SVM_FEATURE_EDX_SSSCHECK),
+ HMSVM_REPORT_FEATURE("SPEC_CTRL", X86_CPUID_SVM_FEATURE_EDX_SPEC_CTRL),
+ HMSVM_REPORT_FEATURE("HOST_MCE_OVERRIDE", X86_CPUID_SVM_FEATURE_EDX_HOST_MCE_OVERRIDE),
+ HMSVM_REPORT_FEATURE("TLBICTL", X86_CPUID_SVM_FEATURE_EDX_TLBICTL),
+#undef HMSVM_REPORT_FEATURE
+ };
+
+ uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
+ if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
+ {
+ LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
+ fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
+ }
+ if (fSvmFeatures)
+ for (unsigned iBit = 0; iBit < 32; iBit++)
+ if (RT_BIT_32(iBit) & fSvmFeatures)
+ LogRel(("HM: Reserved bit %u\n", iBit));
+
+ /*
+ * Nested paging is determined in HMR3Init, verify the sanity of that.
+ */
+ AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
+ || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+ VERR_HM_IPE_1);
+
+#if 0
+ /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
+ * here. */
+ if (RTR0IsPostIpiSupport())
+ pVM->hm.s.fPostedIntrs = true;
+#endif
+
+ /*
+ * Determine whether we need to intercept #UD in SVM mode for emulating
+ * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
+ * when executed in long-mode. This is only really applicable when
+ * non-default CPU profiles are in effect, i.e. guest vendor differs
+ * from the host one.
+ */
+ if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
+ switch (CPUMGetGuestCpuVendor(pVM))
+ {
+ case CPUMCPUVENDOR_INTEL:
+ case CPUMCPUVENDOR_VIA: /*?*/
+ case CPUMCPUVENDOR_SHANGHAI: /*?*/
+ switch (CPUMGetHostCpuVendor(pVM))
+ {
+ case CPUMCPUVENDOR_AMD:
+ case CPUMCPUVENDOR_HYGON:
+ if (pVM->hm.s.fAllow64BitGuestsCfg)
+ {
+ LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
+ }
+ break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /*
+ * Call ring-0 to set up the VM.
+ */
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
+ if (rc != VINF_SUCCESS)
+ {
+ AssertMsgFailed(("%Rrc\n", rc));
+ LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
+ return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
+ }
+
+ LogRel(("HM: Enabled SVM\n"));
+ pVM->hm.s.svm.fEnabled = true;
+
+ if (pVM->hm.s.fNestedPagingCfg)
+ {
+ LogRel(("HM: Enabled nested paging\n"));
+
+ /*
+ * Enable large pages (2 MB) if applicable.
+ */
+ if (pVM->hm.s.fLargePages)
+ {
+ PGMSetLargePageUsage(pVM, true);
+ LogRel(("HM: Enabled large page support\n"));
+ }
+ }
+
+ if (pVM->hm.s.fVirtApicRegs)
+ LogRel(("HM: Enabled APIC-register virtualization support\n"));
+
+ if (pVM->hm.s.fPostedIntrs)
+ LogRel(("HM: Enabled posted-interrupt processing support\n"));
+
+ hmR3DisableRawMode(pVM);
+
+ LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
+ : "HM: Disabled TPR patching\n"));
+
+ LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
+ : "HM: Guest support: 32-bit only\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
+{
+ /* Fetch the current paging mode during the relocate callback during state loading. */
+ if (VMR3GetState(pVM) == VMSTATE_LOADING)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
+ }
+ }
+}
+
+
+/**
+ * Terminates the HM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is, at this point, powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
+{
+ if (pVM->hm.s.vmx.pRealModeTSS)
+ {
+ PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
+ pVM->hm.s.vmx.pRealModeTSS = NULL;
+ }
+ hmR3TermCPU(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the per-VCPU HM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3TermCPU(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used by HMR3Reset and CPU hot plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure to reset.
+ */
+VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
+{
+ /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset
+ the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
+ pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
+
+ pVCpu->hm.s.fActive = false;
+ pVCpu->hm.s.Event.fPending = false;
+ pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
+ pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
+ pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
+#endif
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the HM component this means that any GDT/LDT/TSS monitors
+ * need to be removed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
+{
+ LogFlow(("HMR3Reset:\n"));
+
+ if (HMIsEnabled(pVM))
+ hmR3DisableRawMode(pVM);
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ HMR3ResetCpu(pVM->apCpusR3[idCpu]);
+
+ /* Clear all patch information. */
+ pVM->hm.s.pGuestPatchMem = 0;
+ pVM->hm.s.pFreeGuestPatchMem = 0;
+ pVM->hm.s.cbGuestPatchMem = 0;
+ pVM->hm.s.cPatches = 0;
+ pVM->hm.s.PatchTree = 0;
+ pVM->hm.s.fTprPatchingActive = false;
+ ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
+}
+
+
+/**
+ * Callback to remove TPR patches (restores the original instructions).
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The VCPU id of the EMT that issued the original patch request.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+
+ /* Only execute the handler on the VCPU on which the original patch request was issued. */
+ if (pVCpu->idCpu != idCpu)
+ return VINF_SUCCESS;
+
+ Log(("hmR3RemovePatches\n"));
+ for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ uint8_t abInstr[15];
+ PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+ RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
+ int rc;
+
+#ifdef LOG_ENABLED
+ char szOutput[256];
+ rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szOutput, sizeof(szOutput), NULL);
+ if (RT_SUCCESS(rc))
+ Log(("Patched instr: %s\n", szOutput));
+#endif
+
+ /* Check if the instruction is still the same. */
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
+ if (rc != VINF_SUCCESS)
+ {
+ Log(("Patched code removed? (rc=%Rrc0\n", rc));
+ continue; /* swapped out or otherwise removed; skip it. */
+ }
+
+ if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
+ {
+ Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
+ continue; /* skip it. */
+ }
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
+ AssertRC(rc);
+
+#ifdef LOG_ENABLED
+ rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szOutput, sizeof(szOutput), NULL);
+ if (RT_SUCCESS(rc))
+ Log(("Original instr: %s\n", szOutput));
+#endif
+ }
+ pVM->hm.s.cPatches = 0;
+ pVM->hm.s.PatchTree = 0;
+ pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
+ pVM->hm.s.fTprPatchingActive = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for enabling patching in a VT-x/AMD-V guest.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idCpu VCPU to execute hmR3RemovePatches on.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+static DECLCALLBACK(int) hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
+{
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
+ AssertRC(rc);
+
+ pVM->hm.s.pGuestPatchMem = pPatchMem;
+ pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
+ pVM->hm.s.cbGuestPatchMem = cbPatchMem;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enables patching in a VT-x/AMD-V guest.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ VM_ASSERT_EMT(pVM);
+ Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+ if (pVM->cCpus > 1)
+ {
+ /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
+ int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
+ (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+ AssertRC(rc);
+ return rc;
+ }
+ return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+}
+
+
+/**
+ * Disable patching in a VT-x/AMD-V guest.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+ RT_NOREF2(pPatchMem, cbPatchMem);
+
+ Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
+ Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
+
+ /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
+ (void *)(uintptr_t)VMMGetCpuId(pVM));
+ AssertRC(rc);
+
+ pVM->hm.s.pGuestPatchMem = 0;
+ pVM->hm.s.pFreeGuestPatchMem = 0;
+ pVM->hm.s.cbGuestPatchMem = 0;
+ pVM->hm.s.fTprPatchingActive = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Callback to patch a TPR instruction (vmmcall or mov cr8).
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The VCPU id of the EMT that issued the original patch request.
+ *
+ */
+static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ /*
+ * Only execute the handler on the VCPU on which the original patch request
+ * was issued. (The other CPU(s) might not yet have switched to protected
+ * mode, nor have the correct memory context.)
+ */
+ VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+ if (pVCpu->idCpu != idCpu)
+ return VINF_SUCCESS;
+
+ /*
+ * We're racing other VCPUs here, so don't try to patch the instruction twice
+ * and make sure there is still room for our patch record.
+ */
+ PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+ PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+ if (pPatch)
+ {
+ Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ uint32_t const idx = pVM->hm.s.cPatches;
+ if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
+ {
+ Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ pPatch = &pVM->hm.s.aPatches[idx];
+
+ Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
+
+ /*
+ * Disassemble the instruction and get cracking.
+ */
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
+ DISCPUSTATE Dis;
+ uint32_t cbOp;
+ int rc = EMInterpretDisasCurrent(pVCpu, &Dis, &cbOp);
+ AssertRC(rc);
+ if ( rc == VINF_SUCCESS
+ && Dis.pCurInstr->uOpcode == OP_MOV
+ && cbOp >= 3)
+ {
+ static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
+
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
+ AssertRC(rc);
+
+ pPatch->cbOp = cbOp;
+
+ if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
+ {
+ /* write. */
+ if (Dis.Param2.fUse == DISUSE_REG_GEN32)
+ {
+ pPatch->enmType = HMTPRINSTR_WRITE_REG;
+ pPatch->uSrcOperand = Dis.Param2.Base.idxGenReg;
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", Dis.Param2.Base.idxGenReg));
+ }
+ else
+ {
+ Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
+ pPatch->enmType = HMTPRINSTR_WRITE_IMM;
+ pPatch->uSrcOperand = Dis.Param2.uValue;
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", Dis.Param2.uValue));
+ }
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
+ pPatch->cbNewOp = sizeof(s_abVMMCall);
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
+ }
+ else
+ {
+ /*
+ * TPR Read.
+ *
+ * Found:
+ * mov eax, dword [fffe0080] (5 bytes)
+ * Check if next instruction is:
+ * shr eax, 4
+ */
+ Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
+
+ uint8_t const idxMmioReg = Dis.Param1.Base.idxGenReg;
+ uint8_t const cbOpMmio = cbOp;
+ uint64_t const uSavedRip = pCtx->rip;
+
+ pCtx->rip += cbOp;
+ rc = EMInterpretDisasCurrent(pVCpu, &Dis, &cbOp);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
+ pCtx->rip = uSavedRip;
+
+ if ( rc == VINF_SUCCESS
+ && Dis.pCurInstr->uOpcode == OP_SHR
+ && Dis.Param1.fUse == DISUSE_REG_GEN32
+ && Dis.Param1.Base.idxGenReg == idxMmioReg
+ && Dis.Param2.fUse == DISUSE_IMMEDIATE8
+ && Dis.Param2.uValue == 4
+ && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
+ {
+ uint8_t abInstr[15];
+
+ /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
+ access CR8 in 32-bit mode and not cause a #VMEXIT. */
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
+ AssertRC(rc);
+
+ pPatch->cbOp = cbOpMmio + cbOp;
+
+ /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
+ abInstr[0] = 0xf0;
+ abInstr[1] = 0x0f;
+ abInstr[2] = 0x20;
+ abInstr[3] = 0xc0 | Dis.Param1.Base.idxGenReg;
+ for (unsigned i = 4; i < pPatch->cbOp; i++)
+ abInstr[i] = 0x90; /* nop */
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
+ pPatch->cbNewOp = pPatch->cbOp;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
+
+ Log(("Acceptable read/shr candidate!\n"));
+ pPatch->enmType = HMTPRINSTR_READ_SHR4;
+ }
+ else
+ {
+ pPatch->enmType = HMTPRINSTR_READ;
+ pPatch->uDstOperand = idxMmioReg;
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
+ pPatch->cbNewOp = sizeof(s_abVMMCall);
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
+ }
+ }
+
+ pPatch->Core.Key = pCtx->eip;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+
+ pVM->hm.s.cPatches++;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Save invalid patch, so we will not try again.
+ */
+ Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
+ pPatch->Core.Key = pCtx->eip;
+ pPatch->enmType = HMTPRINSTR_INVALID;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+ pVM->hm.s.cPatches++;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
+ return VINF_SUCCESS;
+}
+
+
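+/*
+ * Illustrative sketch (disabled code; not part of VirtualBox): how the
+ * 4-byte sequence patched in above is assembled. On AMD CPUs a LOCK prefix
+ * on MOV reg,CR0 selects the alternate CR8 encoding, letting 32-bit guest
+ * code read the TPR directly without an intercept.
+ */
+#if 0
+static void hmR3SketchEncodeMovFromCr8(uint8_t idxGstReg, uint8_t pbInstr[4])
+{
+ pbInstr[0] = 0xf0; /* LOCK prefix: redirects the CR0 access to CR8 on AMD. */
+ pbInstr[1] = 0x0f; /* Two-byte opcode escape. */
+ pbInstr[2] = 0x20; /* MOV r32, CRn. */
+ pbInstr[3] = 0xc0 | idxGstReg; /* ModRM: mod=11b, reg=CR0 (CR8 w/ LOCK), r/m=GPR. */
+}
+#endif
+
+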
+/**
+ * Callback to patch a TPR instruction (jump to generated code).
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The VCPU id of the EMT that issued the original patch request.
+ *
+ */
+static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ /*
+ * Only execute the handler on the VCPU on which the original patch request
+ * was issued. (The other CPU(s) might not yet have switched to protected
+ * mode, nor have the correct memory context.)
+ */
+ VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+ if (pVCpu->idCpu != idCpu)
+ return VINF_SUCCESS;
+
+ /*
+ * We're racing other VCPUs here, so don't try to patch the instruction twice
+ * and make sure there is still room for our patch record.
+ */
+ PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+ PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+ if (pPatch)
+ {
+ Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ uint32_t const idx = pVM->hm.s.cPatches;
+ if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
+ {
+ Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ pPatch = &pVM->hm.s.aPatches[idx];
+
+ Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
+
+ /*
+ * Disassemble the instruction and get cracking.
+ */
+ DISCPUSTATE Dis;
+ uint32_t cbOp;
+ int rc = EMInterpretDisasCurrent(pVCpu, &Dis, &cbOp);
+ AssertRC(rc);
+ if ( rc == VINF_SUCCESS
+ && Dis.pCurInstr->uOpcode == OP_MOV
+ && cbOp >= 5)
+ {
+ uint8_t aPatch[64];
+ uint32_t off = 0;
+
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
+ AssertRC(rc);
+
+ pPatch->cbOp = cbOp;
+ pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
+
+ if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
+ {
+ /*
+ * TPR write:
+ *
+ * push ECX [51]
+ * push EDX [52]
+ * push EAX [50]
+ * xor EDX,EDX [31 D2]
+ * mov EAX,EAX [89 C0]
+ * or
+ * mov EAX,0000000CCh [B8 CC 00 00 00]
+ * mov ECX,0C0000082h [B9 82 00 00 C0]
+ * wrmsr [0F 30]
+ * pop EAX [58]
+ * pop EDX [5A]
+ * pop ECX [59]
+ * jmp return_address [E9 return_address]
+ */
+ bool fUsesEax = (Dis.Param2.fUse == DISUSE_REG_GEN32 && Dis.Param2.Base.idxGenReg == DISGREG_EAX);
+
+ aPatch[off++] = 0x51; /* push ecx */
+ aPatch[off++] = 0x52; /* push edx */
+ if (!fUsesEax)
+ aPatch[off++] = 0x50; /* push eax */
+ aPatch[off++] = 0x31; /* xor edx, edx */
+ aPatch[off++] = 0xd2;
+ if (Dis.Param2.fUse == DISUSE_REG_GEN32)
+ {
+ if (!fUsesEax)
+ {
+ aPatch[off++] = 0x89; /* mov eax, src_reg */
+ aPatch[off++] = MAKE_MODRM(3, Dis.Param2.Base.idxGenReg, DISGREG_EAX);
+ }
+ }
+ else
+ {
+ Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
+ aPatch[off++] = 0xb8; /* mov eax, immediate */
+ *(uint32_t *)&aPatch[off] = Dis.Param2.uValue;
+ off += sizeof(uint32_t);
+ }
+ aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
+ *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
+ off += sizeof(uint32_t);
+
+ aPatch[off++] = 0x0f; /* wrmsr */
+ aPatch[off++] = 0x30;
+ if (!fUsesEax)
+ aPatch[off++] = 0x58; /* pop eax */
+ aPatch[off++] = 0x5a; /* pop edx */
+ aPatch[off++] = 0x59; /* pop ecx */
+ }
+ else
+ {
+ /*
+ * TPR read:
+ *
+ * push ECX [51]
+ * push EDX [52]
+ * push EAX [50]
+ * mov ECX,0C0000082h [B9 82 00 00 C0]
+ * rdmsr [0F 32]
+ * mov EAX,EAX [89 C0]
+ * pop EAX [58]
+ * pop EDX [5A]
+ * pop ECX [59]
+ * jmp return_address [E9 return_address]
+ */
+ Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
+
+ if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
+ aPatch[off++] = 0x51; /* push ecx */
+ if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
+ aPatch[off++] = 0x52; /* push edx */
+ if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
+ aPatch[off++] = 0x50; /* push eax */
+
+ aPatch[off++] = 0x31; /* xor edx, edx */
+ aPatch[off++] = 0xd2;
+
+ aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
+ *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
+ off += sizeof(uint32_t);
+
+ aPatch[off++] = 0x0f; /* rdmsr */
+ aPatch[off++] = 0x32;
+
+ if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
+ {
+ aPatch[off++] = 0x89; /* mov dst_reg, eax */
+ aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, Dis.Param1.Base.idxGenReg);
+ }
+
+ if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
+ aPatch[off++] = 0x58; /* pop eax */
+ if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
+ aPatch[off++] = 0x5a; /* pop edx */
+ if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
+ aPatch[off++] = 0x59; /* pop ecx */
+ }
+ aPatch[off++] = 0xe9; /* jmp return_address */
+ *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
+ off += sizeof(RTRCUINTPTR);
+
+ if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
+ {
+ /* Write new code to the patch buffer. */
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
+ AssertRC(rc);
+
+#ifdef LOG_ENABLED
+ uint32_t cbCurInstr;
+ for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
+ GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
+ GCPtrInstr += RT_MAX(cbCurInstr, 1))
+ {
+ char szOutput[256];
+ rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szOutput, sizeof(szOutput), &cbCurInstr);
+ if (RT_SUCCESS(rc))
+ Log(("Patch instr %s\n", szOutput));
+ else
+ Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
+ }
+#endif
+
+ pPatch->aNewOpcode[0] = 0xE9;
+ *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
+
+ /* Overwrite the TPR instruction with a jump. */
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
+ AssertRC(rc);
+
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
+
+ pVM->hm.s.pFreeGuestPatchMem += off;
+ pPatch->cbNewOp = 5;
+
+ pPatch->Core.Key = pCtx->eip;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+
+ pVM->hm.s.cPatches++;
+ pVM->hm.s.fTprPatchingActive = true;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
+ return VINF_SUCCESS;
+ }
+
+ Log(("Ran out of space in our patch buffer!\n"));
+ }
+ else
+ Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
+
+ /*
+ * Save invalid patch, so we will not try again.
+ */
+ pPatch = &pVM->hm.s.aPatches[idx];
+ pPatch->Core.Key = pCtx->eip;
+ pPatch->enmType = HMTPRINSTR_INVALID;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+ pVM->hm.s.cPatches++;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
+ return VINF_SUCCESS;
+}
+
+
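+/*
+ * Illustrative sketch (disabled code; assumption-level): both displacement
+ * computations in hmR3PatchTprInstr follow the standard rel32 rule for E9
+ * jumps - the 32-bit operand is the target address minus the address of the
+ * byte following the 5-byte jump instruction.
+ */
+#if 0
+static RTRCUINTPTR hmR3SketchCalcJmpRel32(RTRCUINTPTR uJmpInstrAddr, RTRCUINTPTR uTargetAddr)
+{
+ return uTargetAddr - (uJmpInstrAddr + 5 /* opcode byte + rel32 operand */);
+}
+#endif
+
+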
+/**
+ * Attempts to patch TPR MMIO instructions.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
+{
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
+ pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
+ (void *)(uintptr_t)pVCpu->idCpu);
+ AssertRC(rc);
+ return rc;
+}
+
+
+/**
+ * Checks if we need to reschedule due to VMM device heap changes.
+ *
+ * @returns true if a reschedule is required, otherwise false.
+ * @param pVM The cross context VM structure.
+ * @param pCtx VM execution context.
+ */
+VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
+{
+ /*
+ * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
+ * when the unrestricted guest execution feature is missing (VT-x only).
+ */
+ if ( pVM->hm.s.vmx.fEnabled
+ && !pVM->hm.s.vmx.fUnrestrictedGuestCfg
+ && CPUMIsGuestInRealModeEx(pCtx)
+ && !PDMVmmDevHeapIsEnabled(pVM))
+ return true;
+
+ return false;
+}
+
+
+/**
+ * Notification callback from DBGF when interrupt breakpoints or generic debug
+ * event settings change.
+ *
+ * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
+ * function just updates the VM globals.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
+{
+ /* Interrupts. */
+ bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
+ || pVM->dbgf.ro.cHardIntBreakpoints > 0;
+
+ /* CPU Exceptions. */
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ /* Common VM exits. */
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ /* Vendor specific VM exits. */
+ if (HMR3IsVmxEnabled(pVM->pUVM))
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+ else
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ /* Done. */
+ pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
+}
+
+
+/**
+ * Follow up notification callback to HMR3NotifyDebugEventChanged for each CPU.
+ *
+ * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
+ * per-CPU settings.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
+{
+ pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
+}
+
+
+/**
+ * Checks if we are currently using hardware acceleration.
+ *
+ * @returns true if hardware acceleration is being used, otherwise false.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
+{
+ return pVCpu->hm.s.fActive;
+}
+
+
+/**
+ * External interface for querying whether hardware acceleration is enabled.
+ *
+ * @returns true if VT-x or AMD-V is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ * @sa HMIsEnabled, HMIsEnabledNotMacro.
+ */
+VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
+}
+
+
+/**
+ * External interface for querying whether VT-x is being used.
+ *
+ * @returns true if VT-x is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ * @sa HMR3IsSvmEnabled, HMIsEnabled
+ */
+VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.vmx.fEnabled
+ && pVM->hm.s.vmx.fSupported
+ && pVM->fHMEnabled;
+}
+
+
+/**
+ * External interface for querying whether AMD-V is being used.
+ *
+ * @returns true if AMD-V is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ * @sa HMR3IsVmxEnabled, HMIsEnabled
+ */
+VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.svm.fEnabled
+ && pVM->hm.s.svm.fSupported
+ && pVM->fHMEnabled;
+}
+
+
+/**
+ * Checks if we are currently using nested paging.
+ *
+ * @returns true if nested paging is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.fNestedPagingCfg;
+}
+
+
+/**
+ * Checks if virtualized APIC registers are enabled.
+ *
+ * When enabled this feature allows the hardware to access most of the
+ * APIC registers in the virtual-APIC page without causing VM-exits. See
+ * Intel spec. 29.1.1 "Virtualized APIC Registers".
+ *
+ * @returns true if virtualized APIC registers are enabled, otherwise
+ * false.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.fVirtApicRegs;
+}
+
+
+/**
+ * Checks if APIC posted-interrupt processing is enabled.
+ *
+ * This returns whether we can deliver interrupts to the guest without
+ * leaving guest-context by updating APIC state from host-context.
+ *
+ * @returns true if APIC posted-interrupt processing is enabled,
+ * otherwise false.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.fPostedIntrs;
+}
+
+
+/**
+ * Checks if we are currently using VPID in VT-x mode.
+ *
+ * @returns true if VPID is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.ForR3.vmx.fVpid;
+}
+
+
+/**
+ * Checks if we are currently using VT-x unrestricted execution,
+ * aka UX.
+ *
+ * @returns true if UX is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.vmx.fUnrestrictedGuestCfg
+ || pVM->hm.s.svm.fSupported;
+}
+
+
+/**
+ * Checks if the VMX-preemption timer is being used.
+ *
+ * @returns true if the VMX-preemption timer is being used, otherwise false.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
+{
+ return HMIsEnabled(pVM)
+ && pVM->hm.s.vmx.fEnabled
+ && pVM->hm.s.vmx.fUsePreemptTimerCfg;
+}
+
+
+#ifdef TODO_9217_VMCSINFO
+/**
+ * Helper for HMR3CheckError to log VMCS controls to the release log.
+ *
+ * @param idCpu The Virtual CPU ID.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
+{
+ LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
+ {
+ uint32_t const u32Val = pVmcsInfo->u32PinCtls;
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
+ }
+ LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
+ {
+ uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TERTIARY_CTLS );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
+ }
+ LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
+ {
+ uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_XCPT_VE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPP_EPT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
+ }
+ LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
+ {
+ uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_CET_STATE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PKRS_MSR );
+ }
+ LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
+ {
+ uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_CET_STATE );
+ HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PKRS_MSR );
+ }
+}
+#endif
+
+
+/**
+ * Checks for a fatal VT-x/AMD-V error and produces a meaningful
+ * release log message.
+ *
+ * @param pVM The cross context VM structure.
+ * @param iStatusCode VBox status code.
+ */
+VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
+{
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
+ * might be getting inaccurate values for non-guru'ing EMTs. */
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+#ifdef TODO_9217_VMCSINFO
+ PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
+#endif
+ bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
+ switch (iStatusCode)
+ {
+ case VERR_VMX_INVALID_VMCS_PTR:
+ {
+ LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
+ LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
+#ifdef TODO_9217_VMCSINFO
+ LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", idCpu, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
+ pVmcsInfo->HCPhysVmcs));
+#endif
+ LogRel(("HM: CPU[%u] Current VMCS version %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
+ LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
+ LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
+ break;
+ }
+
+ case VERR_VMX_UNABLE_TO_START_VM:
+ {
+ LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
+ LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
+ LogRel(("HM: CPU[%u] Instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
+ LogRel(("HM: CPU[%u] Exit reason %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32ExitReason));
+
+ if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
+ || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
+ {
+ LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
+ LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
+ }
+ else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
+ {
+#ifdef TODO_9217_VMCSINFO
+ hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
+ LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
+ LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrLoad));
+ LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrStore));
+ LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysHostMsrLoad));
+ LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", idCpu, pVmcsInfo->cEntryMsrLoad));
+ LogRel(("HM: CPU[%u] cExitMsrStore %u\n", idCpu, pVmcsInfo->cExitMsrStore));
+ LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", idCpu, pVmcsInfo->cExitMsrLoad));
+#endif
+ }
+ /** @todo Log VM-entry event injection control fields
+ * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
+ * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
+ break;
+ }
+
+ case VERR_VMX_INVALID_GUEST_STATE:
+ {
+ LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
+ LogRel(("HM: CPU[%u] HM error = %#RX32\n", idCpu, pVCpu->hm.s.u32HMError));
+ LogRel(("HM: CPU[%u] Guest-intr. state = %#RX32\n", idCpu, pVCpu->hm.s.vmx.LastError.u32GuestIntrState));
+#ifdef TODO_9217_VMCSINFO
+ hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
+#endif
+ break;
+ }
+
+ /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
+ case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
+ case VERR_VMX_INVALID_VMXON_PTR:
+ case VERR_VMX_UNEXPECTED_EXIT:
+ case VERR_VMX_INVALID_VMCS_FIELD:
+ case VERR_SVM_UNKNOWN_EXIT:
+ case VERR_SVM_UNEXPECTED_EXIT:
+ case VERR_SVM_UNEXPECTED_PATCH_TYPE:
+ case VERR_SVM_UNEXPECTED_XCPT_EXIT:
+ case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
+ break;
+ }
+ }
+
+ if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
+ {
+ LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
+ LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
+ }
+ else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
+ LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * Save only data that cannot be re-loaded while entering HM ring-0 code. This
+ * is because we always save the VM state from ring-3 and thus most HM state
+ * will be re-synced dynamically at runtime and doesn't need to be part of the VM
+ * saved state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ Log(("hmR3Save:\n"));
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ Assert(!pVCpu->hm.s.Event.fPending);
+ if (pVM->cpum.ro.GuestFeatures.fSvm)
+ {
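+            /* Note: the order of these SSMR3Put* calls defines the saved-state
+               layout; hmR3Load must read the fields back in exactly this order. */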
+ PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+ SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
+ SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
+ SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
+ SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
+ SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
+ SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
+ SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
+ SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
+ }
+ }
+
+ /* Save the guest patch data. */
+ SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
+ SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
+ SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
+
+ /* Store all the guest patch records too. */
+ int rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ AssertCompileSize(HMTPRINSTR, 4);
+ PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+ SSMR3PutU32(pSSM, pPatch->Core.Key);
+ SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+ SSMR3PutU32(pSSM, pPatch->cbOp);
+ SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+ SSMR3PutU32(pSSM, pPatch->cbNewOp);
+ SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
+ SSMR3PutU32(pSSM, pPatch->uSrcOperand);
+ SSMR3PutU32(pSSM, pPatch->uDstOperand);
+ SSMR3PutU32(pSSM, pPatch->pJumpTarget);
+ rc = SSMR3PutU32(pSSM, pPatch->cFaults);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ int rc;
+
+ LogFlowFunc(("uVersion=%u\n", uVersion));
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
+ && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
+ && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
+ && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
+ {
+ AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ /*
+ * Load per-VCPU state.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
+ {
+ /* Load the SVM nested hw.virt state if the VM is configured for it. */
+ if (pVM->cpum.ro.GuestFeatures.fSvm)
+ {
+ PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+ SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
+ SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
+ SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
+ SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
+ SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
+ SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
+ SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
+ rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
+ AssertRCReturn(rc, rc);
+ }
+ }
+ else
+ {
+            /* Pending HM event (obsolete for a long time since TRPM holds the info). */
+ SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.fPending);
+ SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.u32ErrCode);
+ SSMR3GetU64(pSSM, &pVCpu->hm.s.Event.u64IntInfo);
+
+ /* VMX fWasInRealMode related data. */
+ uint32_t uDummy;
+ SSMR3GetU32(pSSM, &uDummy);
+ SSMR3GetU32(pSSM, &uDummy);
+ rc = SSMR3GetU32(pSSM, &uDummy);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ /*
+ * Load TPR patching data.
+ */
+ if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
+ {
+ SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
+ SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
+ SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
+
+ /* Fetch all TPR patch records. */
+ rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
+ AssertRCReturn(rc, rc);
+ for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+ SSMR3GetU32(pSSM, &pPatch->Core.Key);
+ SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+ SSMR3GetU32(pSSM, &pPatch->cbOp);
+ SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+ SSMR3GetU32(pSSM, &pPatch->cbNewOp);
+ SSM_GET_ENUM32_RET(pSSM, pPatch->enmType, HMTPRINSTR);
+
+ if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
+ pVM->hm.s.fTprPatchingActive = true;
+ Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTprPatchingActive == false);
+
+ SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
+ SSMR3GetU32(pSSM, &pPatch->uDstOperand);
+ SSMR3GetU32(pSSM, &pPatch->cFaults);
+ rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
+ AssertRCReturn(rc, rc);
+
+ LogFlow(("hmR3Load: patch %d\n", i));
+ LogFlow(("Key = %x\n", pPatch->Core.Key));
+ LogFlow(("cbOp = %d\n", pPatch->cbOp));
+ LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
+ LogFlow(("type = %d\n", pPatch->enmType));
+ LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
+ LogFlow(("dstop = %d\n", pPatch->uDstOperand));
+ LogFlow(("cFaults = %d\n", pPatch->cFaults));
+ LogFlow(("target = %x\n", pPatch->pJumpTarget));
+
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Displays HM info.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ if (HMIsEnabled(pVM))
+ {
+ if (pVM->hm.s.vmx.fSupported)
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
+ else
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
+ pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
+ pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
+ if (pVM->hm.s.vmx.fSupported)
+ {
+ PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
+ bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
+ bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
+
+ pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
+ pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
+ if (fRealOnV86Active)
+ {
+ pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfoShared->RealMode.Eflags.u32);
+ pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfoShared->RealMode.AttrCS.u);
+ pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfoShared->RealMode.AttrSS.u);
+ pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfoShared->RealMode.AttrDS.u);
+ pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfoShared->RealMode.AttrES.u);
+ pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfoShared->RealMode.AttrFS.u);
+ pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfoShared->RealMode.AttrGS.u);
+ }
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
+}
+
+
+/**
+ * Displays the HM Last-Branch-Record info. for the guest.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ if (!HMIsEnabled(pVM))
+ pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
+ else if (HMIsVmxActive(pVM))
+ {
+ if (pVM->hm.s.vmx.fLbrCfg)
+ {
+ PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
+ uint32_t const cLbrStack = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;
+
+ /** @todo r=ramshankar: The index technically varies depending on the CPU, but
+ * 0xf should cover everything we support thus far. Fix if necessary
+ * later. */
+ uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
+            if (idxTopOfStack >= cLbrStack)
+ {
+ pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
+ idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
+ return;
+ }
+
+ /*
+ * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
+ */
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
+ uint32_t idxCurrent = idxTopOfStack;
+ Assert(idxTopOfStack < cLbrStack);
+ Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
+ Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
+ for (;;)
+ {
+ if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
+ pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
+ pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
+ else
+ pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
+
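+                /* Note: the unsigned wrap-around below only lands on cLbrStack - 1 when
+                   cLbrStack is a power of two, which holds for known LBR stack depths. */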
+ idxCurrent = (idxCurrent - 1) % cLbrStack;
+ if (idxCurrent == idxTopOfStack)
+ break;
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "VM not configured to record LBRs for the guest\n");
+ }
+ else
+ {
+ Assert(HMIsSvmActive(pVM));
+ /** @todo SVM: LBRs (get them from VMCB if possible). */
+ pHlp->pfnPrintf(pHlp, "SVM LBR not implemented.\n");
+ }
+}
+
+
+/**
+ * Displays the HM pending event.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ if (HMIsEnabled(pVM))
+ {
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
+ if (pVCpu->hm.s.Event.fPending)
+ {
+ pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
+            pHlp->pfnPrintf(pHlp, "  u32ErrCode        = %#RX32\n", pVCpu->hm.s.Event.u32ErrCode);
+ pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
+ pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
+}
+
+
+/**
+ * Displays the SVM nested-guest VMCB cache.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
+ if ( fSvmEnabled
+ && pVM->cpum.ro.GuestFeatures.fSvm)
+ {
+ PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
+        pHlp->pfnPrintf(pHlp, "  fCacheValid             = %RTbool\n", pVmcbNstGstCache->fCacheValid);
+ pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
+ pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
+ pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
+ pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
+ pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
+ pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
+ pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
+ pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
+ pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
+ pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
+ pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
+ pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
+ }
+ else
+ {
+ if (!fSvmEnabled)
+ pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
+ else
+ pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
+ }
+}
+
diff --git a/src/VBox/VMM/VMMR3/IEMR3.cpp b/src/VBox/VMM/VMMR3/IEMR3.cpp
new file mode 100644
index 00000000..2bb2240d
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/IEMR3.cpp
@@ -0,0 +1,545 @@
+/* $Id: IEMR3.cpp $ */
+/** @file
+ * IEM - Interpreted Execution Manager.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include "IEMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/err.h>
+#ifdef VBOX_WITH_DEBUGGER
+# include <VBox/dbg.h>
+#endif
+
+#include <iprt/assert.h>
+#include <iprt/getopt.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNDBGFINFOARGVINT iemR3InfoITlb;
+static FNDBGFINFOARGVINT iemR3InfoDTlb;
+#ifdef VBOX_WITH_DEBUGGER
+static void iemR3RegisterDebuggerCommands(void);
+#endif
+
+
+static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
+{
+ switch (enmTargetCpu)
+ {
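+        /* Note: '+ (sizeof("IEMTARGETCPU_") - 1)' advances past the common enum
+           prefix so only the CPU name part of the stringified value is returned. */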
+#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
+ CASE_RET_STR(IEMTARGETCPU_8086);
+ CASE_RET_STR(IEMTARGETCPU_V20);
+ CASE_RET_STR(IEMTARGETCPU_186);
+ CASE_RET_STR(IEMTARGETCPU_286);
+ CASE_RET_STR(IEMTARGETCPU_386);
+ CASE_RET_STR(IEMTARGETCPU_486);
+ CASE_RET_STR(IEMTARGETCPU_PENTIUM);
+ CASE_RET_STR(IEMTARGETCPU_PPRO);
+ CASE_RET_STR(IEMTARGETCPU_CURRENT);
+#undef CASE_RET_STR
+ default: return "Unknown";
+ }
+}
+
+
+/**
+ * Initializes the interpreted execution manager.
+ *
+ * This must be called after CPUM as we're querying information from CPUM about
+ * the guest and host CPUs.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) IEMR3Init(PVM pVM)
+{
+ int rc;
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
+
+#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
+ /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
+ * Controls whether the custom VBox specific CPUID host call interface is
+ * enabled or not. */
+# ifdef DEBUG_bird
+ rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
+# else
+ rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
+# endif
+ AssertLogRelRCReturn(rc, rc);
+#endif
+
+ /*
+ * Initialize per-CPU data and register statistics.
+ */
+ uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
+ uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
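+    /* Note: starting the revision counters a few hundred increments shy of the
+       64-bit wrap-around presumably ensures the overflow paths get exercised early. */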
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+        AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
+
+ pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
+ pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
+
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps,    STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
+ "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
+
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Code TLB hits", "/IEM/CPU%u/CodeTlb-Hits", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Data TLB hits", "/IEM/CPU%u/DataTlb-Hits", idCpu);
+#endif
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Code TLB misses", "/IEM/CPU%u/CodeTlb-Misses", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Code TLB revision", "/IEM/CPU%u/CodeTlb-Revision", idCpu);
+ STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Code TLB physical revision", "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Code TLB slow read path", "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
+
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Data TLB misses", "/IEM/CPU%u/DataTlb-Misses", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Data TLB revision", "/IEM/CPU%u/DataTlb-Revision", idCpu);
+ STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
+
+#if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
+ /* Instruction statistics: */
+# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
+ STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
+ STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
+# include "IEMInstructionStatisticsTmpl.h"
+# undef IEM_DO_INSTR_STAT
+#endif
+
+ /*
+ * Host and guest CPU information.
+ */
+ if (idCpu == 0)
+ {
+ pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
+ pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
+ || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
+ ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
+ else
+#endif
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
+
+#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
+ switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
+ {
+ case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
+ case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
+ case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
+ case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
+ case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
+ case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
+ case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
+ case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
+ case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
+ default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
+ }
+ LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
+ iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
+#else
+ LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
+ CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
+#endif
+ }
+ else
+ {
+ pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
+ pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
+ pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
+#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
+ pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
+#endif
+ }
+
+ /*
+ * Mark all buffers free.
+ */
+ uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
+ while (iMemMap-- > 0)
+ pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
+ }
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /*
+ * Register the per-VM VMX APIC-access page handler type.
+ */
+ if (pVM->cpum.ro.GuestFeatures.fVmx)
+ {
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
+ iemVmxApicAccessPageHandler,
+ "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
+ AssertLogRelRCReturn(rc, rc);
+ }
+#endif
+
+ DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
+    DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM data TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
+#ifdef VBOX_WITH_DEBUGGER
+ iemR3RegisterDebuggerCommands();
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+VMMR3DECL(int) IEMR3Term(PVM pVM)
+{
+ NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3DECL(void) IEMR3Relocate(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
+static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
+{
+ if (*pfHeader)
+ return;
+ pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
+ *pfHeader = true;
+}
+
+
+/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
+static void iemR3InfoTlbPrintSlot(PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe, uint32_t uSlot)
+{
+ pHlp->pfnPrintf(pHlp, "%02x: %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s/%s%s%s/%s %s\n",
+ uSlot,
+ (pTlbe->uTag & IEMTLB_REVISION_MASK) == pTlb->uTlbRevision ? "valid "
+ : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
+ : "expired",
+ (pTlbe->uTag & ~IEMTLB_REVISION_MASK) << X86_PAGE_SHIFT,
+ pTlbe->GCPhys, pTlbe->pbMappingR3,
+ (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "NX" : " X",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "RO" : "RW",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "U" : "-",
+ pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "S" : "M",
+ (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
+ : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired");
+}
+
+
+/** Displays one or more TLB slots. */
+static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
+ uint32_t uSlot, uint32_t cSlots, bool *pfHeader)
+{
+ if (uSlot < RT_ELEMENTS(pTlb->aEntries))
+ {
+ if (cSlots > RT_ELEMENTS(pTlb->aEntries))
+ {
+ pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
+ cSlots, RT_ELEMENTS(pTlb->aEntries));
+ cSlots = RT_ELEMENTS(pTlb->aEntries);
+ }
+
+ iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
+ while (cSlots-- > 0)
+ {
+ IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
+ iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
+ uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
+ }
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
+ uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
+}
+
+
+/** Displays the TLB slot for the given address. */
+static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
+ uint64_t uAddress, bool *pfHeader)
+{
+ iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
+
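+    /* The double shift strips the 16 canonical sign-extension bits and the 12-bit
+       page offset, leaving the page tag; the slot is its low 8 bits (assuming a
+       256-entry TLB). */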
+ uint64_t const uTag = (uAddress << 16) >> (X86_PAGE_SHIFT + 16);
+ uint32_t const uSlot = (uint8_t)uTag;
+ IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
+ pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
+ Tlbe.uTag == (uTag | pTlb->uTlbRevision) ? "match"
+ : (Tlbe.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
+ iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
+}
+
+
+/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
+static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
+{
+ /*
+ * This is entirely argument driven.
+ */
+ static RTGETOPTDEF const s_aOptions[] =
+ {
+ { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
+ { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
+ { "all", 'A', RTGETOPT_REQ_NOTHING },
+ { "--all", 'A', RTGETOPT_REQ_NOTHING },
+ { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
+ { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
+ { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
+ };
+
+ char szDefault[] = "-A";
+ char *papszDefaults[2] = { szDefault, NULL };
+ if (cArgs == 0)
+ {
+ cArgs = 1;
+ papszArgs = papszDefaults;
+ }
+
+ RTGETOPTSTATE State;
+ int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
+ AssertRCReturnVoid(rc);
+
+ bool fNeedHeader = true;
+ bool fAddressMode = true;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = VMMGetCpuById(pVM, 0);
+
+ RTGETOPTUNION ValueUnion;
+ while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
+ {
+ switch (rc)
+ {
+ case 'c':
+ if (ValueUnion.u32 >= pVM->cCpus)
+ pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
+ else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
+ {
+ pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
+ fNeedHeader = true;
+ }
+ break;
+
+ case 'a':
+ iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ ValueUnion.u64, &fNeedHeader);
+ fAddressMode = true;
+ break;
+
+ case 'A':
+ iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), &fNeedHeader);
+ break;
+
+ case 'r':
+ iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, &fNeedHeader);
+ fAddressMode = false;
+ break;
+
+ case 's':
+ iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ ValueUnion.u32, 1, &fNeedHeader);
+ fAddressMode = false;
+ break;
+
+ case VINF_GETOPT_NOT_OPTION:
+ if (fAddressMode)
+ {
+ uint64_t uAddr;
+ rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
+ if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
+ iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ uAddr, &fNeedHeader);
+ else
+ pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
+ }
+ else
+ {
+ uint32_t uSlot;
+ rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
+ if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
+ iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
+ uSlot, 1, &fNeedHeader);
+ else
+ pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
+ }
+ break;
+
+ case 'h':
+ pHlp->pfnPrintf(pHlp,
+ "Usage: info %ctlb [options]\n"
+ "\n"
+ "Options:\n"
+ " -c<n>, --cpu=<n>, --vcpu=<n>\n"
+                             "    Selects the CPU whose TLBs we're looking at. Default: Caller / 0\n"
+ " -A, --all, all\n"
+ " Display all the TLB entries (default if no other args).\n"
+ " -a<virt>, --address=<virt>\n"
+ " Shows the TLB entry for the specified guest virtual address.\n"
+ " -r<slot:count>, --range=<slot:count>\n"
+ " Shows the TLB entries for the specified slot range.\n"
+                             "  -s<slot>, --slot=<slot>\n"
+                             "    Shows the given TLB slot.\n"
+                             "\n"
+                             "Non-options are interpreted according to the last -a, -r or -s option,\n"
+                             "defaulting to addresses if not preceded by any of those options.\n"
+ , fITlb ? 'i' : 'd');
+ return;
+
+ default:
+ pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
+ return;
+ }
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
+ */
+static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
+ */
+static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
+}
+
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/** @callback_method_impl{FNDBGCCMD,
+ *        Implements the 'iemflushtlb' command. }
+ */
+static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
+ if (pVCpu)
+ {
+ VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAll, 1, pVCpu);
+ return VINF_SUCCESS;
+ }
+ RT_NOREF(paArgs, cArgs);
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
+}
+
+
+/**
+ * Called by IEMR3Init to register debugger commands.
+ */
+static void iemR3RegisterDebuggerCommands(void)
+{
+ /*
+ * Register debugger commands.
+ */
+ static DBGCCMD const s_aCmds[] =
+ {
+ {
+ /* .pszCmd = */ "iemflushtlb",
+ /* .cArgsMin = */ 0,
+ /* .cArgsMax = */ 0,
+ /* .paArgDescs = */ NULL,
+ /* .cArgDescs = */ 0,
+ /* .fFlags = */ 0,
+ /* .pfnHandler = */ iemR3DbgFlushTlbs,
+ /* .pszSyntax = */ "",
+            /* .pszDescription = */ "Flushes the code and data TLBs"
+ },
+ };
+
+ int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
+ AssertLogRelRC(rc);
+}
+
+#endif
+
diff --git a/src/VBox/VMM/VMMR3/IOM.cpp b/src/VBox/VMM/VMMR3/IOM.cpp
new file mode 100644
index 00000000..2a909796
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/IOM.cpp
@@ -0,0 +1,477 @@
+/* $Id: IOM.cpp $ */
+/** @file
+ * IOM - Input / Output Monitor.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_iom IOM - The Input / Output Monitor
+ *
+ * The input/output monitor handles I/O exceptions and routes them to the
+ * appropriate device. It implements an API to register and deregister virtual
+ * I/O port handlers and memory mapped I/O handlers. A handler consists of a
+ * PDM device and a set of callback functions.
+ *
+ * @see grp_iom
+ *
+ *
+ * @section sec_iom_rawmode Raw-Mode
+ *
+ * In raw-mode I/O port access is trapped (\#GP(0)) by ensuring that the actual
+ * IOPL is 0 regardless of what the guest IOPL is. The \#GP handler uses the
+ * disassembler (DIS) to figure out which instruction caused it (there are a
+ * number of instructions in addition to the I/O ones) and if it's an I/O port
+ * access it will hand it to IOMRCIOPortHandler (via EMInterpretPortIO).
+ * IOMRCIOPortHandler will look up the port in the AVL tree of registered
+ * handlers. If found, the handler will be called; otherwise the default action
+ * is taken. (The default action is to write into the void and read all set bits.)
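+ *
+ * As a rough sketch (not the actual IOM code), the default read action for an
+ * unclaimed port amounts to returning all-set bits for the access width:
+ * @code
+ *      static uint32_t iomDefaultPortReadSketch(unsigned cbAccess)
+ *      {
+ *          // 1, 2 and 4 byte reads of unclaimed ports yield all-ones.
+ *          return cbAccess >= 4 ? UINT32_MAX : (UINT32_C(1) << (cbAccess * 8)) - 1;
+ *      }
+ * @endcode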
+ *
+ * Memory Mapped I/O (MMIO) is implemented as a slightly special case of PGM
+ * access handlers. An MMIO range is registered with IOM, which then registers
+ * it with the PGM access handler sub-system. The access handler catches all
+ * accesses and will be called in the context of a \#PF handler. In RC and R0 this
+ * handler is iomMmioPfHandler while in ring-3 it's iomR3MmioHandler (although
+ * in ring-3 there can be alternative ways). iomMmioPfHandler will attempt to
+ * emulate the instruction that is doing the access and pass the corresponding
+ * reads / writes to the device.
+ *
+ * Emulating I/O port access is less complex and should be slightly faster than
+ * emulating MMIO, so in most cases we should encourage the OS to use port I/O.
+ * Devices which are frequently accessed should register GC handlers to speed up
+ * execution.
+ *
+ *
+ * @section sec_iom_hm Hardware Assisted Virtualization Mode
+ *
+ * When running in hardware assisted virtualization mode we'll be doing much the
+ * same thing as in raw-mode. The main difference is that we're running in the
+ * host ring-0 context and that we don't get faults (\#GP(0) and \#PG) but
+ * exits.
+ *
+ *
+ * @section sec_iom_rem Recompiled Execution Mode
+ *
+ * When running in the recompiler things are different. I/O port access is
+ * handled by calling IOMIOPortRead and IOMIOPortWrite directly, while MMIO can
+ * be handled in one of two ways. The normal way is that we have registered a
+ * special RAM range with the recompiler and in the three callbacks (for byte,
+ * word and dword access) we call IOMMMIORead and IOMMMIOWrite directly. The
+ * alternative way is that the physical memory access, which goes via PGM, is
+ * taken care of by calling iomR3MmioHandler via the PGM access handler
+ * machinery - this shouldn't happen but it is an alternative...
+ *
+ *
+ * @section sec_iom_other Other Accesses
+ *
+ * I/O ports aren't really exposed in any other way, unless you count the
+ * instruction interpreter in EM, but that's just what we're doing in the
+ * raw-mode \#GP(0) case really. Now, it's possible to call IOMIOPortRead and
+ * IOMIOPortWrite directly to talk to a device, but this is really bad behavior
+ * and should only be done as temporary hacks (the PC BIOS device used to set up
+ * the CMOS this way back in the dark ages).
+ *
+ * MMIO has similar direct routes as the I/O ports and these shouldn't be used
+ * for the same reasons and with the same restrictions. OTOH since MMIO is
+ * mapped into the physical memory address space, it can be accessed in a number
+ * of ways thru PGM.
+ *
+ *
+ * @section sec_iom_logging Logging Levels
+ *
+ * The following assignments are used:
+ *      - Level 5 is used for deferring I/O port and MMIO writes to ring-3.
+ *
+ */
+
+/** @todo MMIO - simplifying the device end.
+ * - Add a return status for doing DBGFSTOP on access where there are no known
+ * registers.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_IOM
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmdev.h>
+#include "IOMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/param.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/string.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+
+
+
+/**
+ * Initializes the IOM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) IOMR3Init(PVM pVM)
+{
+ LogFlow(("IOMR3Init:\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, iom.s, 32);
+ AssertCompile(sizeof(pVM->iom.s) <= sizeof(pVM->iom.padding));
+ AssertCompileMemberAlignment(IOM, CritSect, sizeof(uintptr_t));
+
+ /*
+     * Initialize the IOM critical section.
+ */
+#ifdef IOM_WITH_CRIT_SECT_RW
+ int rc = PDMR3CritSectRwInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
+#else
+ int rc = PDMR3CritSectInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
+#endif
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the MMIO access handler type.
+ */
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO, 0 /*fFlags*/,
+ iomMmioHandlerNew, "MMIO", &pVM->iom.s.hNewMmioHandlerType);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Info.
+ */
+ DBGFR3InfoRegisterInternal(pVM, "ioport", "Dumps all IOPort ranges. No arguments.", &iomR3IoPortInfo);
+ DBGFR3InfoRegisterInternal(pVM, "mmio", "Dumps all MMIO ranges. No arguments.", &iomR3MmioInfo);
+
+ /*
+ * Statistics (names are somewhat contorted to make the registration
+ * sub-trees appear at the end of each group).
+ */
+ STAM_REG(pVM, &pVM->iom.s.StatIoPortCommits, STAMTYPE_COUNTER, "/IOM/IoPortCommits", STAMUNIT_OCCURENCES, "Number of ring-3 I/O port commits.");
+ STAM_REG(pVM, &pVM->iom.s.StatIoPortIn, STAMTYPE_COUNTER, "/IOM/IoPortIN", STAMUNIT_OCCURENCES, "Number of IN instructions (attempts)");
+ STAM_REG(pVM, &pVM->iom.s.StatIoPortInS, STAMTYPE_COUNTER, "/IOM/IoPortINS", STAMUNIT_OCCURENCES, "Number of INS instructions (attempts)");
+    STAM_REG(pVM, &pVM->iom.s.StatIoPortOut,        STAMTYPE_COUNTER, "/IOM/IoPortOUT",  STAMUNIT_OCCURENCES, "Number of OUT instructions (attempts)");
+ STAM_REG(pVM, &pVM->iom.s.StatIoPortOutS, STAMTYPE_COUNTER, "/IOM/IoPortOUTS", STAMUNIT_OCCURENCES, "Number of OUTS instructions (attempts)");
+
+ STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerR3, STAMTYPE_COUNTER, "/IOM/MmioHandlerR3", STAMUNIT_OCCURENCES, "Number of calls to iomMmioHandlerNew from ring-3.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerR0, STAMTYPE_COUNTER, "/IOM/MmioHandlerR0", STAMUNIT_OCCURENCES, "Number of calls to iomMmioHandlerNew from ring-0.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioReadsR0ToR3, STAMTYPE_COUNTER, "/IOM/MmioR0ToR3Reads", STAMUNIT_OCCURENCES, "Number of reads deferred to ring-3.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioWritesR0ToR3, STAMTYPE_COUNTER, "/IOM/MmioR0ToR3Writes", STAMUNIT_OCCURENCES, "Number of writes deferred to ring-3.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioCommitsR0ToR3,STAMTYPE_COUNTER, "/IOM/MmioR0ToR3Commits", STAMUNIT_OCCURENCES, "Number of commits deferred to ring-3.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioPfHandler, STAMTYPE_PROFILE, "/IOM/MmioPfHandler", STAMUNIT_TICKS_PER_CALL, "Number of calls to iomMmioPfHandlerNew.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioPhysHandler, STAMTYPE_PROFILE, "/IOM/MmioPhysHandler", STAMUNIT_TICKS_PER_CALL, "Number of calls to IOMR0MmioPhysHandler.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioCommitsDirect,STAMTYPE_COUNTER, "/IOM/MmioCommitsDirect", STAMUNIT_OCCURENCES, "Number of ring-3 MMIO commits direct to handler via handle hint.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioCommitsPgm, STAMTYPE_COUNTER, "/IOM/MmioCommitsPgm", STAMUNIT_OCCURENCES, "Number of ring-3 MMIO commits via PGM.");
+    STAM_REL_REG(pVM, &pVM->iom.s.StatMmioStaleMappings, STAMTYPE_COUNTER, "/IOM/MmioMappingsStale",   STAMUNIT_OCCURENCES, "Number of times iomMmioHandlerNew got a call for a remapped range at the old mapping.");
+ STAM_REL_REG(pVM, &pVM->iom.s.StatMmioTooDeepRecursion, STAMTYPE_COUNTER, "/IOM/MmioTooDeepRecursion", STAMUNIT_OCCURENCES, "Number of times iomMmioHandlerNew detected too deep recursion and took default action.");
+ STAM_REG(pVM, &pVM->iom.s.StatMmioDevLockContentionR0, STAMTYPE_COUNTER, "/IOM/MmioDevLockContentionR0", STAMUNIT_OCCURENCES, "Number of device lock contention force return to ring-3.");
+
+ LogFlow(("IOMR3Init: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when a VM initialization stage is completed.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat The initialization state that was completed.
+ */
+VMMR3_INT_DECL(int) IOMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+#ifdef VBOX_WITH_STATISTICS
+ if (enmWhat == VMINITCOMPLETED_RING0)
+ {
+ /*
+ * Synchronize the ring-3 I/O port and MMIO statistics indices into the
+         * ring-0 tables to simplify ring-0 code. This also makes sure that any
+ * later calls to grow the statistics tables will fail.
+ */
+ if (!SUPR3IsDriverless())
+ {
+ int rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_SYNC_STATS_INDICES, 0, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Register I/O port and MMIO stats now that we're done registering MMIO
+ * regions and won't grow the table again.
+ */
+ for (uint32_t i = 0; i < pVM->iom.s.cIoPortRegs; i++)
+ {
+ PIOMIOPORTENTRYR3 pRegEntry = &pVM->iom.s.paIoPortRegs[i];
+ if ( pRegEntry->fMapped
+ && pRegEntry->idxStats != UINT16_MAX)
+ iomR3IoPortRegStats(pVM, pRegEntry);
+ }
+
+ for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
+ {
+ PIOMMMIOENTRYR3 pRegEntry = &pVM->iom.s.paMmioRegs[i];
+ if ( pRegEntry->fMapped
+ && pRegEntry->idxStats != UINT16_MAX)
+ iomR3MmioRegStats(pVM, pRegEntry);
+ }
+ }
+#else
+ RT_NOREF(pVM, enmWhat);
+#endif
+
+ /*
+ * Freeze I/O port and MMIO registrations.
+ */
+ pVM->iom.s.fIoPortsFrozen = true;
+ pVM->iom.s.fMmioFrozen = true;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) IOMR3Reset(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * The IOM will update the addresses used by the switcher.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) IOMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ RT_NOREF(pVM, offDelta);
+}
+
+/**
+ * Terminates the IOM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) IOMR3Term(PVM pVM)
+{
+ /*
+ * IOM is not owning anything but automatically freed resources,
+ * so there's nothing to do here.
+ */
+ NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles the unlikely and probably fatal merge cases.
+ *
+ * @returns Merged status code.
+ * @param rcStrict Current EM status code.
+ * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
+ * with @a rcStrict.
+ * @param rcIom For logging purposes only.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT. For logging purposes.
+ */
+DECL_NO_INLINE(static, VBOXSTRICTRC) iomR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
+ int rcIom, PVMCPU pVCpu)
+{
+ if (RT_FAILURE_NP(rcStrict))
+ return rcStrict;
+
+ if (RT_FAILURE_NP(rcStrictCommit))
+ return rcStrictCommit;
+
+ if (rcStrict == rcStrictCommit)
+ return rcStrictCommit;
+
+    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc IOPort={%#06x<-%#x/%u} MMIO={%RGp<-%.*Rhxs} (rcIom=%Rrc)\n",
+ VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict),
+ pVCpu->iom.s.PendingIOPortWrite.IOPort,
+ pVCpu->iom.s.PendingIOPortWrite.u32Value, pVCpu->iom.s.PendingIOPortWrite.cbValue,
+ pVCpu->iom.s.PendingMmioWrite.GCPhys,
+ pVCpu->iom.s.PendingMmioWrite.cbValue, &pVCpu->iom.s.PendingMmioWrite.abValue[0], rcIom));
+ return VERR_IOM_FF_STATUS_IPE;
+}
+
+
+/**
+ * Helper for IOMR3ProcessForceFlag.
+ *
+ * @returns Merged status code.
+ * @param rcStrict Current EM status code.
+ * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
+ * with @a rcStrict.
+ * @param rcIom Either VINF_IOM_R3_IOPORT_COMMIT_WRITE or
+ * VINF_IOM_R3_MMIO_COMMIT_WRITE.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ */
+DECLINLINE(VBOXSTRICTRC) iomR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, int rcIom, PVMCPU pVCpu)
+{
+ /* Simple. */
+ if (RT_LIKELY(rcStrict == rcIom || rcStrict == VINF_EM_RAW_TO_R3 || rcStrict == VINF_SUCCESS))
+ return rcStrictCommit;
+
+ if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
+ return rcStrict;
+
+ /* EM scheduling status codes. */
+ if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
+ && rcStrict <= VINF_EM_LAST))
+ {
+ if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
+ && rcStrictCommit <= VINF_EM_LAST))
+ return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
+ }
+
+ /* Unlikely */
+ return iomR3MergeStatusSlow(rcStrict, rcStrictCommit, rcIom, pVCpu);
+}
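+
+/*
+ * To illustrate the merge rules above (informative only): merging
+ * rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE with a commit status of
+ * VINF_SUCCESS yields the commit status; merging two EM scheduling codes,
+ * say VINF_EM_OFF and VINF_EM_RESCHEDULE, yields the numerically lower and
+ * thus higher priority VINF_EM_OFF; combinations not handled here end up in
+ * iomR3MergeStatusSlow.
+ */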
+
+
+/**
+ * Called by force-flag handling code when VMCPU_FF_IOM is set.
+ *
+ * @returns Merge between @a rcStrict and what the commit operation returned.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param rcStrict The status code returned by ring-0 or raw-mode.
+ * @thread EMT(pVCpu)
+ *
+ * @remarks The VMCPU_FF_IOM flag is handled before the status codes by EM, so
+ * we're very likely to see @a rcStrict set to
+ * VINF_IOM_R3_IOPORT_COMMIT_WRITE and VINF_IOM_R3_MMIO_COMMIT_WRITE
+ * here.
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) IOMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
+{
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IOM);
+ Assert(pVCpu->iom.s.PendingIOPortWrite.cbValue || pVCpu->iom.s.PendingMmioWrite.cbValue);
+
+ if (pVCpu->iom.s.PendingIOPortWrite.cbValue)
+ {
+ Log5(("IOM: Dispatching pending I/O port write: %#x LB %u -> %RTiop\n", pVCpu->iom.s.PendingIOPortWrite.u32Value,
+ pVCpu->iom.s.PendingIOPortWrite.cbValue, pVCpu->iom.s.PendingIOPortWrite.IOPort));
+ STAM_COUNTER_INC(&pVM->iom.s.StatIoPortCommits);
+ VBOXSTRICTRC rcStrictCommit = IOMIOPortWrite(pVM, pVCpu, pVCpu->iom.s.PendingIOPortWrite.IOPort,
+ pVCpu->iom.s.PendingIOPortWrite.u32Value,
+ pVCpu->iom.s.PendingIOPortWrite.cbValue);
+ pVCpu->iom.s.PendingIOPortWrite.cbValue = 0;
+ rcStrict = iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_IOPORT_COMMIT_WRITE, pVCpu);
+ }
+
+
+ if (pVCpu->iom.s.PendingMmioWrite.cbValue)
+ {
+ Log5(("IOM: Dispatching pending MMIO write: %RGp LB %#x\n",
+ pVCpu->iom.s.PendingMmioWrite.GCPhys, pVCpu->iom.s.PendingMmioWrite.cbValue));
+
+ /* Use new MMIO handle hint and bypass PGM if it still looks right. */
+ size_t idxMmioRegionHint = pVCpu->iom.s.PendingMmioWrite.idxMmioRegionHint;
+ if (idxMmioRegionHint < pVM->iom.s.cMmioRegs)
+ {
+ PIOMMMIOENTRYR3 pRegEntry = &pVM->iom.s.paMmioRegs[idxMmioRegionHint];
+ RTGCPHYS const GCPhysMapping = pRegEntry->GCPhysMapping;
+ RTGCPHYS const offRegion = pVCpu->iom.s.PendingMmioWrite.GCPhys - GCPhysMapping;
+ if (offRegion < pRegEntry->cbRegion && GCPhysMapping != NIL_RTGCPHYS)
+ {
+ STAM_COUNTER_INC(&pVM->iom.s.StatMmioCommitsDirect);
+ VBOXSTRICTRC rcStrictCommit = iomR3MmioCommitWorker(pVM, pVCpu, pRegEntry, offRegion);
+ pVCpu->iom.s.PendingMmioWrite.cbValue = 0;
+ return iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_MMIO_COMMIT_WRITE, pVCpu);
+ }
+ }
+
+ /* Fall back on PGM. */
+ STAM_COUNTER_INC(&pVM->iom.s.StatMmioCommitsPgm);
+ VBOXSTRICTRC rcStrictCommit = PGMPhysWrite(pVM, pVCpu->iom.s.PendingMmioWrite.GCPhys,
+ pVCpu->iom.s.PendingMmioWrite.abValue, pVCpu->iom.s.PendingMmioWrite.cbValue,
+ PGMACCESSORIGIN_IOM);
+ pVCpu->iom.s.PendingMmioWrite.cbValue = 0;
+ rcStrict = iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_MMIO_COMMIT_WRITE, pVCpu);
+ }
+
+ return rcStrict;
+}
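+
+/*
+ * A minimal sketch of the expected ring-3 caller side (illustrative only, not
+ * the actual EM code):
+ *
+ *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
+ *          rcStrict = IOMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
+ */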
+
+
+/**
+ * Notification from DBGF that the number of active I/O port or MMIO
+ * breakpoints has changed.
+ *
+ * For performance reasons, IOM will only call DBGF before doing I/O and MMIO
+ * accesses where there are armed breakpoints.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fPortIo True if there are armed I/O port breakpoints.
+ * @param fMmio True if there are armed MMIO breakpoints.
+ */
+VMMR3_INT_DECL(void) IOMR3NotifyBreakpointCountChange(PVM pVM, bool fPortIo, bool fMmio)
+{
+ /** @todo I/O breakpoints. */
+ RT_NOREF3(pVM, fPortIo, fMmio);
+}
+
+
+/**
+ * Notification from DBGF that an event has been enabled or disabled.
+ *
+ * For performance reasons, IOM may cache the state of events it implements.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmEvent The event.
+ * @param fEnabled The new state.
+ */
+VMMR3_INT_DECL(void) IOMR3NotifyDebugEventChange(PVM pVM, DBGFEVENT enmEvent, bool fEnabled)
+{
+ /** @todo IOM debug events. */
+ RT_NOREF3(pVM, enmEvent, fEnabled);
+}
+
diff --git a/src/VBox/VMM/VMMR3/IOMR3IoPort.cpp b/src/VBox/VMM/VMMR3/IOMR3IoPort.cpp
new file mode 100644
index 00000000..0639d3fe
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/IOMR3IoPort.cpp
@@ -0,0 +1,781 @@
+/* $Id: IOMR3IoPort.cpp $ */
+/** @file
+ * IOM - Input / Output Monitor, I/O port related APIs.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_IOM_IOPORT
+#include <VBox/vmm/iom.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmdev.h>
+#include "IOMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/param.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+
+#include "IOMInline.h"
+
+
+#ifdef VBOX_WITH_STATISTICS
+
+/**
+ * Register statistics for an I/O port entry.
+ */
+void iomR3IoPortRegStats(PVM pVM, PIOMIOPORTENTRYR3 pRegEntry)
+{
+ bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
+ PIOMIOPORTSTATSENTRY pStats = &pVM->iom.s.paIoPortStats[pRegEntry->idxStats];
+ PCIOMIOPORTDESC pExtDesc = pRegEntry->paExtDescs;
+ unsigned uPort = pRegEntry->uPort;
+ unsigned const uFirstPort = uPort;
+ unsigned const uEndPort = uPort + pRegEntry->cPorts;
+
+    /* Register a dummy statistics entry for the prefix. */
+ char szName[80];
+ size_t cchPrefix;
+ if (uFirstPort < uEndPort - 1)
+ cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/IoPorts/%04x-%04x", uFirstPort, uEndPort - 1);
+ else
+ cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/IoPorts/%04x", uPort);
+ const char *pszDesc = pRegEntry->pszDesc;
+ char *pszFreeDesc = NULL;
+ if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
+ pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);
+    int rc = STAMR3Register(pVM, &pStats->Total, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName,
+                            STAMUNIT_NONE, pszDesc);
+ AssertRC(rc);
+ RTStrFree(pszFreeDesc);
+
+ /* Register stats for each port under it */
+ do
+ {
+ size_t cchBaseNm;
+ if (uFirstPort < uEndPort - 1)
+ cchBaseNm = cchPrefix + RTStrPrintf(&szName[cchPrefix], sizeof(szName) - cchPrefix, "/%04x-", uPort);
+ else
+ {
+ szName[cchPrefix] = '/';
+ cchBaseNm = cchPrefix + 1;
+ }
+
+# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchBaseNm], a_sz, sizeof(a_sz));
+ const char * const pszInDesc = pExtDesc ? pExtDesc->pszIn : NULL;
+ const char * const pszOutDesc = pExtDesc ? pExtDesc->pszOut : NULL;
+
+ /* register the statistics counters. */
+ SET_NM_SUFFIX("In-R3");
+ rc = STAMR3Register(pVM, &pStats->InR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, pszInDesc); AssertRC(rc);
+ SET_NM_SUFFIX("Out-R3");
+ rc = STAMR3Register(pVM, &pStats->OutR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, pszOutDesc); AssertRC(rc);
+ if (fDoRZ)
+ {
+ SET_NM_SUFFIX("In-RZ");
+ rc = STAMR3Register(pVM, &pStats->InRZ, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, pszInDesc); AssertRC(rc);
+ SET_NM_SUFFIX("Out-RZ");
+ rc = STAMR3Register(pVM, &pStats->OutRZ, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, pszOutDesc); AssertRC(rc);
+ SET_NM_SUFFIX("In-RZtoR3");
+ rc = STAMR3Register(pVM, &pStats->InRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("Out-RZtoR3");
+ rc = STAMR3Register(pVM, &pStats->OutRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ }
+
+ /* Profiling */
+ SET_NM_SUFFIX("In-R3-Prof");
+ rc = STAMR3Register(pVM, &pStats->ProfInR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, pszInDesc); AssertRC(rc);
+ SET_NM_SUFFIX("Out-R3-Prof");
+ rc = STAMR3Register(pVM, &pStats->ProfOutR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, pszOutDesc); AssertRC(rc);
+ if (fDoRZ)
+ {
+ SET_NM_SUFFIX("In-RZ-Prof");
+ rc = STAMR3Register(pVM, &pStats->ProfInRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, pszInDesc); AssertRC(rc);
+ SET_NM_SUFFIX("Out-RZ-Prof");
+ rc = STAMR3Register(pVM, &pStats->ProfOutRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, pszOutDesc); AssertRC(rc);
+ }
+
+ pStats++;
+ uPort++;
+ if (pExtDesc)
+ pExtDesc = pszInDesc || pszOutDesc ? pExtDesc + 1 : NULL;
+ } while (uPort < uEndPort);
+}
+
+
+/**
+ * Deregister statistics for an I/O port entry.
+ */
+static void iomR3IoPortDeregStats(PVM pVM, PIOMIOPORTENTRYR3 pRegEntry, unsigned uPort)
+{
+ char szPrefix[80];
+ size_t cchPrefix;
+ if (pRegEntry->cPorts > 1)
+ cchPrefix = RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/IoPorts/%04x-%04x", uPort, uPort + pRegEntry->cPorts - 1);
+ else
+ cchPrefix = RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/IoPorts/%04x", uPort);
+ STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
+}
+
+#endif /* VBOX_WITH_STATISTICS */
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWIN,
+ * Dummy Port I/O Handler for IN operations.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+iomR3IOPortDummyNewIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t *pu32, unsigned cb)
+{
+ NOREF(pDevIns); NOREF(pvUser); NOREF(Port);
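+    /* Unclaimed I/O ports read as all-ones, mimicking a floating/undriven bus. */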
+ switch (cb)
+ {
+ case 1: *pu32 = 0xff; break;
+ case 2: *pu32 = 0xffff; break;
+ case 4: *pu32 = UINT32_C(0xffffffff); break;
+ default:
+ AssertReleaseMsgFailed(("cb=%d\n", cb));
+ return VERR_IOM_IOPORT_IPE_2;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWINSTRING,
+ * Dummy Port I/O Handler for string IN operations.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+iomR3IOPortDummyNewInStr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint8_t *pbDst, uint32_t *pcTransfer, unsigned cb)
+{
+ NOREF(pDevIns); NOREF(pvUser); NOREF(Port); NOREF(pbDst); NOREF(pcTransfer); NOREF(cb);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT,
+ * Dummy Port I/O Handler for OUT operations.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+iomR3IOPortDummyNewOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+ NOREF(pDevIns); NOREF(pvUser); NOREF(Port); NOREF(u32); NOREF(cb);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUTSTRING,
+ * Dummy Port I/O Handler for string OUT operations.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+iomR3IOPortDummyNewOutStr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint8_t const *pbSrc, uint32_t *pcTransfer, unsigned cb)
+{
+ NOREF(pDevIns); NOREF(pvUser); NOREF(Port); NOREF(pbSrc); NOREF(pcTransfer); NOREF(cb);
+ return VINF_SUCCESS;
+}
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * Grows the statistics table.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param   cNewEntries     The minimum number of new entries.
+ * @see IOMR0IoPortGrowStatisticsTable
+ */
+static int iomR3IoPortGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
+{
+ AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_IOPORT_REGISTRATIONS);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_IO_PORT_STATS, cNewEntries, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ AssertReturn(cNewEntries <= pVM->iom.s.cIoPortStatsAllocation, VERR_IOM_IOPORT_IPE_2);
+ }
+ else
+ {
+ /*
+ * Validate input and state.
+ */
+ uint32_t const cOldEntries = pVM->iom.s.cIoPortStatsAllocation;
+ AssertReturn(cNewEntries > cOldEntries, VERR_IOM_IOPORT_IPE_1);
+ AssertReturn(pVM->iom.s.cIoPortStats <= cOldEntries, VERR_IOM_IOPORT_IPE_2);
+
+ /*
+ * Calc size and allocate a new table.
+ */
+ uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMIOPORTSTATSENTRY), HOST_PAGE_SIZE);
+ cNewEntries = cbNew / sizeof(IOMIOPORTSTATSENTRY);
+
+ PIOMIOPORTSTATSENTRY const paIoPortStats = (PIOMIOPORTSTATSENTRY)RTMemPageAllocZ(cbNew);
+ if (paIoPortStats)
+ {
+ /*
+             * Copy over anything old, then update the pointers and free the old table.
+ */
+ PIOMIOPORTSTATSENTRY const pOldIoPortStats = pVM->iom.s.paIoPortStats;
+ if (pOldIoPortStats)
+ memcpy(paIoPortStats, pOldIoPortStats, cOldEntries * sizeof(IOMIOPORTSTATSENTRY));
+
+ pVM->iom.s.paIoPortStats = paIoPortStats;
+ pVM->iom.s.cIoPortStatsAllocation = cNewEntries;
+
+ RTMemPageFree(pOldIoPortStats, RT_ALIGN_32(cOldEntries * sizeof(IOMIOPORTSTATSENTRY), HOST_PAGE_SIZE));
+
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+
+ return rc;
+}
+#endif
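+
+/*
+ * Worked example for the driverless sizing above: with a 4 KiB host page and
+ * a hypothetical 64-byte IOMIOPORTSTATSENTRY, a request for 100 entries
+ * rounds 6400 bytes up to 8192, and cbNew / sizeof(entry) then yields 128
+ * entries, so the tail of the last page isn't wasted.
+ */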
+
+
+/**
+ * Grows the I/O port registration table.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param   cNewEntries     The minimum number of new entries.
+ * @see IOMR0IoPortGrowRegistrationTables
+ */
+static int iomR3IoPortGrowTable(PVM pVM, uint32_t cNewEntries)
+{
+ AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_IOPORT_REGISTRATIONS);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_IO_PORTS, cNewEntries, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ AssertReturn(cNewEntries <= pVM->iom.s.cIoPortAlloc, VERR_IOM_IOPORT_IPE_2);
+ }
+ else
+ {
+ /*
+ * Validate input and state.
+ */
+ uint32_t const cOldEntries = pVM->iom.s.cIoPortAlloc;
+ AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_IOPORT_IPE_1);
+
+ /*
+         * Allocate the new tables.  We use a single allocation for the two tables
+         * (ring-3 registration and lookup); there is no ring-0 table in driverless mode.
+ */
+ uint32_t const cbRing3 = RT_ALIGN_32(cNewEntries * sizeof(IOMIOPORTENTRYR3), HOST_PAGE_SIZE);
+ uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMIOPORTLOOKUPENTRY), HOST_PAGE_SIZE);
+ uint32_t const cbNew = cbRing3 + cbShared;
+
+ /* Use the rounded up space as best we can. */
+ cNewEntries = RT_MIN(cbRing3 / sizeof(IOMIOPORTENTRYR3), cbShared / sizeof(IOMIOPORTLOOKUPENTRY));
+
+ PIOMIOPORTENTRYR3 const paRing3 = (PIOMIOPORTENTRYR3)RTMemPageAllocZ(cbNew);
+ if (paRing3)
+ {
+ PIOMIOPORTLOOKUPENTRY const paLookup = (PIOMIOPORTLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);
+
+ /*
+ * Copy over the old info and initialize the idxSelf and idxStats members.
+ */
+ if (pVM->iom.s.paIoPortRegs != NULL)
+ {
+ memcpy(paRing3, pVM->iom.s.paIoPortRegs, sizeof(paRing3[0]) * cOldEntries);
+ memcpy(paLookup, pVM->iom.s.paIoPortLookup, sizeof(paLookup[0]) * cOldEntries);
+ }
+
+ size_t i = cbRing3 / sizeof(*paRing3);
+ while (i-- > cOldEntries)
+ {
+ paRing3[i].idxSelf = (uint16_t)i;
+ paRing3[i].idxStats = UINT16_MAX;
+ }
+
+ /*
+ * Update the variables and free the old memory.
+ */
+ void * const pvFree = pVM->iom.s.paIoPortRegs;
+
+ pVM->iom.s.paIoPortRegs = paRing3;
+ pVM->iom.s.paIoPortLookup = paLookup;
+ pVM->iom.s.cIoPortAlloc = cNewEntries;
+
+ RTMemPageFree(pvFree,
+ RT_ALIGN_32(cOldEntries * sizeof(IOMIOPORTENTRYR3), HOST_PAGE_SIZE)
+ + RT_ALIGN_32(cOldEntries * sizeof(IOMIOPORTLOOKUPENTRY), HOST_PAGE_SIZE));
+
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+ return rc;
+}
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnIoPortCreateEx.
+ */
+VMMR3_INT_DECL(int) IOMR3IoPortCreate(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT cPorts, uint32_t fFlags, PPDMPCIDEV pPciDev,
+ uint32_t iPciRegion, PFNIOMIOPORTNEWOUT pfnOut, PFNIOMIOPORTNEWIN pfnIn,
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr, PFNIOMIOPORTNEWINSTRING pfnInStr, RTR3PTR pvUser,
+ const char *pszDesc, PCIOMIOPORTDESC paExtDescs, PIOMIOPORTHANDLE phIoPorts)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(phIoPorts, VERR_INVALID_POINTER);
+ *phIoPorts = UINT32_MAX;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+ AssertReturn(!pVM->iom.s.fIoPortsFrozen, VERR_WRONG_ORDER);
+
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ AssertMsgReturn(cPorts > 0 && cPorts <= _8K, ("cPorts=%#x\n", cPorts), VERR_OUT_OF_RANGE);
+ AssertReturn(!(fFlags & ~IOM_IOPORT_F_VALID_MASK), VERR_INVALID_FLAGS);
+
+ AssertReturn(pfnOut || pfnIn || pfnOutStr || pfnInStr, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(pfnOut, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnIn, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnOutStr, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnInStr, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
+ AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);
+ if (paExtDescs)
+ {
+ AssertPtrReturn(paExtDescs, VERR_INVALID_POINTER);
+ for (size_t i = 0;; i++)
+ {
+ const char *pszIn = paExtDescs[i].pszIn;
+            const char *pszOut = paExtDescs[i].pszOut;
+ if (!pszIn && !pszOut)
+ break;
+ AssertReturn(i < _8K, VERR_OUT_OF_RANGE);
+ AssertReturn(!pszIn || strlen(pszIn) < 128, VERR_INVALID_POINTER);
+ AssertReturn(!pszOut || strlen(pszOut) < 128, VERR_INVALID_POINTER);
+ }
+ }
+
+ /*
+ * Ensure that we've got table space for it.
+ */
+#ifndef VBOX_WITH_STATISTICS
+ uint16_t const idxStats = UINT16_MAX;
+#else
+ uint32_t const idxStats = pVM->iom.s.cIoPortStats;
+ uint32_t const cNewIoPortStats = idxStats + cPorts;
+ AssertReturn(cNewIoPortStats <= _64K, VERR_IOM_TOO_MANY_IOPORT_REGISTRATIONS);
+ if (cNewIoPortStats > pVM->iom.s.cIoPortStatsAllocation)
+ {
+ int rc = iomR3IoPortGrowStatisticsTable(pVM, cNewIoPortStats);
+ AssertRCReturn(rc, rc);
+ AssertReturn(idxStats == pVM->iom.s.cIoPortStats, VERR_IOM_IOPORT_IPE_1);
+ }
+#endif
+
+ uint32_t idx = pVM->iom.s.cIoPortRegs;
+ if (idx >= pVM->iom.s.cIoPortAlloc)
+ {
+ int rc = iomR3IoPortGrowTable(pVM, pVM->iom.s.cIoPortAlloc + 1);
+ AssertRCReturn(rc, rc);
+ AssertReturn(idx == pVM->iom.s.cIoPortRegs, VERR_IOM_IOPORT_IPE_1);
+ AssertReturn(idx < pVM->iom.s.cIoPortAlloc, VERR_IOM_IOPORT_IPE_2);
+ }
+
+ /*
+ * Enter it.
+ */
+ pVM->iom.s.paIoPortRegs[idx].pvUser = pvUser;
+ pVM->iom.s.paIoPortRegs[idx].pDevIns = pDevIns;
+ pVM->iom.s.paIoPortRegs[idx].pfnOutCallback = pfnOut ? pfnOut : iomR3IOPortDummyNewOut;
+ pVM->iom.s.paIoPortRegs[idx].pfnInCallback = pfnIn ? pfnIn : iomR3IOPortDummyNewIn;
+ pVM->iom.s.paIoPortRegs[idx].pfnOutStrCallback = pfnOutStr ? pfnOutStr : iomR3IOPortDummyNewOutStr;
+ pVM->iom.s.paIoPortRegs[idx].pfnInStrCallback = pfnInStr ? pfnInStr : iomR3IOPortDummyNewInStr;
+ pVM->iom.s.paIoPortRegs[idx].pszDesc = pszDesc;
+ pVM->iom.s.paIoPortRegs[idx].paExtDescs = paExtDescs;
+ pVM->iom.s.paIoPortRegs[idx].pPciDev = pPciDev;
+ pVM->iom.s.paIoPortRegs[idx].iPciRegion = iPciRegion;
+ pVM->iom.s.paIoPortRegs[idx].cPorts = cPorts;
+ pVM->iom.s.paIoPortRegs[idx].uPort = UINT16_MAX;
+ pVM->iom.s.paIoPortRegs[idx].idxStats = (uint16_t)idxStats;
+ pVM->iom.s.paIoPortRegs[idx].fMapped = false;
+ pVM->iom.s.paIoPortRegs[idx].fFlags = (uint8_t)fFlags;
+ pVM->iom.s.paIoPortRegs[idx].idxSelf = idx;
+
+ pVM->iom.s.cIoPortRegs = idx + 1;
+#ifdef VBOX_WITH_STATISTICS
+ pVM->iom.s.cIoPortStats = cNewIoPortStats;
+#endif
+ *phIoPorts = idx;
+ LogFlow(("IOMR3IoPortCreate: idx=%#x cPorts=%u %s\n", idx, cPorts, pszDesc));
+ return VINF_SUCCESS;
+}
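+
+/*
+ * For context: devices normally reach the worker above through the PDM device
+ * helpers rather than calling IOM directly.  A hedged sketch (check the helper
+ * signature against pdmdev.h; the handlers, port count and base port below are
+ * made up for illustration):
+ *
+ *      IOMIOPORTHANDLE hIoPorts = NIL_IOMIOPORTHANDLE;
+ *      rc = PDMDevHlpIoPortCreate(pDevIns, 4, NULL, UINT32_MAX,
+ *                                 myDevPortOut, myDevPortIn, NULL,
+ *                                 "My device", NULL, &hIoPorts);
+ *      AssertRCReturn(rc, rc);
+ *      rc = PDMDevHlpIoPortMap(pDevIns, hIoPorts, 0x3f0);
+ */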
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnIoPortMap.
+ */
+VMMR3_INT_DECL(int) IOMR3IoPortMap(PVM pVM, PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts, RTIOPORT uPort)
+{
+ /*
+ * Validate input and state.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hIoPorts < pVM->iom.s.cIoPortRegs, VERR_IOM_INVALID_IOPORT_HANDLE);
+ PIOMIOPORTENTRYR3 const pRegEntry = &pVM->iom.s.paIoPortRegs[hIoPorts];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_IOPORT_HANDLE);
+
+ RTIOPORT const cPorts = pRegEntry->cPorts;
+    AssertMsgReturn(cPorts > 0 && cPorts <= _8K, ("cPorts=%#x\n", cPorts), VERR_IOM_IOPORT_IPE_1);
+ AssertReturn((uint32_t)uPort + cPorts <= _64K, VERR_OUT_OF_RANGE);
+ RTIOPORT const uLastPort = uPort + cPorts - 1;
+ LogFlow(("IOMR3IoPortMap: hIoPorts=%#RX64 %RTiop..%RTiop (%u ports)\n", hIoPorts, uPort, uLastPort, cPorts));
+
+ /*
+ * Do the mapping.
+ */
+ int rc = VINF_SUCCESS;
+ IOM_LOCK_EXCL(pVM);
+
+ if (!pRegEntry->fMapped)
+ {
+ uint32_t const cEntries = RT_MIN(pVM->iom.s.cIoPortLookupEntries, pVM->iom.s.cIoPortRegs);
+ Assert(pVM->iom.s.cIoPortLookupEntries == cEntries);
+
+ PIOMIOPORTLOOKUPENTRY paEntries = pVM->iom.s.paIoPortLookup;
+ PIOMIOPORTLOOKUPENTRY pEntry;
+ if (cEntries > 0)
+ {
+ uint32_t iFirst = 0;
+ uint32_t iEnd = cEntries;
+ uint32_t i = cEntries / 2;
+ for (;;)
+ {
+ pEntry = &paEntries[i];
+ if (pEntry->uLastPort < uPort)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ {
+ /* Insert after the entry we just considered: */
+ pEntry += 1;
+ if (i < cEntries)
+ memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
+ break;
+ }
+ }
+ else if (pEntry->uFirstPort > uLastPort)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ {
+ /* Insert at the entry we just considered: */
+ if (i < cEntries)
+ memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
+ break;
+ }
+ }
+ else
+ {
+ /* Oops! We've got a conflict. */
+ AssertLogRelMsgFailed(("%x..%x (%s) conflicts with existing mapping %x..%x (%s)\n",
+ uPort, uLastPort, pRegEntry->pszDesc,
+ pEntry->uFirstPort, pEntry->uLastPort, pVM->iom.s.paIoPortRegs[pEntry->idx].pszDesc));
+ IOM_UNLOCK_EXCL(pVM);
+ return VERR_IOM_IOPORT_RANGE_CONFLICT;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+ }
+ else
+ pEntry = paEntries;
+
+ /*
+ * Fill in the entry and bump the table size.
+ */
+ pEntry->idx = hIoPorts;
+ pEntry->uFirstPort = uPort;
+ pEntry->uLastPort = uLastPort;
+ pVM->iom.s.cIoPortLookupEntries = cEntries + 1;
+
+ pRegEntry->uPort = uPort;
+ pRegEntry->fMapped = true;
+
+#ifdef VBOX_WITH_STATISTICS
+ /* Don't register stats here when we're creating the VM as the
+ statistics table may still be reallocated. */
+ if (pVM->enmVMState >= VMSTATE_CREATED)
+ iomR3IoPortRegStats(pVM, pRegEntry);
+#endif
+
+#ifdef VBOX_STRICT
+ /*
+ * Assert table sanity.
+ */
+ AssertMsg(paEntries[0].uLastPort >= paEntries[0].uFirstPort, ("%#x %#x\n", paEntries[0].uLastPort, paEntries[0].uFirstPort));
+ AssertMsg(paEntries[0].idx < pVM->iom.s.cIoPortRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cIoPortRegs));
+
+ RTIOPORT uPortPrev = paEntries[0].uLastPort;
+ for (size_t i = 1; i <= cEntries; i++)
+ {
+ AssertMsg(paEntries[i].uLastPort >= paEntries[i].uFirstPort, ("%u: %#x %#x\n", i, paEntries[i].uLastPort, paEntries[i].uFirstPort));
+ AssertMsg(paEntries[i].idx < pVM->iom.s.cIoPortRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cIoPortRegs));
+ AssertMsg(uPortPrev < paEntries[i].uFirstPort, ("%u: %#x %#x\n", i, uPortPrev, paEntries[i].uFirstPort));
+ AssertMsg(paEntries[i].uLastPort - paEntries[i].uFirstPort + 1 == pVM->iom.s.paIoPortRegs[paEntries[i].idx].cPorts,
+ ("%u: %#x %#x..%#x -> %u, expected %u\n", i, uPortPrev, paEntries[i].uFirstPort, paEntries[i].uLastPort,
+ paEntries[i].uLastPort - paEntries[i].uFirstPort + 1, pVM->iom.s.paIoPortRegs[paEntries[i].idx].cPorts));
+ uPortPrev = paEntries[i].uLastPort;
+ }
+#endif
+ }
+ else
+ {
+ AssertFailed();
+ rc = VERR_IOM_IOPORTS_ALREADY_MAPPED;
+ }
+
+ IOM_UNLOCK_EXCL(pVM);
+ return rc;
+}
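+
+/*
+ * The lookup table kept sorted by the worker above is what the hot I/O paths
+ * binary search.  A simplified sketch of that consumer side (the real helper
+ * lives in IOMInline.h; this is illustrative):
+ *
+ *      uint32_t iFirst = 0, iEnd = pVM->iom.s.cIoPortLookupEntries;
+ *      while (iFirst < iEnd)
+ *      {
+ *          uint32_t const i = iFirst + (iEnd - iFirst) / 2;
+ *          PIOMIOPORTLOOKUPENTRY const pCur = &pVM->iom.s.paIoPortLookup[i];
+ *          if (uPort < pCur->uFirstPort)
+ *              iEnd = i;
+ *          else if (uPort > pCur->uLastPort)
+ *              iFirst = i + 1;
+ *          else
+ *              return &pVM->iom.s.paIoPortRegs[pCur->idx];
+ *      }
+ *      return NULL;
+ */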
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnIoPortUnmap.
+ */
+VMMR3_INT_DECL(int) IOMR3IoPortUnmap(PVM pVM, PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ /*
+ * Validate input and state.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hIoPorts < pVM->iom.s.cIoPortRegs, VERR_IOM_INVALID_IOPORT_HANDLE);
+ PIOMIOPORTENTRYR3 const pRegEntry = &pVM->iom.s.paIoPortRegs[hIoPorts];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_IOPORT_HANDLE);
+
+ /*
+     * Do the unmapping.
+ */
+ int rc;
+ IOM_LOCK_EXCL(pVM);
+
+ if (pRegEntry->fMapped)
+ {
+ RTIOPORT const uPort = pRegEntry->uPort;
+ RTIOPORT const uLastPort = uPort + pRegEntry->cPorts - 1;
+ uint32_t const cEntries = RT_MIN(pVM->iom.s.cIoPortLookupEntries, pVM->iom.s.cIoPortRegs);
+ Assert(pVM->iom.s.cIoPortLookupEntries == cEntries);
+ Assert(cEntries > 0);
+ LogFlow(("IOMR3IoPortUnmap: hIoPorts=%#RX64 %RTiop..%RTiop (%u ports)\n", hIoPorts, uPort, uLastPort, pRegEntry->cPorts));
+
+ PIOMIOPORTLOOKUPENTRY paEntries = pVM->iom.s.paIoPortLookup;
+ uint32_t iFirst = 0;
+ uint32_t iEnd = cEntries;
+ uint32_t i = cEntries / 2;
+ for (;;)
+ {
+ PIOMIOPORTLOOKUPENTRY pEntry = &paEntries[i];
+ if (pEntry->uLastPort < uPort)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ {
+ rc = VERR_IOM_IOPORT_IPE_1;
+ AssertLogRelMsgFailedBreak(("%x..%x (%s) not found!\n", uPort, uLastPort, pRegEntry->pszDesc));
+ }
+ }
+ else if (pEntry->uFirstPort > uLastPort)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ {
+ rc = VERR_IOM_IOPORT_IPE_1;
+ AssertLogRelMsgFailedBreak(("%x..%x (%s) not found!\n", uPort, uLastPort, pRegEntry->pszDesc));
+ }
+ }
+ else if (pEntry->idx == hIoPorts)
+ {
+ Assert(pEntry->uFirstPort == uPort);
+ Assert(pEntry->uLastPort == uLastPort);
+#ifdef VBOX_WITH_STATISTICS
+ iomR3IoPortDeregStats(pVM, pRegEntry, uPort);
+#endif
+ if (i + 1 < cEntries)
+ memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
+ pVM->iom.s.cIoPortLookupEntries = cEntries - 1;
+ pRegEntry->uPort = UINT16_MAX;
+ pRegEntry->fMapped = false;
+ rc = VINF_SUCCESS;
+ break;
+ }
+ else
+ {
+ AssertLogRelMsgFailed(("Lookig for %x..%x (%s), found %x..%x (%s) instead!\n",
+ uPort, uLastPort, pRegEntry->pszDesc,
+ pEntry->uFirstPort, pEntry->uLastPort, pVM->iom.s.paIoPortRegs[pEntry->idx].pszDesc));
+ rc = VERR_IOM_IOPORT_IPE_1;
+ break;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+
+#ifdef VBOX_STRICT
+ /*
+ * Assert table sanity.
+ */
+ AssertMsg(paEntries[0].uLastPort >= paEntries[0].uFirstPort, ("%#x %#x\n", paEntries[0].uLastPort, paEntries[0].uFirstPort));
+ AssertMsg(paEntries[0].idx < pVM->iom.s.cIoPortRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cIoPortRegs));
+
+ RTIOPORT uPortPrev = paEntries[0].uLastPort;
+ for (i = 1; i < cEntries - 1; i++)
+ {
+ AssertMsg(paEntries[i].uLastPort >= paEntries[i].uFirstPort, ("%u: %#x %#x\n", i, paEntries[i].uLastPort, paEntries[i].uFirstPort));
+ AssertMsg(paEntries[i].idx < pVM->iom.s.cIoPortRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cIoPortRegs));
+ AssertMsg(uPortPrev < paEntries[i].uFirstPort, ("%u: %#x %#x\n", i, uPortPrev, paEntries[i].uFirstPort));
+ AssertMsg(paEntries[i].uLastPort - paEntries[i].uFirstPort + 1 == pVM->iom.s.paIoPortRegs[paEntries[i].idx].cPorts,
+ ("%u: %#x %#x..%#x -> %u, expected %u\n", i, uPortPrev, paEntries[i].uFirstPort, paEntries[i].uLastPort,
+ paEntries[i].uLastPort - paEntries[i].uFirstPort + 1, pVM->iom.s.paIoPortRegs[paEntries[i].idx].cPorts));
+ uPortPrev = paEntries[i].uLastPort;
+ }
+#endif
+ }
+ else
+ {
+ AssertFailed();
+ rc = VERR_IOM_IOPORTS_NOT_MAPPED;
+ }
+
+ IOM_UNLOCK_EXCL(pVM);
+ return rc;
+}
+
+
+/**
+ * Validates @a hIoPorts, making sure it belongs to @a pDevIns.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device which allegedly owns @a hIoPorts.
+ * @param hIoPorts The handle to validate.
+ */
+VMMR3_INT_DECL(int) IOMR3IoPortValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hIoPorts < RT_MIN(pVM->iom.s.cIoPortRegs, pVM->iom.s.cIoPortAlloc), VERR_IOM_INVALID_IOPORT_HANDLE);
+ PIOMIOPORTENTRYR3 const pRegEntry = &pVM->iom.s.paIoPortRegs[hIoPorts];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_IOPORT_HANDLE);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the mapping address of I/O ports @a hIoPorts.
+ *
+ * @returns Mapping address if mapped, UINT32_MAX if not mapped or invalid
+ * input.
+ * @param pVM The cross context VM structure.
+ * @param   pDevIns     The device which allegedly owns @a hIoPorts.
+ * @param hIoPorts The handle to I/O port region.
+ */
+VMMR3_INT_DECL(uint32_t) IOMR3IoPortGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ AssertPtrReturn(pDevIns, UINT32_MAX);
+ AssertReturn(hIoPorts < RT_MIN(pVM->iom.s.cIoPortRegs, pVM->iom.s.cIoPortAlloc), UINT32_MAX);
+ IOMIOPORTENTRYR3 volatile * const pRegEntry = &pVM->iom.s.paIoPortRegs[hIoPorts];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, UINT32_MAX);
+ for (uint32_t iTry = 0; ; iTry++)
+ {
+ bool fMapped = pRegEntry->fMapped;
+ RTIOPORT uPort = pRegEntry->uPort;
+ if ( ( ASMAtomicReadBool(&pRegEntry->fMapped) == fMapped
+ && uPort == pRegEntry->uPort)
+ || iTry > 1024)
+ return fMapped ? uPort : UINT32_MAX;
+ ASMNopPause();
+ }
+}
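+
+/*
+ * The retry loop above compensates for reading fMapped and uPort without
+ * taking the IOM lock: if a concurrent remap changes either value between the
+ * reads, the pair is re-sampled (bounded at roughly 1024 tries) until a
+ * consistent snapshot is seen.
+ */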
+
+
+/**
+ * Display all registered I/O port ranges.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+DECLCALLBACK(void) iomR3IoPortInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+
+    /* No locking needed here as registrations only happen during VMSTATE_CREATING. */
+ pHlp->pfnPrintf(pHlp,
+ "I/O port registrations: %u (%u allocated)\n"
+ " ## Ctx Ports Mapping PCI Description\n",
+ pVM->iom.s.cIoPortRegs, pVM->iom.s.cIoPortAlloc);
+ PIOMIOPORTENTRYR3 paRegs = pVM->iom.s.paIoPortRegs;
+ for (uint32_t i = 0; i < pVM->iom.s.cIoPortRegs; i++)
+ {
+ const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
+ : paRegs[i].fRawMode ? "+C " : " ";
+ if (paRegs[i].fMapped && paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %04x %04x-%04x pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cPorts,
+ paRegs[i].uPort, paRegs[i].uPort + paRegs[i].cPorts - 1,
+ paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
+ else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %04x %04x-%04x %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cPorts,
+ paRegs[i].uPort, paRegs[i].uPort + paRegs[i].cPorts - 1, paRegs[i].pszDesc);
+ else if (paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %04x unmapped pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cPorts,
+ paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
+ else
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %04x unmapped %s\n",
+ paRegs[i].idxSelf, pszRing, paRegs[i].cPorts, paRegs[i].pszDesc);
+ }
+}
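+
+/*
+ * This callback is exposed as a DBGF info item (presumably registered under
+ * the "ioport" name by IOM init code elsewhere in this series), so the dump
+ * can be pulled up from the debugger, e.g.:
+ *
+ *      VBoxDbg> info ioport
+ */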
+
diff --git a/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp b/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp
new file mode 100644
index 00000000..a9fb1c20
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp
@@ -0,0 +1,701 @@
+/* $Id: IOMR3Mmio.cpp $ */
+/** @file
+ * IOM - Input / Output Monitor, MMIO related APIs.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_IOM_MMIO
+#include <VBox/vmm/iom.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmdev.h>
+#include "IOMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/param.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+
+#include "IOMInline.h"
+
+
+#ifdef VBOX_WITH_STATISTICS
+
+/**
+ * Register statistics for an MMIO entry.
+ */
+void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
+{
+ bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
+ PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];
+
+ /* Format the prefix: */
+ char szName[80];
+ size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
+ pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);
+
+ /* Mangle the description if this isn't the first device instance: */
+ const char *pszDesc = pRegEntry->pszDesc;
+ char *pszFreeDesc = NULL;
+ if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
+ pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);
+
+ /* Register statistics: */
+ int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
+ RTStrFree(pszFreeDesc);
+
+# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
+ SET_NM_SUFFIX("/Read-Complicated");
+ rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Read-FFor00");
+ rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Read-R3");
+ rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
+ if (fDoRZ)
+ {
+ SET_NM_SUFFIX("/Read-RZ");
+ rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Read-RZtoR3");
+ rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ }
+ SET_NM_SUFFIX("/Read-Total");
+ rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+
+ SET_NM_SUFFIX("/Write-Complicated");
+ rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Write-R3");
+ rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
+ if (fDoRZ)
+ {
+ SET_NM_SUFFIX("/Write-RZ");
+ rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Write-RZtoR3");
+ rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ SET_NM_SUFFIX("/Write-RZtoR3-Commit");
+ rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+ }
+ SET_NM_SUFFIX("/Write-Total");
+ rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
+}
+
+
+/**
+ * Deregister statistics for an MMIO entry.
+ */
+static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
+{
+ char szPrefix[80];
+ RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
+ STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
+}
+
+
+/**
+ * Grows the statistics table.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param   cNewEntries     The minimum number of new entries.
+ * @see IOMR0IoPortGrowStatisticsTable
+ */
+static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
+{
+ AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
+ }
+ else
+ {
+ /*
+ * Validate input and state.
+ */
+ uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
+ AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
+ AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);
+
+ /*
+ * Calc size and allocate a new table.
+ */
+ uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
+ cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);
+
+ PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
+ if (paMmioStats)
+ {
+ /*
+             * Copy over anything old, then update the pointers and free the old table.
+ */
+ PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
+ if (pOldMmioStats)
+ memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));
+
+ pVM->iom.s.paMmioStats = paMmioStats;
+ pVM->iom.s.cMmioStatsAllocation = cNewEntries;
+
+ RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));
+
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+
+ return rc;
+}
+
+#endif /* VBOX_WITH_STATISTICS */
+
+/**
+ * Grows the MMIO registration table.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param   cNewEntries     The minimum number of new entries.
+ * @see IOMR0MmioGrowRegistrationTables
+ */
+static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
+{
+ AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
+
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
+ }
+ else
+ {
+ /*
+ * Validate input and state.
+ */
+ uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
+ AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);
+
+ /*
+         * Allocate the new tables.  We use a single allocation for the two tables
+         * (ring-3 registration and lookup); there is no ring-0 table in driverless mode.
+ */
+ uint32_t const cbRing3 = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE);
+ uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
+ uint32_t const cbNew = cbRing3 + cbShared;
+
+ /* Use the rounded up space as best we can. */
+ cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));
+
+ PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
+ if (paRing3)
+ {
+ PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);
+
+ /*
+ * Copy over the old info and initialize the idxSelf and idxStats members.
+ */
+ if (pVM->iom.s.paMmioRegs != NULL)
+ {
+ memcpy(paRing3, pVM->iom.s.paMmioRegs, sizeof(paRing3[0]) * cOldEntries);
+ memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
+ }
+
+ size_t i = cbRing3 / sizeof(*paRing3);
+ while (i-- > cOldEntries)
+ {
+ paRing3[i].idxSelf = (uint16_t)i;
+ paRing3[i].idxStats = UINT16_MAX;
+ }
+
+ /*
+ * Update the variables and free the old memory.
+ */
+ void * const pvFree = pVM->iom.s.paMmioRegs;
+
+ pVM->iom.s.paMmioRegs = paRing3;
+ pVM->iom.s.paMmioLookup = paLookup;
+ pVM->iom.s.cMmioAlloc = cNewEntries;
+
+ RTMemPageFree(pvFree,
+ RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
+ + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));
+
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+ return rc;
+}
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
+ */
+VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
+ uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
+ PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
+ *phRegion = UINT32_MAX;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+ AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);
+
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
+ VERR_OUT_OF_RANGE);
+ AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);
+
+ AssertMsgReturn( !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
+ && (fFlags & IOMMMIO_FLAGS_READ_MODE) <= IOMMMIO_FLAGS_READ_DWORD_QWORD
+ && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
+ ("%#x\n", fFlags),
+ VERR_INVALID_FLAGS);
+
+ AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);
+
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
+ AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);
+
+ /*
+ * Ensure that we've got table space for it.
+ */
+#ifndef VBOX_WITH_STATISTICS
+ uint16_t const idxStats = UINT16_MAX;
+#else
+ uint32_t const idxStats = pVM->iom.s.cMmioStats;
+ uint32_t const cNewMmioStats = idxStats + 1;
+ AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
+ if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
+ {
+ int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
+ AssertRCReturn(rc, rc);
+ AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
+ }
+#endif
+
+ uint32_t idx = pVM->iom.s.cMmioRegs;
+ if (idx >= pVM->iom.s.cMmioAlloc)
+ {
+ int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
+ AssertRCReturn(rc, rc);
+ AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
+ }
+
+ /*
+ * Enter it.
+ */
+ pVM->iom.s.paMmioRegs[idx].cbRegion = cbRegion;
+ pVM->iom.s.paMmioRegs[idx].GCPhysMapping = NIL_RTGCPHYS;
+ pVM->iom.s.paMmioRegs[idx].pvUser = pvUser;
+ pVM->iom.s.paMmioRegs[idx].pDevIns = pDevIns;
+ pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
+ pVM->iom.s.paMmioRegs[idx].pfnReadCallback = pfnRead;
+ pVM->iom.s.paMmioRegs[idx].pfnFillCallback = pfnFill;
+ pVM->iom.s.paMmioRegs[idx].pszDesc = pszDesc;
+ pVM->iom.s.paMmioRegs[idx].pPciDev = pPciDev;
+ pVM->iom.s.paMmioRegs[idx].iPciRegion = iPciRegion;
+ pVM->iom.s.paMmioRegs[idx].idxStats = (uint16_t)idxStats;
+ pVM->iom.s.paMmioRegs[idx].fMapped = false;
+ pVM->iom.s.paMmioRegs[idx].fFlags = fFlags;
+ pVM->iom.s.paMmioRegs[idx].idxSelf = idx;
+
+ pVM->iom.s.cMmioRegs = idx + 1;
+#ifdef VBOX_WITH_STATISTICS
+ pVM->iom.s.cMmioStats = cNewMmioStats;
+#endif
+ *phRegion = idx;
+ return VINF_SUCCESS;
+}
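+
+/*
+ * As with I/O ports, devices normally go through the PDM helpers.  A hedged
+ * sketch (verify the helper signature against pdmdev.h; the handlers and the
+ * 4 KiB size are made up for illustration):
+ *
+ *      IOMMMIOHANDLE hMmio = NIL_IOMMMIOHANDLE;
+ *      rc = PDMDevHlpMmioCreate(pDevIns, 0x1000, NULL, UINT32_MAX,
+ *                               myMmioWrite, myMmioRead, NULL,
+ *                               IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
+ *                               "My device MMIO", &hMmio);
+ */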
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnMmioMap.
+ */
+VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
+{
+ /*
+ * Validate input and state.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
+ PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
+
+ RTGCPHYS const cbRegion = pRegEntry->cbRegion;
+ AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
+ RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;
+
+ AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
+ ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
+ GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_IOM_INVALID_MMIO_RANGE);
+ AssertLogRelMsgReturn(GCPhysLast > GCPhys,
+ ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
+ GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_IOM_INVALID_MMIO_RANGE);
+
+ /*
+ * Do the mapping.
+ */
+ int rc = VINF_SUCCESS;
+ IOM_LOCK_EXCL(pVM);
+
+ if (!pRegEntry->fMapped)
+ {
+ uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
+ Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
+
+ PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
+ PIOMMMIOLOOKUPENTRY pEntry;
+ if (cEntries > 0)
+ {
+ uint32_t iFirst = 0;
+ uint32_t iEnd = cEntries;
+ uint32_t i = cEntries / 2;
+ for (;;)
+ {
+ pEntry = &paEntries[i];
+ if (pEntry->GCPhysLast < GCPhys)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ {
+ /* Register with PGM before we shuffle the array: */
+ ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
+ rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
+ hRegion, pRegEntry->pszDesc);
+ AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);
+
+ /* Insert after the entry we just considered: */
+ pEntry += 1;
+ if (i < cEntries)
+ memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
+ break;
+ }
+ }
+ else if (pEntry->GCPhysFirst > GCPhysLast)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ {
+ /* Register with PGM before we shuffle the array: */
+ ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
+ rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
+ hRegion, pRegEntry->pszDesc);
+ AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);
+
+ /* Insert at the entry we just considered: */
+ if (i < cEntries)
+ memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
+ break;
+ }
+ }
+ else
+ {
+ /* Oops! We've got a conflict. */
+ AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
+ GCPhys, GCPhysLast, pRegEntry->pszDesc,
+ pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
+ IOM_UNLOCK_EXCL(pVM);
+ return VERR_IOM_MMIO_RANGE_CONFLICT;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+ }
+ else
+ {
+ /* First entry in the lookup table: */
+ ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
+ rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType, hRegion, pRegEntry->pszDesc);
+ AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);
+
+ pEntry = paEntries;
+ }
+
+ /*
+ * Fill in the entry and bump the table size.
+ */
+ pRegEntry->fMapped = true;
+ pEntry->idx = hRegion;
+ pEntry->GCPhysFirst = GCPhys;
+ pEntry->GCPhysLast = GCPhysLast;
+ pVM->iom.s.cMmioLookupEntries = cEntries + 1;
+
+#ifdef VBOX_WITH_STATISTICS
+ /* Don't register stats here when we're creating the VM as the
+ statistics table may still be reallocated. */
+ if (pVM->enmVMState >= VMSTATE_CREATED)
+ iomR3MmioRegStats(pVM, pRegEntry);
+#endif
+
+#ifdef VBOX_STRICT
+ /*
+ * Assert table sanity.
+ */
+ AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
+ AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));
+
+ RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
+ for (size_t i = 1; i <= cEntries; i++)
+ {
+ AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
+ AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
+ AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
+ GCPhysPrev = paEntries[i].GCPhysLast;
+ }
+#endif
+ }
+ else
+ {
+ AssertFailed();
+ rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
+ }
+
+ IOM_UNLOCK_EXCL(pVM);
+ return rc;
+}
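+
+/*
+ * Note: in the mapping worker above the PGM handler registration is done
+ * before the lookup array is shuffled, so a PGM failure only needs to reset
+ * GCPhysMapping and drop the lock; no table surgery has to be undone.
+ */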
+
+
+/**
+ * Worker for PDMDEVHLPR3::pfnMmioUnmap.
+ */
+VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ /*
+ * Validate input and state.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
+ PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
+
+ /*
+     * Do the unmapping.
+ */
+ int rc;
+ IOM_LOCK_EXCL(pVM);
+
+ if (pRegEntry->fMapped)
+ {
+ RTGCPHYS const GCPhys = pRegEntry->GCPhysMapping;
+ RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
+ uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
+ Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
+ Assert(cEntries > 0);
+
+ PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
+ uint32_t iFirst = 0;
+ uint32_t iEnd = cEntries;
+ uint32_t i = cEntries / 2;
+ for (;;)
+ {
+ PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
+ if (pEntry->GCPhysLast < GCPhys)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ {
+ rc = VERR_IOM_MMIO_IPE_1;
+ AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
+ }
+ }
+ else if (pEntry->GCPhysFirst > GCPhysLast)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ {
+ rc = VERR_IOM_MMIO_IPE_1;
+ AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
+ }
+ }
+ else if (pEntry->idx == hRegion)
+ {
+ Assert(pEntry->GCPhysFirst == GCPhys);
+ Assert(pEntry->GCPhysLast == GCPhysLast);
+#ifdef VBOX_WITH_STATISTICS
+ iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
+#endif
+ if (i + 1 < cEntries)
+ memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
+ pVM->iom.s.cMmioLookupEntries = cEntries - 1;
+
+ rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRegEntry->cbRegion);
+ AssertRC(rc);
+
+ pRegEntry->fMapped = false;
+ ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
+ break;
+ }
+ else
+ {
+ AssertLogRelMsgFailed(("Lookig for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
+ GCPhys, GCPhysLast, pRegEntry->pszDesc,
+ pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
+ rc = VERR_IOM_MMIO_IPE_1;
+ break;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+
+#ifdef VBOX_STRICT
+ /*
+ * Assert table sanity.
+ */
+ AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
+ AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));
+
+ RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
+ for (i = 1; i < cEntries - 1; i++)
+ {
+ AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
+ AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
+ AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
+ GCPhysPrev = paEntries[i].GCPhysLast;
+ }
+#endif
+ }
+ else
+ {
+ AssertFailed();
+ rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
+ }
+
+ IOM_UNLOCK_EXCL(pVM);
+ return rc;
+}
+
+
+VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
+{
+ RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Validates @a hRegion, making sure it belongs to @a pDevIns.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device which allegedly owns @a hRegion.
+ * @param hRegion The handle to validate.
+ */
+VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
+ AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
+ PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the mapping address of MMIO region @a hRegion.
+ *
+ * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
+ * input.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device which allegedly owns @a hRegion.
+ * @param   hRegion     The handle of the MMIO region.
+ */
+VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
+ AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
+ PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
+ AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
+ return pRegEntry->GCPhysMapping;
+}
+
+
+/**
+ * Display all registered MMIO ranges.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+
+    /* No locking needed here as registrations only happen during VMSTATE_CREATING. */
+ pHlp->pfnPrintf(pHlp,
+ "MMIO registrations: %u (%u allocated)\n"
+ " ## Ctx %.*s %.*s PCI Description\n",
+ pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
+ sizeof(RTGCPHYS) * 2, "Size",
+ sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
+ PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
+ for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
+ {
+ const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
+ : paRegs[i].fRawMode ? "+C " : " ";
+ if (paRegs[i].fMapped && paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
+ paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
+ paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
+ else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
+ paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
+ else if (paRegs[i].pPciDev)
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
+ sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
+ else
+ pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
+ sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
+ }
+}
+
diff --git a/src/VBox/VMM/VMMR3/MM.cpp b/src/VBox/VMM/VMMR3/MM.cpp
new file mode 100644
index 00000000..b3a54145
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/MM.cpp
@@ -0,0 +1,728 @@
+/* $Id: MM.cpp $ */
+/** @file
+ * MM - Memory Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_mm MM - The Memory Manager
+ *
+ * The memory manager is in charge of the following memory:
+ * - Hypervisor Memory Area (HMA) - Address space management (obsolete in 6.1).
+ * - Hypervisor Heap - A memory heap that lives in all contexts.
+ * - User-Kernel Heap - A memory heap that lives in both host contexts (ring-3 and ring-0).
+ * - Tagged ring-3 heap.
+ * - Page pools - Primarily used by PGM for shadow page tables.
+ * - Locked process memory - Guest RAM and other. (reduce/obsolete this)
+ * - Physical guest memory (RAM & ROM) - Moving to PGM. (obsolete this)
+ *
+ * The global memory manager (GMM) is the global counterpart / partner of MM.
+ * MM therefore provides ring-3 callable interfaces for some of the GMM APIs
+ * related to resource tracking (PGM is the user).
+ *
+ * @see grp_mm
+ *
+ *
+ * @section sec_mm_hma Hypervisor Memory Area - Obsolete in 6.1
+ *
+ * The HMA is used when executing in raw-mode. We borrow, with the help of
+ * PGMMap, some unused space (one or more page directory entries to be precise)
+ * in the guest's virtual memory context. PGM will monitor the guest's virtual
+ * address space for changes and relocate the HMA when required.
+ *
+ * To give some idea what's in the HMA, study the 'info hma' output:
+ * @verbatim
+VBoxDbg> info hma
+Hypervisor Memory Area (HMA) Layout: Base 00000000a0000000, 0x00800000 bytes
+00000000a05cc000-00000000a05cd000 DYNAMIC fence
+00000000a05c4000-00000000a05cc000 DYNAMIC Dynamic mapping
+00000000a05c3000-00000000a05c4000 DYNAMIC fence
+00000000a05b8000-00000000a05c3000 DYNAMIC Paging
+00000000a05b6000-00000000a05b8000 MMIO2 0000000000000000 PCNetShMem
+00000000a0536000-00000000a05b6000 MMIO2 0000000000000000 VGA VRam
+00000000a0523000-00000000a0536000 00002aaab3d0c000 LOCKED autofree alloc once (PDM_DEVICE)
+00000000a0522000-00000000a0523000 DYNAMIC fence
+00000000a051e000-00000000a0522000 00002aaab36f5000 LOCKED autofree VBoxDD2RC.rc
+00000000a051d000-00000000a051e000 DYNAMIC fence
+00000000a04eb000-00000000a051d000 00002aaab36c3000 LOCKED autofree VBoxDDRC.rc
+00000000a04ea000-00000000a04eb000 DYNAMIC fence
+00000000a04e9000-00000000a04ea000 00002aaab36c2000 LOCKED autofree ram range (High ROM Region)
+00000000a04e8000-00000000a04e9000 DYNAMIC fence
+00000000a040e000-00000000a04e8000 00002aaab2e6d000 LOCKED autofree VMMRC.rc
+00000000a0208000-00000000a040e000 00002aaab2c67000 LOCKED autofree alloc once (PATM)
+00000000a01f7000-00000000a0208000 00002aaaab92d000 LOCKED autofree alloc once (SELM)
+00000000a01e7000-00000000a01f7000 00002aaaab5e8000 LOCKED autofree alloc once (SELM)
+00000000a01e6000-00000000a01e7000 DYNAMIC fence
+00000000a01e5000-00000000a01e6000 00002aaaab5e7000 HCPHYS 00000000c363c000 Core Code
+00000000a01e4000-00000000a01e5000 DYNAMIC fence
+00000000a01e3000-00000000a01e4000 00002aaaaab26000 HCPHYS 00000000619cf000 GIP
+00000000a01a2000-00000000a01e3000 00002aaaabf32000 LOCKED autofree alloc once (PGM_PHYS)
+00000000a016b000-00000000a01a2000 00002aaab233f000 LOCKED autofree alloc once (PGM_POOL)
+00000000a016a000-00000000a016b000 DYNAMIC fence
+00000000a0165000-00000000a016a000 DYNAMIC CR3 mapping
+00000000a0164000-00000000a0165000 DYNAMIC fence
+00000000a0024000-00000000a0164000 00002aaab215f000 LOCKED autofree Heap
+00000000a0023000-00000000a0024000 DYNAMIC fence
+00000000a0001000-00000000a0023000 00002aaab1d24000 LOCKED pages VM
+00000000a0000000-00000000a0001000 DYNAMIC fence
+ @endverbatim
+ *
+ *
+ * @section sec_mm_hyperheap Hypervisor Heap
+ *
+ * The heap is accessible from ring-3, ring-0 and the raw-mode context. That
+ * said, it's not necessarily mapped into ring-0 even if that's possible, since we
+ * don't wish to waste kernel address space without a good reason.
+ *
+ * Allocations within the heap are always at the same relative position in all
+ * contexts, so it's possible to use offset based linking. In fact, the heap
+ * internally uses offset based linked lists for tracking heap blocks. We use
+ * offset linked AVL trees and lists in a lot of places where structures are
+ * shared between RC, R3 and R0, so this is a strict requirement of the heap.
+ * However, this means that we cannot easily extend the heap since the extension
+ * won't necessarily be a continuation of the current heap memory in all (or
+ * any) contexts.
+ *
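+ * As an illustrative sketch of offset based linking (the names below are
+ * hypothetical, not the actual heap internals), the idea is to store
+ * distances instead of pointers:
+ * @verbatim
+    typedef struct EXAMPLENODE
+    {
+        int32_t offNext;   /* Offset from this node to the next one, 0 if last. */
+    } EXAMPLENODE;
+
+    static EXAMPLENODE *exampleNext(EXAMPLENODE *pNode)
+    {
+        /* The offset is relative to the node itself, so the computed address
+           is valid in R3, R0 and RC alike, provided the heap is mapped
+           contiguously in each context. */
+        return pNode->offNext ? (EXAMPLENODE *)((uintptr_t)pNode + pNode->offNext) : NULL;
+    }
+ @endverbatim
+ *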
+ * All allocations are tagged. Per-tag allocation statistics are maintained
+ * and exposed thru STAM when VBOX_WITH_STATISTICS is defined.
+ *
+ *
+ * @section sec_mm_r3heap Tagged Ring-3 Heap
+ *
+ * The ring-3 heap is a wrapper around the RTMem API adding allocation
+ * statistics and automatic cleanup on VM destruction.
+ *
+ * Per-tag allocation statistics are maintained and exposed thru STAM when
+ * VBOX_WITH_STATISTICS is defined.
+ *
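+ * A minimal usage sketch (the tag below is illustrative; real callers pick
+ * the MM_TAG_* value matching their subsystem):
+ * @verbatim
+    void *pv = MMR3HeapAllocZU(pUVM, MM_TAG_VM, 256);
+    if (pv)
+    {
+        /* ... use the block; it is freed automatically on VM destruction,
+           or explicitly like this: */
+        MMR3HeapFree(pv);
+    }
+ @endverbatim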
+ *
+ * @section sec_mm_page Page Pool
+ *
+ * The MM manages a page pool from which other components can allocate locked,
+ * page aligned and page sized memory objects. The pool provides facilities to
+ * convert back and forth between (host) physical and virtual addresses (within
+ * the pool of course). Several specialized interfaces are provided for the most
+ * common allocations and conversions to save the caller from bothersome casting
+ * and extra parameter passing.
+ *
+ *
+ * @section sec_mm_locked Locked Process Memory
+ *
+ * MM manages the locked process memory. This is used for a bunch of things
+ * (count the LOCKED entries in the 'info hma' output found in @ref sec_mm_hma),
+ * but the main consumer of memory is currently for guest RAM. There is an
+ * ongoing rewrite that will move all the guest RAM allocation to PGM and
+ * GMM.
+ *
+ * The locking of memory is done in cooperation with the VirtualBox
+ * support driver, SUPDrv (aka. VBoxDrv), thru the support library API,
+ * SUPR3 (aka. SUPLib).
+ *
+ *
+ * @section sec_mm_phys Physical Guest Memory
+ *
+ * MM is currently managing the physical memory for the guest. It relies heavily
+ * on PGM for this. There is an ongoing rewrite that will move this to PGM. (The
+ * rewrite is driven by the need for more flexible guest ram allocation, but
+ * also motivated by the fact that MMPhys is just adding stupid bureaucracy and
+ * that MMR3PhysReserve is a totally weird artifact that must go away.)
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_MM
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/gmm.h>
+#include "MMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+
+#include <VBox/log.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The current saved state version of MM. */
+#define MM_SAVED_STATE_VERSION 2
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+
+
+
+
+/**
+ * Initializes the MM members of the UVM.
+ *
+ * This is currently only the ring-3 heap.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3DECL(int) MMR3InitUVM(PUVM pUVM)
+{
+ /*
+ * Assert sizes and order.
+ */
+ AssertCompile(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
+ AssertRelease(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
+ Assert(!pUVM->mm.s.pHeap);
+
+ /*
+ * Init the heap.
+ */
+ int rc = mmR3HeapCreateU(pUVM, &pUVM->mm.s.pHeap);
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * Initializes the MM.
+ *
+ * MM manages the virtual address space (among other things) and sets up
+ * the hypervisor memory area mapping in the VM structure and
+ * the hypervisor alloc-only-heap. Assuming the current init order
+ * and components, the hypervisor memory area looks like this:
+ * -# VM Structure.
+ * -# Hypervisor alloc only heap (also called the hypervisor memory region).
+ * -# Core code.
+ *
+ * MM determines the virtual address of the hypervisor memory area by
+ * checking for the location used in a previous run. If that property isn't
+ * available it will choose a default starting location, currently 0xa0000000.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) MMR3Init(PVM pVM)
+{
+ LogFlow(("MMR3Init\n"));
+
+ /*
+ * Assert alignment, sizes and order.
+ */
+ AssertRelease(!(RT_UOFFSETOF(VM, mm.s) & 31));
+ AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
+
+ /*
+ * Register the saved state data unit.
+ */
+ int rc = SSMR3RegisterInternal(pVM, "mm", 1, MM_SAVED_STATE_VERSION, sizeof(uint32_t) * 2,
+ NULL, NULL, NULL,
+ NULL, mmR3Save, NULL,
+ NULL, mmR3Load, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Statistics.
+ */
+ STAM_REG(pVM, &pVM->mm.s.cBasePages, STAMTYPE_U64, "/MM/Reserved/cBasePages", STAMUNIT_PAGES, "Reserved number of base pages, ROM and Shadow ROM included.");
+ STAM_REG(pVM, &pVM->mm.s.cHandyPages, STAMTYPE_U32, "/MM/Reserved/cHandyPages", STAMUNIT_PAGES, "Reserved number of handy pages.");
+ STAM_REG(pVM, &pVM->mm.s.cShadowPages, STAMTYPE_U32, "/MM/Reserved/cShadowPages", STAMUNIT_PAGES, "Reserved number of shadow paging pages.");
+ STAM_REG(pVM, &pVM->mm.s.cFixedPages, STAMTYPE_U32, "/MM/Reserved/cFixedPages", STAMUNIT_PAGES, "Reserved number of fixed pages (MMIO2).");
+ STAM_REG(pVM, &pVM->mm.s.cbRamBase, STAMTYPE_U64, "/MM/cbRamBase", STAMUNIT_BYTES, "Size of the base RAM.");
+
+ return rc;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Initializes the MM parts which depend on PGM being initialized.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @remark No cleanup necessary since MMR3Term() will be called on failure.
+ */
+VMMR3DECL(int) MMR3InitPaging(PVM pVM)
+{
+ LogFlow(("MMR3InitPaging:\n"));
+
+ /*
+ * Query the CFGM values.
+ */
+ int rc;
+ PCFGMNODE pMMCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
+ if (!pMMCfg)
+ {
+ rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMMCfg);
+ AssertRCReturn(rc, rc);
+ }
+
+ /** @cfgm{/RamSize, uint64_t, 0, 16TB, 0}
+ * Specifies the size of the base RAM that is to be set up during
+ * VM initialization.
+ */
+ uint64_t cbRam;
+ rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ cbRam = 0;
+ else
+ AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc), rc);
+ AssertLogRelMsg(!(cbRam & ~X86_PTE_PAE_PG_MASK), ("%RGp X86_PTE_PAE_PG_MASK=%RX64\n", cbRam, X86_PTE_PAE_PG_MASK));
+ AssertLogRelMsgReturn(cbRam <= GMM_GCPHYS_LAST, ("cbRam=%RGp GMM_GCPHYS_LAST=%RX64\n", cbRam, GMM_GCPHYS_LAST), VERR_OUT_OF_RANGE);
+ cbRam &= X86_PTE_PAE_PG_MASK;
+ pVM->mm.s.cbRamBase = cbRam;
+
+ /** @cfgm{/RamHoleSize, uint32_t, 0, 4032MB, 512MB}
+ * Specifies the size of the memory hole. The memory hole is used
+ * to avoid mapping RAM to the range normally used for PCI memory regions.
+ * Must be aligned on a 4MB boundary. */
+ uint32_t cbRamHole;
+ rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamHoleSize\", rc=%Rrc.\n", rc), rc);
+ AssertLogRelMsgReturn(cbRamHole <= 4032U * _1M,
+ ("Configuration error: \"RamHoleSize\"=%#RX32 is too large.\n", cbRamHole), VERR_OUT_OF_RANGE);
+    AssertLogRelMsgReturn(cbRamHole > 16 * _1M,
+                          ("Configuration error: \"RamHoleSize\"=%#RX32 is too small.\n", cbRamHole), VERR_OUT_OF_RANGE);
+ AssertLogRelMsgReturn(!(cbRamHole & (_4M - 1)),
+ ("Configuration error: \"RamHoleSize\"=%#RX32 is misaligned.\n", cbRamHole), VERR_OUT_OF_RANGE);
+ uint64_t const offRamHole = _4G - cbRamHole;
+ if (cbRam < offRamHole)
+ Log(("MM: %RU64 bytes of RAM\n", cbRam));
+ else
+ Log(("MM: %RU64 bytes of RAM with a hole at %RU64 up to 4GB.\n", cbRam, offRamHole));
+
+ /** @cfgm{/MM/Policy, string, no overcommitment}
+ * Specifies the policy to use when reserving memory for this VM. The recognized
+ * value is 'no overcommitment' (default). See GMMPOLICY.
+ */
+ GMMOCPOLICY enmOcPolicy;
+ char sz[64];
+ rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Policy", sz, sizeof(sz));
+ if (RT_SUCCESS(rc))
+ {
+ if ( !RTStrICmp(sz, "no_oc")
+ || !RTStrICmp(sz, "no overcommitment"))
+ enmOcPolicy = GMMOCPOLICY_NO_OC;
+ else
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Policy\" value \"%s\"", sz);
+ }
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ enmOcPolicy = GMMOCPOLICY_NO_OC;
+ else
+ AssertMsgFailedReturn(("Configuration error: Failed to query string \"MM/Policy\", rc=%Rrc.\n", rc), rc);
+
+ /** @cfgm{/MM/Priority, string, normal}
+ * Specifies the memory priority of this VM. The priority comes into play when the
+ * system is overcommitted and the VMs needs to be milked for memory. The recognized
+ * values are 'low', 'normal' (default) and 'high'. See GMMPRIORITY.
+ */
+ GMMPRIORITY enmPriority;
+ rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Priority", sz, sizeof(sz));
+ if (RT_SUCCESS(rc))
+ {
+ if (!RTStrICmp(sz, "low"))
+ enmPriority = GMMPRIORITY_LOW;
+ else if (!RTStrICmp(sz, "normal"))
+ enmPriority = GMMPRIORITY_NORMAL;
+ else if (!RTStrICmp(sz, "high"))
+ enmPriority = GMMPRIORITY_HIGH;
+ else
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Priority\" value \"%s\"", sz);
+ }
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ enmPriority = GMMPRIORITY_NORMAL;
+ else
+ AssertMsgFailedReturn(("Configuration error: Failed to query string \"MM/Priority\", rc=%Rrc.\n", rc), rc);
+
+ /*
+ * Make the initial memory reservation with GMM.
+ */
+ uint32_t const cbUma = _1M - 640*_1K;
+ uint64_t cBasePages = ((cbRam - cbUma) >> GUEST_PAGE_SHIFT) + pVM->mm.s.cBasePages;
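+    /* Illustrative arithmetic, assuming 4 KiB guest pages: cbUma is the
+       384 KiB legacy hole between 640 KiB and 1 MiB, so with cbRam = 1 GiB
+       roughly (1 GiB - 384 KiB) / 4 KiB = 262048 base pages get reserved. */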
+ rc = GMMR3InitialReservation(pVM,
+ RT_MAX(cBasePages + pVM->mm.s.cHandyPages, 1),
+ RT_MAX(pVM->mm.s.cShadowPages, 1),
+ RT_MAX(pVM->mm.s.cFixedPages, 1),
+ enmOcPolicy,
+ enmPriority);
+ if (RT_FAILURE(rc))
+ {
+ if (rc == VERR_GMM_MEMORY_RESERVATION_DECLINED)
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Insufficient free memory to start the VM (cbRam=%#RX64 enmOcPolicy=%d enmPriority=%d)"),
+ cbRam, enmOcPolicy, enmPriority);
+ return VMSetError(pVM, rc, RT_SRC_POS, "GMMR3InitialReservation(,%#RX64,0,0,%d,%d)",
+ cbRam >> GUEST_PAGE_SHIFT, enmOcPolicy, enmPriority);
+ }
+
+ /*
+ * If RamSize is 0 we're done now.
+ */
+ if (cbRam < GUEST_PAGE_SIZE)
+ {
+ Log(("MM: No RAM configured\n"));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Setup the base ram (PGM).
+ */
+ pVM->mm.s.cbRamHole = cbRamHole;
+ pVM->mm.s.cbRamBelow4GB = cbRam > offRamHole ? offRamHole : cbRam;
+ pVM->mm.s.cbRamAbove4GB = cbRam > offRamHole ? cbRam - offRamHole : 0;
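+    /* Illustrative example: with cbRam = 6 GiB and cbRamHole = 512 MiB we get
+       offRamHole = 3.5 GiB, hence cbRamBelow4GB = 3.5 GiB and
+       cbRamAbove4GB = 6 GiB - 3.5 GiB = 2.5 GiB. */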
+
+ /* First the conventional memory: */
+ rc = PGMR3PhysRegisterRam(pVM, 0, RT_MIN(cbRam, 640*_1K), "Conventional RAM");
+ if (RT_SUCCESS(rc) && cbRam >= _1M)
+ {
+ /* The extended memory from 1MiB to 2MiB to align better with large pages in NEM mode: */
+ rc = PGMR3PhysRegisterRam(pVM, _1M, RT_MIN(_1M, cbRam - _1M), "Extended RAM, 1-2MB");
+ if (cbRam > _2M)
+ {
+ /* The extended memory from 2MiB up to 4GiB: */
+ rc = PGMR3PhysRegisterRam(pVM, _2M, pVM->mm.s.cbRamBelow4GB - _2M, "Extended RAM, >2MB");
+
+ /* Then all the memory above 4GiB: */
+ if (RT_SUCCESS(rc) && pVM->mm.s.cbRamAbove4GB > 0)
+ rc = PGMR3PhysRegisterRam(pVM, _4G, cbRam - offRamHole, "Above 4GB Base RAM");
+ }
+ }
+
+ /*
+     * Enable mmR3UpdateReservation here since we don't want the
+     * PGMR3PhysRegisterRam calls above to mess things up.
+ */
+ pVM->mm.s.fDoneMMR3InitPaging = true;
+ AssertMsg(pVM->mm.s.cBasePages == cBasePages || RT_FAILURE(rc), ("%RX64 != %RX64\n", pVM->mm.s.cBasePages, cBasePages));
+
+ LogFlow(("MMR3InitPaging: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Terminates the MM.
+ *
+ * Termination means cleaning up and freeing all resources;
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) MMR3Term(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the UVM part of MM.
+ *
+ * Termination means cleaning up and freeing all resources;
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3DECL(void) MMR3TermUVM(PUVM pUVM)
+{
+ /*
+ * Destroy the heap.
+ */
+ mmR3HeapDestroy(pUVM->mm.s.pHeap);
+ pUVM->mm.s.pHeap = NULL;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("mmR3Save:\n"));
+
+ /* (PGM saves the physical memory.) */
+ SSMR3PutU64(pSSM, pVM->mm.s.cBasePages);
+ return SSMR3PutU64(pSSM, pVM->mm.s.cbRamBase);
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ LogFlow(("mmR3Load:\n"));
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Validate version.
+ */
+ if ( SSM_VERSION_MAJOR_CHANGED(uVersion, MM_SAVED_STATE_VERSION)
+ || !uVersion)
+ {
+ AssertMsgFailed(("mmR3Load: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ /*
+ * Check the cBasePages and cbRamBase values.
+ */
+ int rc;
+ RTUINT cb1;
+
+ /* cBasePages (ignored) */
+ uint64_t cGuestPages;
+ if (uVersion >= 2)
+ rc = SSMR3GetU64(pSSM, &cGuestPages);
+ else
+ {
+ rc = SSMR3GetUInt(pSSM, &cb1);
+ cGuestPages = cb1 >> GUEST_PAGE_SHIFT;
+ }
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* cbRamBase */
+ uint64_t cb;
+ if (uVersion != 1)
+ rc = SSMR3GetU64(pSSM, &cb);
+ else
+ {
+ rc = SSMR3GetUInt(pSSM, &cb1);
+ cb = cb1;
+ }
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertLogRelMsgReturn(cb == pVM->mm.s.cbRamBase,
+ ("Memory configuration has changed. cbRamBase=%#RX64 save=%#RX64\n", pVM->mm.s.cbRamBase, cb),
+ VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH);
+
+ /* (PGM restores the physical memory.) */
+ return rc;
+}
+
+
+/**
+ * Updates GMM with memory reservation changes.
+ *
+ * Called when MM::cbRamRegistered, MM::cShadowPages or MM::cFixedPages changes.
+ *
+ * @returns VBox status code - see GMMR0UpdateReservation.
+ * @param pVM The cross context VM structure.
+ */
+int mmR3UpdateReservation(PVM pVM)
+{
+ VM_ASSERT_EMT(pVM);
+ if (pVM->mm.s.fDoneMMR3InitPaging)
+ return GMMR3UpdateReservation(pVM,
+ RT_MAX(pVM->mm.s.cBasePages + pVM->mm.s.cHandyPages, 1),
+ RT_MAX(pVM->mm.s.cShadowPages, 1),
+ RT_MAX(pVM->mm.s.cFixedPages, 1));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Interface for PGM to increase the reservation of RAM and ROM pages.
+ *
+ * This can be called before MMR3InitPaging.
+ *
+ * @returns VBox status code. Will set VM error on failure.
+ * @param pVM The cross context VM structure.
+ * @param cAddBasePages The number of pages to add.
+ */
+VMMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages)
+{
+ uint64_t cOld = pVM->mm.s.cBasePages;
+ pVM->mm.s.cBasePages += cAddBasePages;
+ LogFlow(("MMR3IncreaseBaseReservation: +%RU64 (%RU64 -> %RU64)\n", cAddBasePages, cOld, pVM->mm.s.cBasePages));
+ int rc = mmR3UpdateReservation(pVM);
+ if (RT_FAILURE(rc))
+ {
+        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for the RAM (%#RX64 -> %#RX64 + %#RX32)"),
+ cOld, pVM->mm.s.cBasePages, pVM->mm.s.cHandyPages);
+ pVM->mm.s.cBasePages = cOld;
+ }
+ return rc;
+}
+
+
+/**
+ * Interface for PGM to make reservations for handy pages in addition to the
+ * base memory.
+ *
+ * This can be called before MMR3InitPaging.
+ *
+ * @returns VBox status code. Will set VM error on failure.
+ * @param pVM The cross context VM structure.
+ * @param cHandyPages The number of handy pages.
+ */
+VMMR3DECL(int) MMR3ReserveHandyPages(PVM pVM, uint32_t cHandyPages)
+{
+ AssertReturn(!pVM->mm.s.cHandyPages, VERR_WRONG_ORDER);
+
+ pVM->mm.s.cHandyPages = cHandyPages;
+ LogFlow(("MMR3ReserveHandyPages: %RU32 (base %RU64)\n", pVM->mm.s.cHandyPages, pVM->mm.s.cBasePages));
+ int rc = mmR3UpdateReservation(pVM);
+ if (RT_FAILURE(rc))
+ {
+        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for the RAM (%#RX64 + %#RX32)"),
+ pVM->mm.s.cBasePages, pVM->mm.s.cHandyPages);
+ pVM->mm.s.cHandyPages = 0;
+ }
+ return rc;
+}
+
+
+/**
+ * Interface for PGM to adjust the reservation of fixed pages.
+ *
+ * This can be called before MMR3InitPaging.
+ *
+ * @returns VBox status code. Will set VM error on failure.
+ * @param pVM The cross context VM structure.
+ * @param cDeltaFixedPages The number of guest pages to add (positive) or
+ * subtract (negative).
+ * @param pszDesc Some description associated with the reservation.
+ */
+VMMR3DECL(int) MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc)
+{
+ const uint32_t cOld = pVM->mm.s.cFixedPages;
+ pVM->mm.s.cFixedPages += cDeltaFixedPages;
+ LogFlow(("MMR3AdjustFixedReservation: %d (%u -> %u)\n", cDeltaFixedPages, cOld, pVM->mm.s.cFixedPages));
+ int rc = mmR3UpdateReservation(pVM);
+ if (RT_FAILURE(rc))
+ {
+ VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory (%#x -> %#x; %s)"),
+ cOld, pVM->mm.s.cFixedPages, pszDesc);
+ pVM->mm.s.cFixedPages = cOld;
+ }
+ return rc;
+}
+
+
+/**
+ * Interface for PGM to update the reservation of shadow pages.
+ *
+ * This can be called before MMR3InitPaging.
+ *
+ * @returns VBox status code. Will set VM error on failure.
+ * @param pVM The cross context VM structure.
+ * @param cShadowPages The new page count.
+ */
+VMMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages)
+{
+ const uint32_t cOld = pVM->mm.s.cShadowPages;
+ pVM->mm.s.cShadowPages = cShadowPages;
+ LogFlow(("MMR3UpdateShadowReservation: %u -> %u\n", cOld, pVM->mm.s.cShadowPages));
+ int rc = mmR3UpdateReservation(pVM);
+ if (RT_FAILURE(rc))
+ {
+ VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for shadow page tables (%#x -> %#x)"), cOld, pVM->mm.s.cShadowPages);
+ pVM->mm.s.cShadowPages = cOld;
+ }
+ return rc;
+}
+
+
+/**
+ * Get the size of the base RAM.
+ * This usually means the size of the first contiguous block of physical memory.
+ *
+ * @returns The guest base RAM size.
+ * @param pVM The cross context VM structure.
+ * @thread Any.
+ *
+ * @deprecated
+ */
+VMMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM)
+{
+ return pVM->mm.s.cbRamBase;
+}
+
+
+/**
+ * Get the size of RAM below 4GB (starts at address 0x00000000).
+ *
+ * @returns The amount of RAM below 4GB in bytes.
+ * @param pVM The cross context VM structure.
+ * @thread Any.
+ */
+VMMR3DECL(uint32_t) MMR3PhysGetRamSizeBelow4GB(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+ return pVM->mm.s.cbRamBelow4GB;
+}
+
+
+/**
+ * Get the size of RAM above 4GB (starts at address 0x000100000000).
+ *
+ * @returns The amount of RAM above 4GB in bytes.
+ * @param pVM The cross context VM structure.
+ * @thread Any.
+ */
+VMMR3DECL(uint64_t) MMR3PhysGetRamSizeAbove4GB(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT64_MAX);
+ return pVM->mm.s.cbRamAbove4GB;
+}
+
+
+/**
+ * Get the size of the RAM hole below 4GB.
+ *
+ * @returns Size in bytes.
+ * @param pVM The cross context VM structure.
+ * @thread Any.
+ */
+VMMR3DECL(uint32_t) MMR3PhysGet4GBRamHoleSize(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+ return pVM->mm.s.cbRamHole;
+}
+
diff --git a/src/VBox/VMM/VMMR3/MMHeap.cpp b/src/VBox/VMM/VMMR3/MMHeap.cpp
new file mode 100644
index 00000000..44408cb8
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/MMHeap.cpp
@@ -0,0 +1,761 @@
+/* $Id: MMHeap.cpp $ */
+/** @file
+ * MM - Memory Manager - Heap.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_MM_HEAP
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pgm.h>
+#include "MMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <iprt/errcore.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void *mmR3HeapAlloc(PMMHEAP pHeap, MMTAG enmTag, size_t cbSize, bool fZero);
+
+
+
+/**
+ * Allocate and initialize a heap structure and its associated substructures.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param ppHeap Where to store the heap pointer.
+ */
+int mmR3HeapCreateU(PUVM pUVM, PMMHEAP *ppHeap)
+{
+ PMMHEAP pHeap = (PMMHEAP)RTMemAllocZ(sizeof(MMHEAP) + sizeof(MMHEAPSTAT));
+ if (pHeap)
+ {
+ int rc = RTCritSectInit(&pHeap->Lock);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the global stat record.
+ */
+ pHeap->pUVM = pUVM;
+ pHeap->Stat.pHeap = pHeap;
+#ifdef MMR3HEAP_WITH_STATISTICS
+ PMMHEAPSTAT pStat = &pHeap->Stat;
+        STAMR3RegisterU(pUVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cAllocations", STAMUNIT_CALLS, "Number of MMR3HeapAlloc() calls.");
+ STAMR3RegisterU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cReallocations", STAMUNIT_CALLS, "Number of MMR3HeapRealloc() calls.");
+ STAMR3RegisterU(pUVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cFrees", STAMUNIT_CALLS, "Number of MMR3HeapFree() calls.");
+ STAMR3RegisterU(pUVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cFailures", STAMUNIT_COUNT, "Number of failures.");
+ STAMR3RegisterU(pUVM, &pStat->cbCurAllocated, sizeof(pStat->cbCurAllocated) == sizeof(uint32_t) ? STAMTYPE_U32 : STAMTYPE_U64,
+ STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbCurAllocated", STAMUNIT_BYTES, "Number of bytes currently allocated.");
+ STAMR3RegisterU(pUVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbAllocated", STAMUNIT_BYTES, "Total number of bytes allocated.");
+ STAMR3RegisterU(pUVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbFreed", STAMUNIT_BYTES, "Total number of bytes freed.");
+#endif
+ *ppHeap = pHeap;
+ return VINF_SUCCESS;
+ }
+ AssertRC(rc);
+ RTMemFree(pHeap);
+ }
+ AssertMsgFailed(("failed to allocate heap structure\n"));
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * MM heap statistics tree destroy callback.
+ */
+static DECLCALLBACK(int) mmR3HeapStatTreeDestroy(PAVLULNODECORE pCore, void *pvParam)
+{
+ RT_NOREF(pvParam);
+
+ /* Don't bother deregistering the stat samples as they get destroyed by STAM. */
+ RTMemFree(pCore);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroy a heap.
+ *
+ * @param pHeap Heap handle.
+ */
+void mmR3HeapDestroy(PMMHEAP pHeap)
+{
+ /*
+     * Start by deleting the lock; that'll trap anyone
+ * attempting to use the heap.
+ */
+ RTCritSectDelete(&pHeap->Lock);
+
+ /*
+ * Walk the node list and free all the memory.
+ */
+ PMMHEAPHDR pHdr = pHeap->pHead;
+ while (pHdr)
+ {
+ void *pv = pHdr;
+ pHdr = pHdr->pNext;
+ RTMemFree(pv);
+ }
+
+ /*
+ * Free the stat nodes.
+ */
+ RTAvlULDestroy(&pHeap->pStatTree, mmR3HeapStatTreeDestroy, NULL);
+ RTMemFree(pHeap);
+}
+
+
+/**
+ * Allocate memory associating it with the VM for collective cleanup.
+ *
+ * The memory will be allocated from the default heap but a header
+ * is added in which we keep track of which VM it belongs to and chain
+ * all the allocations together so they can be freed in one go.
+ *
+ * This interface is typically used for memory blocks which will not be
+ * freed during the life of the VM.
+ *
+ * @returns Pointer to allocated memory.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ */
+VMMR3DECL(void *) MMR3HeapAllocU(PUVM pUVM, MMTAG enmTag, size_t cbSize)
+{
+ Assert(pUVM->mm.s.pHeap);
+ return mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, false);
+}
+
+
+/**
+ * Allocate memory associating it with the VM for collective cleanup.
+ *
+ * The memory will be allocated from the default heap but a header
+ * is added in which we keep track of which VM it belongs to and chain
+ * all the allocations together so they can be freed in one go.
+ *
+ * This interface is typically used for memory blocks which will not be
+ * freed during the life of the VM.
+ *
+ * @returns Pointer to allocated memory.
+ * @param pVM The cross context VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ */
+VMMR3DECL(void *) MMR3HeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize)
+{
+ return mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, false);
+}
+
+
+/**
+ * Same as MMR3HeapAllocU().
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ * @param ppv Where to store the pointer to the allocated memory on success.
+ */
+VMMR3DECL(int) MMR3HeapAllocExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv)
+{
+ Assert(pUVM->mm.s.pHeap);
+ void *pv = mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, false);
+ if (pv)
+ {
+ *ppv = pv;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Same as MMR3HeapAlloc().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ * @param ppv Where to store the pointer to the allocated memory on success.
+ */
+VMMR3DECL(int) MMR3HeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv)
+{
+ void *pv = mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, false);
+ if (pv)
+ {
+ *ppv = pv;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Same as MMR3HeapAlloc() only the memory is zeroed.
+ *
+ * @returns Pointer to allocated memory.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ */
+VMMR3DECL(void *) MMR3HeapAllocZU(PUVM pUVM, MMTAG enmTag, size_t cbSize)
+{
+ return mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, true);
+}
+
+
+/**
+ * Same as MMR3HeapAlloc() only the memory is zeroed.
+ *
+ * @returns Pointer to allocated memory.
+ * @param pVM The cross context VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ */
+VMMR3DECL(void *) MMR3HeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize)
+{
+ return mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, true);
+}
+
+
+/**
+ * Same as MMR3HeapAllocZ().
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ * @param ppv Where to store the pointer to the allocated memory on success.
+ */
+VMMR3DECL(int) MMR3HeapAllocZExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv)
+{
+ Assert(pUVM->mm.s.pHeap);
+ void *pv = mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, true);
+ if (pv)
+ {
+ *ppv = pv;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Same as MMR3HeapAllocZ().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ * @param ppv Where to store the pointer to the allocated memory on success.
+ */
+VMMR3DECL(int) MMR3HeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv)
+{
+ void *pv = mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, true);
+ if (pv)
+ {
+ *ppv = pv;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Links @a pHdr into the heap block list (tail).
+ *
+ * @param pHeap Heap handle.
+ * @param pHdr The block to link.
+ *
+ * @note Caller has locked the heap!
+ */
+DECLINLINE(void) mmR3HeapLink(PMMHEAP pHeap, PMMHEAPHDR pHdr)
+{
+ /* Tail insertion: */
+ pHdr->pNext = NULL;
+ PMMHEAPHDR pTail = pHeap->pTail;
+ pHdr->pPrev = pTail;
+ if (pTail)
+ {
+ Assert(!pTail->pNext);
+ pTail->pNext = pHdr;
+ }
+ else
+ {
+ Assert(!pHeap->pHead);
+ pHeap->pHead = pHdr;
+ }
+ pHeap->pTail = pHdr;
+}
+
+
+/**
+ * Unlinks @a pHdr from the heap block list.
+ *
+ * @param pHeap Heap handle.
+ * @param pHdr The block to unlink.
+ *
+ * @note Caller has locked the heap!
+ */
+DECLINLINE(void) mmR3HeapUnlink(PMMHEAP pHeap, PMMHEAPHDR pHdr)
+{
+ PMMHEAPHDR const pPrev = pHdr->pPrev;
+ PMMHEAPHDR const pNext = pHdr->pNext;
+ if (pPrev)
+ pPrev->pNext = pNext;
+ else
+ pHeap->pHead = pNext;
+
+ if (pNext)
+ pNext->pPrev = pPrev;
+ else
+        pHeap->pTail = pPrev;
+}
+
+
+/**
+ * Allocate memory from the heap.
+ *
+ * @returns Pointer to allocated memory.
+ * @param pHeap Heap handle.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param cbSize Size of the block.
+ * @param fZero Whether or not to zero the memory block.
+ */
+void *mmR3HeapAlloc(PMMHEAP pHeap, MMTAG enmTag, size_t cbSize, bool fZero)
+{
+#ifdef MMR3HEAP_WITH_STATISTICS
+ RTCritSectEnter(&pHeap->Lock);
+
+ /*
+ * Find/alloc statistics nodes.
+ */
+ pHeap->Stat.cAllocations++;
+ PMMHEAPSTAT pStat = (PMMHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
+ if (pStat)
+ {
+ pStat->cAllocations++;
+
+ RTCritSectLeave(&pHeap->Lock);
+ }
+ else
+ {
+ pStat = (PMMHEAPSTAT)RTMemAllocZ(sizeof(MMHEAPSTAT));
+ if (!pStat)
+ {
+ pHeap->Stat.cFailures++;
+ AssertMsgFailed(("Failed to allocate heap stat record.\n"));
+ RTCritSectLeave(&pHeap->Lock);
+ return NULL;
+ }
+ pStat->Core.Key = (AVLULKEY)enmTag;
+ pStat->pHeap = pHeap;
+ RTAvlULInsert(&pHeap->pStatTree, &pStat->Core);
+
+ pStat->cAllocations++;
+ RTCritSectLeave(&pHeap->Lock);
+
+ /* register the statistics */
+ PUVM pUVM = pHeap->pUVM;
+ const char *pszTag = mmGetTagName(enmTag);
+ STAMR3RegisterFU(pUVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/R3Heap/%s", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number or MMR3HeapAlloc() calls.", "/MM/R3Heap/%s/cAllocations", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3HeapRealloc() calls.", "/MM/R3Heap/%s/cReallocations", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3HeapFree() calls.", "/MM/R3Heap/%s/cFrees", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/R3Heap/%s/cFailures", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes allocated.", "/MM/R3Heap/%s/cbAllocated", pszTag);
+ STAMR3RegisterFU(pUVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes freed.", "/MM/R3Heap/%s/cbFreed", pszTag);
+ }
+#else
+ RT_NOREF_PV(enmTag);
+#endif
+
+ /*
+ * Validate input.
+ */
+ if (cbSize == 0)
+ {
+#ifdef MMR3HEAP_WITH_STATISTICS
+ RTCritSectEnter(&pHeap->Lock);
+ pStat->cFailures++;
+ pHeap->Stat.cFailures++;
+ RTCritSectLeave(&pHeap->Lock);
+#endif
+ AssertFailed();
+ return NULL;
+ }
+
+ /*
+ * Allocate heap block.
+ */
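+    /* Illustrative sizing, assuming MMR3HEAP_SIZE_ALIGNMENT is 16: a 100 byte
+       request is rounded up to 112 bytes of user space, plus sizeof(MMHEAPHDR)
+       for the bookkeeping header placed in front of it. */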
+ cbSize = RT_ALIGN_Z(cbSize, MMR3HEAP_SIZE_ALIGNMENT) + sizeof(MMHEAPHDR);
+ PMMHEAPHDR const pHdr = (PMMHEAPHDR)(fZero ? RTMemAllocZ(cbSize) : RTMemAlloc(cbSize));
+ if (pHdr)
+ { /* likely */ }
+ else
+ {
+ AssertMsgFailed(("Failed to allocate heap block %d, enmTag=%x(%.4s).\n", cbSize, enmTag, &enmTag));
+#ifdef MMR3HEAP_WITH_STATISTICS
+ RTCritSectEnter(&pHeap->Lock);
+ pStat->cFailures++;
+ pHeap->Stat.cFailures++;
+ RTCritSectLeave(&pHeap->Lock);
+#endif
+ return NULL;
+ }
+ Assert(!((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)));
+
+ /*
+ * Init and link in the header.
+ */
+#ifdef MMR3HEAP_WITH_STATISTICS
+ pHdr->pStat = pStat;
+#else
+ pHdr->pStat = &pHeap->Stat;
+#endif
+ pHdr->cbSize = cbSize;
+
+ RTCritSectEnter(&pHeap->Lock);
+
+ mmR3HeapLink(pHeap, pHdr);
+
+ /*
+ * Update statistics
+ */
+#ifdef MMR3HEAP_WITH_STATISTICS
+ pStat->cbAllocated += cbSize;
+ pStat->cbCurAllocated += cbSize;
+ pHeap->Stat.cbAllocated += cbSize;
+ pHeap->Stat.cbCurAllocated += cbSize;
+#endif
+
+ RTCritSectLeave(&pHeap->Lock);
+
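+    /* The caller gets the memory immediately after the header; MMR3HeapFree()
+       and MMR3HeapRealloc() recover the header again via (PMMHEAPHDR)pv - 1. */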
+ return pHdr + 1;
+}
+
+
+/**
+ * Reallocate memory allocated with MMR3HeapAlloc(), MMR3HeapAllocZ() or
+ * MMR3HeapRealloc().
+ *
+ * Any additional memory is zeroed (only reliable if the initial allocation was
+ * also of the zeroing kind).
+ *
+ * @returns Pointer to reallocated memory.
+ * @param pv Pointer to the memory block to reallocate.
+ * Must not be NULL!
+ * @param cbNewSize New block size.
+ */
+VMMR3DECL(void *) MMR3HeapRealloc(void *pv, size_t cbNewSize)
+{
+ AssertMsg(pv, ("Invalid pointer pv=%p\n", pv));
+ if (!pv)
+ return NULL;
+
+ /*
+ * If newsize is zero then this is a free.
+ */
+ if (!cbNewSize)
+ {
+ MMR3HeapFree(pv);
+ return NULL;
+ }
+
+ /*
+ * Validate header.
+ */
+ PMMHEAPHDR const pHdr = (PMMHEAPHDR)pv - 1;
+ size_t const cbOldSize = pHdr->cbSize;
+ AssertMsgReturn( !(cbOldSize & (MMR3HEAP_SIZE_ALIGNMENT - 1))
+ && !((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)),
+ ("Invalid heap header! pv=%p, size=%#x\n", pv, cbOldSize),
+ NULL);
+ Assert(pHdr->pStat != NULL);
+ Assert(!((uintptr_t)pHdr->pNext & (RTMEM_ALIGNMENT - 1)));
+ Assert(!((uintptr_t)pHdr->pPrev & (RTMEM_ALIGNMENT - 1)));
+
+ PMMHEAP pHeap = pHdr->pStat->pHeap;
+
+ /*
+ * Unlink the header before we reallocate the block.
+ */
+ RTCritSectEnter(&pHeap->Lock);
+#ifdef MMR3HEAP_WITH_STATISTICS
+ pHdr->pStat->cReallocations++;
+ pHeap->Stat.cReallocations++;
+#endif
+ mmR3HeapUnlink(pHeap, pHdr);
+ RTCritSectLeave(&pHeap->Lock);
+
+ /*
+ * Reallocate the block. Clear added space.
+ */
+ cbNewSize = RT_ALIGN_Z(cbNewSize, MMR3HEAP_SIZE_ALIGNMENT) + sizeof(MMHEAPHDR);
+ PMMHEAPHDR pHdrNew = (PMMHEAPHDR)RTMemReallocZ(pHdr, cbOldSize, cbNewSize);
+ if (pHdrNew)
+ pHdrNew->cbSize = cbNewSize;
+ else
+ {
+ RTCritSectEnter(&pHeap->Lock);
+ mmR3HeapLink(pHeap, pHdr);
+#ifdef MMR3HEAP_WITH_STATISTICS
+ pHdr->pStat->cFailures++;
+ pHeap->Stat.cFailures++;
+#endif
+ RTCritSectLeave(&pHeap->Lock);
+ return NULL;
+ }
+
+ RTCritSectEnter(&pHeap->Lock);
+
+ /*
+ * Relink the header.
+ */
+ mmR3HeapLink(pHeap, pHdrNew);
+
+ /*
+ * Update statistics.
+ */
+#ifdef MMR3HEAP_WITH_STATISTICS
+    /* Note: use the size from before the realloc; pHdrNew->cbSize was already updated above. */
+    pHdrNew->pStat->cbAllocated    += cbNewSize - cbOldSize;
+    pHdrNew->pStat->cbCurAllocated += cbNewSize - cbOldSize;
+    pHeap->Stat.cbAllocated        += cbNewSize - cbOldSize;
+    pHeap->Stat.cbCurAllocated     += cbNewSize - cbOldSize;
+#endif
+
+ RTCritSectLeave(&pHeap->Lock);
+
+ return pHdrNew + 1;
+}
+
+
+/**
+ * Duplicates the specified string.
+ *
+ * @returns Pointer to the duplicate.
+ * @returns NULL on failure or when input NULL.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param psz The string to duplicate. NULL is allowed.
+ */
+VMMR3DECL(char *) MMR3HeapStrDupU(PUVM pUVM, MMTAG enmTag, const char *psz)
+{
+ if (!psz)
+ return NULL;
+ AssertPtr(psz);
+
+ size_t cch = strlen(psz) + 1;
+ char *pszDup = (char *)MMR3HeapAllocU(pUVM, enmTag, cch);
+ if (pszDup)
+ memcpy(pszDup, psz, cch);
+ return pszDup;
+}
+
+
+/**
+ * Duplicates the specified string.
+ *
+ * @returns Pointer to the duplicate.
+ * @returns NULL on failure or when input NULL.
+ * @param pVM The cross context VM structure.
+ * @param enmTag Statistics tag. Statistics are collected on a per tag
+ * basis in addition to a global one. Thus we can easily
+ * identify how memory is used by the VM. See MM_TAG_*.
+ * @param psz The string to duplicate. NULL is allowed.
+ */
+VMMR3DECL(char *) MMR3HeapStrDup(PVM pVM, MMTAG enmTag, const char *psz)
+{
+ return MMR3HeapStrDupU(pVM->pUVM, enmTag, psz);
+}
+
+
+/**
+ * Allocating string printf.
+ *
+ * @returns Pointer to the string.
+ * @param pVM The cross context VM structure.
+ * @param enmTag The statistics tag.
+ * @param pszFormat The format string.
+ * @param ... Format arguments.
+ */
+VMMR3DECL(char *) MMR3HeapAPrintf(PVM pVM, MMTAG enmTag, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ char *psz = MMR3HeapAPrintfVU(pVM->pUVM, enmTag, pszFormat, va);
+ va_end(va);
+ return psz;
+}
+
+
+/**
+ * Allocating string printf.
+ *
+ * @returns Pointer to the string.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag The statistics tag.
+ * @param pszFormat The format string.
+ * @param ... Format arguments.
+ */
+VMMR3DECL(char *) MMR3HeapAPrintfU(PUVM pUVM, MMTAG enmTag, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ char *psz = MMR3HeapAPrintfVU(pUVM, enmTag, pszFormat, va);
+ va_end(va);
+ return psz;
+}
+
+
+/**
+ * Allocating string printf.
+ *
+ * @returns Pointer to the string.
+ * @param pVM The cross context VM structure.
+ * @param enmTag The statistics tag.
+ * @param pszFormat The format string.
+ * @param va Format arguments.
+ */
+VMMR3DECL(char *) MMR3HeapAPrintfV(PVM pVM, MMTAG enmTag, const char *pszFormat, va_list va)
+{
+ return MMR3HeapAPrintfVU(pVM->pUVM, enmTag, pszFormat, va);
+}
+
+
+/**
+ * Allocating string printf.
+ *
+ * @returns Pointer to the string.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmTag The statistics tag.
+ * @param pszFormat The format string.
+ * @param va Format arguments.
+ */
+VMMR3DECL(char *) MMR3HeapAPrintfVU(PUVM pUVM, MMTAG enmTag, const char *pszFormat, va_list va)
+{
+ /*
+ * The lazy bird way.
+ */
+ char *psz;
+ int cch = RTStrAPrintfV(&psz, pszFormat, va);
+ if (cch < 0)
+ return NULL;
+ Assert(psz[cch] == '\0');
+ char *pszRet = (char *)MMR3HeapAllocU(pUVM, enmTag, cch + 1);
+ if (pszRet)
+ memcpy(pszRet, psz, cch + 1);
+ RTStrFree(psz);
+ return pszRet;
+}
+
+
+/**
+ * Releases memory allocated with MMR3HeapAlloc() or MMR3HeapRealloc().
+ *
+ * The memory is cleared/filled before freeing to prevent heap spraying, info
+ * leaks, and to help detect use-after-free trouble.
+ *
+ * @param pv Pointer to the memory block to free.
+ */
+VMMR3DECL(void) MMR3HeapFree(void *pv)
+{
+ /* Ignore NULL pointers. */
+ if (!pv)
+ return;
+
+ /*
+ * Validate header.
+ */
+ PMMHEAPHDR const pHdr = (PMMHEAPHDR)pv - 1;
+ size_t const cbAllocation = pHdr->cbSize;
+ AssertMsgReturnVoid( !(pHdr->cbSize & (MMR3HEAP_SIZE_ALIGNMENT - 1))
+ && !((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)),
+ ("Invalid heap header! pv=%p, size=%#x\n", pv, pHdr->cbSize));
+ AssertPtr(pHdr->pStat);
+ Assert(!((uintptr_t)pHdr->pNext & (RTMEM_ALIGNMENT - 1)));
+ Assert(!((uintptr_t)pHdr->pPrev & (RTMEM_ALIGNMENT - 1)));
+
+ /*
+ * Update statistics
+ */
+ PMMHEAP pHeap = pHdr->pStat->pHeap;
+ RTCritSectEnter(&pHeap->Lock);
+
+#ifdef MMR3HEAP_WITH_STATISTICS
+ pHdr->pStat->cFrees++;
+ pHeap->Stat.cFrees++;
+ pHdr->pStat->cbFreed += cbAllocation;
+ pHeap->Stat.cbFreed += cbAllocation;
+ pHdr->pStat->cbCurAllocated -= cbAllocation;
+ pHeap->Stat.cbCurAllocated -= cbAllocation;
+#endif
+
+ /*
+ * Unlink it.
+ */
+ mmR3HeapUnlink(pHeap, pHdr);
+
+ RTCritSectLeave(&pHeap->Lock);
+
+ /*
+     * Free the memory. We clear it just to be on the safe side wrt
+     * heap spraying and leaking sensitive info (this also helps detect
+     * double freeing).
+ */
+ RTMemFreeZ(pHdr, cbAllocation);
+}
+
diff --git a/src/VBox/VMM/VMMR3/Makefile.kup b/src/VBox/VMM/VMMR3/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/Makefile.kup
diff --git a/src/VBox/VMM/VMMR3/NEMR3.cpp b/src/VBox/VMM/VMMR3/NEMR3.cpp
new file mode 100644
index 00000000..472c586b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/NEMR3.cpp
@@ -0,0 +1,606 @@
+/* $Id: NEMR3.cpp $ */
+/** @file
+ * NEM - Native execution manager.
+ */
+
+/*
+ * Copyright (C) 2018-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_nem NEM - Native Execution Manager.
+ *
+ * This is an alternative execution manager to HM and raw-mode. On one host
+ * (Windows) we're forced to use this, on the others we just do it because we
+ * can. Since this is host-specific in nature, information about an
+ * implementation is contained in the NEMR3Native-xxxx.cpp files.
+ *
+ * @ref pg_nem_win
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NEM
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/gim.h>
+#include "NEMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#include <iprt/string.h>
+
+
+
+/**
+ * Basic init and configuration reading.
+ *
+ * Always call NEMR3Term after calling this.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) NEMR3InitConfig(PVM pVM)
+{
+ LogFlow(("NEMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, nem.s, 64);
+ AssertCompile(sizeof(pVM->nem.s) <= sizeof(pVM->nem.padding));
+
+ /*
+ * Initialize state info so NEMR3Term will always be happy.
+ * No returning prior to setting magics!
+ */
+ pVM->nem.s.u32Magic = NEM_MAGIC;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->nem.s.u32Magic = NEMCPU_MAGIC;
+ }
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
+
+ /*
+ * Validate the NEM settings.
+ */
+ int rc = CFGMR3ValidateConfig(pCfgNem,
+ "/NEM/",
+ "Enabled"
+ "|Allow64BitGuests"
+ "|LovelyMesaDrvWorkaround"
+#ifdef RT_OS_WINDOWS
+ "|UseRing0Runloop"
+#elif defined(RT_OS_DARWIN)
+ "|VmxPleGap"
+ "|VmxPleWindow"
+ "|VmxLbr"
+#endif
+ ,
+ "" /* pszValidNodes */, "NEM" /* pszWho */, 0 /* uInstance */);
+ if (RT_FAILURE(rc))
+ return rc;
+
+    /** @cfgm{/NEM/Enabled, bool, true}
+ * Whether NEM is enabled. */
+ rc = CFGMR3QueryBoolDef(pCfgNem, "Enabled", &pVM->nem.s.fEnabled, true);
+ AssertLogRelRCReturn(rc, rc);
+
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ /** @cfgm{/NEM/Allow64BitGuests, bool, 32-bit:false, 64-bit:true}
+ * Enables AMD64 CPU features.
+     * On 32-bit hosts this isn't the default and requires host CPU support.
+     * 64-bit hosts already have the support. */
+ rc = CFGMR3QueryBoolDef(pCfgNem, "Allow64BitGuests", &pVM->nem.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
+ AssertLogRelRCReturn(rc, rc);
+#else
+ pVM->nem.s.fAllow64BitGuests = false;
+#endif
+
+ /** @cfgm{/NEM/LovelyMesaDrvWorkaround, bool, false}
+ * Workaround for mesa vmsvga 3d driver making incorrect assumptions about
+ * the hypervisor it is running under. */
+ bool f;
+ rc = CFGMR3QueryBoolDef(pCfgNem, "LovelyMesaDrvWorkaround", &f, false);
+ AssertLogRelRCReturn(rc, rc);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv = f;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This is called by HMR3Init() when HM cannot be used.
+ *
+ * Sets VM::bMainExecutionEngine to VM_EXEC_ENGINE_NATIVE_API if we can use a
+ * native hypervisor API to execute the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFallback Whether this is a fallback call. Cleared if the VM is
+ * configured to use NEM instead of HM.
+ * @param fForced Whether /HM/HMForced was set. If set and we fail to
+ * enable NEM, we'll return a failure status code.
+ * Otherwise we'll assume HMR3Init falls back on raw-mode.
+ */
+VMMR3_INT_DECL(int) NEMR3Init(PVM pVM, bool fFallback, bool fForced)
+{
+ Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API);
+ int rc;
+ if (pVM->nem.s.fEnabled)
+ {
+#ifdef VBOX_WITH_NATIVE_NEM
+ rc = nemR3NativeInit(pVM, fFallback, fForced);
+ ASMCompilerBarrier(); /* May have changed bMainExecutionEngine. */
+#else
+ RT_NOREF(fFallback);
+ rc = VINF_SUCCESS;
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ {
+#ifdef RT_OS_WINDOWS /* The WHv* API is extremely slow at handling VM exits. The AppleHv and
+ KVM APIs are much faster, thus the different mode name. :-) */
+ LogRel(("NEM:\n"
+ "NEM: NEMR3Init: Snail execution mode is active!\n"
+ "NEM: Note! VirtualBox is not able to run at its full potential in this execution mode.\n"
+ "NEM: To see VirtualBox run at max speed you need to disable all Windows features\n"
+ "NEM: making use of Hyper-V. That is a moving target, so google how and carefully\n"
+ "NEM: consider the consequences of disabling these features.\n"
+ "NEM:\n"));
+#else
+ LogRel(("NEM:\n"
+ "NEM: NEMR3Init: Turtle execution mode is active!\n"
+ "NEM: Note! VirtualBox is not able to run at its full potential in this execution mode.\n"
+ "NEM:\n"));
+#endif
+ }
+ else
+ {
+ LogRel(("NEM: NEMR3Init: Not available.\n"));
+ if (fForced)
+ rc = VERR_NEM_NOT_AVAILABLE;
+ }
+ }
+ else
+ LogRel(("NEM: NEMR3Init: Native init failed: %Rrc.\n", rc));
+ }
+ else
+ {
+ LogRel(("NEM: NEMR3Init: Disabled.\n"));
+ rc = fForced ? VERR_NEM_NOT_ENABLED : VINF_SUCCESS;
+ }
+ return rc;
+}
+
+
+/**
+ * Perform initialization that depends on CPUM working.
+ *
+ * This is a noop if NEM wasn't activated by a previous NEMR3Init() call.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) NEMR3InitAfterCPUM(PVM pVM)
+{
+ int rc = VINF_SUCCESS;
+ if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ {
+ /*
+ * Do native after-CPUM init.
+ */
+#ifdef VBOX_WITH_NATIVE_NEM
+ rc = nemR3NativeInitAfterCPUM(pVM);
+#else
+ RT_NOREF(pVM);
+#endif
+ }
+ return rc;
+}
+
+
+/**
+ * Called when a init phase has completed.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat The phase that completed.
+ */
+VMMR3_INT_DECL(int) NEMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ /*
+ * Check if GIM needs #UD, since that applies to everyone.
+ */
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->nem.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);
+ }
+
+ /*
+ * Call native code.
+ */
+ int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ rc = nemR3NativeInitCompleted(pVM, enmWhat);
+#else
+ RT_NOREF(pVM, enmWhat);
+#endif
+ return rc;
+}
+
+
+/**
+ * Terminates the NEM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) NEMR3Term(PVM pVM)
+{
+ AssertReturn(pVM->nem.s.u32Magic == NEM_MAGIC, VERR_WRONG_ORDER);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ AssertReturn(pVM->apCpusR3[idCpu]->nem.s.u32Magic == NEMCPU_MAGIC, VERR_WRONG_ORDER);
+
+ /* Do native termination. */
+ int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ rc = nemR3NativeTerm(pVM);
+#endif
+
+ /* Mark it as terminated. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->nem.s.u32Magic = NEMCPU_MAGIC_DEAD;
+ }
+ pVM->nem.s.u32Magic = NEM_MAGIC_DEAD;
+ return rc;
+}
+
+
+/**
+ * External interface for querying whether native execution API is used.
+ *
+ * @returns true if NEM is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ * @sa HMR3IsEnabled
+ */
+VMMR3DECL(bool) NEMR3IsEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return VM_IS_NEM_ENABLED(pVM);
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) NEMR3Reset(PVM pVM)
+{
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ nemR3NativeReset(pVM);
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used to bring up secondary CPUs on SMP as well as CPU hot plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure to reset.
+ * @param fInitIpi Set if being reset due to INIT IPI.
+ */
+VMMR3_INT_DECL(void) NEMR3ResetCpu(PVMCPU pVCpu, bool fInitIpi)
+{
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (pVCpu->pVMR3->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
+ nemR3NativeResetCpu(pVCpu, fInitIpi);
+#else
+ RT_NOREF(pVCpu, fInitIpi);
+#endif
+}
+
+
+/**
+ * Indicates to TM that TMTSCMODE_NATIVE_API should be used for TSC.
+ *
+ * @returns true if TMTSCMODE_NATIVE_API must be used, otherwise @c false.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) NEMR3NeedSpecialTscMode(PVM pVM)
+{
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (VM_IS_NEM_ENABLED(pVM))
+ return true;
+#else
+ RT_NOREF(pVM);
+#endif
+ return false;
+}
+
+
+/**
+ * Gets the name of a generic NEM exit code.
+ *
+ * @returns Pointer to a read-only string if @a uExit is known, otherwise NULL.
+ * @param uExit The NEM exit to name.
+ */
+VMMR3DECL(const char *) NEMR3GetExitName(uint32_t uExit)
+{
+ switch ((NEMEXITTYPE)uExit)
+ {
+ case NEMEXITTYPE_INTTERRUPT_WINDOW: return "NEM interrupt window";
+ case NEMEXITTYPE_HALT: return "NEM halt";
+
+ case NEMEXITTYPE_UNRECOVERABLE_EXCEPTION: return "NEM unrecoverable exception";
+ case NEMEXITTYPE_INVALID_VP_REGISTER_VALUE: return "NEM invalid vp register value";
+ case NEMEXITTYPE_XCPT_UD: return "NEM #UD";
+ case NEMEXITTYPE_XCPT_DB: return "NEM #DB";
+ case NEMEXITTYPE_XCPT_BP: return "NEM #BP";
+ case NEMEXITTYPE_CANCELED: return "NEM canceled";
+ case NEMEXITTYPE_MEMORY_ACCESS: return "NEM memory access";
+
+ case NEMEXITTYPE_INTERNAL_ERROR_EMULATION: return "NEM emulation IPE";
+ case NEMEXITTYPE_INTERNAL_ERROR_FATAL: return "NEM fatal IPE";
+ case NEMEXITTYPE_INTERRUPTED: return "NEM interrupted";
+ case NEMEXITTYPE_FAILED_ENTRY: return "NEM failed VT-x/AMD-V entry";
+
+ case NEMEXITTYPE_INVALID:
+ case NEMEXITTYPE_END:
+ break;
+ }
+
+ return NULL;
+}
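+
+/*
+ * Illustrative usage sketch (not part of the original change): since the
+ * lookup above returns NULL for unknown exit codes, callers are expected to
+ * supply their own fallback, e.g.:
+ *
+ *     const char *pszExit = NEMR3GetExitName(uExit);
+ *     LogRel(("NEM: exit %#x (%s)\n", uExit, pszExit ? pszExit : "unknown"));
+ */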
+
+
+VMMR3_INT_DECL(VBOXSTRICTRC) NEMR3RunGC(PVM pVM, PVMCPU pVCpu)
+{
+ Assert(VM_IS_NEM_ENABLED(pVM));
+#ifdef VBOX_WITH_NATIVE_NEM
+ return nemR3NativeRunGC(pVM, pVCpu);
+#else
+ NOREF(pVM); NOREF(pVCpu);
+ return VERR_INTERNAL_ERROR_3;
+#endif
+}
+
+
+#ifndef VBOX_WITH_NATIVE_NEM
+VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
+{
+ RT_NOREF(pVM, pVCpu);
+ return false;
+}
+#endif
+
+
+VMMR3_INT_DECL(bool) NEMR3SetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+ Assert(VM_IS_NEM_ENABLED(pVM));
+#ifdef VBOX_WITH_NATIVE_NEM
+ return nemR3NativeSetSingleInstruction(pVM, pVCpu, fEnable);
+#else
+ NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
+ return false;
+#endif
+}
+
+
+VMMR3_INT_DECL(void) NEMR3NotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));
+#ifdef VBOX_WITH_NATIVE_NEM
+ nemR3NativeNotifyFF(pVM, pVCpu, fFlags);
+#else
+ RT_NOREF(pVM, pVCpu, fFlags);
+#endif
+}
+
+#ifndef VBOX_WITH_NATIVE_NEM
+
+VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
+{
+ RT_NOREF(pVCpu, fEnabled);
+}
+
+# ifdef VBOX_WITH_PGM_NEM_MODE
+
+VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return false;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
+ void *pvBitmap, size_t cbBitmap)
+{
+ RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange);
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+}
+
+# endif /* VBOX_WITH_PGM_NEM_MODE */
+#endif /* !VBOX_WITH_NATIVE_NEM */
+
+/**
+ * Notification callback from DBGF for when interrupt breakpoints or generic
+ * debug event settings change.
+ *
+ * DBGF will call NEMR3NotifyDebugEventChangedPerCpu on each CPU afterwards;
+ * this function only updates the VM globals.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(void) NEMR3NotifyDebugEventChanged(PVM pVM)
+{
+ AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Interrupts. */
+ bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
+ || pVM->dbgf.ro.cHardIntBreakpoints > 0;
+
+ /* CPU Exceptions. */
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ /* Common VM exits. */
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ /* Done. */
+ pVM->nem.s.fUseDebugLoop = nemR3NativeNotifyDebugEventChanged(pVM, fUseDebugLoop);
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+/**
+ * Follow-up notification callback to NEMR3NotifyDebugEventChanged for each CPU.
+ *
+ * NEM uses this to combine the decision made in NEMR3NotifyDebugEventChanged
+ * with the per-CPU settings.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+VMMR3_INT_DECL(void) NEMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
+{
+ AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ pVCpu->nem.s.fUseDebugLoop = nemR3NativeNotifyDebugEventChangedPerCpu(pVM, pVCpu,
+ pVCpu->nem.s.fSingleInstruction | pVM->nem.s.fUseDebugLoop);
+#else
+ RT_NOREF(pVM, pVCpu);
+#endif
+}
+
+
+/**
+ * Disables a CPU ISA extension, like MONITOR/MWAIT.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ * @param pszIsaExt The ISA extension name in the config tree.
+ */
+int nemR3DisableCpuIsaExt(PVM pVM, const char *pszIsaExt)
+{
+ /*
+ * Get IsaExts config node under CPUM.
+ */
+ PCFGMNODE pIsaExts = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/CPUM/IsaExts");
+ if (!pIsaExts)
+ {
+ int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "/CPUM/IsaExts", &pIsaExts);
+ AssertLogRelMsgReturn(RT_SUCCESS(rc), ("CFGMR3InsertNode: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt), rc);
+ }
+
+ /*
+ * Look for a value by the given name (pszIsaExt).
+ */
+ /* Integer values 1 (CPUMISAEXTCFG_ENABLED_SUPPORTED) and 9 (CPUMISAEXTCFG_ENABLED_PORTABLE) will be replaced. */
+ uint64_t u64Value;
+ int rc = CFGMR3QueryInteger(pIsaExts, pszIsaExt, &u64Value);
+ if (RT_SUCCESS(rc))
+ {
+ if (u64Value != 1 && u64Value != 9)
+ {
+ LogRel(("NEM: Not disabling IsaExt '%s', already configured with int value %lld\n", pszIsaExt, u64Value));
+ return VINF_SUCCESS;
+ }
+ CFGMR3RemoveValue(pIsaExts, pszIsaExt);
+ }
+ /* String value 'default', 'enabled' and 'portable' will be replaced. */
+ else if (rc == VERR_CFGM_NOT_INTEGER)
+ {
+ char szValue[32];
+ rc = CFGMR3QueryString(pIsaExts, pszIsaExt, szValue, sizeof(szValue));
+ AssertRCReturn(rc, VINF_SUCCESS);
+
+ if ( RTStrICmpAscii(szValue, "default") != 0
+ && RTStrICmpAscii(szValue, "def") != 0
+ && RTStrICmpAscii(szValue, "enabled") != 0
+ && RTStrICmpAscii(szValue, "enable") != 0
+ && RTStrICmpAscii(szValue, "on") != 0
+ && RTStrICmpAscii(szValue, "yes") != 0
+ && RTStrICmpAscii(szValue, "portable") != 0)
+ {
+ LogRel(("NEM: Not disabling IsaExt '%s', already configured with string value '%s'\n", pszIsaExt, szValue));
+ return VINF_SUCCESS;
+ }
+ CFGMR3RemoveValue(pIsaExts, pszIsaExt);
+ }
+ else
+ AssertLogRelMsgReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, ("CFGMR3QueryInteger: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt),
+ VERR_NEM_IPE_8);
+
+ /*
+ * Insert the disabling value.
+ */
+ rc = CFGMR3InsertInteger(pIsaExts, pszIsaExt, 0 /* disabled */);
+ AssertLogRelMsgReturn(RT_SUCCESS(rc), ("CFGMR3InsertInteger: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt), rc);
+
+ return VINF_SUCCESS;
+}
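+
+/*
+ * Illustrative usage sketch (not part of the original change; the extension
+ * name used here is an assumption): a native backend that cannot virtualize a
+ * given ISA extension would call the helper above from its init code, e.g.:
+ *
+ *     int rc2 = nemR3DisableCpuIsaExt(pVM, "MonitorMWait");
+ *     AssertLogRelRCReturn(rc2, rc2);
+ *
+ * Only 'default'/'enabled'-style settings are overridden; explicit user
+ * configuration is left untouched, as the function body above shows.
+ */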
+
diff --git a/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp b/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
new file mode 100644
index 00000000..671c728b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
@@ -0,0 +1,4615 @@
+/* $Id: NEMR3Native-darwin.cpp $ */
+/** @file
+ * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework.
+ *
+ * Log group 2: Exit logging.
+ * Log group 3: Log context on exit.
+ * Log group 5: Ring-3 memory management
+ */
+
+/*
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NEM
+#define VMCPU_INCL_CPUM_GST_CTX
+#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/dbgftrace.h>
+#include <VBox/vmm/gcm.h>
+#include "VMXInternal.h"
+#include "NEMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include "dtrace/VBoxVMM.h"
+
+#include <iprt/asm.h>
+#include <iprt/ldr.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+#include <iprt/system.h>
+#include <iprt/utf16.h>
+
+#include <mach/mach_time.h>
+#include <mach/kern_return.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/* No nested hwvirt (for now). */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# undef VBOX_WITH_NESTED_HWVIRT_VMX
+#endif
+
+
+/** @name HV return codes.
+ * @{ */
+/** Operation was successful. */
+#define HV_SUCCESS 0
+/** An error occurred during operation. */
+#define HV_ERROR 0xfae94001
+/** The operation could not be completed right now, try again. */
+#define HV_BUSY 0xfae94002
+/** One of the parameters passed was invalid. */
+#define HV_BAD_ARGUMENT 0xfae94003
+/** Not enough resources left to fulfill the operation. */
+#define HV_NO_RESOURCES 0xfae94005
+/** The device could not be found. */
+#define HV_NO_DEVICE 0xfae94006
+/** The operation is not supported on this platform with this configuration. */
+#define HV_UNSUPPORTED 0xfae94007
+/** @} */
+
+
+/** @name HV memory protection flags.
+ * @{ */
+/** Memory is readable. */
+#define HV_MEMORY_READ RT_BIT_64(0)
+/** Memory is writeable. */
+#define HV_MEMORY_WRITE RT_BIT_64(1)
+/** Memory is executable. */
+#define HV_MEMORY_EXEC RT_BIT_64(2)
+/** @} */
+
+
+/** @name HV shadow VMCS protection flags.
+ * @{ */
+/** Shadow VMCS field is not accessible. */
+#define HV_SHADOW_VMCS_NONE 0
+/** Shadow VMCS field is readable. */
+#define HV_SHADOW_VMCS_READ RT_BIT_64(0)
+/** Shadow VMCS field is writeable. */
+#define HV_SHADOW_VMCS_WRITE RT_BIT_64(1)
+/** @} */
+
+
+/** Default VM creation flags. */
+#define HV_VM_DEFAULT 0
+/** Default guest address space creation flags. */
+#define HV_VM_SPACE_DEFAULT 0
+/** Default vCPU creation flags. */
+#define HV_VCPU_DEFAULT 0
+
+#define HV_DEADLINE_FOREVER UINT64_MAX
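+
+/*
+ * Illustrative note (not part of the original change): HV_DEADLINE_FOREVER is
+ * the deadline value to pass to hv_vcpu_run_until() when no timeout on guest
+ * execution is wanted, e.g.:
+ *
+ *     hv_return_t hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, HV_DEADLINE_FOREVER);
+ */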
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+
+/** HV return code type. */
+typedef uint32_t hv_return_t;
+/** HV capability bitmask. */
+typedef uint64_t hv_capability_t;
+/** Option bitmask type when creating a VM. */
+typedef uint64_t hv_vm_options_t;
+/** Option bitmask when creating a vCPU. */
+typedef uint64_t hv_vcpu_options_t;
+/** HV memory protection flags type. */
+typedef uint64_t hv_memory_flags_t;
+/** Shadow VMCS protection flags. */
+typedef uint64_t hv_shadow_flags_t;
+/** Guest physical address type. */
+typedef uint64_t hv_gpaddr_t;
+
+
+/**
+ * VMX Capability enumeration.
+ */
+typedef enum
+{
+ HV_VMX_CAP_PINBASED = 0,
+ HV_VMX_CAP_PROCBASED,
+ HV_VMX_CAP_PROCBASED2,
+ HV_VMX_CAP_ENTRY,
+ HV_VMX_CAP_EXIT,
+ HV_VMX_CAP_BASIC, /* Since 11.0 */
+ HV_VMX_CAP_TRUE_PINBASED, /* Since 11.0 */
+ HV_VMX_CAP_TRUE_PROCBASED, /* Since 11.0 */
+ HV_VMX_CAP_TRUE_ENTRY, /* Since 11.0 */
+ HV_VMX_CAP_TRUE_EXIT, /* Since 11.0 */
+ HV_VMX_CAP_MISC, /* Since 11.0 */
+ HV_VMX_CAP_CR0_FIXED0, /* Since 11.0 */
+ HV_VMX_CAP_CR0_FIXED1, /* Since 11.0 */
+ HV_VMX_CAP_CR4_FIXED0, /* Since 11.0 */
+ HV_VMX_CAP_CR4_FIXED1, /* Since 11.0 */
+ HV_VMX_CAP_VMCS_ENUM, /* Since 11.0 */
+ HV_VMX_CAP_EPT_VPID_CAP, /* Since 11.0 */
+ HV_VMX_CAP_PREEMPTION_TIMER = 32
+} hv_vmx_capability_t;
+
+
+/**
+ * MSR information.
+ */
+typedef enum
+{
+ HV_VMX_INFO_MSR_IA32_ARCH_CAPABILITIES = 0,
+ HV_VMX_INFO_MSR_IA32_PERF_CAPABILITIES,
+ HV_VMX_VALID_MSR_IA32_PERFEVNTSEL,
+ HV_VMX_VALID_MSR_IA32_FIXED_CTR_CTRL,
+ HV_VMX_VALID_MSR_IA32_PERF_GLOBAL_CTRL,
+ HV_VMX_VALID_MSR_IA32_PERF_GLOBAL_STATUS,
+ HV_VMX_VALID_MSR_IA32_DEBUGCTL,
+ HV_VMX_VALID_MSR_IA32_SPEC_CTRL,
+ HV_VMX_NEED_MSR_IA32_SPEC_CTRL
+} hv_vmx_msr_info_t;
+
+
+/**
+ * HV x86 register enumeration.
+ */
+typedef enum
+{
+ HV_X86_RIP = 0,
+ HV_X86_RFLAGS,
+ HV_X86_RAX,
+ HV_X86_RCX,
+ HV_X86_RDX,
+ HV_X86_RBX,
+ HV_X86_RSI,
+ HV_X86_RDI,
+ HV_X86_RSP,
+ HV_X86_RBP,
+ HV_X86_R8,
+ HV_X86_R9,
+ HV_X86_R10,
+ HV_X86_R11,
+ HV_X86_R12,
+ HV_X86_R13,
+ HV_X86_R14,
+ HV_X86_R15,
+ HV_X86_CS,
+ HV_X86_SS,
+ HV_X86_DS,
+ HV_X86_ES,
+ HV_X86_FS,
+ HV_X86_GS,
+ HV_X86_IDT_BASE,
+ HV_X86_IDT_LIMIT,
+ HV_X86_GDT_BASE,
+ HV_X86_GDT_LIMIT,
+ HV_X86_LDTR,
+ HV_X86_LDT_BASE,
+ HV_X86_LDT_LIMIT,
+ HV_X86_LDT_AR,
+ HV_X86_TR,
+ HV_X86_TSS_BASE,
+ HV_X86_TSS_LIMIT,
+ HV_X86_TSS_AR,
+ HV_X86_CR0,
+ HV_X86_CR1,
+ HV_X86_CR2,
+ HV_X86_CR3,
+ HV_X86_CR4,
+ HV_X86_DR0,
+ HV_X86_DR1,
+ HV_X86_DR2,
+ HV_X86_DR3,
+ HV_X86_DR4,
+ HV_X86_DR5,
+ HV_X86_DR6,
+ HV_X86_DR7,
+ HV_X86_TPR,
+ HV_X86_XCR0,
+ HV_X86_REGISTERS_MAX
+} hv_x86_reg_t;
+
+
+/** MSR permission flags type. */
+typedef uint32_t hv_msr_flags_t;
+/** MSR can't be accessed. */
+#define HV_MSR_NONE 0
+/** MSR is readable by the guest. */
+#define HV_MSR_READ RT_BIT(0)
+/** MSR is writeable by the guest. */
+#define HV_MSR_WRITE RT_BIT(1)
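+
+/*
+ * Illustrative usage sketch (not part of the original change; whether the host
+ * actually grants pass-through for a given MSR is not guaranteed): with the
+ * macOS 11.0 managed MSR API the flags above combine like so:
+ *
+ *     hrc = hv_vcpu_enable_managed_msr(hVCpuId, MSR_K8_TSC_AUX, true);
+ *     if (hrc == HV_SUCCESS)
+ *         hrc = hv_vcpu_set_msr_access(hVCpuId, MSR_K8_TSC_AUX, HV_MSR_READ | HV_MSR_WRITE);
+ */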
+
+
+typedef hv_return_t FN_HV_CAPABILITY(hv_capability_t capability, uint64_t *value);
+typedef hv_return_t FN_HV_VM_CREATE(hv_vm_options_t flags);
+typedef hv_return_t FN_HV_VM_DESTROY(void);
+typedef hv_return_t FN_HV_VM_SPACE_CREATE(hv_vm_space_t *asid);
+typedef hv_return_t FN_HV_VM_SPACE_DESTROY(hv_vm_space_t asid);
+typedef hv_return_t FN_HV_VM_MAP(const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
+typedef hv_return_t FN_HV_VM_UNMAP(hv_gpaddr_t gpa, size_t size);
+typedef hv_return_t FN_HV_VM_PROTECT(hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
+typedef hv_return_t FN_HV_VM_MAP_SPACE(hv_vm_space_t asid, const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
+typedef hv_return_t FN_HV_VM_UNMAP_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size);
+typedef hv_return_t FN_HV_VM_PROTECT_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
+typedef hv_return_t FN_HV_VM_SYNC_TSC(uint64_t tsc);
+
+typedef hv_return_t FN_HV_VCPU_CREATE(hv_vcpuid_t *vcpu, hv_vcpu_options_t flags);
+typedef hv_return_t FN_HV_VCPU_DESTROY(hv_vcpuid_t vcpu);
+typedef hv_return_t FN_HV_VCPU_SET_SPACE(hv_vcpuid_t vcpu, hv_vm_space_t asid);
+typedef hv_return_t FN_HV_VCPU_READ_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t *value);
+typedef hv_return_t FN_HV_VCPU_WRITE_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t value);
+typedef hv_return_t FN_HV_VCPU_READ_FPSTATE(hv_vcpuid_t vcpu, void *buffer, size_t size);
+typedef hv_return_t FN_HV_VCPU_WRITE_FPSTATE(hv_vcpuid_t vcpu, const void *buffer, size_t size);
+typedef hv_return_t FN_HV_VCPU_ENABLE_NATIVE_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
+typedef hv_return_t FN_HV_VCPU_READ_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t *value);
+typedef hv_return_t FN_HV_VCPU_WRITE_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t value);
+typedef hv_return_t FN_HV_VCPU_FLUSH(hv_vcpuid_t vcpu);
+typedef hv_return_t FN_HV_VCPU_INVALIDATE_TLB(hv_vcpuid_t vcpu);
+typedef hv_return_t FN_HV_VCPU_RUN(hv_vcpuid_t vcpu);
+typedef hv_return_t FN_HV_VCPU_RUN_UNTIL(hv_vcpuid_t vcpu, uint64_t deadline);
+typedef hv_return_t FN_HV_VCPU_INTERRUPT(hv_vcpuid_t *vcpus, unsigned int vcpu_count);
+typedef hv_return_t FN_HV_VCPU_GET_EXEC_TIME(hv_vcpuid_t *vcpus, uint64_t *time);
+
+typedef hv_return_t FN_HV_VMX_VCPU_READ_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
+typedef hv_return_t FN_HV_VMX_VCPU_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
+
+typedef hv_return_t FN_HV_VMX_VCPU_READ_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
+typedef hv_return_t FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
+typedef hv_return_t FN_HV_VMX_VCPU_SET_SHADOW_ACCESS(hv_vcpuid_t vcpu, uint32_t field, hv_shadow_flags_t flags);
+
+typedef hv_return_t FN_HV_VMX_READ_CAPABILITY(hv_vmx_capability_t field, uint64_t *value);
+typedef hv_return_t FN_HV_VMX_VCPU_SET_APIC_ADDRESS(hv_vcpuid_t vcpu, hv_gpaddr_t gpa);
+
+/* Since 11.0 */
+typedef hv_return_t FN_HV_VMX_GET_MSR_INFO(hv_vmx_msr_info_t field, uint64_t *value);
+typedef hv_return_t FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *allowed_0, uint64_t *allowed_1);
+typedef hv_return_t FN_HV_VCPU_ENABLE_MANAGED_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
+typedef hv_return_t FN_HV_VCPU_SET_MSR_ACCESS(hv_vcpuid_t vcpu, uint32_t msr, hv_msr_flags_t flags);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static void nemR3DarwinVmcsDump(PVMCPU pVCpu);
+
+/** NEM_DARWIN_PAGE_STATE_XXX names. */
+NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
+/** MSRs. */
+static SUPHWVIRTMSRS g_HmMsrs;
+/** VMX: Set if swapping EFER is supported. */
+static bool g_fHmVmxSupportsVmcsEfer = false;
+/** @name APIs imported from Hypervisor.framework.
+ * @{ */
+static FN_HV_CAPABILITY *g_pfnHvCapability = NULL; /* Since 10.15 */
+static FN_HV_VM_CREATE *g_pfnHvVmCreate = NULL; /* Since 10.10 */
+static FN_HV_VM_DESTROY *g_pfnHvVmDestroy = NULL; /* Since 10.10 */
+static FN_HV_VM_SPACE_CREATE *g_pfnHvVmSpaceCreate = NULL; /* Since 10.15 */
+static FN_HV_VM_SPACE_DESTROY *g_pfnHvVmSpaceDestroy = NULL; /* Since 10.15 */
+static FN_HV_VM_MAP *g_pfnHvVmMap = NULL; /* Since 10.10 */
+static FN_HV_VM_UNMAP *g_pfnHvVmUnmap = NULL; /* Since 10.10 */
+static FN_HV_VM_PROTECT *g_pfnHvVmProtect = NULL; /* Since 10.10 */
+static FN_HV_VM_MAP_SPACE *g_pfnHvVmMapSpace = NULL; /* Since 10.15 */
+static FN_HV_VM_UNMAP_SPACE *g_pfnHvVmUnmapSpace = NULL; /* Since 10.15 */
+static FN_HV_VM_PROTECT_SPACE *g_pfnHvVmProtectSpace = NULL; /* Since 10.15 */
+static FN_HV_VM_SYNC_TSC *g_pfnHvVmSyncTsc = NULL; /* Since 10.10 */
+
+static FN_HV_VCPU_CREATE *g_pfnHvVCpuCreate = NULL; /* Since 10.10 */
+static FN_HV_VCPU_DESTROY *g_pfnHvVCpuDestroy = NULL; /* Since 10.10 */
+static FN_HV_VCPU_SET_SPACE *g_pfnHvVCpuSetSpace = NULL; /* Since 10.15 */
+static FN_HV_VCPU_READ_REGISTER *g_pfnHvVCpuReadRegister = NULL; /* Since 10.10 */
+static FN_HV_VCPU_WRITE_REGISTER *g_pfnHvVCpuWriteRegister = NULL; /* Since 10.10 */
+static FN_HV_VCPU_READ_FPSTATE *g_pfnHvVCpuReadFpState = NULL; /* Since 10.10 */
+static FN_HV_VCPU_WRITE_FPSTATE *g_pfnHvVCpuWriteFpState = NULL; /* Since 10.10 */
+static FN_HV_VCPU_ENABLE_NATIVE_MSR *g_pfnHvVCpuEnableNativeMsr = NULL; /* Since 10.10 */
+static FN_HV_VCPU_READ_MSR *g_pfnHvVCpuReadMsr = NULL; /* Since 10.10 */
+static FN_HV_VCPU_WRITE_MSR *g_pfnHvVCpuWriteMsr = NULL; /* Since 10.10 */
+static FN_HV_VCPU_FLUSH *g_pfnHvVCpuFlush = NULL; /* Since 10.10 */
+static FN_HV_VCPU_INVALIDATE_TLB *g_pfnHvVCpuInvalidateTlb = NULL; /* Since 10.10 */
+static FN_HV_VCPU_RUN *g_pfnHvVCpuRun = NULL; /* Since 10.10 */
+static FN_HV_VCPU_RUN_UNTIL *g_pfnHvVCpuRunUntil = NULL; /* Since 10.15 */
+static FN_HV_VCPU_INTERRUPT *g_pfnHvVCpuInterrupt = NULL; /* Since 10.10 */
+static FN_HV_VCPU_GET_EXEC_TIME *g_pfnHvVCpuGetExecTime = NULL; /* Since 10.10 */
+
+static FN_HV_VMX_READ_CAPABILITY *g_pfnHvVmxReadCapability = NULL; /* Since 10.10 */
+static FN_HV_VMX_VCPU_READ_VMCS *g_pfnHvVmxVCpuReadVmcs = NULL; /* Since 10.10 */
+static FN_HV_VMX_VCPU_WRITE_VMCS *g_pfnHvVmxVCpuWriteVmcs = NULL; /* Since 10.10 */
+static FN_HV_VMX_VCPU_READ_SHADOW_VMCS *g_pfnHvVmxVCpuReadShadowVmcs = NULL; /* Since 10.15 */
+static FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS *g_pfnHvVmxVCpuWriteShadowVmcs = NULL; /* Since 10.15 */
+static FN_HV_VMX_VCPU_SET_SHADOW_ACCESS *g_pfnHvVmxVCpuSetShadowAccess = NULL; /* Since 10.15 */
+static FN_HV_VMX_VCPU_SET_APIC_ADDRESS *g_pfnHvVmxVCpuSetApicAddress = NULL; /* Since 10.10 */
+
+static FN_HV_VMX_GET_MSR_INFO *g_pfnHvVmxGetMsrInfo = NULL; /* Since 11.0 */
+static FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS *g_pfnHvVmxVCpuGetCapWriteVmcs = NULL; /* Since 11.0 */
+static FN_HV_VCPU_ENABLE_MANAGED_MSR *g_pfnHvVCpuEnableManagedMsr = NULL; /* Since 11.0 */
+static FN_HV_VCPU_SET_MSR_ACCESS *g_pfnHvVCpuSetMsrAccess = NULL; /* Since 11.0 */
+/** @} */
+
+
+/**
+ * Import instructions.
+ */
+static const struct
+{
+ bool fOptional; /**< Set if import is optional. */
+ void **ppfn; /**< The function pointer variable. */
+ const char *pszName; /**< The function name. */
+} g_aImports[] =
+{
+#define NEM_DARWIN_IMPORT(a_fOptional, a_Pfn, a_Name) { (a_fOptional), (void **)&(a_Pfn), #a_Name }
+ NEM_DARWIN_IMPORT(true, g_pfnHvCapability, hv_capability),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmCreate, hv_vm_create),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmDestroy, hv_vm_destroy),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceCreate, hv_vm_space_create),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceDestroy, hv_vm_space_destroy),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmMap, hv_vm_map),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmUnmap, hv_vm_unmap),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmProtect, hv_vm_protect),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmMapSpace, hv_vm_map_space),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmUnmapSpace, hv_vm_unmap_space),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmProtectSpace, hv_vm_protect_space),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmSyncTsc, hv_vm_sync_tsc),
+
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuCreate, hv_vcpu_create),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuDestroy, hv_vcpu_destroy),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVCpuSetSpace, hv_vcpu_set_space),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadRegister, hv_vcpu_read_register),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteRegister, hv_vcpu_write_register),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadFpState, hv_vcpu_read_fpstate),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteFpState, hv_vcpu_write_fpstate),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuEnableNativeMsr, hv_vcpu_enable_native_msr),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadMsr, hv_vcpu_read_msr),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteMsr, hv_vcpu_write_msr),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuFlush, hv_vcpu_flush),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInvalidateTlb, hv_vcpu_invalidate_tlb),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuRun, hv_vcpu_run),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVCpuRunUntil, hv_vcpu_run_until),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInterrupt, hv_vcpu_interrupt),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVCpuGetExecTime, hv_vcpu_get_exec_time),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmxReadCapability, hv_vmx_read_capability),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuReadVmcs, hv_vmx_vcpu_read_vmcs),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuWriteVmcs, hv_vmx_vcpu_write_vmcs),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuReadShadowVmcs, hv_vmx_vcpu_read_shadow_vmcs),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuWriteShadowVmcs, hv_vmx_vcpu_write_shadow_vmcs),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuSetShadowAccess, hv_vmx_vcpu_set_shadow_access),
+ NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuSetApicAddress, hv_vmx_vcpu_set_apic_address),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmxGetMsrInfo, hv_vmx_get_msr_info),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuGetCapWriteVmcs, hv_vmx_vcpu_get_cap_write_vmcs),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVCpuEnableManagedMsr, hv_vcpu_enable_managed_msr),
+ NEM_DARWIN_IMPORT(true, g_pfnHvVCpuSetMsrAccess, hv_vcpu_set_msr_access)
+#undef NEM_DARWIN_IMPORT
+};
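+
+/*
+ * Illustrative resolver sketch (not part of the original change; the real
+ * resolution happens in the native init code further down, and the framework
+ * path and loader flags here are assumptions):
+ *
+ *     RTLDRMOD hMod;
+ *     int rc = RTLdrLoadEx("/System/Library/Frameworks/Hypervisor.framework/Hypervisor",
+ *                          &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, NULL);
+ *     if (RT_SUCCESS(rc))
+ *         for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
+ *         {
+ *             int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, g_aImports[i].ppfn);
+ *             if (RT_FAILURE(rc2) && !g_aImports[i].fOptional)
+ *                 return rc2;
+ *         }
+ */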
+
+
+/*
+ * Let the preprocessor alias the APIs to the import variables for better autocompletion.
+ */
+#ifndef IN_SLICKEDIT
+# define hv_capability g_pfnHvCapability
+# define hv_vm_create g_pfnHvVmCreate
+# define hv_vm_destroy g_pfnHvVmDestroy
+# define hv_vm_space_create g_pfnHvVmSpaceCreate
+# define hv_vm_space_destroy g_pfnHvVmSpaceDestroy
+# define hv_vm_map g_pfnHvVmMap
+# define hv_vm_unmap g_pfnHvVmUnmap
+# define hv_vm_protect g_pfnHvVmProtect
+# define hv_vm_map_space g_pfnHvVmMapSpace
+# define hv_vm_unmap_space g_pfnHvVmUnmapSpace
+# define hv_vm_protect_space g_pfnHvVmProtectSpace
+# define hv_vm_sync_tsc g_pfnHvVmSyncTsc
+
+# define hv_vcpu_create g_pfnHvVCpuCreate
+# define hv_vcpu_destroy g_pfnHvVCpuDestroy
+# define hv_vcpu_set_space g_pfnHvVCpuSetSpace
+# define hv_vcpu_read_register g_pfnHvVCpuReadRegister
+# define hv_vcpu_write_register g_pfnHvVCpuWriteRegister
+# define hv_vcpu_read_fpstate g_pfnHvVCpuReadFpState
+# define hv_vcpu_write_fpstate g_pfnHvVCpuWriteFpState
+# define hv_vcpu_enable_native_msr g_pfnHvVCpuEnableNativeMsr
+# define hv_vcpu_read_msr g_pfnHvVCpuReadMsr
+# define hv_vcpu_write_msr g_pfnHvVCpuWriteMsr
+# define hv_vcpu_flush g_pfnHvVCpuFlush
+# define hv_vcpu_invalidate_tlb g_pfnHvVCpuInvalidateTlb
+# define hv_vcpu_run g_pfnHvVCpuRun
+# define hv_vcpu_run_until g_pfnHvVCpuRunUntil
+# define hv_vcpu_interrupt g_pfnHvVCpuInterrupt
+# define hv_vcpu_get_exec_time g_pfnHvVCpuGetExecTime
+
+# define hv_vmx_read_capability g_pfnHvVmxReadCapability
+# define hv_vmx_vcpu_read_vmcs g_pfnHvVmxVCpuReadVmcs
+# define hv_vmx_vcpu_write_vmcs g_pfnHvVmxVCpuWriteVmcs
+# define hv_vmx_vcpu_read_shadow_vmcs g_pfnHvVmxVCpuReadShadowVmcs
+# define hv_vmx_vcpu_write_shadow_vmcs g_pfnHvVmxVCpuWriteShadowVmcs
+# define hv_vmx_vcpu_set_shadow_access g_pfnHvVmxVCpuSetShadowAccess
+# define hv_vmx_vcpu_set_apic_address g_pfnHvVmxVCpuSetApicAddress
+
+# define hv_vmx_get_msr_info g_pfnHvVmxGetMsrInfo
+# define hv_vmx_vcpu_get_cap_write_vmcs g_pfnHvVmxVCpuGetCapWriteVmcs
+# define hv_vcpu_enable_managed_msr g_pfnHvVCpuEnableManagedMsr
+# define hv_vcpu_set_msr_access g_pfnHvVCpuSetMsrAccess
+#endif
+
+static const struct
+{
+ uint32_t u32VmcsFieldId; /**< The VMCS field identifier. */
+ const char *pszVmcsField; /**< The VMCS field name. */
+ bool f64Bit; /**< Set if the field is 64-bit wide. */
+} g_aVmcsFieldsCap[] =
+{
+#define NEM_DARWIN_VMCS64_FIELD_CAP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, true }
+#define NEM_DARWIN_VMCS32_FIELD_CAP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, false }
+
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PIN_EXEC),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PROC_EXEC),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_EXCEPTION_BITMAP),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_EXIT),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_ENTRY),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PROC_EXEC2),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PLE_GAP),
+ NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PLE_WINDOW),
+ NEM_DARWIN_VMCS64_FIELD_CAP(VMX_VMCS64_CTRL_TSC_OFFSET_FULL),
+ NEM_DARWIN_VMCS64_FIELD_CAP(VMX_VMCS64_GUEST_DEBUGCTL_FULL)
+#undef NEM_DARWIN_VMCS64_FIELD_CAP
+#undef NEM_DARWIN_VMCS32_FIELD_CAP
+};
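+
+/*
+ * Illustrative usage sketch (not part of the original change): the table above
+ * lends itself to dumping what the host allows to be written via the macOS
+ * 11.0 hv_vmx_vcpu_get_cap_write_vmcs() API, e.g.:
+ *
+ *     for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFieldsCap); i++)
+ *     {
+ *         uint64_t u64Allowed0 = 0, u64Allowed1 = 0;
+ *         hrc = hv_vmx_vcpu_get_cap_write_vmcs(hVCpuId, g_aVmcsFieldsCap[i].u32VmcsFieldId,
+ *                                              &u64Allowed0, &u64Allowed1);
+ *         if (hrc == HV_SUCCESS)
+ *             LogRel(("NEM: %s: allowed0=%#RX64 allowed1=%#RX64\n",
+ *                     g_aVmcsFieldsCap[i].pszVmcsField, u64Allowed0, u64Allowed1));
+ *     }
+ */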
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
+
+
+/**
+ * Converts a HV return code to a VBox status code.
+ *
+ * @returns VBox status code.
+ * @param hrc The HV return code to convert.
+ */
+DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
+{
+ if (hrc == HV_SUCCESS)
+ return VINF_SUCCESS;
+
+ switch (hrc)
+ {
+ case HV_ERROR: return VERR_INVALID_STATE;
+ case HV_BUSY: return VERR_RESOURCE_BUSY;
+ case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
+ case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
+ case HV_NO_DEVICE: return VERR_NOT_FOUND;
+ case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
+ }
+
+ return VERR_IPE_UNEXPECTED_STATUS;
+}
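+
+/*
+ * Illustrative usage sketch (not part of the original change): all the HV API
+ * wrappers below follow the same convert-on-failure pattern, e.g.:
+ *
+ *     hv_return_t hrc = hv_vm_sync_tsc(uTsc);
+ *     if (hrc != HV_SUCCESS)
+ *         return nemR3DarwinHvSts2Rc(hrc);
+ */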
+
+
+/**
+ * Unmaps the given guest physical address range (page aligned).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The guest physical address to start unmapping at.
+ * @param cb The size of the range to unmap in bytes.
+ * @param pu2State Where to store the new state of the unmapped page, optional.
+ */
+DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
+{
+ if (pu2State && *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED)
+ {
+ Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
+ *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
+ return VINF_SUCCESS;
+ }
+
+ LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
+ hv_return_t hrc;
+ if (pVM->nem.s.fCreatedAsid)
+ hrc = hv_vm_unmap_space(pVM->nem.s.uVmAsid, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, cb);
+ else
+ hrc = hv_vm_unmap(GCPhys, cb);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ {
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
+ if (pu2State)
+ *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
+ Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
+ return VINF_SUCCESS;
+ }
+
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
+ LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
+ GCPhys, hrc));
+ return VERR_NEM_IPE_6;
+}
+
+
+/**
+ * Resolves a NEM page state from the given protection flags.
+ *
+ * @returns NEM page state.
+ * @param fPageProt The page protection flags.
+ */
+DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
+{
+ switch (fPageProt)
+ {
+ case NEM_PAGE_PROT_NONE:
+ return NEM_DARWIN_PAGE_STATE_UNMAPPED;
+ case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
+ return NEM_DARWIN_PAGE_STATE_RX;
+ case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
+ return NEM_DARWIN_PAGE_STATE_RW;
+ case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
+ return NEM_DARWIN_PAGE_STATE_RWX;
+ default:
+ break;
+ }
+
+ AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
+ return NEM_DARWIN_PAGE_STATE_UNMAPPED;
+}
+
+
+/**
+ * Maps a given guest physical address range backed by the given memory with the given
+ * protection flags.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The guest physical address to start mapping.
+ * @param pvRam The R3 pointer of the memory to back the range with.
+ * @param cb The size of the range, page aligned.
+ * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
+ * @param pu2State Where to store the state for the new page, optional.
+ */
+DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
+{
+ LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
+
+ Assert(fPageProt != NEM_PAGE_PROT_NONE);
+
+ hv_memory_flags_t fHvMemProt = 0;
+ if (fPageProt & NEM_PAGE_PROT_READ)
+ fHvMemProt |= HV_MEMORY_READ;
+ if (fPageProt & NEM_PAGE_PROT_WRITE)
+ fHvMemProt |= HV_MEMORY_WRITE;
+ if (fPageProt & NEM_PAGE_PROT_EXECUTE)
+ fHvMemProt |= HV_MEMORY_EXEC;
+
+ hv_return_t hrc;
+ if (pVM->nem.s.fCreatedAsid)
+ hrc = hv_vm_map_space(pVM->nem.s.uVmAsid, pvRam, GCPhys, cb, fHvMemProt);
+ else
+ hrc = hv_vm_map(pvRam, GCPhys, cb, fHvMemProt);
+ if (hrc == HV_SUCCESS)
+ {
+ if (pu2State)
+ *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Changes the protection flags for the given guest physical address range.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The guest physical address of the start of the range.
+ * @param cb The size of the range, page aligned.
+ * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
+ * @param pu2State Where to store the state for the new page, optional.
+ */
+DECLINLINE(int) nemR3DarwinProtect(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
+{
+ hv_memory_flags_t fHvMemProt = 0;
+ if (fPageProt & NEM_PAGE_PROT_READ)
+ fHvMemProt |= HV_MEMORY_READ;
+ if (fPageProt & NEM_PAGE_PROT_WRITE)
+ fHvMemProt |= HV_MEMORY_WRITE;
+ if (fPageProt & NEM_PAGE_PROT_EXECUTE)
+ fHvMemProt |= HV_MEMORY_EXEC;
+
+ hv_return_t hrc;
+ if (pVM->nem.s.fCreatedAsid)
+ hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
+ else
+ hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
+ if (hrc == HV_SUCCESS)
+ {
+ if (pu2State)
+ *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
+{
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
+ if (RT_SUCCESS(rc))
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ return rc;
+}
+
+
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+{
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
+ if (RT_SUCCESS(rc))
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ return rc;
+}
+
+
+#ifdef LOG_ENABLED
+/**
+ * Logs the current CPU state.
+ */
+static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
+{
+ if (LogIs3Enabled())
+ {
+#if 0
+ char szRegs[4096];
+ DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
+ "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
+ "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
+ "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
+ "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
+ "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
+ "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
+ "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
+ "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
+ "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
+ "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
+ "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
+ "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
+ "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
+ "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
+ "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
+ "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
+ " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
+ " efer=%016VR{efer}\n"
+ " pat=%016VR{pat}\n"
+ " sf_mask=%016VR{sf_mask}\n"
+ "krnl_gs_base=%016VR{krnl_gs_base}\n"
+ " lstar=%016VR{lstar}\n"
+ " star=%016VR{star} cstar=%016VR{cstar}\n"
+ "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
+ );
+
+ char szInstr[256];
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
+ DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szInstr, sizeof(szInstr), NULL);
+ Log3(("%s%s\n", szRegs, szInstr));
+#else
+ RT_NOREF(pVM, pVCpu);
+#endif
+ }
+}
+#endif /* LOG_ENABLED */
+
+
+DECLINLINE(int) nemR3DarwinReadVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t *pData)
+{
+ uint64_t u64Data;
+ hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ {
+ *pData = (uint16_t)u64Data;
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3DarwinReadVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t *pData)
+{
+ uint64_t u64Data;
+ hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ {
+ *pData = (uint32_t)u64Data;
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3DarwinReadVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t *pData)
+{
+ hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, pData);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3DarwinWriteVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t u16Val)
+{
+ hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u16Val);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3DarwinWriteVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t u32Val)
+{
+ hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u32Val);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+DECLINLINE(int) nemR3DarwinWriteVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t u64Val)
+{
+ hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u64Val);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+DECLINLINE(int) nemR3DarwinMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val)
+{
+ hv_return_t hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, idMsr, pu64Val);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+#if 0 /*unused*/
+DECLINLINE(int) nemR3DarwinMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Val)
+{
+ hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, idMsr, u64Val);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+#endif
+
+static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
+{
+#define READ_GREG(a_GReg, a_Value) \
+ do \
+ { \
+ hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, (a_GReg), &(a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define READ_VMCS_FIELD(a_Field, a_Value) \
+ do \
+ { \
+ hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &(a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define READ_VMCS16_FIELD(a_Field, a_Value) \
+ do \
+ { \
+ uint64_t u64Data; \
+ hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { (a_Value) = (uint16_t)u64Data; } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define READ_VMCS32_FIELD(a_Field, a_Value) \
+ do \
+ { \
+ uint64_t u64Data; \
+ hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { (a_Value) = (uint32_t)u64Data; } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define READ_MSR(a_Msr, a_Value) \
+ do \
+ { \
+ hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, (a_Msr), &(a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ AssertFailedReturn(VERR_INTERNAL_ERROR); \
+ } while(0)
+
+ STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateImport, x);
+
+ RT_NOREF(pVM);
+ fWhat &= pVCpu->cpum.GstCtx.fExtrn;
+
+ if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+ vmxHCImportGuestIntrState(pVCpu, &pVCpu->nem.s.VmcsInfo);
+
+ /* GPRs */
+ hv_return_t hrc;
+ if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_RAX)
+ READ_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
+ if (fWhat & CPUMCTX_EXTRN_RCX)
+ READ_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
+ if (fWhat & CPUMCTX_EXTRN_RDX)
+ READ_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
+ if (fWhat & CPUMCTX_EXTRN_RBX)
+ READ_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
+ if (fWhat & CPUMCTX_EXTRN_RSP)
+ READ_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
+ if (fWhat & CPUMCTX_EXTRN_RBP)
+ READ_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
+ if (fWhat & CPUMCTX_EXTRN_RSI)
+ READ_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
+ if (fWhat & CPUMCTX_EXTRN_RDI)
+ READ_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
+ if (fWhat & CPUMCTX_EXTRN_R8_R15)
+ {
+ READ_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
+ READ_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
+ READ_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
+ READ_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
+ READ_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
+ READ_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
+ READ_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
+ READ_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
+ }
+ }
+
+ /* RIP & Flags */
+ if (fWhat & CPUMCTX_EXTRN_RIP)
+ READ_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip);
+ if (fWhat & CPUMCTX_EXTRN_RFLAGS)
+ {
+ uint64_t fRFlagsTmp = 0;
+ READ_GREG(HV_X86_RFLAGS, fRFlagsTmp);
+ pVCpu->cpum.GstCtx.rflags.u = fRFlagsTmp;
+ }
+
+ /* Segments */
+#define READ_SEG(a_SReg, a_enmName) \
+ do { \
+ READ_VMCS16_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \
+ READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \
+ READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \
+ READ_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \
+ (a_SReg).ValidSel = (a_SReg).Sel; \
+ } while (0)
+ if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_ES)
+ READ_SEG(pVCpu->cpum.GstCtx.es, ES);
+ if (fWhat & CPUMCTX_EXTRN_CS)
+ READ_SEG(pVCpu->cpum.GstCtx.cs, CS);
+ if (fWhat & CPUMCTX_EXTRN_SS)
+ READ_SEG(pVCpu->cpum.GstCtx.ss, SS);
+ if (fWhat & CPUMCTX_EXTRN_DS)
+ READ_SEG(pVCpu->cpum.GstCtx.ds, DS);
+ if (fWhat & CPUMCTX_EXTRN_FS)
+ READ_SEG(pVCpu->cpum.GstCtx.fs, FS);
+ if (fWhat & CPUMCTX_EXTRN_GS)
+ READ_SEG(pVCpu->cpum.GstCtx.gs, GS);
+ }
+
+ /* Descriptor tables and the task segment. */
+ if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_LDTR)
+ READ_SEG(pVCpu->cpum.GstCtx.ldtr, LDTR);
+
+ if (fWhat & CPUMCTX_EXTRN_TR)
+ {
+ /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
+ to avoid triggering sanity assertions around the code, always fix this up. */
+ READ_SEG(pVCpu->cpum.GstCtx.tr, TR);
+ switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
+ {
+ case X86_SEL_TYPE_SYS_386_TSS_BUSY:
+ case X86_SEL_TYPE_SYS_286_TSS_BUSY:
+ break;
+ case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
+ pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
+ break;
+ case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
+ pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
+ break;
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_IDTR)
+ {
+ READ_VMCS32_FIELD(VMX_VMCS32_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt);
+ READ_VMCS_FIELD(VMX_VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt);
+ }
+ if (fWhat & CPUMCTX_EXTRN_GDTR)
+ {
+ READ_VMCS32_FIELD(VMX_VMCS32_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt);
+ READ_VMCS_FIELD(VMX_VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt);
+ }
+ }
+
+ /* Control registers. */
+ bool fMaybeChangedMode = false;
+ bool fUpdateCr3 = false;
+ if (fWhat & CPUMCTX_EXTRN_CR_MASK)
+ {
+ uint64_t u64CrTmp = 0;
+
+ if (fWhat & CPUMCTX_EXTRN_CR0)
+ {
+ READ_GREG(HV_X86_CR0, u64CrTmp);
+ if (pVCpu->cpum.GstCtx.cr0 != u64CrTmp)
+ {
+ CPUMSetGuestCR0(pVCpu, u64CrTmp);
+ fMaybeChangedMode = true;
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_CR2)
+ READ_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
+ if (fWhat & CPUMCTX_EXTRN_CR3)
+ {
+ READ_GREG(HV_X86_CR3, u64CrTmp);
+ if (pVCpu->cpum.GstCtx.cr3 != u64CrTmp)
+ {
+ CPUMSetGuestCR3(pVCpu, u64CrTmp);
+ fUpdateCr3 = true;
+ }
+
+ /*
+ * If the guest is in PAE mode, sync back the PDPEs into the guest state.
+ * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
+ */
+ if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
+ {
+ X86PDPE aPaePdpes[4];
+ READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE0_FULL, aPaePdpes[0].u);
+ READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE1_FULL, aPaePdpes[1].u);
+ READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE2_FULL, aPaePdpes[2].u);
+ READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE3_FULL, aPaePdpes[3].u);
+ if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
+ {
+ memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
+ fUpdateCr3 = true;
+ }
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_CR4)
+ {
+ READ_GREG(HV_X86_CR4, u64CrTmp);
+ u64CrTmp &= ~VMX_V_CR4_FIXED0;
+
+ if (pVCpu->cpum.GstCtx.cr4 != u64CrTmp)
+ {
+ CPUMSetGuestCR4(pVCpu, u64CrTmp);
+ fMaybeChangedMode = true;
+ }
+ }
+ }
+
+#if 0 /* Always done. */
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ {
+ uint64_t u64Cr8 = 0;
+
+ READ_GREG(HV_X86_TPR, u64Cr8);
+ APICSetTpr(pVCpu, u64Cr8 << 4);
+ }
+#endif
+
+ if (fWhat & CPUMCTX_EXTRN_XCRx)
+ READ_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
+
+ /* Debug registers. */
+ if (fWhat & CPUMCTX_EXTRN_DR7)
+ {
+ uint64_t u64Dr7;
+ READ_GREG(HV_X86_DR7, u64Dr7);
+ if (pVCpu->cpum.GstCtx.dr[7] != u64Dr7)
+ CPUMSetGuestDR7(pVCpu, u64Dr7);
+ pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
+ }
+ if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
+ {
+ uint64_t u64DrTmp;
+
+ READ_GREG(HV_X86_DR0, u64DrTmp);
+ if (pVCpu->cpum.GstCtx.dr[0] != u64DrTmp)
+ CPUMSetGuestDR0(pVCpu, u64DrTmp);
+ READ_GREG(HV_X86_DR1, u64DrTmp);
+ if (pVCpu->cpum.GstCtx.dr[1] != u64DrTmp)
+ CPUMSetGuestDR1(pVCpu, u64DrTmp);
+ READ_GREG(HV_X86_DR2, u64DrTmp);
+ if (pVCpu->cpum.GstCtx.dr[2] != u64DrTmp)
+ CPUMSetGuestDR2(pVCpu, u64DrTmp);
+ READ_GREG(HV_X86_DR3, u64DrTmp);
+ if (pVCpu->cpum.GstCtx.dr[3] != u64DrTmp)
+ CPUMSetGuestDR3(pVCpu, u64DrTmp);
+ }
+ if (fWhat & CPUMCTX_EXTRN_DR6)
+ {
+ uint64_t u64Dr6;
+ READ_GREG(HV_X86_DR6, u64Dr6);
+ if (pVCpu->cpum.GstCtx.dr[6] != u64Dr6)
+ CPUMSetGuestDR6(pVCpu, u64Dr6);
+ }
+
+ if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
+ {
+ hrc = hv_vcpu_read_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
+ if (hrc == HV_SUCCESS)
+ { /* likely */ }
+ else
+ {
+ STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
+ return nemR3DarwinHvSts2Rc(hrc);
+ }
+ }
+
+ /* MSRs */
+ if (fWhat & CPUMCTX_EXTRN_EFER)
+ {
+ uint64_t u64Efer;
+
+ READ_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, u64Efer);
+ if (u64Efer != pVCpu->cpum.GstCtx.msrEFER)
+ {
+ Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, u64Efer));
+ if ((u64Efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
+ PGMNotifyNxeChanged(pVCpu, RT_BOOL(u64Efer & MSR_K6_EFER_NXE));
+ pVCpu->cpum.GstCtx.msrEFER = u64Efer;
+ fMaybeChangedMode = true;
+ }
+ }
+
+ if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+ READ_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
+ if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+ {
+ uint64_t u64Tmp;
+ READ_MSR(MSR_IA32_SYSENTER_EIP, u64Tmp);
+ pVCpu->cpum.GstCtx.SysEnter.eip = u64Tmp;
+ READ_MSR(MSR_IA32_SYSENTER_ESP, u64Tmp);
+ pVCpu->cpum.GstCtx.SysEnter.esp = u64Tmp;
+ READ_MSR(MSR_IA32_SYSENTER_CS, u64Tmp);
+ pVCpu->cpum.GstCtx.SysEnter.cs = u64Tmp;
+ }
+ if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
+ {
+ READ_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
+ READ_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
+ READ_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
+ READ_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
+ }
+ if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
+ {
+ PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+ READ_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+ }
+ if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
+ {
+ /* Last Branch Record. */
+ if (pVM->nem.s.fLbr)
+ {
+ PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+ uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+ uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
+ uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
+ uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+ Assert(cLbrStack <= 32);
+ for (uint32_t i = 0; i < cLbrStack; i++)
+ {
+ READ_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
+
+ /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+ if (idToIpMsrStart != 0)
+ READ_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
+ if (idInfoMsrStart != 0)
+ READ_MSR(idInfoMsrStart + i, pVmcsInfoShared->au64LbrInfoMsr[i]);
+ }
+
+ READ_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
+
+ if (pVM->nem.s.idLerFromIpMsr)
+ READ_MSR(pVM->nem.s.idLerFromIpMsr, pVmcsInfoShared->u64LerFromIpMsr);
+ if (pVM->nem.s.idLerToIpMsr)
+ READ_MSR(pVM->nem.s.idLerToIpMsr, pVmcsInfoShared->u64LerToIpMsr);
+ }
+ }
+
+ /* Almost done, just update extrn flags and maybe change PGM mode. */
+ pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+
+#ifdef LOG_ENABLED
+ nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
+ /* Typical. */
+ if (!fMaybeChangedMode && !fUpdateCr3)
+ {
+ STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Slow.
+ */
+ if (fMaybeChangedMode)
+ {
+ int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
+ false /* fForce */);
+ AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
+ }
+
+ if (fUpdateCr3)
+ {
+ int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
+ if (rc == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
+ }
+
+ STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
+
+ return VINF_SUCCESS;
+#undef READ_GREG
+#undef READ_VMCS_FIELD
+#undef READ_VMCS16_FIELD
+#undef READ_VMCS32_FIELD
+#undef READ_SEG
+#undef READ_MSR
+}
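+
+/*
+ * Illustrative usage sketch (not part of the original change): exit handlers
+ * typically import only the state they need and leave the rest marked as
+ * external, e.g.:
+ *
+ *     int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
+ *     AssertRCReturn(rc, rc);
+ */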
+
+
+/**
+ * State to pass between vmxHCExitEptViolation
+ * and nemR3DarwinHandleMemoryAccessPageCheckerCallback.
+ */
+typedef struct NEMHCDARWINHMACPCCSTATE
+{
+ /** Input: Write access. */
+ bool fWriteAccess;
+ /** Output: Set if we did something. */
+ bool fDidSomething;
+ /** Output: Set if we should resume. */
+ bool fCanResume;
+} NEMHCDARWINHMACPCCSTATE;
+
+/**
+ * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
+ * Worker for vmxHCExitEptViolation; pvUser points to a
+ * NEMHCDARWINHMACPCCSTATE structure. }
+ */
+static DECLCALLBACK(int)
+nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
+{
+ RT_NOREF(pVCpu);
+
+ NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
+ pState->fDidSomething = false;
+ pState->fCanResume = false;
+
+ uint8_t u2State = pInfo->u2NemState;
+
+ /*
+ * Consolidate current page state with actual page protection and access type.
+ * We don't really consider downgrades here, as they shouldn't happen.
+ */
+ switch (u2State)
+ {
+ case NEM_DARWIN_PAGE_STATE_UNMAPPED:
+ {
+ if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
+ {
+ Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
+ return VINF_SUCCESS;
+ }
+
+ /* Don't bother remapping it if it's a write request to a non-writable page. */
+ if ( pState->fWriteAccess
+ && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
+ {
+ Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
+ return VINF_SUCCESS;
+ }
+
+ int rc = VINF_SUCCESS;
+ if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
+ {
+ void *pvPage;
+ rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhys, &pvPage);
+ if (RT_SUCCESS(rc))
+ rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
+ }
+ else if (pInfo->fNemProt & NEM_PAGE_PROT_READ)
+ {
+ const void *pvPage;
+ rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
+ if (RT_SUCCESS(rc))
+ rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
+ }
+ else /* Only EXECUTE doesn't work. */
+ AssertReleaseFailed();
+
+ pInfo->u2NemState = u2State;
+ Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
+ GCPhys, g_apszPageStates[u2State], rc));
+ pState->fDidSomething = true;
+ pState->fCanResume = true;
+ return rc;
+ }
+ case NEM_DARWIN_PAGE_STATE_RX:
+ if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
+ && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
+ {
+ pState->fCanResume = true;
+ Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case NEM_DARWIN_PAGE_STATE_RW:
+ case NEM_DARWIN_PAGE_STATE_RWX:
+ if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
+ {
+ pState->fCanResume = true;
+ if ( pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_RW
+ || pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_RWX)
+ Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: Spurious EPT fault\n", GCPhys));
+ return VINF_SUCCESS;
+ }
+ break;
+
+ default:
+ AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
+ }
+
+ /* Unmap and restart the instruction. */
+ int rc = nemR3DarwinUnmap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE, &u2State);
+ if (RT_SUCCESS(rc))
+ {
+ pInfo->u2NemState = u2State;
+ pState->fDidSomething = true;
+ pState->fCanResume = true;
+ Log5(("NEM GPA unmapped/exit: %RGp (was %s)\n", GCPhys, g_apszPageStates[u2State]));
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhys=%RGp %s rc=%Rrc\n",
+ GCPhys, g_apszPageStates[u2State], rc));
+ return VERR_NEM_UNMAP_PAGES_FAILED;
+}
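+
+/*
+ * Illustrative usage sketch (not part of the original change; the PGM helper
+ * name follows the FNPGMPHYSNEMCHECKPAGE contract but is otherwise an
+ * assumption): the EPT violation exit path would drive the callback above
+ * along these lines:
+ *
+ *     NEMHCDARWINHMACPCCSTATE State = { fWriteAccess, false, false };
+ *     PGMPHYSNEMPAGEINFO      Info;
+ *     int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, State.fWriteAccess, &Info,
+ *                                        nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
+ */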
+
+
+DECL_FORCE_INLINE(bool) nemR3DarwinIsUnrestrictedGuest(PCVMCC pVM)
+{
+ RT_NOREF(pVM);
+ return true;
+}
+
+
+DECL_FORCE_INLINE(bool) nemR3DarwinIsNestedPaging(PCVMCC pVM)
+{
+ RT_NOREF(pVM);
+ return true;
+}
+
+
+DECL_FORCE_INLINE(bool) nemR3DarwinIsPreemptTimerUsed(PCVMCC pVM)
+{
+ RT_NOREF(pVM);
+ return false;
+}
+
+
+#if 0 /* unused */
+DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM)
+{
+ RT_NOREF(pVM);
+ return false;
+}
+#endif
+
+
+/*
+ * Instantiate the code we share with ring-0.
+ */
+#define IN_NEM_DARWIN
+//#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
+//#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
+//#define HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
+#define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s
+#define VCPU_2_VMXSTATS(a_pVCpu) (*(a_pVCpu)->nem.s.pVmxStats)
+
+#define VM_IS_VMX_UNRESTRICTED_GUEST(a_pVM) nemR3DarwinIsUnrestrictedGuest((a_pVM))
+#define VM_IS_VMX_NESTED_PAGING(a_pVM) nemR3DarwinIsNestedPaging((a_pVM))
+#define VM_IS_VMX_PREEMPT_TIMER_USED(a_pVM) nemR3DarwinIsPreemptTimerUsed((a_pVM))
+#define VM_IS_VMX_LBR(a_pVM) nemR3DarwinIsVmxLbr((a_pVM))
+
+#define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs16((a_pVCpu), (a_FieldEnc), (a_Val))
+#define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs32((a_pVCpu), (a_FieldEnc), (a_Val))
+#define VMX_VMCS_WRITE_64(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
+#define VMX_VMCS_WRITE_NW(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
+
+#define VMX_VMCS_READ_16(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs16((a_pVCpu), (a_FieldEnc), (a_pVal))
+#define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs32((a_pVCpu), (a_FieldEnc), (a_pVal))
+#define VMX_VMCS_READ_64(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
+#define VMX_VMCS_READ_NW(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
+
+#include "../VMMAll/VMXAllTemplate.cpp.h"
+
+#undef VMX_VMCS_WRITE_16
+#undef VMX_VMCS_WRITE_32
+#undef VMX_VMCS_WRITE_64
+#undef VMX_VMCS_WRITE_NW
+
+#undef VMX_VMCS_READ_16
+#undef VMX_VMCS_READ_32
+#undef VMX_VMCS_READ_64
+#undef VMX_VMCS_READ_NW
+
+#undef VM_IS_VMX_LBR
+#undef VM_IS_VMX_PREEMPT_TIMER_USED
+#undef VM_IS_VMX_NESTED_PAGING
+#undef VM_IS_VMX_UNRESTRICTED_GUEST
+#undef VCPU_2_VMXSTATS
+#undef VCPU_2_VMXSTATE
+
+
+/**
+ * Exports the guest GP registers to HV for execution.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ */
+static int nemR3DarwinExportGuestGprs(PVMCPUCC pVCpu)
+{
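+ /* Writes the given general register via hv_vcpu_write_register(), bailing out of the function on failure. */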
+#define WRITE_GREG(a_GReg, a_Value) \
+ do \
+ { \
+ hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+
+ uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->nem.s.fCtxChanged);
+ if (fCtxChanged & HM_CHANGED_GUEST_GPRS_MASK)
+ {
+ if (fCtxChanged & HM_CHANGED_GUEST_RAX)
+ WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
+ if (fCtxChanged & HM_CHANGED_GUEST_RCX)
+ WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
+ if (fCtxChanged & HM_CHANGED_GUEST_RDX)
+ WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
+ if (fCtxChanged & HM_CHANGED_GUEST_RBX)
+ WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
+ if (fCtxChanged & HM_CHANGED_GUEST_RSP)
+ WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
+ if (fCtxChanged & HM_CHANGED_GUEST_RBP)
+ WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
+ if (fCtxChanged & HM_CHANGED_GUEST_RSI)
+ WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
+ if (fCtxChanged & HM_CHANGED_GUEST_RDI)
+ WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
+ if (fCtxChanged & HM_CHANGED_GUEST_R8_R15)
+ {
+ WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
+ WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
+ WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
+ WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
+ WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
+ WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
+ WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
+ WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
+ }
+
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_GPRS_MASK);
+ }
+
+ if (fCtxChanged & HM_CHANGED_GUEST_CR2)
+ {
+ WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_CR2);
+ }
+
+ return VINF_SUCCESS;
+#undef WRITE_GREG
+}
+
+
+/**
+ * Exports the guest debug registers into the guest-state, applying any hypervisor
+ * debug-related state (hardware breakpoints from the debugger, etc.).
+ *
+ * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmxTransient The VMX-transient structure.
+ */
+static int nemR3DarwinExportDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+ PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+
+#ifdef VBOX_STRICT
+ /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
+ if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+ {
+ /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
+ Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
+ Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
+ }
+#endif
+
+ bool fSteppingDB = false;
+ bool fInterceptMovDRx = false;
+ uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
+ if (pVCpu->nem.s.fSingleInstruction)
+ {
+ /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
+ if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
+ {
+ uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
+ Assert(fSteppingDB == false);
+ }
+ else
+ {
+ pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_TF;
+ pVCpu->nem.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
+ pVCpu->nem.s.fClearTrapFlag = true;
+ fSteppingDB = true;
+ }
+ }
+
+ uint64_t u64GuestDr7;
+ if ( fSteppingDB
+ || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
+ {
+ /*
+ * Use the combined guest and host DRx values found in the hypervisor register set
+ * because the hypervisor debugger has breakpoints active or someone is single-stepping
+ * on the host side without a monitor trap flag.
+ *
+ * Note! DBGF expects a clean DR6 state before executing guest code.
+ */
+ if (!CPUMIsHyperDebugStateActive(pVCpu))
+ {
+ /*
+ * Make sure the hypervisor values are up to date.
+ */
+ CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */);
+
+ CPUMR3NemActivateHyperDebugState(pVCpu);
+
+ Assert(CPUMIsHyperDebugStateActive(pVCpu));
+ Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+ }
+
+ /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
+ u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
+ pVCpu->nem.s.fUsingHyperDR7 = true;
+ fInterceptMovDRx = true;
+ }
+ else
+ {
+ /*
+ * If the guest has enabled debug registers, we need to load them prior to
+ * executing guest code so they'll trigger at the right time.
+ */
+ HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
+ if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
+ {
+ if (!CPUMIsGuestDebugStateActive(pVCpu))
+ {
+ CPUMR3NemActivateGuestDebugState(pVCpu);
+
+ Assert(CPUMIsGuestDebugStateActive(pVCpu));
+ Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+ }
+ Assert(!fInterceptMovDRx);
+ }
+ else if (!CPUMIsGuestDebugStateActive(pVCpu))
+ {
+ /*
+ * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
+ * must intercept #DB in order to maintain a correct DR6 guest value, and
+ * because we need to intercept it to prevent nested #DBs from hanging the
+ * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap().
+ */
+ fInterceptMovDRx = true;
+ }
+
+ /* Update DR7 with the actual guest value. */
+ u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
+ pVCpu->nem.s.fUsingHyperDR7 = false;
+ }
+
+ /** @todo The DRx handling is not quite correct, breaking debugging inside the guest with gdb;
+ * see @ticketref{21413} and @ticketref{21546}, so this is disabled for now. See @bugref{10504}
+ * as well.
+ */
+#if 0
+ if (fInterceptMovDRx)
+ uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
+ else
+ uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
+#endif
+
+ /*
+ * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
+ * monitor-trap flag and update our cache.
+ */
+ if (uProcCtls != pVmcsInfo->u32ProcCtls)
+ {
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
+ AssertRC(rc);
+ pVmcsInfo->u32ProcCtls = uProcCtls;
+ }
+
+ /*
+ * Update guest DR7.
+ */
+ int rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_GUEST_DR7, u64GuestDr7);
+ AssertRC(rc);
+
+ /*
+ * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
+ * we need to clear interrupt inhibition if any as otherwise it causes a VM-entry failure.
+ *
+ * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
+ */
+ if (fSteppingDB)
+ {
+ Assert(pVCpu->nem.s.fSingleInstruction);
+ Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
+
+ uint32_t fIntrState = 0;
+ rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
+ AssertRC(rc);
+
+ if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
+ {
+ fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
+ rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+ AssertRC(rc);
+ }
+ }
+
+ /*
+ * Store status of the shared guest/host debug state at the time of VM-entry.
+ */
+ pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
+ pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Converts the given CPUM externalized bitmask to the appropriate HM changed bitmask.
+ *
+ * @returns Bitmask of HM changed flags.
+ * @param fCpumExtrn The CPUM extern bitmask.
+ */
+static uint64_t nemR3DarwinCpumExtrnToHmChanged(uint64_t fCpumExtrn)
+{
+ uint64_t fHmChanged = 0;
+
+ /* Invert to get a mask of things which are kept in CPUM. */
+ uint64_t fCpumIntern = ~fCpumExtrn;
+
+ if (fCpumIntern & CPUMCTX_EXTRN_GPRS_MASK)
+ {
+ if (fCpumIntern & CPUMCTX_EXTRN_RAX)
+ fHmChanged |= HM_CHANGED_GUEST_RAX;
+ if (fCpumIntern & CPUMCTX_EXTRN_RCX)
+ fHmChanged |= HM_CHANGED_GUEST_RCX;
+ if (fCpumIntern & CPUMCTX_EXTRN_RDX)
+ fHmChanged |= HM_CHANGED_GUEST_RDX;
+ if (fCpumIntern & CPUMCTX_EXTRN_RBX)
+ fHmChanged |= HM_CHANGED_GUEST_RBX;
+ if (fCpumIntern & CPUMCTX_EXTRN_RSP)
+ fHmChanged |= HM_CHANGED_GUEST_RSP;
+ if (fCpumIntern & CPUMCTX_EXTRN_RBP)
+ fHmChanged |= HM_CHANGED_GUEST_RBP;
+ if (fCpumIntern & CPUMCTX_EXTRN_RSI)
+ fHmChanged |= HM_CHANGED_GUEST_RSI;
+ if (fCpumIntern & CPUMCTX_EXTRN_RDI)
+ fHmChanged |= HM_CHANGED_GUEST_RDI;
+ if (fCpumIntern & CPUMCTX_EXTRN_R8_R15)
+ fHmChanged |= HM_CHANGED_GUEST_R8_R15;
+ }
+
+ /* RIP & Flags */
+ if (fCpumIntern & CPUMCTX_EXTRN_RIP)
+ fHmChanged |= HM_CHANGED_GUEST_RIP;
+ if (fCpumIntern & CPUMCTX_EXTRN_RFLAGS)
+ fHmChanged |= HM_CHANGED_GUEST_RFLAGS;
+
+ /* Segments */
+ if (fCpumIntern & CPUMCTX_EXTRN_SREG_MASK)
+ {
+ if (fCpumIntern & CPUMCTX_EXTRN_ES)
+ fHmChanged |= HM_CHANGED_GUEST_ES;
+ if (fCpumIntern & CPUMCTX_EXTRN_CS)
+ fHmChanged |= HM_CHANGED_GUEST_CS;
+ if (fCpumIntern & CPUMCTX_EXTRN_SS)
+ fHmChanged |= HM_CHANGED_GUEST_SS;
+ if (fCpumIntern & CPUMCTX_EXTRN_DS)
+ fHmChanged |= HM_CHANGED_GUEST_DS;
+ if (fCpumIntern & CPUMCTX_EXTRN_FS)
+ fHmChanged |= HM_CHANGED_GUEST_FS;
+ if (fCpumIntern & CPUMCTX_EXTRN_GS)
+ fHmChanged |= HM_CHANGED_GUEST_GS;
+ }
+
+ /* Descriptor tables & task segment. */
+ if (fCpumIntern & CPUMCTX_EXTRN_TABLE_MASK)
+ {
+ if (fCpumIntern & CPUMCTX_EXTRN_LDTR)
+ fHmChanged |= HM_CHANGED_GUEST_LDTR;
+ if (fCpumIntern & CPUMCTX_EXTRN_TR)
+ fHmChanged |= HM_CHANGED_GUEST_TR;
+ if (fCpumIntern & CPUMCTX_EXTRN_IDTR)
+ fHmChanged |= HM_CHANGED_GUEST_IDTR;
+ if (fCpumIntern & CPUMCTX_EXTRN_GDTR)
+ fHmChanged |= HM_CHANGED_GUEST_GDTR;
+ }
+
+ /* Control registers. */
+ if (fCpumIntern & CPUMCTX_EXTRN_CR_MASK)
+ {
+ if (fCpumIntern & CPUMCTX_EXTRN_CR0)
+ fHmChanged |= HM_CHANGED_GUEST_CR0;
+ if (fCpumIntern & CPUMCTX_EXTRN_CR2)
+ fHmChanged |= HM_CHANGED_GUEST_CR2;
+ if (fCpumIntern & CPUMCTX_EXTRN_CR3)
+ fHmChanged |= HM_CHANGED_GUEST_CR3;
+ if (fCpumIntern & CPUMCTX_EXTRN_CR4)
+ fHmChanged |= HM_CHANGED_GUEST_CR4;
+ }
+ if (fCpumIntern & CPUMCTX_EXTRN_APIC_TPR)
+ fHmChanged |= HM_CHANGED_GUEST_APIC_TPR;
+
+ /* Debug registers. */
+ if (fCpumIntern & CPUMCTX_EXTRN_DR0_DR3)
+ fHmChanged |= HM_CHANGED_GUEST_DR0_DR3;
+ if (fCpumIntern & CPUMCTX_EXTRN_DR6)
+ fHmChanged |= HM_CHANGED_GUEST_DR6;
+ if (fCpumIntern & CPUMCTX_EXTRN_DR7)
+ fHmChanged |= HM_CHANGED_GUEST_DR7;
+
+ /* Floating point state. */
+ if (fCpumIntern & CPUMCTX_EXTRN_X87)
+ fHmChanged |= HM_CHANGED_GUEST_X87;
+ if (fCpumIntern & CPUMCTX_EXTRN_SSE_AVX)
+ fHmChanged |= HM_CHANGED_GUEST_SSE_AVX;
+ if (fCpumIntern & CPUMCTX_EXTRN_OTHER_XSAVE)
+ fHmChanged |= HM_CHANGED_GUEST_OTHER_XSAVE;
+ if (fCpumIntern & CPUMCTX_EXTRN_XCRx)
+ fHmChanged |= HM_CHANGED_GUEST_XCRx;
+
+ /* MSRs */
+ if (fCpumIntern & CPUMCTX_EXTRN_EFER)
+ fHmChanged |= HM_CHANGED_GUEST_EFER_MSR;
+ if (fCpumIntern & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+ fHmChanged |= HM_CHANGED_GUEST_KERNEL_GS_BASE;
+ if (fCpumIntern & CPUMCTX_EXTRN_SYSENTER_MSRS)
+ fHmChanged |= HM_CHANGED_GUEST_SYSENTER_MSR_MASK;
+ if (fCpumIntern & CPUMCTX_EXTRN_SYSCALL_MSRS)
+ fHmChanged |= HM_CHANGED_GUEST_SYSCALL_MSRS;
+ if (fCpumIntern & CPUMCTX_EXTRN_TSC_AUX)
+ fHmChanged |= HM_CHANGED_GUEST_TSC_AUX;
+ if (fCpumIntern & CPUMCTX_EXTRN_OTHER_MSRS)
+ fHmChanged |= HM_CHANGED_GUEST_OTHER_MSRS;
+
+ return fHmChanged;
+}
+
+
+/**
+ * Exports the guest state to HV for execution.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param pVmxTransient The transient VMX structure.
+ */
+static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+{
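+ /* Helpers wrapping the Hypervisor.framework APIs for writing registers, VMCS fields and MSRs; each bails out of the function on failure. */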
+#define WRITE_GREG(a_GReg, a_Value) \
+ do \
+ { \
+ hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define WRITE_VMCS_FIELD(a_Field, a_Value) \
+ do \
+ { \
+ hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ return VERR_INTERNAL_ERROR; \
+ } while(0)
+#define WRITE_MSR(a_Msr, a_Value) \
+ do \
+ { \
+ hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, (a_Msr), (a_Value)); \
+ if (RT_LIKELY(hrc == HV_SUCCESS)) \
+ { /* likely */ } \
+ else \
+ AssertFailedReturn(VERR_INTERNAL_ERROR); \
+ } while(0)
+
+ RT_NOREF(pVM);
+
+#ifdef LOG_ENABLED
+ nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
+ STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateExport, x);
+
+ uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
+ if (!fWhat)
+ return VINF_SUCCESS;
+
+ pVCpu->nem.s.fCtxChanged |= nemR3DarwinCpumExtrnToHmChanged(pVCpu->cpum.GstCtx.fExtrn);
+
+ int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
+ rc = nemR3DarwinExportGuestGprs(pVCpu);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
+ rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
+ VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
+ if (rcStrict == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ {
+ Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+
+ rc = nemR3DarwinExportDebugState(pVCpu, pVmxTransient);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
+ vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
+ vmxHCExportGuestRip(pVCpu);
+ //vmxHCExportGuestRsp(pVCpu);
+ vmxHCExportGuestRflags(pVCpu, pVmxTransient);
+
+ rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
+ if (fWhat & CPUMCTX_EXTRN_XCRx)
+ {
+ WRITE_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_XCRx);
+ }
+
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ {
+ Assert(pVCpu->nem.s.fCtxChanged & HM_CHANGED_GUEST_APIC_TPR);
+ vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
+
+ rc = APICGetTpr(pVCpu, &pVmxTransient->u8GuestTpr, NULL /*pfPending*/, NULL /*pu8PendingIntr*/);
+ AssertRC(rc);
+
+ WRITE_GREG(HV_X86_TPR, pVmxTransient->u8GuestTpr);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
+ }
+
+ /* Debug registers. */
+ if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
+ {
+ WRITE_GREG(HV_X86_DR0, CPUMGetHyperDR0(pVCpu));
+ WRITE_GREG(HV_X86_DR1, CPUMGetHyperDR1(pVCpu));
+ WRITE_GREG(HV_X86_DR2, CPUMGetHyperDR2(pVCpu));
+ WRITE_GREG(HV_X86_DR3, CPUMGetHyperDR3(pVCpu));
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR0_DR3);
+ }
+ if (fWhat & CPUMCTX_EXTRN_DR6)
+ {
+ WRITE_GREG(HV_X86_DR6, CPUMGetHyperDR6(pVCpu));
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR6);
+ }
+ if (fWhat & CPUMCTX_EXTRN_DR7)
+ {
+ WRITE_GREG(HV_X86_DR7, CPUMGetHyperDR7(pVCpu));
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR7);
+ }
+
+ if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+ {
+ hv_return_t hrc = hv_vcpu_write_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
+ if (hrc == HV_SUCCESS)
+ { /* likely */ }
+ else
+ return nemR3DarwinHvSts2Rc(hrc);
+
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(HM_CHANGED_GUEST_X87 | HM_CHANGED_GUEST_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE));
+ }
+
+ /* MSRs */
+ if (fWhat & CPUMCTX_EXTRN_EFER)
+ {
+ WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
+ }
+ if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+ {
+ WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_KERNEL_GS_BASE);
+ }
+ if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+ {
+ WRITE_MSR(MSR_IA32_SYSENTER_CS, pVCpu->cpum.GstCtx.SysEnter.cs);
+ WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip);
+ WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
+ }
+ if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
+ {
+ WRITE_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
+ WRITE_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
+ WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
+ WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSCALL_MSRS);
+ }
+ if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
+ {
+ PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+
+ WRITE_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
+ }
+ if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
+ {
+ /* Last Branch Record. */
+ if (pVM->nem.s.fLbr)
+ {
+ PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+ uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+ uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
+ uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
+ uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+ Assert(cLbrStack <= 32);
+ for (uint32_t i = 0; i < cLbrStack; i++)
+ {
+ WRITE_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
+
+ /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+ if (idToIpMsrStart != 0)
+ WRITE_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
+ if (idInfoMsrStart != 0)
+ WRITE_MSR(idInfoMsrStart + i, pVmcsInfoShared->au64LbrInfoMsr[i]);
+ }
+
+ WRITE_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
+ if (pVM->nem.s.idLerFromIpMsr)
+ WRITE_MSR(pVM->nem.s.idLerFromIpMsr, pVmcsInfoShared->u64LerFromIpMsr);
+ if (pVM->nem.s.idLerToIpMsr)
+ WRITE_MSR(pVM->nem.s.idLerToIpMsr, pVmcsInfoShared->u64LerToIpMsr);
+ }
+
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
+ }
+
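+ /* Flush the cached vCPU state and drop stale translations so the exported state takes effect on the next run. */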
+ hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
+ hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
+
+ pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
+
+ /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~( HM_CHANGED_GUEST_HWVIRT
+ | HM_CHANGED_VMX_GUEST_AUTO_MSRS
+ | HM_CHANGED_VMX_GUEST_LAZY_MSRS
+ | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
+
+ STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateExport, x);
+ return VINF_SUCCESS;
+#undef WRITE_GREG
+#undef WRITE_VMCS_FIELD
+#undef WRITE_MSR
+}
+
+
+/**
+ * Common worker for both nemR3DarwinHandleExit() and nemR3DarwinHandleExitDebug().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param pVmxTransient The transient VMX structure.
+ */
+DECLINLINE(int) nemR3DarwinHandleExitCommon(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+ uint32_t uExitReason;
+ int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
+ AssertRC(rc);
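+ /* Reset the per-exit transient state and decode the raw exit reason. */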
+ pVmxTransient->fVmcsFieldsRead = 0;
+ pVmxTransient->fIsNestedGuest = false;
+ pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
+ pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
+
+ if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))
+ AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n",
+ pVCpu->idCpu, pVmxTransient->uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
+ VERR_NEM_IPE_0);
+
+ /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that
+ * when handling exits). */
+ /*
+ * Note! What is being fetched here must match the default value for the
+ * a_fDonePostExit parameter of vmxHCImportGuestState exactly!
+ */
+ rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
+ AssertRCReturn(rc, rc);
+
+ STAM_COUNTER_INC(&pVCpu->nem.s.pVmxStats->aStatExitReason[pVmxTransient->uExitReason & MASK_EXITREASON_STAT]);
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.pVmxStats->StatExitAll);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles an exit from hv_vcpu_run().
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param pVmxTransient The transient VMX structure.
+ */
+static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+ int rc = nemR3DarwinHandleExitCommon(pVM, pVCpu, pVmxTransient);
+ AssertRCReturn(rc, rc);
+
+#ifndef HMVMX_USE_FUNCTION_TABLE
+ return vmxHCHandleExit(pVCpu, pVmxTransient);
+#else
+ return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
+#endif
+}
+
+
+/**
+ * Handles an exit from hv_vcpu_run() - debug runloop variant.
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param pVmxTransient The transient VMX structure.
+ * @param pDbgState The debug state structure.
+ */
+static VBOXSTRICTRC nemR3DarwinHandleExitDebug(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
+{
+ int rc = nemR3DarwinHandleExitCommon(pVM, pVCpu, pVmxTransient);
+ AssertRCReturn(rc, rc);
+
+ return vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState);
+}
+
+
+/**
+ * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
+ *
+ * @returns VBox status code.
+ * @param fForced Whether the HMForced flag is set and we should
+ * fail if we cannot initialize.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3DarwinLoadHv(bool fForced, PRTERRINFO pErrInfo)
+{
+ RTLDRMOD hMod = NIL_RTLDRMOD;
+ static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";
+
+ int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
+ {
+ int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
+ if (RT_SUCCESS(rc2))
+ {
+ if (g_aImports[i].fOptional)
+ LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
+ g_aImports[i].pszName));
+ }
+ else
+ {
+ *g_aImports[i].ppfn = NULL;
+
+ LogRel(("NEM: %s: Failed to import Hypervisor!%s: %Rrc\n",
+ g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
+ g_aImports[i].pszName, rc2));
+ if (!g_aImports[i].fOptional)
+ {
+ if (RTErrInfoIsSet(pErrInfo))
+ RTErrInfoAddF(pErrInfo, rc2, ", Hypervisor!%s", g_aImports[i].pszName);
+ else
+ rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: Hypervisor!%s", g_aImports[i].pszName);
+ Assert(RT_FAILURE(rc));
+ }
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ Assert(!RTErrInfoIsSet(pErrInfo));
+ }
+
+ RTLdrClose(hMod);
+ }
+ else
+ {
+ RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framwork: %s: %Rrc", s_pszHvPath, rc);
+ rc = VERR_NEM_INIT_FAILED;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Read and initialize the global capabilities supported by this CPU.
+ *
+ * @returns VBox status code.
+ */
+static int nemR3DarwinCapsInit(void)
+{
+ RT_ZERO(g_HmMsrs);
+
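+ /* Query the VMX capabilities one after another; the first failure short-circuits the remaining reads. */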
+ hv_return_t hrc = hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &g_HmMsrs.u.vmx.PinCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &g_HmMsrs.u.vmx.ProcCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &g_HmMsrs.u.vmx.EntryCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_EXIT, &g_HmMsrs.u.vmx.ExitCtls.u);
+ if (hrc == HV_SUCCESS)
+ {
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_BASIC, &g_HmMsrs.u.vmx.u64Basic);
+ if (hrc == HV_SUCCESS)
+ {
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_MISC, &g_HmMsrs.u.vmx.u64Misc);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED0, &g_HmMsrs.u.vmx.u64Cr0Fixed0);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED1, &g_HmMsrs.u.vmx.u64Cr0Fixed1);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED0, &g_HmMsrs.u.vmx.u64Cr4Fixed0);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED1, &g_HmMsrs.u.vmx.u64Cr4Fixed1);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_VMCS_ENUM, &g_HmMsrs.u.vmx.u64VmcsEnum);
+ if ( hrc == HV_SUCCESS
+ && RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
+ {
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PINBASED, &g_HmMsrs.u.vmx.TruePinCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PROCBASED, &g_HmMsrs.u.vmx.TrueProcCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_ENTRY, &g_HmMsrs.u.vmx.TrueEntryCtls.u);
+ if (hrc == HV_SUCCESS)
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_EXIT, &g_HmMsrs.u.vmx.TrueExitCtls.u);
+ }
+ }
+ else
+ {
+ /* Likely running on anything older than 11.0 (BigSur), so provide some sensible defaults. */
+ g_HmMsrs.u.vmx.u64Cr0Fixed0 = 0x80000021;
+ g_HmMsrs.u.vmx.u64Cr0Fixed1 = 0xffffffff;
+ g_HmMsrs.u.vmx.u64Cr4Fixed0 = 0x2000;
+ g_HmMsrs.u.vmx.u64Cr4Fixed1 = 0x1767ff;
+ hrc = HV_SUCCESS;
+ }
+ }
+
+ if ( hrc == HV_SUCCESS
+ && g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+ {
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &g_HmMsrs.u.vmx.ProcCtls2.u);
+
+ if ( hrc == HV_SUCCESS
+ && g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
+ {
+ hrc = hv_vmx_read_capability(HV_VMX_CAP_EPT_VPID_CAP, &g_HmMsrs.u.vmx.u64EptVpidCaps);
+ if (hrc != HV_SUCCESS)
+ hrc = HV_SUCCESS; /* Probably just outdated OS. */
+ }
+
+ g_HmMsrs.u.vmx.u64VmFunc = 0; /* No way to read that on macOS. */
+ }
+
+ if (hrc == HV_SUCCESS)
+ {
+ /*
+ * Check for EFER swapping support.
+ */
+ g_fHmVmxSupportsVmcsEfer = true; //(g_HmMsrs.u.vmx.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
+ //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
+ //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR);
+ }
+
+ /*
+ * Get MSR_IA32_ARCH_CAPABILITIES and expand it into the host feature structure.
+ * This is only available with 11.0+ (BigSur) as the required API was introduced there.
+ * We could in theory initialize this when creating the EMTs using hv_vcpu_read_msr(),
+ * but the required vCPU handle is only created after CPUM has been initialized, which is too late.
+ * Given that the majority of users are on 11.0 and later, we don't care for now.
+ */
+ if ( hrc == HV_SUCCESS
+ && hv_vmx_get_msr_info)
+ {
+ g_CpumHostFeatures.s.fArchRdclNo = 0;
+ g_CpumHostFeatures.s.fArchIbrsAll = 0;
+ g_CpumHostFeatures.s.fArchRsbOverride = 0;
+ g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = 0;
+ g_CpumHostFeatures.s.fArchMdsNo = 0;
+ uint32_t const cStdRange = ASMCpuId_EAX(0);
+ if ( RTX86IsValidStdRange(cStdRange)
+ && cStdRange >= 7)
+ {
+ uint32_t const fStdFeaturesEdx = ASMCpuId_EDX(1);
+ uint32_t fStdExtFeaturesEdx;
+ ASMCpuIdExSlow(7, 0, 0, 0, NULL, NULL, NULL, &fStdExtFeaturesEdx);
+ if ( (fStdExtFeaturesEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
+ && (fStdFeaturesEdx & X86_CPUID_FEATURE_EDX_MSR))
+ {
+ uint64_t fArchVal;
+ hrc = hv_vmx_get_msr_info(HV_VMX_INFO_MSR_IA32_ARCH_CAPABILITIES, &fArchVal);
+ if (hrc == HV_SUCCESS)
+ {
+ g_CpumHostFeatures.s.fArchRdclNo = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
+ g_CpumHostFeatures.s.fArchIbrsAll = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
+ g_CpumHostFeatures.s.fArchRsbOverride = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
+ g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
+ g_CpumHostFeatures.s.fArchMdsNo = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
+ }
+ }
+ else
+ g_CpumHostFeatures.s.fArchCap = 0;
+ }
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Sets up the LBR MSR ranges based on the host CPU.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ *
+ * @sa hmR0VmxSetupLbrMsrRange
+ */
+static int nemR3DarwinSetupLbrMsrRange(PVMCC pVM)
+{
+ Assert(pVM->nem.s.fLbr);
+ uint32_t idLbrFromIpMsrFirst;
+ uint32_t idLbrFromIpMsrLast;
+ uint32_t idLbrToIpMsrFirst;
+ uint32_t idLbrToIpMsrLast;
+ uint32_t idLbrInfoMsrFirst;
+ uint32_t idLbrInfoMsrLast;
+ uint32_t idLbrTosMsr;
+ uint32_t idLbrSelectMsr;
+ uint32_t idLerFromIpMsr;
+ uint32_t idLerToIpMsr;
+
+ /*
+ * Determine the LBR MSRs supported for this host CPU family and model.
+ *
+ * See Intel spec. 17.4.8 "LBR Stack".
+ * See Intel "Model-Specific Registers" spec.
+ */
+ uint32_t const uFamilyModel = (g_CpumHostFeatures.s.uFamily << 8)
+ | g_CpumHostFeatures.s.uModel;
+ switch (uFamilyModel)
+ {
+ case 0x0f01: case 0x0f02:
+ idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
+ idLbrFromIpMsrLast = MSR_P4_LASTBRANCH_3;
+ idLbrToIpMsrFirst = 0x0;
+ idLbrToIpMsrLast = 0x0;
+ idLbrInfoMsrFirst = 0x0;
+ idLbrInfoMsrLast = 0x0;
+ idLbrTosMsr = MSR_P4_LASTBRANCH_TOS;
+ idLbrSelectMsr = 0x0;
+ idLerFromIpMsr = 0x0;
+ idLerToIpMsr = 0x0;
+ break;
+
+ case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
+ case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
+ case 0x066a: case 0x066c: case 0x067d: case 0x067e:
+ idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
+ idLbrFromIpMsrLast = MSR_LASTBRANCH_31_FROM_IP;
+ idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
+ idLbrToIpMsrLast = MSR_LASTBRANCH_31_TO_IP;
+ idLbrInfoMsrFirst = MSR_LASTBRANCH_0_INFO;
+ idLbrInfoMsrLast = MSR_LASTBRANCH_31_INFO;
+ idLbrTosMsr = MSR_LASTBRANCH_TOS;
+ idLbrSelectMsr = MSR_LASTBRANCH_SELECT;
+ idLerFromIpMsr = MSR_LER_FROM_IP;
+ idLerToIpMsr = MSR_LER_TO_IP;
+ break;
+
+ case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
+ case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
+ case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
+ case 0x062e: case 0x0625: case 0x062c: case 0x062f:
+ idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
+ idLbrFromIpMsrLast = MSR_LASTBRANCH_15_FROM_IP;
+ idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
+ idLbrToIpMsrLast = MSR_LASTBRANCH_15_TO_IP;
+ idLbrInfoMsrFirst = MSR_LASTBRANCH_0_INFO;
+ idLbrInfoMsrLast = MSR_LASTBRANCH_15_INFO;
+ idLbrTosMsr = MSR_LASTBRANCH_TOS;
+ idLbrSelectMsr = MSR_LASTBRANCH_SELECT;
+ idLerFromIpMsr = MSR_LER_FROM_IP;
+ idLerToIpMsr = MSR_LER_TO_IP;
+ break;
+
+ case 0x0617: case 0x061d: case 0x060f:
+ idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
+ idLbrFromIpMsrLast = MSR_CORE2_LASTBRANCH_3_FROM_IP;
+ idLbrToIpMsrFirst = MSR_CORE2_LASTBRANCH_0_TO_IP;
+ idLbrToIpMsrLast = MSR_CORE2_LASTBRANCH_3_TO_IP;
+ idLbrInfoMsrFirst = 0x0;
+ idLbrInfoMsrLast = 0x0;
+ idLbrTosMsr = MSR_CORE2_LASTBRANCH_TOS;
+ idLbrSelectMsr = 0x0;
+ idLerFromIpMsr = 0x0;
+ idLerToIpMsr = 0x0;
+ break;
+
+ /* Atom and related microarchitectures we don't care about:
+ case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
+ case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
+ case 0x0636: */
+ /* All other CPUs: */
+ default:
+ {
+ LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
+ VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+ }
+
+ /*
+ * Validate.
+ */
+ uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
+ PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
+ AssertCompile( RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr)
+ == RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrToIpMsr));
+ AssertCompile( RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr)
+ == RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrInfoMsr));
+ if (cLbrStack > RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr))
+ {
+ LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
+ VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+ NOREF(pVCpu0);
+
+ /*
+ * Update the LBR info. to the VM struct. for use later.
+ */
+ pVM->nem.s.idLbrTosMsr = idLbrTosMsr;
+ pVM->nem.s.idLbrSelectMsr = idLbrSelectMsr;
+
+ pVM->nem.s.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
+ pVM->nem.s.idLbrFromIpMsrLast = idLbrFromIpMsrLast;
+
+ pVM->nem.s.idLbrToIpMsrFirst = idLbrToIpMsrFirst;
+ pVM->nem.s.idLbrToIpMsrLast = idLbrToIpMsrLast;
+
+ pVM->nem.s.idLbrInfoMsrFirst = idLbrInfoMsrFirst;
+ pVM->nem.s.idLbrInfoMsrLast = idLbrInfoMsrLast;
+
+ pVM->nem.s.idLerFromIpMsr = idLerFromIpMsr;
+ pVM->nem.s.idLerToIpMsr = idLerToIpMsr;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up pin-based VM-execution controls in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static int nemR3DarwinVmxSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ //PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+ uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
+ uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
+
+ if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
+ fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
+
+#if 0 /** @todo Use preemption timer */
+ /* Enable the VMX-preemption timer. */
+ if (pVM->hmr0.s.vmx.fUsePreemptTimer)
+ {
+ Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
+ fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
+ }
+
+ /* Enable posted-interrupt processing. */
+ if (pVM->hm.s.fPostedIntrs)
+ {
+ Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
+ Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
+ fVal |= VMX_PIN_CTLS_POSTED_INT;
+ }
+#endif
+
+ if ((fVal & fZap) != fVal)
+ {
+ LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
+ g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
+ pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+
+ /* Commit it to the VMCS and update our cache. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
+ AssertRC(rc);
+ pVmcsInfo->u32PinCtls = fVal;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up secondary processor-based VM-execution controls in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static int nemR3DarwinVmxSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+ uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
+ uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
+
+ /* WBINVD causes a VM-exit. */
+ if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
+ fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
+
+ /* Enable the INVPCID instruction if we expose it to the guest and it is supported
+ by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
+ if ( pVM->cpum.ro.GuestFeatures.fInvpcid
+ && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
+ fVal |= VMX_PROC_CTLS2_INVPCID;
+
+#if 0 /** @todo */
+ /* Enable VPID. */
+ if (pVM->hmr0.s.vmx.fVpid)
+ fVal |= VMX_PROC_CTLS2_VPID;
+
+ if (pVM->hm.s.fVirtApicRegs)
+ {
+ /* Enable APIC-register virtualization. */
+ Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
+ fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
+
+ /* Enable virtual-interrupt delivery. */
+ Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
+ fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
+ }
+
+ /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
+ where the TPR shadow resides. */
+ /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
+ * done dynamically. */
+ if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
+ {
+ fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
+ hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
+ }
+#endif
+
+ /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
+ by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
+ if ( pVM->cpum.ro.GuestFeatures.fRdTscP
+ && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
+ fVal |= VMX_PROC_CTLS2_RDTSCP;
+
+ /* Enable Pause-Loop exiting. */
+ if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
+ && pVM->nem.s.cPleGapTicks
+ && pVM->nem.s.cPleWindowTicks)
+ {
+ fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
+
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->nem.s.cPleGapTicks); AssertRC(rc);
+ rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->nem.s.cPleWindowTicks); AssertRC(rc);
+ }
+
+ if ((fVal & fZap) != fVal)
+ {
+ LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
+ g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
+ pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+
+ /* Commit it to the VMCS and update our cache. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
+ AssertRC(rc);
+ pVmcsInfo->u32ProcCtls2 = fVal;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enables native access for the given MSR.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param idMsr The MSR to enable native access for.
+ */
+static int nemR3DarwinMsrSetNative(PVMCPUCC pVCpu, uint32_t idMsr)
+{
+ hv_return_t hrc = hv_vcpu_enable_native_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
+ if (hrc == HV_SUCCESS)
+ return VINF_SUCCESS;
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Sets the MSR to managed for the given vCPU allowing the guest to access it.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param idMsr The MSR to enable managed access for.
+ * @param fMsrPerm The MSR permissions flags.
+ */
+static int nemR3DarwinMsrSetManaged(PVMCPUCC pVCpu, uint32_t idMsr, hv_msr_flags_t fMsrPerm)
+{
+ Assert(hv_vcpu_enable_managed_msr);
+
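+ /* The MSR must be enabled as a managed MSR before its access permissions can be set. */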
+ hv_return_t hrc = hv_vcpu_enable_managed_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
+ if (hrc == HV_SUCCESS)
+ {
+ hrc = hv_vcpu_set_msr_access(pVCpu->nem.s.hVCpuId, idMsr, fMsrPerm);
+ if (hrc == HV_SUCCESS)
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Sets up the MSR permissions which don't change through the lifetime of the VM.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static int nemR3DarwinSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ RT_NOREF(pVmcsInfo);
+
+ /*
+ * The guest can access the following MSRs (read, write) without causing
+ * VM-exits; they are loaded/stored automatically using fields in the VMCS.
+ */
+ PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+ int rc;
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_CS); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_ESP); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_EIP); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_GS_BASE); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_FS_BASE); AssertRCReturn(rc, rc);
+
+ /*
+ * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
+ * associated with them. We never need to intercept access (writes need to be
+ * executed without causing a VM-exit, reads will #GP fault anyway).
+ *
+ * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
+ * read/write them. We swap the guest/host MSR value using the
+ * auto-load/store MSR area.
+ */
+ if (pVM->cpum.ro.GuestFeatures.fIbpb)
+ {
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_PRED_CMD);
+ AssertRCReturn(rc, rc);
+ }
+#if 0 /* Doesn't work. */
+ if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
+ {
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_FLUSH_CMD);
+ AssertRCReturn(rc, rc);
+ }
+#endif
+ if (pVM->cpum.ro.GuestFeatures.fIbrs)
+ {
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SPEC_CTRL);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Allow full read/write access for the following MSRs (mandatory for VT-x)
+ * required for 64-bit guests.
+ */
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_LSTAR); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K6_STAR); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_SF_MASK); AssertRCReturn(rc, rc);
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_KERNEL_GS_BASE); AssertRCReturn(rc, rc);
+
+ /* Required for enabling the RDTSCP instruction. */
+ rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_TSC_AUX); AssertRCReturn(rc, rc);
+
+ /* Last Branch Record. */
+ if (pVM->nem.s.fLbr)
+ {
+ uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+ uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
+ uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
+ uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+ Assert(cLbrStack <= 32);
+ for (uint32_t i = 0; i < cLbrStack; i++)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, idFromIpMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+
+ /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+ if (idToIpMsrStart != 0)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, idToIpMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (idInfoMsrStart != 0)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, idInfoMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrTosMsr, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+
+ if (pVM->nem.s.idLerFromIpMsr)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLerFromIpMsr, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (pVM->nem.s.idLerToIpMsr)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLerToIpMsr, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (pVM->nem.s.idLbrSelectMsr)
+ {
+ rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrSelectMsr, HV_MSR_READ | HV_MSR_WRITE);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up processor-based VM-execution controls in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static int nemR3DarwinVmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
+ uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
+
+ /** @todo The DRx handling is not quite correct, breaking debugging inside the guest with gdb;
+ * see @ticketref{21413} and @ticketref{21546}, so intercepting mov drX is disabled for now. See @bugref{10504}
+ * as well. This will break the hypervisor debugger, but only very few people use it and even fewer on macOS
+ * using the NEM backend.
+ */
+ fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
+// | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
+// | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
+ | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
+ | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
+ | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
+ | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
+
+#ifdef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
+ fVal |= VMX_PROC_CTLS_CR3_LOAD_EXIT
+ | VMX_PROC_CTLS_CR3_STORE_EXIT;
+#endif
+
+ /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
+ if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
+ || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
+ {
+ pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+
+ /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
+ if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+ fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
+
+ if ((fVal & fZap) != fVal)
+ {
+ LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
+ g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
+ pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
+ return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+ }
+
+ /* Commit it to the VMCS and update our cache. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
+ AssertRC(rc);
+ pVmcsInfo->u32ProcCtls = fVal;
+
+ /* Set up MSR permissions that don't change through the lifetime of the VM. */
+ rc = nemR3DarwinSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Set up secondary processor-based VM-execution controls
+ * (we assume the CPU always supports it, as we rely on unrestricted guest execution support).
+ */
+ Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
+ return nemR3DarwinVmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
+}
+
+
+/**
+ * Sets up miscellaneous (everything other than Pin, Processor and secondary
+ * Processor-based VM-execution) control fields in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static int nemR3DarwinVmxSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ int rc = VINF_SUCCESS;
+ //rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); TODO
+ if (RT_SUCCESS(rc))
+ {
+ uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
+ uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
+
+ rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
+ rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
+
+ pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
+ pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
+
+ if (pVCpu->CTX_SUFF(pVM)->nem.s.fLbr)
+ {
+ rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
+ AssertRC(rc);
+ }
+ return VINF_SUCCESS;
+ }
+ else
+ LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Sets up the initial exception bitmap in the VMCS based on static conditions.
+ *
+ * We shall setup those exception intercepts that don't change during the
+ * lifetime of the VM here. The rest are done dynamically while loading the
+ * guest state.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmcsInfo The VMCS info. object.
+ */
+static void nemR3DarwinVmxSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+ /*
+ * The following exceptions are always intercepted:
+ *
+ * #AC - To prevent the guest from hanging the CPU and for dealing with
+ * split-lock detecting host configs.
+ * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
+ * recursive #DBs can cause a CPU hang.
+ */
+ /** @todo The DRx handling is not quite correct, breaking debugging inside the guest with gdb;
+ * see @ticketref{21413} and @ticketref{21546}, so intercepting \#DB is disabled for now. See @bugref{10504}
+ * as well. This will break the hypervisor debugger, but only very few people use it and even fewer on macOS
+ * using the NEM backend.
+ */
+ uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
+ /*| RT_BIT(X86_XCPT_DB)*/;
+
+ /* Commit it to the VMCS. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
+ AssertRC(rc);
+
+ /* Update our cache of the exception bitmap. */
+ pVmcsInfo->u32XcptBitmap = uXcptBitmap;
+}
+
+
+/**
+ * Initialize the VMCS information field for the given vCPU.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ */
+static int nemR3DarwinInitVmcs(PVMCPU pVCpu)
+{
+ int rc = nemR3DarwinVmxSetupVmcsPinCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
+ if (RT_SUCCESS(rc))
+ {
+ rc = nemR3DarwinVmxSetupVmcsProcCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
+ if (RT_SUCCESS(rc))
+ {
+ rc = nemR3DarwinVmxSetupVmcsMiscCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
+ if (RT_SUCCESS(rc))
+ {
+ rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &pVCpu->nem.s.VmcsInfo.u32EntryCtls);
+ if (RT_SUCCESS(rc))
+ {
+ rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_EXIT, &pVCpu->nem.s.VmcsInfo.u32ExitCtls);
+ if (RT_SUCCESS(rc))
+ {
+ nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo);
+ return VINF_SUCCESS;
+ }
+ LogRelFunc(("Failed to read the exit controls. rc=%Rrc\n", rc));
+ }
+ else
+ LogRelFunc(("Failed to read the entry controls. rc=%Rrc\n", rc));
+ }
+ else
+ LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
+ }
+ else
+ LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
+ }
+ else
+ LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
+
+ return rc;
+}
+
+
+/**
+ * Registers statistics for the given vCPU.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idCpu The CPU ID.
+ * @param pNemCpu The NEM CPU structure.
+ */
+static int nemR3DarwinStatisticsRegister(PVM pVM, VMCPUID idCpu, PNEMCPU pNemCpu)
+{
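+ /* Convenience wrappers around STAMR3RegisterF(); idCpu is substituted into the name format string. */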
+#define NEM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
+ int rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
+ AssertRC(rc); \
+ } while (0)
+#define NEM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
+ NEM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
+#define NEM_REG_COUNTER(a, b, desc) NEM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
+
+ PVMXSTATISTICS const pVmxStats = pNemCpu->pVmxStats;
+
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR0Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR2Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR3Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR4Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR8Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR0Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR2Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR3Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR4Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
+ NEM_REG_COUNTER(&pVmxStats->StatExitCR8Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
+
+ NEM_REG_COUNTER(&pVmxStats->StatExitAll, "/NEM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
+
+ NEM_REG_COUNTER(&pVmxStats->StatImportGuestStateFallback, "/NEM/CPU%u/ImportGuestStateFallback", "Times vmxHCImportGuestState took the fallback code path.");
+ NEM_REG_COUNTER(&pVmxStats->StatReadToTransientFallback, "/NEM/CPU%u/ReadToTransientFallback", "Times vmxHCReadToTransient took the fallback code path.");
+
+#ifdef VBOX_WITH_STATISTICS
+ NEM_REG_PROFILE(&pNemCpu->StatProfGstStateImport, "/NEM/CPU%u/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
+ NEM_REG_PROFILE(&pNemCpu->StatProfGstStateExport, "/NEM/CPU%u/ExportGuestState", "Profiling of exporting guest state from hardware after VM-exit.");
+
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ const char *pszExitName = HMGetVmxExitName(j);
+ if (pszExitName)
+ {
+ int rc = STAMR3RegisterF(pVM, &pVmxStats->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, pszExitName, "/NEM/CPU%u/Exit/Reason/%02x", idCpu, j);
+ AssertRCReturn(rc, rc);
+ }
+ }
+#endif
+
+ return VINF_SUCCESS;
+
+#undef NEM_REG_COUNTER
+#undef NEM_REG_PROFILE
+#undef NEM_REG_STAT
+}
+
+
+/**
+ * Displays the HM Last-Branch-Record info. for the guest.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) nemR3DarwinInfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ Assert(pVM->nem.s.fLbr);
+
+ PCVMXVMCSINFOSHARED pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+ uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+
+ /** @todo r=ramshankar: The index technically varies depending on the CPU, but
+ * 0xf should cover everything we support thus far. Fix if necessary
+ * later. */
+ uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
+ if (idxTopOfStack >= cLbrStack)
+ {
+ pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
+ idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
+ return;
+ }
+
+ /*
+ * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
+ */
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
+ if (pVM->nem.s.idLerFromIpMsr)
+ pHlp->pfnPrintf(pHlp, "LER: From IP=%#016RX64 - To IP=%#016RX64\n",
+ pVmcsInfoShared->u64LerFromIpMsr, pVmcsInfoShared->u64LerToIpMsr);
+ uint32_t idxCurrent = idxTopOfStack;
+ Assert(idxTopOfStack < cLbrStack);
+ Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
+ Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
+ for (;;)
+ {
+ if (pVM->nem.s.idLbrToIpMsrFirst)
+ pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64 (Info: %#016RX64)\n", idxCurrent,
+ pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent],
+ pVmcsInfoShared->au64LbrToIpMsr[idxCurrent],
+ pVmcsInfoShared->au64LbrInfoMsr[idxCurrent]);
+ else
+ pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
+
+ idxCurrent = (idxCurrent - 1) % cLbrStack;
+ if (idxCurrent == idxTopOfStack)
+ break;
+ }
+}
+
+
+/**
+ * Try initialize the native API.
+ *
+ * This may only do part of the job; more can be done in
+ * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFallback Whether we're in fallback mode or use-NEM mode. In
+ * the latter we'll fail if we cannot initialize.
+ * @param fForced Whether the HMForced flag is set and we should
+ * fail if we cannot initialize.
+ */
+int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
+{
+ AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
+
+ /*
+ * Some state init.
+ */
+ PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
+
+ /** @cfgm{/NEM/VmxPleGap, uint32_t, 0}
+ * The pause-filter exiting gap in TSC ticks. When the number of ticks between
+ * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
+ * latest PAUSE instruction to be start of a new PAUSE loop.
+ */
+ int rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleGap", &pVM->nem.s.cPleGapTicks, 0);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/NEM/VmxPleWindow, uint32_t, 0}
+ * The pause-filter exiting window in TSC ticks. When the number of ticks
+ * between the current PAUSE instruction and first PAUSE of a loop exceeds
+ * VmxPleWindow, a VM-exit is triggered.
+ *
+ * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
+ */
+ rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleWindow", &pVM->nem.s.cPleWindowTicks, 0);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/NEM/VmxLbr, bool, false}
+ * Whether to enable LBR for the guest. This is disabled by default as it's only
+ * useful while debugging and enabling it causes a noticeable performance hit. */
+ rc = CFGMR3QueryBoolDef(pCfgNem, "VmxLbr", &pVM->nem.s.fLbr, false);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Error state.
+ * The error message will be non-empty on failure and 'rc' will be set too.
+ */
+ RTERRINFOSTATIC ErrInfo;
+ PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
+ rc = nemR3DarwinLoadHv(fForced, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ if ( !hv_vcpu_enable_managed_msr
+ && pVM->nem.s.fLbr)
+ {
+ LogRel(("NEM: LBR recording is disabled because the Hypervisor API misses hv_vcpu_enable_managed_msr/hv_vcpu_set_msr_access functionality\n"));
+ pVM->nem.s.fLbr = false;
+ }
+
+ /*
+ * While hv_vcpu_run_until() is available starting with Catalina (10.15), it sometimes returns
+ * an error there for no obvious reason, there is no indication as to why this happens,
+ * and Apple doesn't document anything. Starting with Big Sur (11.0) it appears to work correctly,
+ * so pretend that hv_vcpu_run_until() doesn't exist on Catalina. Whether we run on Catalina can
+ * be determined by checking for another method which was introduced with Big Sur.
+ */
+ if (!hv_vmx_get_msr_info) /* Not available means this runs on < 11.0 */
+ hv_vcpu_run_until = NULL;
+
+ if (hv_vcpu_run_until)
+ {
+ struct mach_timebase_info TimeInfo;
+
+ if (mach_timebase_info(&TimeInfo) == KERN_SUCCESS)
+ {
+ pVM->nem.s.cMachTimePerNs = RT_MIN(1, (double)TimeInfo.denom / (double)TimeInfo.numer);
+ LogRel(("NEM: cMachTimePerNs=%llu (TimeInfo.numer=%u TimeInfo.denom=%u)\n",
+ pVM->nem.s.cMachTimePerNs, TimeInfo.numer, TimeInfo.denom));
+ }
+ else
+ hv_vcpu_run_until = NULL; /* To avoid running forever (TM asserts when the guest runs for longer than 4 seconds). */
+ }
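+ /* Note: mach_absolute_time() units convert to nanoseconds as ns = ticks * numer / denom,
+ * so cMachTimePerNs caches the ticks-per-nanosecond ratio (denom / numer) used below to
+ * convert the nanosecond run loop deadline into mach time units. */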
+
+ hv_return_t hrc = hv_vm_create(HV_VM_DEFAULT);
+ if (hrc == HV_SUCCESS)
+ {
+ if (hv_vm_space_create)
+ {
+ hrc = hv_vm_space_create(&pVM->nem.s.uVmAsid);
+ if (hrc == HV_SUCCESS)
+ {
+ LogRel(("NEM: Successfully created ASID: %u\n", pVM->nem.s.uVmAsid));
+ pVM->nem.s.fCreatedAsid = true;
+ }
+ else
+ LogRel(("NEM: Failed to create ASID for VM (hrc=%#x), continuing...\n", pVM->nem.s.uVmAsid));
+ }
+ pVM->nem.s.fCreatedVm = true;
+
+ /* Register release statistics */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
+ PVMXSTATISTICS pVmxStats = (PVMXSTATISTICS)RTMemAllocZ(sizeof(*pVmxStats));
+ if (RT_LIKELY(pVmxStats))
+ {
+ pNemCpu->pVmxStats = pVmxStats;
+ rc = nemR3DarwinStatisticsRegister(pVM, idCpu, pNemCpu);
+ AssertRC(rc);
+ }
+ else
+ {
+ rc = VERR_NO_MEMORY;
+ break;
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
+ Log(("NEM: Marked active!\n"));
+ PGMR3EnableNemMode(pVM);
+ }
+ }
+ else
+ rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "hv_vm_create() failed: %#x", hrc);
+ }
+
+ /*
+ * We only fail if in forced mode, otherwise just log the complaint and return.
+ */
+ Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
+ if ( (fForced || !fFallback)
+ && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
+ return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
+
+ if (pVM->nem.s.fLbr)
+ {
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the NEM LBR info.", nemR3DarwinInfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+ }
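+ /* The handler registered above can be invoked from the VM debugger with "info lbr", or
+ * programmatically along these lines (illustrative sketch, not part of the original change):
+ *
+ * DBGFR3Info(pVM->pUVM, "lbr", NULL, NULL); // default arguments and output helper
+ */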
+
+ if (RTErrInfoIsSet(pErrInfo))
+ LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
+ *
+ * @returns VBox status code
+ * @param pVM The VM handle.
+ * @param pVCpu The vCPU handle.
+ * @param idCpu ID of the CPU to create.
+ */
+static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
+{
+ hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpuId, HV_VCPU_DEFAULT);
+ if (hrc != HV_SUCCESS)
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
+
+ if (idCpu == 0)
+ {
+ /* First call initializes the MSR structure holding the capabilities of the host CPU. */
+ int rc = nemR3DarwinCapsInit();
+ AssertRCReturn(rc, rc);
+
+ if (hv_vmx_vcpu_get_cap_write_vmcs)
+ {
+ /* Log the VMCS field write capabilities. */
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFieldsCap); i++)
+ {
+ uint64_t u64Allowed0 = 0;
+ uint64_t u64Allowed1 = 0;
+
+ hrc = hv_vmx_vcpu_get_cap_write_vmcs(pVCpu->nem.s.hVCpuId, g_aVmcsFieldsCap[i].u32VmcsFieldId,
+ &u64Allowed0, &u64Allowed1);
+ if (hrc == HV_SUCCESS)
+ {
+ if (g_aVmcsFieldsCap[i].f64Bit)
+ LogRel(("NEM: %s = (allowed_0=%#016RX64 allowed_1=%#016RX64)\n",
+ g_aVmcsFieldsCap[i].pszVmcsField, u64Allowed0, u64Allowed1));
+ else
+ LogRel(("NEM: %s = (allowed_0=%#08RX32 allowed_1=%#08RX32)\n",
+ g_aVmcsFieldsCap[i].pszVmcsField, (uint32_t)u64Allowed0, (uint32_t)u64Allowed1));
+
+ uint32_t cBits = g_aVmcsFieldsCap[i].f64Bit ? 64 : 32;
+ for (uint32_t iBit = 0; iBit < cBits; iBit++)
+ {
+ bool fAllowed0 = RT_BOOL(u64Allowed0 & RT_BIT_64(iBit));
+ bool fAllowed1 = RT_BOOL(u64Allowed1 & RT_BIT_64(iBit));
+
+ if (!fAllowed0 && !fAllowed1)
+ LogRel(("NEM: Bit %02u = Must NOT be set\n", iBit));
+ else if (!fAllowed0 && fAllowed1)
+ LogRel(("NEM: Bit %02u = Can be set or not be set\n", iBit));
+ else if (fAllowed0 && !fAllowed1)
+ LogRel(("NEM: Bit %02u = UNDEFINED (AppleHV error)!\n", iBit));
+ else if (fAllowed0 && fAllowed1)
+ LogRel(("NEM: Bit %02u = MUST be set\n", iBit));
+ else
+ AssertFailed();
+ }
+ }
+ else
+ LogRel(("NEM: %s = failed to query (hrc=%d)\n", g_aVmcsFieldsCap[i].pszVmcsField, hrc));
+ }
+ }
+ }
+
+ int rc = nemR3DarwinInitVmcs(pVCpu);
+ AssertRCReturn(rc, rc);
+
+ if (pVM->nem.s.fCreatedAsid)
+ {
+ hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
+ AssertReturn(hrc == HV_SUCCESS, VERR_NEM_VM_CREATE_FAILED);
+ }
+
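+ /* Mark everything as changed so the first VM-entry exports the complete guest state
+ * from CPUM into the VMCS. */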
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
+ *
+ * @returns VBox status code
+ * @param pVCpu The vCPU handle.
+ */
+static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
+{
+ hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
+ Assert(hrc == HV_SUCCESS);
+
+ hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
+ Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker to setup the TPR shadowing feature if available on the CPU and the VM has an APIC enabled.
+ *
+ * @returns VBox status code
+ * @param pVM The VM handle.
+ * @param pVCpu The vCPU handle.
+ */
+static DECLCALLBACK(int) nemR3DarwinNativeInitTprShadowing(PVM pVM, PVMCPU pVCpu)
+{
+ PVMXVMCSINFO pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
+ uint32_t fVal = pVmcsInfo->u32ProcCtls;
+
+ /* Use TPR shadowing if supported by the CPU. */
+ if ( PDMHasApic(pVM)
+ && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
+ {
+ fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
+ /* CR8 writes cause a VM-exit based on TPR threshold. */
+ Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
+ Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
+ }
+ else
+ {
+ fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
+ | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
+ }
+
+ /* Commit it to the VMCS and update our cache. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
+ AssertRC(rc);
+ pVmcsInfo->u32ProcCtls = fVal;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This is called after CPUMR3Init is done.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ */
+int nemR3NativeInitAfterCPUM(PVM pVM)
+{
+ /*
+ * Validate sanity.
+ */
+ AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
+ AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
+
+ if (pVM->nem.s.fLbr)
+ {
+ int rc = nemR3DarwinSetupLbrMsrRange(pVM);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Setup the EMTs.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
+ if (RT_FAILURE(rc))
+ {
+ /* Rollback: destroy the vCPUs created so far, each on the EMT that created it. */
+ while (idCpu--)
+ VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVM->apCpusR3[idCpu]);
+
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
+ }
+ }
+
+ pVM->nem.s.fCreatedEmts = true;
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ {
+ /* Now that PDM is initialized the APIC state is known in order to enable the TPR shadowing feature on all EMTs. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitTprShadowing, 2, pVM, pVCpu);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Setting up TPR shadowing failed: %Rrc", rc);
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeTerm(PVM pVM)
+{
+ /*
+ * Delete the VM.
+ */
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /*
+ * Need to do this or hv_vm_space_destroy() fails later on (on 10.15 at least). This could have been
+ * documented in the API reference so one wouldn't have to decompile the kext to find it out, but we
+ * are talking about Apple here unfortunately; API documentation is not their strong suit...
+ * It would of course have been even better to just automatically drop the address space reference
+ * when the vCPU gets destroyed.
+ */
+ hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
+ Assert(hrc == HV_SUCCESS);
+
+ /*
+ * Apple's documentation states that the vCPU should be destroyed
+ * on the thread running the vCPU but as all the other EMTs are gone
+ * at this point, destroying the VM would hang.
+ *
+ * We seem to be in luck here though as destroying apparently works
+ * from EMT(0) as well.
+ */
+ hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
+ Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
+
+ if (pVCpu->nem.s.pVmxStats)
+ {
+ RTMemFree(pVCpu->nem.s.pVmxStats);
+ pVCpu->nem.s.pVmxStats = NULL;
+ }
+ }
+
+ pVM->nem.s.fCreatedEmts = false;
+
+ if (pVM->nem.s.fCreatedAsid)
+ {
+ hv_return_t hrc = hv_vm_space_destroy(pVM->nem.s.uVmAsid);
+ Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
+ pVM->nem.s.fCreatedAsid = false;
+ }
+
+ if (pVM->nem.s.fCreatedVm)
+ {
+ hv_return_t hrc = hv_vm_destroy();
+ if (hrc != HV_SUCCESS)
+ LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
+
+ pVM->nem.s.fCreatedVm = false;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * VM reset notification.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void nemR3NativeReset(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Reset CPU due to INIT IPI or hot (un)plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the CPU being
+ * reset.
+ * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
+ */
+void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
+{
+ RT_NOREF(fInitIpi);
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+}
+
+
+/**
+ * Dumps the VMCS in response to a failed hv_vcpu_run{_until}() call.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static void nemR3DarwinVmcsDump(PVMCPU pVCpu)
+{
+ static const struct
+ {
+ uint32_t u32VmcsFieldId; /**< The VMCS field identifier. */
+ const char *pszVmcsField; /**< The VMCS field name. */
+ bool f64Bit;
+ } s_aVmcsFieldsDump[] =
+ {
+ #define NEM_DARWIN_VMCSNW_FIELD_DUMP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, true }
+ #define NEM_DARWIN_VMCS64_FIELD_DUMP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, true }
+ #define NEM_DARWIN_VMCS32_FIELD_DUMP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, false }
+ #define NEM_DARWIN_VMCS16_FIELD_DUMP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, false }
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_VPID),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_EPTP_INDEX),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_ES_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_CS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_SS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_DS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_FS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_GS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_LDTR_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_TR_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_INTR_STATUS),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_GUEST_PML_INDEX),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_ES_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_CS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_SS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_DS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_FS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_GS_SEL),
+ NEM_DARWIN_VMCS16_FIELD_DUMP(VMX_VMCS16_HOST_TR_SEL),
+
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_MSR_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_MSR_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_TSC_OFFSET_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_TSC_OFFSET_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EPTP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EPTP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EPTP_LIST_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_EPTP_LIST_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_SPPTP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_SPPTP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_PROC_EXEC3_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_PROC_EXEC3_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_DEBUGCTL_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_DEBUGCTL_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PAT_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PAT_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_EFER_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_EFER_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE0_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE0_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE1_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE1_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE2_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE2_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE3_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PDPTE3_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_BNDCFGS_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_BNDCFGS_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_RTIT_CTL_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_RTIT_CTL_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PKRS_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_GUEST_PKRS_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PAT_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PAT_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_EFER_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_EFER_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PKRS_FULL),
+ NEM_DARWIN_VMCS64_FIELD_DUMP(VMX_VMCS64_HOST_PKRS_HIGH),
+
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PIN_EXEC),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PROC_EXEC),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_EXCEPTION_BITMAP),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_CR3_TARGET_COUNT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_EXIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_ENTRY),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_TPR_THRESHOLD),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PROC_EXEC2),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PLE_GAP),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_CTRL_PLE_WINDOW),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_VM_INSTR_ERROR),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_EXIT_REASON),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_IDT_VECTORING_INFO),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_EXIT_INSTR_LENGTH),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_RO_EXIT_INSTR_INFO),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_ES_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_CS_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_SS_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_DS_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_FS_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_GS_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_LDTR_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_TR_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_GDTR_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_IDTR_LIMIT),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_INT_STATE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_ACTIVITY_STATE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_SMBASE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_GUEST_SYSENTER_CS),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_PREEMPT_TIMER_VALUE),
+ NEM_DARWIN_VMCS32_FIELD_DUMP(VMX_VMCS32_HOST_SYSENTER_CS),
+
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR0_MASK),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR4_MASK),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR0_READ_SHADOW),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR4_READ_SHADOW),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR3_TARGET_VAL0),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR3_TARGET_VAL1),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR3_TARGET_VAL2),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_CTRL_CR3_TARGET_VAL3),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_EXIT_QUALIFICATION),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_IO_RCX),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_IO_RSI),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_IO_RDI),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_IO_RIP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_RO_GUEST_LINEAR_ADDR),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_CR0),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_CR3),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_CR4),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_ES_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_CS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_SS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_DS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_FS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_GS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_LDTR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_TR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_GDTR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_IDTR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_DR7),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_RSP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_RIP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_RFLAGS),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_SYSENTER_ESP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_SYSENTER_EIP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_S_CET),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_SSP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_CR0),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_CR3),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_CR4),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_FS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_GS_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_TR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_GDTR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_IDTR_BASE),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_SYSENTER_ESP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_SYSENTER_EIP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_RSP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_RIP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_S_CET),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_SSP),
+ NEM_DARWIN_VMCSNW_FIELD_DUMP(VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR)
+ #undef NEM_DARWIN_VMCSNW_FIELD_DUMP
+ #undef NEM_DARWIN_VMCS64_FIELD_DUMP
+ #undef NEM_DARWIN_VMCS32_FIELD_DUMP
+ #undef NEM_DARWIN_VMCS16_FIELD_DUMP
+ };
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(s_aVmcsFieldsDump); i++)
+ {
+ if (s_aVmcsFieldsDump[i].f64Bit)
+ {
+ uint64_t u64Val;
+ int rc = nemR3DarwinReadVmcs64(pVCpu, s_aVmcsFieldsDump[i].u32VmcsFieldId, &u64Val);
+ if (RT_SUCCESS(rc))
+ LogRel(("NEM/VMCS: %040s: 0x%016RX64\n", s_aVmcsFieldsDump[i].pszVmcsField, u64Val));
+ else
+ LogRel(("NEM/VMCS: %040s: rc=%Rrc\n", s_aVmcsFieldsDump[i].pszVmcsField, rc));
+ }
+ else
+ {
+ uint32_t u32Val;
+ int rc = nemR3DarwinReadVmcs32(pVCpu, s_aVmcsFieldsDump[i].u32VmcsFieldId, &u32Val);
+ if (RT_SUCCESS(rc))
+ LogRel(("NEM/VMCS: %040s: 0x%08RX32\n", s_aVmcsFieldsDump[i].pszVmcsField, u32Val));
+ else
+ LogRel(("NEM/VMCS: %040s: rc=%Rrc\n", s_aVmcsFieldsDump[i].pszVmcsField, rc));
+ }
+ }
+}
+
+
+/**
+ * Runs the guest once until an exit occurs.
+ *
+ * @returns HV status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmxTransient The transient VMX execution structure.
+ */
+static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+ TMNotifyStartOfExecution(pVM, pVCpu);
+
+ Assert(!pVCpu->nem.s.fCtxChanged);
+ hv_return_t hrc;
+ if (hv_vcpu_run_until) /** @todo Configure the deadline dynamically based on when the next timer triggers. */
+ hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, mach_absolute_time() + 2 * RT_NS_1SEC_64 * pVM->nem.s.cMachTimePerNs);
+ else
+ hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId);
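+ /* Note: the deadline above is roughly "now + 2s" converted to mach time units; TM asserts
+ * when the guest runs uninterrupted for longer than ~4 seconds, hence the cap. */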
+
+ TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
+
+ if (hrc != HV_SUCCESS)
+ nemR3DarwinVmcsDump(pVCpu);
+
+ /*
+ * Sync the TPR shadow with our APIC state.
+ */
+ if ( !pVmxTransient->fIsNestedGuest
+ && (pVCpu->nem.s.VmcsInfo.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
+ {
+ uint64_t u64Tpr;
+ hv_return_t hrc2 = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, HV_X86_TPR, &u64Tpr);
+ Assert(hrc2 == HV_SUCCESS); RT_NOREF(hrc2);
+
+ if (pVmxTransient->u8GuestTpr != (uint8_t)u64Tpr)
+ {
+ int rc = APICSetTpr(pVCpu, (uint8_t)u64Tpr);
+ AssertRC(rc);
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
+ }
+ }
+
+ return hrc;
+}
+
+
+/**
+ * Prepares the VM to run the guest.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pVmxTransient The VMX transient state.
+ * @param fSingleStepping Flag whether we run in single stepping mode.
+ */
+static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fSingleStepping)
+{
+ /*
+ * Check and process force flag actions, some of which might require us to go back to ring-3.
+ */
+ VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false /*fIsNestedGuest*/, fSingleStepping);
+ if (rcStrict == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ return rcStrict;
+
+ /*
+ * Do not execute in HV if the A20 gate isn't enabled.
+ */
+ if (PGMPhysIsA20Enabled(pVCpu))
+ { /* likely */ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
+ return VINF_EM_RESCHEDULE_REM;
+ }
+
+ /*
+ * Evaluate events to be injected into the guest.
+ *
+ * Events in TRPM can be injected without inspecting the guest state.
+ * If any new events (interrupts/NMI) are pending currently, we try to set up the
+ * guest to cause a VM-exit the next time they are ready to receive the event.
+ */
+ if (TRPMHasTrap(pVCpu))
+ vmxHCTrpmTrapToPendingEvent(pVCpu);
+
+ uint32_t fIntrState;
+ rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
+
+ /*
+ * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
+ * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
+ * also result in triple-faulting the VM.
+ *
+ * With nested-guests, the above does not apply since unrestricted guest execution is a
+ * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
+ */
+ rcStrict = vmxHCInjectPendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, fIntrState, fSingleStepping);
+ if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+ { /* likely */ }
+ else
+ return rcStrict;
+
+ int rc = nemR3DarwinExportGuestState(pVM, pVCpu, pVmxTransient);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("Running vCPU\n"));
+ pVCpu->nem.s.Event.fPending = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * The normal runloop (no debugging features enabled).
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * The run loop.
+ *
+ * The current approach to state updating is to use the sledgehammer and
+ * sync everything every time. This will be optimized later.
+ */
+ VMXTRANSIENT VmxTransient;
+ RT_ZERO(VmxTransient);
+ VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
+
+ /*
+ * Poll timers and run for a bit.
+ */
+ /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
+ * the whole polling job when timers have changed... */
+ uint64_t offDeltaIgnored;
+ uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+ for (unsigned iLoop = 0;; iLoop++)
+ {
+ rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, &VmxTransient, false /* fSingleStepping */);
+ if (rcStrict != VINF_SUCCESS)
+ break;
+
+ hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient);
+ if (hrc == HV_SUCCESS)
+ {
+ /*
+ * Deal with the message.
+ */
+ rcStrict = nemR3DarwinHandleExit(pVM, pVCpu, &VmxTransient);
+ if (rcStrict == VINF_SUCCESS)
+ { /* hopefully likely */ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
+ break;
+ }
+ }
+ else
+ {
+ AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x %u\n",
+ pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
+ VERR_NEM_IPE_0);
+ }
+ } /* the run loop */
+
+ return rcStrict;
+}
+
+
+/**
+ * Checks if any expensive dtrace probes are enabled and we should go to the
+ * debug loop.
+ *
+ * @returns true if we should use debug loop, false if not.
+ */
+static bool nemR3DarwinAnyExpensiveProbesEnabled(void)
+{
+ /** @todo Check performance penalty when checking these over and over */
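+ /* The bitwise OR chains below (rather than ||) evaluate all probe flags without
+ * short-circuit branching, which is presumably cheaper than a long run of conditional
+ * jumps when every probe is disabled. */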
+ return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED() /* expensive too due to context */
+ | VBOXVMM_XCPT_DE_ENABLED()
+ | VBOXVMM_XCPT_DB_ENABLED()
+ | VBOXVMM_XCPT_BP_ENABLED()
+ | VBOXVMM_XCPT_OF_ENABLED()
+ | VBOXVMM_XCPT_BR_ENABLED()
+ | VBOXVMM_XCPT_UD_ENABLED()
+ | VBOXVMM_XCPT_NM_ENABLED()
+ | VBOXVMM_XCPT_DF_ENABLED()
+ | VBOXVMM_XCPT_TS_ENABLED()
+ | VBOXVMM_XCPT_NP_ENABLED()
+ | VBOXVMM_XCPT_SS_ENABLED()
+ | VBOXVMM_XCPT_GP_ENABLED()
+ | VBOXVMM_XCPT_PF_ENABLED()
+ | VBOXVMM_XCPT_MF_ENABLED()
+ | VBOXVMM_XCPT_AC_ENABLED()
+ | VBOXVMM_XCPT_XF_ENABLED()
+ | VBOXVMM_XCPT_VE_ENABLED()
+ | VBOXVMM_XCPT_SX_ENABLED()
+ | VBOXVMM_INT_SOFTWARE_ENABLED()
+ /* not available in R3 | VBOXVMM_INT_HARDWARE_ENABLED()*/
+ ) != 0
+ || ( VBOXVMM_INSTR_HALT_ENABLED()
+ | VBOXVMM_INSTR_MWAIT_ENABLED()
+ | VBOXVMM_INSTR_MONITOR_ENABLED()
+ | VBOXVMM_INSTR_CPUID_ENABLED()
+ | VBOXVMM_INSTR_INVD_ENABLED()
+ | VBOXVMM_INSTR_WBINVD_ENABLED()
+ | VBOXVMM_INSTR_INVLPG_ENABLED()
+ | VBOXVMM_INSTR_RDTSC_ENABLED()
+ | VBOXVMM_INSTR_RDTSCP_ENABLED()
+ | VBOXVMM_INSTR_RDPMC_ENABLED()
+ | VBOXVMM_INSTR_RDMSR_ENABLED()
+ | VBOXVMM_INSTR_WRMSR_ENABLED()
+ | VBOXVMM_INSTR_CRX_READ_ENABLED()
+ | VBOXVMM_INSTR_CRX_WRITE_ENABLED()
+ | VBOXVMM_INSTR_DRX_READ_ENABLED()
+ | VBOXVMM_INSTR_DRX_WRITE_ENABLED()
+ | VBOXVMM_INSTR_PAUSE_ENABLED()
+ | VBOXVMM_INSTR_XSETBV_ENABLED()
+ | VBOXVMM_INSTR_SIDT_ENABLED()
+ | VBOXVMM_INSTR_LIDT_ENABLED()
+ | VBOXVMM_INSTR_SGDT_ENABLED()
+ | VBOXVMM_INSTR_LGDT_ENABLED()
+ | VBOXVMM_INSTR_SLDT_ENABLED()
+ | VBOXVMM_INSTR_LLDT_ENABLED()
+ | VBOXVMM_INSTR_STR_ENABLED()
+ | VBOXVMM_INSTR_LTR_ENABLED()
+ | VBOXVMM_INSTR_GETSEC_ENABLED()
+ | VBOXVMM_INSTR_RSM_ENABLED()
+ | VBOXVMM_INSTR_RDRAND_ENABLED()
+ | VBOXVMM_INSTR_RDSEED_ENABLED()
+ | VBOXVMM_INSTR_XSAVES_ENABLED()
+ | VBOXVMM_INSTR_XRSTORS_ENABLED()
+ | VBOXVMM_INSTR_VMM_CALL_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMREAD_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMXON_ENABLED()
+ | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED()
+ | VBOXVMM_INSTR_VMX_INVEPT_ENABLED()
+ | VBOXVMM_INSTR_VMX_INVVPID_ENABLED()
+ | VBOXVMM_INSTR_VMX_INVPCID_ENABLED()
+ ) != 0
+ || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED()
+ | VBOXVMM_EXIT_HALT_ENABLED()
+ | VBOXVMM_EXIT_MWAIT_ENABLED()
+ | VBOXVMM_EXIT_MONITOR_ENABLED()
+ | VBOXVMM_EXIT_CPUID_ENABLED()
+ | VBOXVMM_EXIT_INVD_ENABLED()
+ | VBOXVMM_EXIT_WBINVD_ENABLED()
+ | VBOXVMM_EXIT_INVLPG_ENABLED()
+ | VBOXVMM_EXIT_RDTSC_ENABLED()
+ | VBOXVMM_EXIT_RDTSCP_ENABLED()
+ | VBOXVMM_EXIT_RDPMC_ENABLED()
+ | VBOXVMM_EXIT_RDMSR_ENABLED()
+ | VBOXVMM_EXIT_WRMSR_ENABLED()
+ | VBOXVMM_EXIT_CRX_READ_ENABLED()
+ | VBOXVMM_EXIT_CRX_WRITE_ENABLED()
+ | VBOXVMM_EXIT_DRX_READ_ENABLED()
+ | VBOXVMM_EXIT_DRX_WRITE_ENABLED()
+ | VBOXVMM_EXIT_PAUSE_ENABLED()
+ | VBOXVMM_EXIT_XSETBV_ENABLED()
+ | VBOXVMM_EXIT_SIDT_ENABLED()
+ | VBOXVMM_EXIT_LIDT_ENABLED()
+ | VBOXVMM_EXIT_SGDT_ENABLED()
+ | VBOXVMM_EXIT_LGDT_ENABLED()
+ | VBOXVMM_EXIT_SLDT_ENABLED()
+ | VBOXVMM_EXIT_LLDT_ENABLED()
+ | VBOXVMM_EXIT_STR_ENABLED()
+ | VBOXVMM_EXIT_LTR_ENABLED()
+ | VBOXVMM_EXIT_GETSEC_ENABLED()
+ | VBOXVMM_EXIT_RSM_ENABLED()
+ | VBOXVMM_EXIT_RDRAND_ENABLED()
+ | VBOXVMM_EXIT_RDSEED_ENABLED()
+ | VBOXVMM_EXIT_XSAVES_ENABLED()
+ | VBOXVMM_EXIT_XRSTORS_ENABLED()
+ | VBOXVMM_EXIT_VMM_CALL_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMREAD_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMXON_ENABLED()
+ | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED()
+ | VBOXVMM_EXIT_VMX_INVEPT_ENABLED()
+ | VBOXVMM_EXIT_VMX_INVVPID_ENABLED()
+ | VBOXVMM_EXIT_VMX_INVPCID_ENABLED()
+ | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED()
+ | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED()
+ | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED()
+ | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED()
+ ) != 0;
+}
+
+
+/**
+ * The debug runloop.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * The run loop.
+ *
+ * The current approach to state updating is to use the sledgehammer and
+ * sync everything every time. This will be optimized later.
+ */
+ VMXTRANSIENT VmxTransient;
+ RT_ZERO(VmxTransient);
+ VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
+
+ bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
+ pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
+ pVCpu->nem.s.fDebugWantRdTscExit = false;
+ pVCpu->nem.s.fUsingDebugLoop = true;
+
+ /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
+ VMXRUNDBGSTATE DbgState;
+ vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
+ vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
+
+ /*
+ * Poll timers and run for a bit.
+ */
+ /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
+ * the whole polling job when timers have changed... */
+ uint64_t offDeltaIgnored;
+ uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+ for (unsigned iLoop = 0;; iLoop++)
+ {
+ bool fStepping = pVCpu->nem.s.fSingleInstruction;
+
+ /* Set up VM-execution controls the next two can respond to. */
+ vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
+
+ rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, &VmxTransient, fStepping);
+ if (rcStrict != VINF_SUCCESS)
+ break;
+
+ /* Override any obnoxious code in the above call. */
+ vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
+
+ hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient);
+ if (hrc == HV_SUCCESS)
+ {
+ /*
+ * Deal with the message.
+ */
+ rcStrict = nemR3DarwinHandleExitDebug(pVM, pVCpu, &VmxTransient, &DbgState);
+ if (rcStrict == VINF_SUCCESS)
+ { /* hopefully likely */ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExitDebug -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
+ break;
+ }
+
+ /*
+ * Stepping: Did the RIP change, if so, consider it a single step.
+ * Otherwise, make sure one of the TFs gets set.
+ */
+ if (fStepping)
+ {
+ int rc = vmxHCImportGuestStateEx(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
+ AssertRC(rc);
+ if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
+ || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
+ {
+ rcStrict = VINF_EM_DBG_STEPPED;
+ break;
+ }
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
+ }
+ }
+ else
+ {
+ AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x %u\n",
+ pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
+ VERR_NEM_IPE_0);
+ }
+ } /* the run loop */
+
+ /*
+ * Clear the X86_EFL_TF if necessary.
+ */
+ if (pVCpu->nem.s.fClearTrapFlag)
+ {
+ int rc = vmxHCImportGuestStateEx(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
+ AssertRC(rc);
+ pVCpu->nem.s.fClearTrapFlag = false;
+ pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
+ }
+
+ pVCpu->nem.s.fUsingDebugLoop = false;
+ pVCpu->nem.s.fDebugWantRdTscExit = false;
+ pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
+
+ /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */
+ return vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
+}
+
+
+VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
+{
+ LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u));
+#ifdef LOG_ENABLED
+ if (LogIs3Enabled())
+ nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
+ AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
+
+ /*
+ * Try switch to NEM runloop state.
+ */
+ if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
+ { /* likely */ }
+ else
+ {
+ VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
+ LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
+ return VINF_SUCCESS;
+ }
+
+ VBOXSTRICTRC rcStrict;
+ if ( !pVCpu->nem.s.fUseDebugLoop
+ && !nemR3DarwinAnyExpensiveProbesEnabled()
+ && !DBGFIsStepping(pVCpu)
+ && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
+ rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
+ else
+ rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
+
+ if (rcStrict == VINF_EM_RAW_TO_R3)
+ rcStrict = VINF_SUCCESS;
+
+ /*
+ * Convert any pending HM events back to TRPM due to premature exits.
+ *
+ * This is because execution may continue from IEM and we would need to inject
+ * the event from there (hence place it back in TRPM).
+ */
+ if (pVCpu->nem.s.Event.fPending)
+ {
+ vmxHCPendingEventToTrpmTrap(pVCpu);
+ Assert(!pVCpu->nem.s.Event.fPending);
+
+ /* Clear the events from the VMCS. */
+ int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
+ rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
+ }
+
+
+ if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
+ VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
+
+ if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
+ {
+ /* Try anticipate what we might need. */
+ uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
+ if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
+ || RT_FAILURE(rcStrict))
+ fImport = CPUMCTX_EXTRN_ALL;
+ else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+ fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
+
+ if (pVCpu->cpum.GstCtx.fExtrn & fImport)
+ {
+ /* Only import what is external currently. */
+ int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
+ if (RT_SUCCESS(rc2))
+ pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
+ else if (RT_SUCCESS(rcStrict))
+ rcStrict = rc2;
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+ {
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+ }
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
+ }
+ else
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
+ }
+ else
+ {
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+ ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+ }
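+ /* At this point fExtrn == 0 means the entire guest context lives in CPUMCTX, and
+ * HM_CHANGED_ALL_GUEST forces a full export into the VMCS on the next VM-entry. */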
+
+ LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
+ pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
+ return rcStrict;
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
+{
+ NOREF(pVM);
+ return PGMPhysIsA20Enabled(pVCpu);
+}
+
+
+bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ bool fOld = pVCpu->nem.s.fSingleInstruction;
+ pVCpu->nem.s.fSingleInstruction = fEnable;
+ pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
+ return fOld;
+}
+
+
+void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
+
+ RT_NOREF(pVM, fFlags);
+
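+ /* hv_vcpu_interrupt() forces the vCPU out of hv_vcpu_run(), so the pending force
+ * flags get serviced on the EMT without waiting for the next natural VM-exit. */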
+ hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
+ if (hrc != HV_SUCCESS)
+ LogRel(("NEM: hv_vcpu_interrupt(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpuId, hrc));
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
+{
+ for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
+ !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
+ enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+ fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+ return fUseDebugLoop;
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
+{
+ RT_NOREF(pVM, pVCpu);
+ return fUseDebugLoop;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, puNemRange);
+
+ Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
+#if defined(VBOX_WITH_PGM_NEM_MODE)
+ if (pvR3)
+ {
+ int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
+ return VERR_NEM_MAP_PAGES_FAILED;
+ }
+ }
+ return VINF_SUCCESS;
+#else
+ RT_NOREF(pVM, GCPhys, cb, pvR3);
+ return VERR_NEM_MAP_PAGES_FAILED;
+#endif
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return false;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, puNemRange, pvRam, fFlags);
+
+ Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
+
+#if defined(VBOX_WITH_PGM_NEM_MODE)
+ /*
+ * Unmap the RAM we're replacing.
+ */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+ if (RT_SUCCESS(rc))
+ { /* likely */ }
+ else if (pvMmio2)
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
+ GCPhys, cb, fFlags, rc));
+ else
+ {
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
+ GCPhys, cb, fFlags, rc));
+ return VERR_NEM_UNMAP_PAGES_FAILED;
+ }
+ }
+
+ /*
+ * Map MMIO2 if any.
+ */
+ if (pvMmio2)
+ {
+ Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
+ int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, pu2State);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
+ GCPhys, cb, fFlags, pvMmio2, rc));
+ return VERR_NEM_MAP_PAGES_FAILED;
+ }
+ }
+ else
+ Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
+
+#else
+ RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
+ *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
+#endif
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
+ void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, puNemRange);
+
+ Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
+
+ int rc = VINF_SUCCESS;
+#if defined(VBOX_WITH_PGM_NEM_MODE)
+ /*
+ * Unmap the MMIO2 pages.
+ */
+ /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
+ * we may have more stuff to unmap even in case of pure MMIO... */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
+ {
+ rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
+ GCPhys, cb, fFlags, rc));
+ return VERR_NEM_UNMAP_PAGES_FAILED;
+ }
+ }
+
+ /* Ensure the page is marked as unmapped if relevant. */
+ Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
+
+ /*
+ * Restore the RAM we replaced.
+ */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ AssertPtr(pvRam);
+ rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
+ if (RT_SUCCESS(rc))
+ { /* likely */ }
+ else
+ {
+ LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
+ rc = VERR_NEM_MAP_PAGES_FAILED;
+ }
+ }
+
+ RT_NOREF(pvMmio2);
+#else
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
+ if (pu2State)
+ *pu2State = UINT8_MAX;
+ rc = VERR_NEM_UNMAP_PAGES_FAILED;
+#endif
+ return rc;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
+ void *pvBitmap, size_t cbBitmap)
+{
+ RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
+ AssertFailed();
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ RT_NOREF(pvPages);
+
+ Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
+ if (fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE)
+ {
+ int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
+ GCPhys, cb, fFlags, rc));
+ return VERR_NEM_UNMAP_PAGES_FAILED;
+ }
+ }
+
+ *puNemRange = 0;
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
+ uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
+ *pu2State = UINT8_MAX;
+ RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
+ return VINF_SUCCESS;
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
+ RTR3PTR pvMemR3, uint8_t *pu2State)
+{
+ Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
+ GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
+ *pu2State = UINT8_MAX;
+ RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
+}
+
+
+VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
+{
+ Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
+ RT_NOREF(pVCpu, fEnabled);
+}
+
+
+void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
+{
+ Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
+ NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
+}
+
+
+void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
+ RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
+{
+ Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
+ GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
+ NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
+}
+
+
+int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
+ PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhys, fPageProt, enmType, *pu2State));
+ RT_NOREF(HCPhys, fPageProt, enmType);
+
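+ /* Unmap the page so the newly allocated backing memory gets mapped afresh on the next
+ * guest access (presumably via the regular NEM page-fault path). */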
+ return nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
+ PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp pvR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhys, pvR3, fPageProt, enmType, *pu2State));
+ RT_NOREF(HCPhys, pvR3, fPageProt, enmType);
+
+ uint8_t u2StateOld = *pu2State;
+ /* Can return early if this is an unmap request and the page is not mapped. */
+ if ( fPageProt == NEM_PAGE_PROT_NONE
+ && u2StateOld == NEM_DARWIN_PAGE_STATE_UNMAPPED)
+ {
+ Assert(!pvR3);
+ return;
+ }
+
+ int rc;
+ if (u2StateOld == NEM_DARWIN_PAGE_STATE_UNMAPPED)
+ {
+ AssertPtr(pvR3);
+ rc = nemR3DarwinMap(pVM, GCPhys, pvR3, X86_PAGE_SIZE, fPageProt, pu2State);
+ }
+ else
+ rc = nemR3DarwinProtect(pVM, GCPhys, X86_PAGE_SIZE, fPageProt, pu2State);
+ AssertLogRelMsgRC(rc, ("NEMHCNotifyPhysPageProtChanged: nemR3DarwinMap/nemR3DarwinProtect(,%p,%RGp,%RGp,) u2StateOld=%u -> %Rrc\n",
+ pvR3, GCPhys, X86_PAGE_SIZE, u2StateOld, rc));
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
+ RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
+ RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
+
+ int rc = nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
+ if (RT_SUCCESS(rc))
+ {
+ rc = nemR3DarwinMap(pVM, GCPhys, pvNewR3, X86_PAGE_SIZE, fPageProt, pu2State);
+ AssertLogRelMsgRC(rc, ("NEMHCNotifyPhysPageChanged: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
+ pvNewR3, GCPhys, X86_PAGE_SIZE, rc));
+ }
+ else
+ AssertReleaseFailed();
+}
+
+
+/**
+ * Interface for importing state on demand (used by IEM).
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context CPU structure.
+ * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
+ */
+VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
+{
+ LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
+
+ return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
+}
+
+
+/**
+ * Query the CPU tick counter and optionally the TSC_AUX MSR value.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context CPU structure.
+ * @param pcTicks Where to return the CPU tick count.
+ * @param puAux Where to return the TSC_AUX register value.
+ */
+VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
+{
+ LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
+
+ int rc = nemR3DarwinMsrRead(pVCpu, MSR_IA32_TSC, pcTicks);
+ if ( RT_SUCCESS(rc)
+ && puAux)
+ {
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX)
+ {
+ uint64_t u64Aux;
+ rc = nemR3DarwinMsrRead(pVCpu, MSR_K8_TSC_AUX, &u64Aux);
+ if (RT_SUCCESS(rc))
+ *puAux = (uint32_t)u64Aux;
+ }
+ else
+ *puAux = CPUMGetGuestTscAux(pVCpu);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Resumes CPU clock (TSC) on all virtual CPUs.
+ *
+ * This is called by TM when the VM is started, restored, resumed or similar.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context CPU structure of the calling EMT.
+ * @param uPausedTscValue The TSC value at the time of pausing.
+ */
+VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
+{
+ LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVCpu, uPausedTscValue));
+ VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
+ AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
+
+ hv_return_t hrc = hv_vm_sync_tsc(uPausedTscValue);
+ if (RT_LIKELY(hrc == HV_SUCCESS))
+ {
+ ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
+ return VINF_SUCCESS;
+ }
+
+ return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Returns features supported by the NEM backend.
+ *
+ * @returns Flags of features supported by the native NEM backend.
+ * @param pVM The cross context VM structure.
+ */
+VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
+{
+ RT_NOREF(pVM);
+ /*
+ * Apple's Hypervisor.framework is not supported on CPUs lacking nested paging
+ * and unrestricted guest execution, so we can safely always return these flags here.
+ */
+ return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
+}
+
+
+/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
+ *
+ * @todo Add notes as the implementation progresses...
+ */
+
diff --git a/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp b/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
new file mode 100644
index 00000000..fa731415
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
@@ -0,0 +1,2838 @@
+/* $Id: NEMR3Native-linux.cpp $ */
+/** @file
+ * NEM - Native execution manager, native ring-3 Linux backend.
+ */
+
+/*
+ * Copyright (C) 2021-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NEM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/trpm.h>
+#include "NEMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <iprt/alloca.h>
+#include <iprt/string.h>
+#include <iprt/system.h>
+#include <iprt/x86.h>
+
+#include <errno.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <linux/kvm.h>
+
+/*
+ * Supply stuff missing from the kvm.h on the build box.
+ */
+#ifndef KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON /* since 5.4 */
+# define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+#endif
+
+
+
+/**
+ * Worker for nemR3NativeInit that gets the hypervisor capabilities.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3LnxInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
+{
+ AssertReturn(pVM->nem.s.fdVm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
+
+ /*
+ * Capabilities.
+ */
+ static const struct
+ {
+ const char *pszName;
+ int iCap;
+ uint32_t offNem : 24;
+ uint32_t cbNem : 3;
+ uint32_t fReqNonZero : 1;
+ uint32_t uReserved : 4;
+ } s_aCaps[] =
+ {
+#define CAP_ENTRY__L(a_Define) { #a_Define, a_Define, UINT32_C(0x00ffffff), 0, 0, 0 }
+#define CAP_ENTRY__S(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 0, 0 }
+#define CAP_ENTRY_MS(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 1, 0 }
+#define CAP_ENTRY__U(a_Number) { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 0, 0 }
+#define CAP_ENTRY_ML(a_Number) { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 1, 0 }
+#define CAP_ENTRY_MU(a_Number) { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 1, 0 }
+
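+ /* As an illustration (expansion per the macros above): CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots)
+ expands to { "KVM_CAP_NR_MEMSLOTS", KVM_CAP_NR_MEMSLOTS, RT_UOFFSETOF(NEM, cMaxMemSlots),
+ RT_SIZEOFMEMB(NEM, cMaxMemSlots), 1, 0 }, i.e. the queried value is stored into
+ NEM::cMaxMemSlots and the capability is required to be non-zero. */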
+ CAP_ENTRY__L(KVM_CAP_IRQCHIP), /* 0 */
+ CAP_ENTRY_ML(KVM_CAP_HLT),
+ CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL),
+ CAP_ENTRY_ML(KVM_CAP_USER_MEMORY),
+ CAP_ENTRY__L(KVM_CAP_SET_TSS_ADDR),
+ CAP_ENTRY__U(5),
+ CAP_ENTRY__L(KVM_CAP_VAPIC),
+ CAP_ENTRY__L(KVM_CAP_EXT_CPUID),
+ CAP_ENTRY__L(KVM_CAP_CLOCKSOURCE),
+ CAP_ENTRY__L(KVM_CAP_NR_VCPUS),
+ CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots), /* 10 */
+ CAP_ENTRY__L(KVM_CAP_PIT),
+ CAP_ENTRY__L(KVM_CAP_NOP_IO_DELAY),
+ CAP_ENTRY__L(KVM_CAP_PV_MMU),
+ CAP_ENTRY__L(KVM_CAP_MP_STATE),
+ CAP_ENTRY__L(KVM_CAP_COALESCED_MMIO),
+ CAP_ENTRY__L(KVM_CAP_SYNC_MMU),
+ CAP_ENTRY__U(17),
+ CAP_ENTRY__L(KVM_CAP_IOMMU),
+ CAP_ENTRY__U(19), /* Buggy KVM_CAP_JOIN_MEMORY_REGIONS? */
+ CAP_ENTRY__U(20), /* Non-working KVM_CAP_DESTROY_MEMORY_REGION? */
+ CAP_ENTRY__L(KVM_CAP_DESTROY_MEMORY_REGION_WORKS), /* 21 */
+ CAP_ENTRY__L(KVM_CAP_USER_NMI),
+#ifdef __KVM_HAVE_GUEST_DEBUG
+ CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG),
+#endif
+#ifdef __KVM_HAVE_PIT
+ CAP_ENTRY__L(KVM_CAP_REINJECT_CONTROL),
+#endif
+ CAP_ENTRY__L(KVM_CAP_IRQ_ROUTING),
+ CAP_ENTRY__L(KVM_CAP_IRQ_INJECT_STATUS),
+ CAP_ENTRY__U(27),
+ CAP_ENTRY__U(28),
+ CAP_ENTRY__L(KVM_CAP_ASSIGN_DEV_IRQ),
+ CAP_ENTRY__L(KVM_CAP_JOIN_MEMORY_REGIONS_WORKS), /* 30 */
+#ifdef __KVM_HAVE_MCE
+ CAP_ENTRY__L(KVM_CAP_MCE),
+#endif
+ CAP_ENTRY__L(KVM_CAP_IRQFD),
+#ifdef __KVM_HAVE_PIT
+ CAP_ENTRY__L(KVM_CAP_PIT2),
+#endif
+ CAP_ENTRY__L(KVM_CAP_SET_BOOT_CPU_ID),
+#ifdef __KVM_HAVE_PIT_STATE2
+ CAP_ENTRY__L(KVM_CAP_PIT_STATE2),
+#endif
+ CAP_ENTRY__L(KVM_CAP_IOEVENTFD),
+ CAP_ENTRY__L(KVM_CAP_SET_IDENTITY_MAP_ADDR),
+#ifdef __KVM_HAVE_XEN_HVM
+ CAP_ENTRY__L(KVM_CAP_XEN_HVM),
+#endif
+ CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK),
+ CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA), /* 40 */
+#ifdef __KVM_HAVE_VCPU_EVENTS
+ CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS),
+#else
+ CAP_ENTRY_MU(41),
+#endif
+ CAP_ENTRY__L(KVM_CAP_S390_PSW),
+ CAP_ENTRY__L(KVM_CAP_PPC_SEGSTATE),
+ CAP_ENTRY__L(KVM_CAP_HYPERV),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_VAPIC),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_SPIN),
+ CAP_ENTRY__L(KVM_CAP_PCI_SEGMENT),
+ CAP_ENTRY__L(KVM_CAP_PPC_PAIRED_SINGLES),
+ CAP_ENTRY__L(KVM_CAP_INTR_SHADOW),
+#ifdef __KVM_HAVE_DEBUGREGS
+ CAP_ENTRY__L(KVM_CAP_DEBUGREGS), /* 50 */
+#endif
+ CAP_ENTRY__S(KVM_CAP_X86_ROBUST_SINGLESTEP, fRobustSingleStep),
+ CAP_ENTRY__L(KVM_CAP_PPC_OSI),
+ CAP_ENTRY__L(KVM_CAP_PPC_UNSET_IRQ),
+ CAP_ENTRY__L(KVM_CAP_ENABLE_CAP),
+#ifdef __KVM_HAVE_XSAVE
+ CAP_ENTRY_ML(KVM_CAP_XSAVE),
+#else
+ CAP_ENTRY_MU(55),
+#endif
+#ifdef __KVM_HAVE_XCRS
+ CAP_ENTRY_ML(KVM_CAP_XCRS),
+#else
+ CAP_ENTRY_MU(56),
+#endif
+ CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO),
+ CAP_ENTRY__L(KVM_CAP_PPC_IRQ_LEVEL),
+ CAP_ENTRY__L(KVM_CAP_ASYNC_PF),
+ CAP_ENTRY__L(KVM_CAP_TSC_CONTROL), /* 60 */
+ CAP_ENTRY__L(KVM_CAP_GET_TSC_KHZ),
+ CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_SREGS),
+ CAP_ENTRY__L(KVM_CAP_SPAPR_TCE),
+ CAP_ENTRY__L(KVM_CAP_PPC_SMT),
+ CAP_ENTRY__L(KVM_CAP_PPC_RMA),
+ CAP_ENTRY__L(KVM_CAP_MAX_VCPUS),
+ CAP_ENTRY__L(KVM_CAP_PPC_HIOR),
+ CAP_ENTRY__L(KVM_CAP_PPC_PAPR),
+ CAP_ENTRY__L(KVM_CAP_SW_TLB),
+ CAP_ENTRY__L(KVM_CAP_ONE_REG), /* 70 */
+ CAP_ENTRY__L(KVM_CAP_S390_GMAP),
+ CAP_ENTRY__L(KVM_CAP_TSC_DEADLINE_TIMER),
+ CAP_ENTRY__L(KVM_CAP_S390_UCONTROL),
+ CAP_ENTRY__L(KVM_CAP_SYNC_REGS),
+ CAP_ENTRY__L(KVM_CAP_PCI_2_3),
+ CAP_ENTRY__L(KVM_CAP_KVMCLOCK_CTRL),
+ CAP_ENTRY__L(KVM_CAP_SIGNAL_MSI),
+ CAP_ENTRY__L(KVM_CAP_PPC_GET_SMMU_INFO),
+ CAP_ENTRY__L(KVM_CAP_S390_COW),
+ CAP_ENTRY__L(KVM_CAP_PPC_ALLOC_HTAB), /* 80 */
+ CAP_ENTRY__L(KVM_CAP_READONLY_MEM),
+ CAP_ENTRY__L(KVM_CAP_IRQFD_RESAMPLE),
+ CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_WATCHDOG),
+ CAP_ENTRY__L(KVM_CAP_PPC_HTAB_FD),
+ CAP_ENTRY__L(KVM_CAP_S390_CSS_SUPPORT),
+ CAP_ENTRY__L(KVM_CAP_PPC_EPR),
+ CAP_ENTRY__L(KVM_CAP_ARM_PSCI),
+ CAP_ENTRY__L(KVM_CAP_ARM_SET_DEVICE_ADDR),
+ CAP_ENTRY__L(KVM_CAP_DEVICE_CTRL),
+ CAP_ENTRY__L(KVM_CAP_IRQ_MPIC), /* 90 */
+ CAP_ENTRY__L(KVM_CAP_PPC_RTAS),
+ CAP_ENTRY__L(KVM_CAP_IRQ_XICS),
+ CAP_ENTRY__L(KVM_CAP_ARM_EL1_32BIT),
+ CAP_ENTRY__L(KVM_CAP_SPAPR_MULTITCE),
+ CAP_ENTRY__L(KVM_CAP_EXT_EMUL_CPUID),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_TIME),
+ CAP_ENTRY__L(KVM_CAP_IOAPIC_POLARITY_IGNORED),
+ CAP_ENTRY__L(KVM_CAP_ENABLE_CAP_VM),
+ CAP_ENTRY__L(KVM_CAP_S390_IRQCHIP),
+ CAP_ENTRY__L(KVM_CAP_IOEVENTFD_NO_LENGTH), /* 100 */
+ CAP_ENTRY__L(KVM_CAP_VM_ATTRIBUTES),
+ CAP_ENTRY__L(KVM_CAP_ARM_PSCI_0_2),
+ CAP_ENTRY__L(KVM_CAP_PPC_FIXUP_HCALL),
+ CAP_ENTRY__L(KVM_CAP_PPC_ENABLE_HCALL),
+ CAP_ENTRY__L(KVM_CAP_CHECK_EXTENSION_VM),
+ CAP_ENTRY__L(KVM_CAP_S390_USER_SIGP),
+ CAP_ENTRY__L(KVM_CAP_S390_VECTOR_REGISTERS),
+ CAP_ENTRY__L(KVM_CAP_S390_MEM_OP),
+ CAP_ENTRY__L(KVM_CAP_S390_USER_STSI),
+ CAP_ENTRY__L(KVM_CAP_S390_SKEYS), /* 110 */
+ CAP_ENTRY__L(KVM_CAP_MIPS_FPU),
+ CAP_ENTRY__L(KVM_CAP_MIPS_MSA),
+ CAP_ENTRY__L(KVM_CAP_S390_INJECT_IRQ),
+ CAP_ENTRY__L(KVM_CAP_S390_IRQ_STATE),
+ CAP_ENTRY__L(KVM_CAP_PPC_HWRNG),
+ CAP_ENTRY__L(KVM_CAP_DISABLE_QUIRKS),
+ CAP_ENTRY__L(KVM_CAP_X86_SMM),
+ CAP_ENTRY__L(KVM_CAP_MULTI_ADDRESS_SPACE),
+ CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_BPS),
+ CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_WPS), /* 120 */
+ CAP_ENTRY__L(KVM_CAP_SPLIT_IRQCHIP),
+ CAP_ENTRY__L(KVM_CAP_IOEVENTFD_ANY_LENGTH),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC),
+ CAP_ENTRY__L(KVM_CAP_S390_RI),
+ CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_64),
+ CAP_ENTRY__L(KVM_CAP_ARM_PMU_V3),
+ CAP_ENTRY__L(KVM_CAP_VCPU_ATTRIBUTES),
+ CAP_ENTRY__L(KVM_CAP_MAX_VCPU_ID),
+ CAP_ENTRY__L(KVM_CAP_X2APIC_API),
+ CAP_ENTRY__L(KVM_CAP_S390_USER_INSTR0), /* 130 */
+ CAP_ENTRY__L(KVM_CAP_MSI_DEVID),
+ CAP_ENTRY__L(KVM_CAP_PPC_HTM),
+ CAP_ENTRY__L(KVM_CAP_SPAPR_RESIZE_HPT),
+ CAP_ENTRY__L(KVM_CAP_PPC_MMU_RADIX),
+ CAP_ENTRY__L(KVM_CAP_PPC_MMU_HASH_V3),
+ CAP_ENTRY__L(KVM_CAP_IMMEDIATE_EXIT),
+ CAP_ENTRY__L(KVM_CAP_MIPS_VZ),
+ CAP_ENTRY__L(KVM_CAP_MIPS_TE),
+ CAP_ENTRY__L(KVM_CAP_MIPS_64BIT),
+ CAP_ENTRY__L(KVM_CAP_S390_GS), /* 140 */
+ CAP_ENTRY__L(KVM_CAP_S390_AIS),
+ CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_VFIO),
+ CAP_ENTRY__L(KVM_CAP_X86_DISABLE_EXITS),
+ CAP_ENTRY__L(KVM_CAP_ARM_USER_IRQ),
+ CAP_ENTRY__L(KVM_CAP_S390_CMMA_MIGRATION),
+ CAP_ENTRY__L(KVM_CAP_PPC_FWNMI),
+ CAP_ENTRY__L(KVM_CAP_PPC_SMT_POSSIBLE),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC2),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_VP_INDEX),
+ CAP_ENTRY__L(KVM_CAP_S390_AIS_MIGRATION), /* 150 */
+ CAP_ENTRY__L(KVM_CAP_PPC_GET_CPU_CHAR),
+ CAP_ENTRY__L(KVM_CAP_S390_BPB),
+ CAP_ENTRY__L(KVM_CAP_GET_MSR_FEATURES),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_EVENTFD),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_TLBFLUSH),
+ CAP_ENTRY__L(KVM_CAP_S390_HPAGE_1M),
+ CAP_ENTRY__L(KVM_CAP_NESTED_STATE),
+ CAP_ENTRY__L(KVM_CAP_ARM_INJECT_SERROR_ESR),
+ CAP_ENTRY__L(KVM_CAP_MSR_PLATFORM_INFO),
+ CAP_ENTRY__L(KVM_CAP_PPC_NESTED_HV), /* 160 */
+ CAP_ENTRY__L(KVM_CAP_HYPERV_SEND_IPI),
+ CAP_ENTRY__L(KVM_CAP_COALESCED_PIO),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_ENLIGHTENED_VMCS),
+ CAP_ENTRY__L(KVM_CAP_EXCEPTION_PAYLOAD),
+ CAP_ENTRY__L(KVM_CAP_ARM_VM_IPA_SIZE),
+ CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_CPUID),
+ CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
+ CAP_ENTRY__L(KVM_CAP_PPC_IRQ_XIVE),
+ CAP_ENTRY__L(KVM_CAP_ARM_SVE), /* 170 */
+ CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_ADDRESS),
+ CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_GENERIC),
+ CAP_ENTRY__L(KVM_CAP_PMU_EVENT_FILTER),
+ CAP_ENTRY__L(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2),
+ CAP_ENTRY__L(KVM_CAP_HYPERV_DIRECT_TLBFLUSH),
+ CAP_ENTRY__L(KVM_CAP_PPC_GUEST_DEBUG_SSTEP),
+ CAP_ENTRY__L(KVM_CAP_ARM_NISV_TO_USER),
+ CAP_ENTRY__L(KVM_CAP_ARM_INJECT_EXT_DABT),
+ CAP_ENTRY__L(KVM_CAP_S390_VCPU_RESETS),
+ CAP_ENTRY__L(KVM_CAP_S390_PROTECTED), /* 180 */
+ CAP_ENTRY__L(KVM_CAP_PPC_SECURE_GUEST),
+ CAP_ENTRY__L(KVM_CAP_HALT_POLL),
+ CAP_ENTRY__L(KVM_CAP_ASYNC_PF_INT),
+ CAP_ENTRY__L(KVM_CAP_LAST_CPU),
+ CAP_ENTRY__L(KVM_CAP_SMALLER_MAXPHYADDR),
+ CAP_ENTRY__L(KVM_CAP_S390_DIAG318),
+ CAP_ENTRY__L(KVM_CAP_STEAL_TIME),
+ CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */
+ CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER),
+ CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID), /* 190 */
+ CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID),
+ CAP_ENTRY__L(KVM_CAP_DIRTY_LOG_RING),
+ CAP_ENTRY__L(KVM_CAP_X86_BUS_LOCK_EXIT),
+ CAP_ENTRY__L(KVM_CAP_PPC_DAWR1),
+ CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG2),
+ CAP_ENTRY__L(KVM_CAP_SGX_ATTRIBUTE),
+ CAP_ENTRY__L(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM),
+ CAP_ENTRY__L(KVM_CAP_PTP_KVM),
+ CAP_ENTRY__U(199),
+ CAP_ENTRY__U(200),
+ CAP_ENTRY__U(201),
+ CAP_ENTRY__U(202),
+ CAP_ENTRY__U(203),
+ CAP_ENTRY__U(204),
+ CAP_ENTRY__U(205),
+ CAP_ENTRY__U(206),
+ CAP_ENTRY__U(207),
+ CAP_ENTRY__U(208),
+ CAP_ENTRY__U(209),
+ CAP_ENTRY__U(210),
+ CAP_ENTRY__U(211),
+ CAP_ENTRY__U(212),
+ CAP_ENTRY__U(213),
+ CAP_ENTRY__U(214),
+ CAP_ENTRY__U(215),
+ CAP_ENTRY__U(216),
+ };
+
+ LogRel(("NEM: KVM capabilities (system):\n"));
+ int rcRet = VINF_SUCCESS;
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aCaps); i++)
+ {
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_CHECK_EXTENSION, s_aCaps[i].iCap);
+ if (rc >= 10)
+ LogRel(("NEM: %36s: %#x (%d)\n", s_aCaps[i].pszName, rc, rc));
+ else if (rc >= 0)
+ LogRel(("NEM: %36s: %d\n", s_aCaps[i].pszName, rc));
+ else
+ LogRel(("NEM: %s failed: %d/%d\n", s_aCaps[i].pszName, rc, errno));
+ switch (s_aCaps[i].cbNem)
+ {
+ case 0:
+ break;
+ case 1:
+ {
+ uint8_t *puValue = (uint8_t *)&pVM->nem.padding[s_aCaps[i].offNem];
+ AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
+ *puValue = (uint8_t)rc;
+ AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
+ break;
+ }
+ case 2:
+ {
+ uint16_t *puValue = (uint16_t *)&pVM->nem.padding[s_aCaps[i].offNem];
+ AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
+ *puValue = (uint16_t)rc;
+ AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
+ break;
+ }
+ case 4:
+ {
+ uint32_t *puValue = (uint32_t *)&pVM->nem.padding[s_aCaps[i].offNem];
+ AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
+ *puValue = (uint32_t)rc;
+ AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
+ break;
+ }
+ default:
+ rcRet = RTErrInfoSetF(pErrInfo, VERR_NEM_IPE_0, "s_aCaps[%u] is bad: cbNem=%#x - %s",
+ i, s_aCaps[i].cbNem, s_aCaps[i].pszName);
+ AssertFailedReturn(rcRet);
+ }
+
+ /*
+ * Is a required non-zero entry zero or failing?
+ */
+ if (s_aCaps[i].fReqNonZero && rc <= 0)
+ rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE,
+ "Required capability '%s' is missing!", s_aCaps[i].pszName);
+ }
+
+ /*
+ * Get per VCpu KVM_RUN MMAP area size.
+ */
+ int rc = ioctl(pVM->nem.s.fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
+ if ((unsigned)rc < _64M)
+ {
+ pVM->nem.s.cbVCpuMmap = (uint32_t)rc;
+ LogRel(("NEM: %36s: %#x (%d)\n", "KVM_GET_VCPU_MMAP_SIZE", rc, rc));
+ }
+ else if (rc < 0)
+ rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE, "KVM_GET_VCPU_MMAP_SIZE failed: %d", errno);
+ else
+ rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc);
+
+ /*
+ * Init the slot ID bitmap.
+ */
+ ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0); /* don't use slot 0 */
+ if (pVM->nem.s.cMaxMemSlots < _32K)
+ ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K);
+ ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1); /* don't use the last slot */
+
+ return rcRet;
+}
+
+
+/**
+ * Does the early setup of a KVM VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo)
+{
+ AssertReturn(pVM->nem.s.fdVm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
+
+ /*
+ * Enable user space MSRs and let us check everything KVM cannot handle.
+ * We will set up filtering later when ring-3 init has completed.
+ */
+ struct kvm_enable_cap CapEn =
+ {
+ KVM_CAP_X86_USER_SPACE_MSR, 0,
+ { KVM_MSR_EXIT_REASON_FILTER | KVM_MSR_EXIT_REASON_UNKNOWN | KVM_MSR_EXIT_REASON_INVAL, 0, 0, 0}
+ };
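+ /* I.e. cap = KVM_CAP_X86_USER_SPACE_MSR, flags = 0, and args[0] holds the mask of
+ MSR exit reasons (filtered/unknown/invalid accesses) KVM shall punt to user space. */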
+ int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_ENABLE_CAP, &CapEn);
+ if (rcLnx == -1)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "Failed to enable KVM_CAP_X86_USER_SPACE_MSR: %u", errno);
+
+ /*
+ * Create the VCpus.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /* Create it. */
+ pVCpu->nem.s.fdVCpu = ioctl(pVM->nem.s.fdVm, KVM_CREATE_VCPU, (unsigned long)idCpu);
+ if (pVCpu->nem.s.fdVCpu < 0)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VCPU failed for VCpu #%u: %d", idCpu, errno);
+
+ /* Map the KVM_RUN area. */
+ pVCpu->nem.s.pRun = (struct kvm_run *)mmap(NULL, pVM->nem.s.cbVCpuMmap, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pVCpu->nem.s.fdVCpu, 0 /*offset*/);
+ if ((void *)pVCpu->nem.s.pRun == MAP_FAILED)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "mmap failed for VCpu #%u: %d", idCpu, errno);
+
+ /* We want all x86 registers and events on each exit. */
+ pVCpu->nem.s.pRun->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS;
+ }
+ return VINF_SUCCESS;
+}
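+
+/*
+ * For orientation, the bare KVM user-space lifecycle this backend builds on looks
+ * roughly as follows (illustrative sketch only, error handling elided; the fd and
+ * ioctl sequence is the standard one from the kernel's KVM API documentation):
+ *
+ *     int fdKvm  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+ *     int fdVm   = ioctl(fdKvm, KVM_CREATE_VM, 0UL);
+ *     int fdVCpu = ioctl(fdVm, KVM_CREATE_VCPU, 0UL);
+ *     int cbRun  = ioctl(fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
+ *     struct kvm_run *pRun = (struct kvm_run *)mmap(NULL, cbRun, PROT_READ | PROT_WRITE,
+ *                                                   MAP_SHARED, fdVCpu, 0);
+ *     ioctl(fdVCpu, KVM_RUN, 0UL);   => the exit reason is then in pRun->exit_reason.
+ *
+ * nemR3NativeInit() below performs these same steps, just with full error reporting
+ * and one vCPU per EMT.
+ */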
+
+
+/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
+static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ RT_NOREF(pVM, pvUser);
+ int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
+ AssertLogRelRC(rc);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Try initialize the native API.
+ *
+ * This may only do part of the job, more can be done in
+ * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFallback Whether we're in fallback mode or use-NEM mode. In
+ * the latter we'll fail if we cannot initialize.
+ * @param fForced Whether the HMForced flag is set and we should
+ * fail if we cannot initialize.
+ */
+int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
+{
+ RT_NOREF(pVM, fFallback, fForced);
+ /*
+ * Some state init.
+ */
+ pVM->nem.s.fdKvm = -1;
+ pVM->nem.s.fdVm = -1;
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
+ pNemCpu->fdVCpu = -1;
+ }
+
+ /*
+ * Error state.
+ * The error message will be non-empty on failure and 'rc' will be set too.
+ */
+ RTERRINFOSTATIC ErrInfo;
+ PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
+
+ /*
+ * Open kvm subsystem so we can issue system ioctls.
+ */
+ int rc;
+ int fdKvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+ if (fdKvm >= 0)
+ {
+ pVM->nem.s.fdKvm = fdKvm;
+
+ /*
+ * Create an empty VM since it is recommended we check capabilities on
+ * the VM rather than the system descriptor.
+ */
+ int fdVm = ioctl(fdKvm, KVM_CREATE_VM, 0UL /* Type must be zero on x86 */);
+ if (fdVm >= 0)
+ {
+ pVM->nem.s.fdVm = fdVm;
+
+ /*
+ * Check capabilities.
+ */
+ rc = nemR3LnxInitCheckCapabilities(pVM, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Set up the VM (more on this later).
+ */
+ rc = nemR3LnxInitSetupVm(pVM, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Set ourselves as the execution engine and make config adjustments.
+ */
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
+ Log(("NEM: Marked active!\n"));
+ PGMR3EnableNemMode(pVM);
+
+ /*
+ * Register release statistics
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when importing from KVM", "/NEM/CPU%u/ImportPendingInterrupt", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when exporting to KVM", "/NEM/CPU%u/ExportPendingInterrupt", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn1Loop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-01-loop", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn2Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-02-loops", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn3Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-03-loops", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn4PlusLoops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-04-to-7-loops", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits", "/NEM/CPU%u/Exit", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO", "/NEM/CPU%u/Exit/Io", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO", "/NEM/CPU%u/Exit/Mmio", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitSetTpr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_SET_TPR", "/NEM/CPU%u/Exit/SetTpr", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitTprAccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_TPR_ACCESS", "/NEM/CPU%u/Exit/TprAccess", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitRdMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_RDMSR", "/NEM/CPU%u/Exit/RdMsr", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitWrMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_WRMSR", "/NEM/CPU%u/Exit/WrMsr", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitIrqWindowOpen, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IRQ_WINDOW_OPEN", "/NEM/CPU%u/Exit/IrqWindowOpen", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HLT", "/NEM/CPU%u/Exit/Hlt", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR", "/NEM/CPU%u/Exit/Intr", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL", "/NEM/CPU%u/Exit/Hypercall", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG", "/NEM/CPU%u/Exit/Debug", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK", "/NEM/CPU%u/Exit/BusLock", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorEmulation, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/EMULATION", "/NEM/CPU%u/Exit/InternalErrorEmulation", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorFatal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/*", "/NEM/CPU%u/Exit/InternalErrorFatal", idCpu);
+ }
+
+ /*
+ * Success.
+ */
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Bail out.
+ */
+ }
+ close(fdVm);
+ pVM->nem.s.fdVm = -1;
+ }
+ else
+ rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VM failed: %u", errno);
+ close(fdKvm);
+ pVM->nem.s.fdKvm = -1;
+ }
+ else if (errno == EACCES)
+ rc = RTErrInfoSet(pErrInfo, VERR_ACCESS_DENIED, "Do not have access to open /dev/kvm for reading & writing.");
+ else if (errno == ENOENT)
+ rc = RTErrInfoSet(pErrInfo, VERR_NOT_SUPPORTED, "KVM is not available (/dev/kvm does not exist)");
+ else
+ rc = RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno), "Failed to open '/dev/kvm': %u", errno);
+
+ /*
+ * We only fail if in forced mode, otherwise just log the complaint and return.
+ */
+ Assert(RTErrInfoIsSet(pErrInfo));
+ if ( (fForced || !fFallback)
+ && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
+ return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
+ LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This is called after CPUMR3Init is done.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int nemR3NativeInitAfterCPUM(PVM pVM)
+{
+ /*
+ * Validate sanity.
+ */
+ AssertReturn(pVM->nem.s.fdKvm >= 0, VERR_WRONG_ORDER);
+ AssertReturn(pVM->nem.s.fdVm >= 0, VERR_WRONG_ORDER);
+ AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
+
+ /** @todo */
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Update the CPUID leaves for a VCPU.
+ *
+ * The KVM_SET_CPUID2 call replaces any previous leaves, so we have to redo
+ * everything even when there really are just single bit changes. That said, it
+ * looks like KVM updates the XCR/XSAVE related stuff as well as the APIC enabled
+ * bit(s), so doing this once at startup should suffice, I hope.
+ */
+static int nemR3LnxUpdateCpuIdsLeaves(PVM pVM, PVMCPU pVCpu)
+{
+ uint32_t cLeaves = 0;
+ PCCPUMCPUIDLEAF const paLeaves = CPUMR3CpuIdGetPtr(pVM, &cLeaves);
+ struct kvm_cpuid2 *pReq = (struct kvm_cpuid2 *)alloca(RT_UOFFSETOF_DYN(struct kvm_cpuid2, entries[cLeaves + 2]));
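+ /* struct kvm_cpuid2 ends in a flexible array member, so the required allocation size
+ is the offset of entries[nent]; RT_UOFFSETOF_DYN computes exactly that (the +2
+ merely leaves a little slack). */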
+
+ pReq->nent = cLeaves;
+ pReq->padding = 0;
+
+ for (uint32_t i = 0; i < cLeaves; i++)
+ {
+ CPUMGetGuestCpuId(pVCpu, paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, -1 /*f64BitMode*/,
+ &pReq->entries[i].eax,
+ &pReq->entries[i].ebx,
+ &pReq->entries[i].ecx,
+ &pReq->entries[i].edx);
+ pReq->entries[i].function = paLeaves[i].uLeaf;
+ pReq->entries[i].index = paLeaves[i].uSubLeaf;
+ pReq->entries[i].flags = !paLeaves[i].fSubLeafMask ? 0 : KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ pReq->entries[i].padding[0] = 0;
+ pReq->entries[i].padding[1] = 0;
+ pReq->entries[i].padding[2] = 0;
+ }
+
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_CPUID2, pReq);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d cLeaves=%#x\n", rcLnx, errno, cLeaves), RTErrConvertFromErrno(errno));
+
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ /*
+ * Make RTThreadPoke work again (disabled for avoiding unnecessary
+ * critical section issues in ring-0).
+ */
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, nemR3LnxFixThreadPoke, NULL);
+
+ /*
+ * Configure CPUIDs after ring-3 init has been done.
+ */
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ {
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ int rc = nemR3LnxUpdateCpuIdsLeaves(pVM, pVM->apCpusR3[idCpu]);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ /*
+ * Configure MSRs after ring-3 init is done.
+ *
+ * We only need to tell KVM which MSRs it can handle, as we already
+ * requested KVM_MSR_EXIT_REASON_FILTER, KVM_MSR_EXIT_REASON_UNKNOWN
+ * and KVM_MSR_EXIT_REASON_INVAL in nemR3LnxInitSetupVm, and here we
+ * will use KVM_MSR_FILTER_DEFAULT_DENY. So, all MSRs w/o a 1 in the
+ * bitmaps should be deferred to ring-3.
+ */
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ {
+ struct kvm_msr_filter MsrFilters = {0}; /* Structure with a couple of implicit paddings on 64-bit systems. */
+ MsrFilters.flags = KVM_MSR_FILTER_DEFAULT_DENY;
+
+ unsigned iRange = 0;
+#define MSR_RANGE_BEGIN(a_uBase, a_uEnd, a_fFlags) \
+ AssertCompile(0x3000 <= KVM_MSR_FILTER_MAX_BITMAP_SIZE * 8); \
+ uint64_t RT_CONCAT(bm, a_uBase)[0x3000 / 64] = {0}; \
+ do { \
+ uint64_t * const pbm = RT_CONCAT(bm, a_uBase); \
+ uint32_t const uBase = UINT32_C(a_uBase); \
+ uint32_t const cMsrs = UINT32_C(a_uEnd) - UINT32_C(a_uBase); \
+ MsrFilters.ranges[iRange].base = UINT32_C(a_uBase); \
+ MsrFilters.ranges[iRange].nmsrs = cMsrs; \
+ MsrFilters.ranges[iRange].flags = (a_fFlags); \
+ MsrFilters.ranges[iRange].bitmap = (uint8_t *)&RT_CONCAT(bm, a_uBase)[0]
+#define MSR_RANGE_ADD(a_Msr) \
+ do { Assert((uint32_t)(a_Msr) - uBase < cMsrs); ASMBitSet(pbm, (uint32_t)(a_Msr) - uBase); } while (0)
+#define MSR_RANGE_END(a_cMinMsrs) \
+ /* optimize the range size before closing: */ \
+ uint32_t cBitmap = cMsrs / 64; \
+ while (cBitmap > (((a_cMinMsrs) + 63) / 64) && pbm[cBitmap - 1] == 0) \
+ cBitmap -= 1; \
+ MsrFilters.ranges[iRange].nmsrs = cBitmap * 64; \
+ iRange++; \
+ } while (0)
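+
+ /* Illustration (not part of the macros above): MSR_RANGE_BEGIN(0xc0000000, 0xc0003000, fFlags)
+ declares a 0x3000-bit bitmap named bm0xc0000000[] and points MsrFilters.ranges[iRange]
+ at it, each MSR_RANGE_ADD(uMsr) sets bit (uMsr - 0xc0000000), and MSR_RANGE_END trims
+ trailing all-zero qwords so KVM only scans the prefix that can contain allowed MSRs. */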
+
+ /* 1st Intel range: 0000_0000 to 0000_3000. */
+ MSR_RANGE_BEGIN(0x00000000, 0x00003000, KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE);
+ MSR_RANGE_ADD(MSR_IA32_TSC);
+ MSR_RANGE_ADD(MSR_IA32_SYSENTER_CS);
+ MSR_RANGE_ADD(MSR_IA32_SYSENTER_ESP);
+ MSR_RANGE_ADD(MSR_IA32_SYSENTER_EIP);
+ MSR_RANGE_ADD(MSR_IA32_CR_PAT);
+ /** @todo more? */
+ MSR_RANGE_END(64);
+
+ /* 1st AMD range: c000_0000 to c000_3000 */
+ MSR_RANGE_BEGIN(0xc0000000, 0xc0003000, KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE);
+ MSR_RANGE_ADD(MSR_K6_EFER);
+ MSR_RANGE_ADD(MSR_K6_STAR);
+ MSR_RANGE_ADD(MSR_K8_GS_BASE);
+ MSR_RANGE_ADD(MSR_K8_KERNEL_GS_BASE);
+ MSR_RANGE_ADD(MSR_K8_LSTAR);
+ MSR_RANGE_ADD(MSR_K8_CSTAR);
+ MSR_RANGE_ADD(MSR_K8_SF_MASK);
+ MSR_RANGE_ADD(MSR_K8_TSC_AUX);
+ /** @todo add more? */
+ MSR_RANGE_END(64);
+
+ /** @todo Specify other ranges too? Like hyper-V and KVM to make sure we get
+ * the MSR requests instead of KVM. */
+
+ int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_X86_SET_MSR_FILTER, &MsrFilters);
+ if (rcLnx == -1)
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Failed to enable KVM_X86_SET_MSR_FILTER: %u", errno);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeTerm(PVM pVM)
+{
+ /*
+ * Per-cpu data
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ if (pVCpu->nem.s.fdVCpu != -1)
+ {
+ close(pVCpu->nem.s.fdVCpu);
+ pVCpu->nem.s.fdVCpu = -1;
+ }
+ if (pVCpu->nem.s.pRun)
+ {
+ munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap);
+ pVCpu->nem.s.pRun = NULL;
+ }
+ }
+
+ /*
+ * Global data.
+ */
+ if (pVM->nem.s.fdVm != -1)
+ {
+ close(pVM->nem.s.fdVm);
+ pVM->nem.s.fdVm = -1;
+ }
+
+ if (pVM->nem.s.fdKvm != -1)
+ {
+ close(pVM->nem.s.fdKvm);
+ pVM->nem.s.fdKvm = -1;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * VM reset notification.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void nemR3NativeReset(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Reset CPU due to INIT IPI or hot (un)plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the CPU being
+ * reset.
+ * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
+ */
+void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
+{
+ RT_NOREF(pVCpu, fInitIpi);
+}
+
+
+/*********************************************************************************************************************************
+* Memory management *
+*********************************************************************************************************************************/
+
+
+/**
+ * Allocates a memory slot ID.
+ *
+ * @returns Slot ID on success, UINT16_MAX on failure.
+ */
+static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM)
+{
+ /* Use the hint first. */
+ uint16_t idHint = pVM->nem.s.idPrevSlot;
+ if (idHint < _32K - 1)
+ {
+ int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint);
+ Assert(idx < _32K);
+ if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
+ return pVM->nem.s.idPrevSlot = (uint16_t)idx;
+ }
+
+ /*
+ * Search the whole map from the start.
+ */
+ int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K);
+ Assert(idx < _32K);
+ if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
+ return pVM->nem.s.idPrevSlot = (uint16_t)idx;
+
+ Assert(idx < 0 /*shouldn't trigger unless there is a race */);
+ return UINT16_MAX; /* caller is expected to assert. */
+}
+
+
+/**
+ * Frees a memory slot ID
+ */
+static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot)
+{
+ if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot)))
+ { /*likely*/ }
+ else
+ AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot));
+}
+
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
+ AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
+
+ Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n",
+ GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange, idSlot));
+
+ struct kvm_userspace_memory_region Region;
+ Region.slot = idSlot;
+ Region.flags = 0;
+ Region.guest_phys_addr = GCPhys;
+ Region.memory_size = cb;
+ Region.userspace_addr = (uintptr_t)pvR3;
+
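+ /* A memslot maps the guest-physical range [GCPhys, GCPhys + cb) onto the host virtual
+ range starting at pvR3; re-registering the same slot with memory_size = 0 deletes it
+ again (see NEMR3NotifyPhysMmioExUnmap below). */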
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
+ if (rc == 0)
+ {
+ *pu2State = 0;
+ *puNemRange = idSlot;
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno));
+ nemR3LnxMemSlotIdFree(pVM, idSlot);
+ return VERR_NEM_MAP_PAGES_FAILED;
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return true;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
+ RT_NOREF(pvRam);
+
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ /** @todo implement splitting and whatnot of ranges if we want to be 100%
+ * conforming (just modify RAM registrations in MM.cpp to test). */
+ AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
+ VERR_NEM_MAP_PAGES_FAILED);
+ }
+
+ /*
+ * Register MMIO2.
+ */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
+ {
+ AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED);
+ AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED);
+
+ uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
+ AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
+
+ struct kvm_userspace_memory_region Region;
+ Region.slot = idSlot;
+ Region.flags = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+ Region.guest_phys_addr = GCPhys;
+ Region.memory_size = cb;
+ Region.userspace_addr = (uintptr_t)pvMmio2;
+
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
+ if (rc == 0)
+ {
+ *pu2State = 0;
+ *puNemRange = idSlot;
+ Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n",
+ GCPhys, cb, fFlags, pvMmio2, idSlot));
+ return VINF_SUCCESS;
+ }
+
+ nemR3LnxMemSlotIdFree(pVM, idSlot);
+ AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
+ GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
+ VERR_NEM_MAP_PAGES_FAILED);
+ }
+
+ /* MMIO, don't care. */
+ *pu2State = 0;
+ *puNemRange = UINT32_MAX;
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
+ void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
+
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ /** @todo implement splitting and whatnot of ranges if we want to be 100%
+ * conforming (just modify RAM registrations in MM.cpp to test). */
+ AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
+ VERR_NEM_UNMAP_PAGES_FAILED);
+ }
+
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
+ {
+ uint32_t const idSlot = *puNemRange;
+ AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
+ AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
+
+ struct kvm_userspace_memory_region Region;
+ Region.slot = idSlot;
+ Region.flags = 0;
+ Region.guest_phys_addr = GCPhys;
+ Region.memory_size = 0; /* this deregisters it. */
+ Region.userspace_addr = (uintptr_t)pvMmio2;
+
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
+ if (rc == 0)
+ {
+ if (pu2State)
+ *pu2State = 0;
+ *puNemRange = UINT32_MAX;
+ nemR3LnxMemSlotIdFree(pVM, idSlot);
+ return VINF_SUCCESS;
+ }
+
+ AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
+ GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
+ VERR_NEM_UNMAP_PAGES_FAILED);
+ }
+
+ if (pu2State)
+ *pu2State = UINT8_MAX;
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
+ void *pvBitmap, size_t cbBitmap)
+{
+ AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4);
+ AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4);
+
+ RT_NOREF(GCPhys, cbBitmap);
+
+ struct kvm_dirty_log DirtyLog;
+ DirtyLog.slot = uNemRange;
+ DirtyLog.padding1 = 0;
+ DirtyLog.dirty_bitmap = pvBitmap;
+
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
+ AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc),
+ VERR_NEM_QUERY_DIRTY_BITMAP_FAILED);
+
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
+ *pu2State = UINT8_MAX;
+
+ /* We don't support putting ROM where there is already RAM. For
+ now just shuffle the registrations till it works... */
+ AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags),
+ VERR_NEM_MAP_PAGES_FAILED);
+
+ /** @todo figure out how to do shadow ROMs. */
+
+ /*
+ * We only allocate a slot number here in case we need to use it to
+ * fend off physical handler fun.
+ */
+ uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
+ AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
+
+ *pu2State = 0;
+ *puNemRange = idSlot;
+ Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
+ GCPhys, cb, fFlags, pvPages, idSlot));
+ RT_NOREF(GCPhys, cb, fFlags, pvPages);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
+ uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
+
+ AssertPtrReturn(pvPages, VERR_NEM_IPE_5);
+
+ uint32_t const idSlot = *puNemRange;
+ AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
+ AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
+
+ *pu2State = UINT8_MAX;
+
+ /*
+ * Do the actual setting of the user pages here now that we've
+ * got a valid pvPages (typically isn't available during the early
+ * notification, unless we're replacing RAM).
+ */
+ struct kvm_userspace_memory_region Region;
+ Region.slot = idSlot;
+ Region.flags = 0;
+ Region.guest_phys_addr = GCPhys;
+ Region.memory_size = cb;
+ Region.userspace_addr = (uintptr_t)pvPages;
+
+ int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
+ if (rc == 0)
+ {
+ *pu2State = 0;
+ Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
+ GCPhys, cb, fFlags, pvPages, idSlot));
+ return VINF_SUCCESS;
+ }
+ AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n",
+ GCPhys, cb, fFlags, pvPages, idSlot, errno, rc),
+ VERR_NEM_MAP_PAGES_FAILED);
+}
+
+
+VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
+{
+ Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
+ Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
+ RT_NOREF(pVCpu, fEnabled);
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
+ RTR3PTR pvMemR3, uint8_t *pu2State)
+{
+ Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
+ GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
+
+ *pu2State = UINT8_MAX;
+ RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
+}
+
+
+void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
+{
+ Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
+ RT_NOREF(pVM, enmKind, GCPhys, cb);
+}
+
+
+void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
+ RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
+{
+ Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
+ GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
+ RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
+}
+
+
+int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
+ PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhys, fPageProt, enmType, *pu2State));
+ RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
+ return VINF_SUCCESS;
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
+ PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhys, fPageProt, enmType, *pu2State));
+ Assert(VM_IS_NEM_ENABLED(pVM));
+ RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
+}
+
+
+VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
+ RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
+{
+ Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
+ GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
+ Assert(VM_IS_NEM_ENABLED(pVM));
+ RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
+}
+
+
+/*********************************************************************************************************************************
+* CPU State *
+*********************************************************************************************************************************/
+
+/**
+ * Worker that imports selected state from KVM.
+ */
+static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, PCPUMCTX pCtx, struct kvm_run *pRun)
+{
+ fWhat &= pVCpu->cpum.GstCtx.fExtrn;
+ if (!fWhat)
+ return VINF_SUCCESS;
+
+ /*
+ * Stuff that goes into kvm_run::s.regs.regs:
+ */
+ if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
+ {
+ if (fWhat & CPUMCTX_EXTRN_RIP)
+ pCtx->rip = pRun->s.regs.regs.rip;
+ if (fWhat & CPUMCTX_EXTRN_RFLAGS)
+ pCtx->rflags.u = pRun->s.regs.regs.rflags;
+
+ if (fWhat & CPUMCTX_EXTRN_RAX)
+ pCtx->rax = pRun->s.regs.regs.rax;
+ if (fWhat & CPUMCTX_EXTRN_RCX)
+ pCtx->rcx = pRun->s.regs.regs.rcx;
+ if (fWhat & CPUMCTX_EXTRN_RDX)
+ pCtx->rdx = pRun->s.regs.regs.rdx;
+ if (fWhat & CPUMCTX_EXTRN_RBX)
+ pCtx->rbx = pRun->s.regs.regs.rbx;
+ if (fWhat & CPUMCTX_EXTRN_RSP)
+ pCtx->rsp = pRun->s.regs.regs.rsp;
+ if (fWhat & CPUMCTX_EXTRN_RBP)
+ pCtx->rbp = pRun->s.regs.regs.rbp;
+ if (fWhat & CPUMCTX_EXTRN_RSI)
+ pCtx->rsi = pRun->s.regs.regs.rsi;
+ if (fWhat & CPUMCTX_EXTRN_RDI)
+ pCtx->rdi = pRun->s.regs.regs.rdi;
+ if (fWhat & CPUMCTX_EXTRN_R8_R15)
+ {
+ pCtx->r8 = pRun->s.regs.regs.r8;
+ pCtx->r9 = pRun->s.regs.regs.r9;
+ pCtx->r10 = pRun->s.regs.regs.r10;
+ pCtx->r11 = pRun->s.regs.regs.r11;
+ pCtx->r12 = pRun->s.regs.regs.r12;
+ pCtx->r13 = pRun->s.regs.regs.r13;
+ pCtx->r14 = pRun->s.regs.regs.r14;
+ pCtx->r15 = pRun->s.regs.regs.r15;
+ }
+ }
+
+ /*
+ * Stuff that goes into kvm_run::s.regs.sregs.
+ *
+ * Note! The apic_base can be ignored because we gets all MSR writes to it
+ * and VBox always keeps the correct value.
+ */
+ bool fMaybeChangedMode = false;
+ bool fUpdateCr3 = false;
+ if (fWhat & ( CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK
+ | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
+ {
+ /** @todo what about Attr.n.u4LimitHigh? */
+#define NEM_LNX_IMPORT_SEG(a_CtxSeg, a_KvmSeg) do { \
+ (a_CtxSeg).u64Base = (a_KvmSeg).base; \
+ (a_CtxSeg).u32Limit = (a_KvmSeg).limit; \
+ (a_CtxSeg).ValidSel = (a_CtxSeg).Sel = (a_KvmSeg).selector; \
+ (a_CtxSeg).Attr.n.u4Type = (a_KvmSeg).type; \
+ (a_CtxSeg).Attr.n.u1DescType = (a_KvmSeg).s; \
+ (a_CtxSeg).Attr.n.u2Dpl = (a_KvmSeg).dpl; \
+ (a_CtxSeg).Attr.n.u1Present = (a_KvmSeg).present; \
+ (a_CtxSeg).Attr.n.u1Available = (a_KvmSeg).avl; \
+ (a_CtxSeg).Attr.n.u1Long = (a_KvmSeg).l; \
+ (a_CtxSeg).Attr.n.u1DefBig = (a_KvmSeg).db; \
+ (a_CtxSeg).Attr.n.u1Granularity = (a_KvmSeg).g; \
+ (a_CtxSeg).Attr.n.u1Unusable = (a_KvmSeg).unusable; \
+ (a_CtxSeg).fFlags = CPUMSELREG_FLAGS_VALID; \
+ CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &(a_CtxSeg)); \
+ } while (0)
+
+ if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_ES)
+ NEM_LNX_IMPORT_SEG(pCtx->es, pRun->s.regs.sregs.es);
+ if (fWhat & CPUMCTX_EXTRN_CS)
+ NEM_LNX_IMPORT_SEG(pCtx->cs, pRun->s.regs.sregs.cs);
+ if (fWhat & CPUMCTX_EXTRN_SS)
+ NEM_LNX_IMPORT_SEG(pCtx->ss, pRun->s.regs.sregs.ss);
+ if (fWhat & CPUMCTX_EXTRN_DS)
+ NEM_LNX_IMPORT_SEG(pCtx->ds, pRun->s.regs.sregs.ds);
+ if (fWhat & CPUMCTX_EXTRN_FS)
+ NEM_LNX_IMPORT_SEG(pCtx->fs, pRun->s.regs.sregs.fs);
+ if (fWhat & CPUMCTX_EXTRN_GS)
+ NEM_LNX_IMPORT_SEG(pCtx->gs, pRun->s.regs.sregs.gs);
+ }
+ if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_GDTR)
+ {
+ pCtx->gdtr.pGdt = pRun->s.regs.sregs.gdt.base;
+ pCtx->gdtr.cbGdt = pRun->s.regs.sregs.gdt.limit;
+ }
+ if (fWhat & CPUMCTX_EXTRN_IDTR)
+ {
+ pCtx->idtr.pIdt = pRun->s.regs.sregs.idt.base;
+ pCtx->idtr.cbIdt = pRun->s.regs.sregs.idt.limit;
+ }
+ if (fWhat & CPUMCTX_EXTRN_LDTR)
+ NEM_LNX_IMPORT_SEG(pCtx->ldtr, pRun->s.regs.sregs.ldt);
+ if (fWhat & CPUMCTX_EXTRN_TR)
+ NEM_LNX_IMPORT_SEG(pCtx->tr, pRun->s.regs.sregs.tr);
+ }
+ if (fWhat & CPUMCTX_EXTRN_CR_MASK)
+ {
+ if (fWhat & CPUMCTX_EXTRN_CR0)
+ {
+ if (pVCpu->cpum.GstCtx.cr0 != pRun->s.regs.sregs.cr0)
+ {
+ CPUMSetGuestCR0(pVCpu, pRun->s.regs.sregs.cr0);
+ fMaybeChangedMode = true;
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_CR2)
+ pCtx->cr2 = pRun->s.regs.sregs.cr2;
+ if (fWhat & CPUMCTX_EXTRN_CR3)
+ {
+ if (pCtx->cr3 != pRun->s.regs.sregs.cr3)
+ {
+ CPUMSetGuestCR3(pVCpu, pRun->s.regs.sregs.cr3);
+ fUpdateCr3 = true;
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_CR4)
+ {
+ if (pCtx->cr4 != pRun->s.regs.sregs.cr4)
+ {
+ CPUMSetGuestCR4(pVCpu, pRun->s.regs.sregs.cr4);
+ fMaybeChangedMode = true;
+ }
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ APICSetTpr(pVCpu, (uint8_t)pRun->s.regs.sregs.cr8 << 4);
+ if (fWhat & CPUMCTX_EXTRN_EFER)
+ {
+ if (pCtx->msrEFER != pRun->s.regs.sregs.efer)
+ {
+ Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, pRun->s.regs.sregs.efer));
+ if ((pRun->s.regs.sregs.efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
+ PGMNotifyNxeChanged(pVCpu, RT_BOOL(pRun->s.regs.sregs.efer & MSR_K6_EFER_NXE));
+ pCtx->msrEFER = pRun->s.regs.sregs.efer;
+ fMaybeChangedMode = true;
+ }
+ }
+#undef NEM_LNX_IMPORT_SEG
+ }
+
+ /*
+ * Debug registers.
+ */
+ if (fWhat & CPUMCTX_EXTRN_DR_MASK)
+ {
+ struct kvm_debugregs DbgRegs = {{0}};
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_DEBUGREGS, &DbgRegs);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+
+ if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
+ {
+ pCtx->dr[0] = DbgRegs.db[0];
+ pCtx->dr[1] = DbgRegs.db[1];
+ pCtx->dr[2] = DbgRegs.db[2];
+ pCtx->dr[3] = DbgRegs.db[3];
+ }
+ if (fWhat & CPUMCTX_EXTRN_DR6)
+ pCtx->dr[6] = DbgRegs.dr6;
+ if (fWhat & CPUMCTX_EXTRN_DR7)
+ pCtx->dr[7] = DbgRegs.dr7;
+ }
+
+ /*
+ * FPU, SSE, AVX, ++.
+ */
+ if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
+ {
+ if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+ {
+ fWhat |= CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE; /* we do all or nothing at all */
+
+ AssertCompile(sizeof(pCtx->XState) >= sizeof(struct kvm_xsave));
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &pCtx->XState);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+ }
+
+ if (fWhat & CPUMCTX_EXTRN_XCRx)
+ {
+ struct kvm_xcrs Xcrs =
+ { /*.nr_xcrs = */ 2,
+ /*.flags = */ 0,
+ /*.xcrs= */ {
+ { /*.xcr =*/ 0, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[0] },
+ { /*.xcr =*/ 1, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[1] },
+ }
+ };
+
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XCRS, &Xcrs);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+
+ pCtx->aXcr[0] = Xcrs.xcrs[0].value;
+ pCtx->aXcr[1] = Xcrs.xcrs[1].value;
+ }
+ }
+
+ /*
+ * MSRs.
+ */
+ if (fWhat & ( CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS
+ | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
+ {
+ union
+ {
+ struct kvm_msrs Core;
+ uint64_t padding[2 + sizeof(struct kvm_msr_entry) * 32];
+ } uBuf;
+ uint64_t *pauDsts[32];
+ uint32_t iMsr = 0;
+ PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+
+#define ADD_MSR(a_Msr, a_uValue) do { \
+ Assert(iMsr < 32); \
+ uBuf.Core.entries[iMsr].index = (a_Msr); \
+ uBuf.Core.entries[iMsr].reserved = 0; \
+ uBuf.Core.entries[iMsr].data = UINT64_MAX; \
+ pauDsts[iMsr] = &(a_uValue); \
+ iMsr += 1; \
+ } while (0)
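+
+ /* Each ADD_MSR queues one MSR index for the single batched KVM_GET_MSRS ioctl below;
+ pauDsts[] remembers where each returned value must be scattered to afterwards. */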
+
+ if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+ ADD_MSR(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
+ if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
+ {
+ ADD_MSR(MSR_K6_STAR, pCtx->msrSTAR);
+ ADD_MSR(MSR_K8_LSTAR, pCtx->msrLSTAR);
+ ADD_MSR(MSR_K8_CSTAR, pCtx->msrCSTAR);
+ ADD_MSR(MSR_K8_SF_MASK, pCtx->msrSFMASK);
+ }
+ if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+ {
+ ADD_MSR(MSR_IA32_SYSENTER_CS, pCtx->SysEnter.cs);
+ ADD_MSR(MSR_IA32_SYSENTER_EIP, pCtx->SysEnter.eip);
+ ADD_MSR(MSR_IA32_SYSENTER_ESP, pCtx->SysEnter.esp);
+ }
+ if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
+ ADD_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+ if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
+ {
+ ADD_MSR(MSR_IA32_CR_PAT, pCtx->msrPAT);
+ /** @todo What do we _have_ to add here?
+ * We also have: Mttr*, MiscEnable, FeatureControl. */
+ }
+
+ uBuf.Core.pad = 0;
+ uBuf.Core.nmsrs = iMsr;
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MSRS, &uBuf);
+ AssertMsgReturn(rc == (int)iMsr,
+ ("rc=%d iMsr=%d (->%#x) errno=%d\n",
+ rc, iMsr, (uint32_t)rc < iMsr ? uBuf.Core.entries[rc].index : 0, errno),
+ VERR_NEM_IPE_3);
+
+ while (iMsr-- > 0)
+ *pauDsts[iMsr] = uBuf.Core.entries[iMsr].data;
+#undef ADD_MSR
+ }
+
+ /*
+ * Interruptibility state and pending interrupts.
+ */
+ if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+ {
+ fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI; /* always do both, see export and interrupt FF handling */
+
+ struct kvm_vcpu_events KvmEvents = {0};
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_VCPU_EVENTS, &KvmEvents);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_3);
+
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
+ pVCpu->cpum.GstCtx.rip = pRun->s.regs.regs.rip;
+
+ CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
+ RT_BOOL(KvmEvents.interrupt.shadow & KVM_X86_SHADOW_INT_MOV_SS),
+ RT_BOOL(KvmEvents.interrupt.shadow & KVM_X86_SHADOW_INT_STI),
+ pVCpu->cpum.GstCtx.rip);
+ CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
+
+ if (KvmEvents.interrupt.injected)
+ {
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportPendingInterrupt);
+ TRPMAssertTrap(pVCpu, KvmEvents.interrupt.nr, !KvmEvents.interrupt.soft ? TRPM_HARDWARE_INT : TRPM_SOFTWARE_INT);
+ }
+
+ Assert(KvmEvents.nmi.injected == 0);
+ Assert(KvmEvents.nmi.pending == 0);
+ }
+
+ /*
+ * Update the external mask.
+ */
+ pCtx->fExtrn &= ~fWhat;
+ pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
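+ /* Once nothing in CPUMCTX_EXTRN_ALL remains outstanding, the keeper bits carry no
+ information either, so the whole mask is cleared rather than left behind stale. */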
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+
+ /*
+ * We sometimes need to update PGM on the guest status.
+ */
+ if (!fMaybeChangedMode && !fUpdateCr3)
+ { /* likely */ }
+ else
+ {
+ /*
+ * Make sure we got all the state PGM might need.
+ */
+ Log7(("nemHCLnxImportState: fMaybeChangedMode=%d fUpdateCr3=%d fExtrnNeeded=%#RX64\n", fMaybeChangedMode, fUpdateCr3,
+ pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER) ));
+ if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER))
+ {
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR0)
+ {
+ if (pVCpu->cpum.GstCtx.cr0 != pRun->s.regs.sregs.cr0)
+ {
+ CPUMSetGuestCR0(pVCpu, pRun->s.regs.sregs.cr0);
+ fMaybeChangedMode = true;
+ }
+ }
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR3)
+ {
+ if (pCtx->cr3 != pRun->s.regs.sregs.cr3)
+ {
+ CPUMSetGuestCR3(pVCpu, pRun->s.regs.sregs.cr3);
+ fUpdateCr3 = true;
+ }
+ }
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR4)
+ {
+ if (pCtx->cr4 != pRun->s.regs.sregs.cr4)
+ {
+ CPUMSetGuestCR4(pVCpu, pRun->s.regs.sregs.cr4);
+ fMaybeChangedMode = true;
+ }
+ }
+ if (fWhat & CPUMCTX_EXTRN_EFER)
+ {
+ if (pCtx->msrEFER != pRun->s.regs.sregs.efer)
+ {
+ Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, pRun->s.regs.sregs.efer));
+ if ((pRun->s.regs.sregs.efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
+ PGMNotifyNxeChanged(pVCpu, RT_BOOL(pRun->s.regs.sregs.efer & MSR_K6_EFER_NXE));
+ pCtx->msrEFER = pRun->s.regs.sregs.efer;
+ fMaybeChangedMode = true;
+ }
+ }
+
+ pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER);
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+ }
+
+ /*
+ * Notify PGM about the changes.
+ */
+ if (fMaybeChangedMode)
+ {
+ int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4,
+ pVCpu->cpum.GstCtx.msrEFER, false /*fForce*/);
+ AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
+ }
+
+ if (fUpdateCr3)
+ {
+ int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
+ if (rc == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Interface for importing state on demand (used by IEM).
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context CPU structure.
+ * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
+ */
+VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
+{
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
+ return nemHCLnxImportState(pVCpu, fWhat, &pVCpu->cpum.GstCtx, pVCpu->nem.s.pRun);
+}
+
+
+/**
+ * Exports state to KVM.
+ */
+static int nemHCLnxExportState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, struct kvm_run *pRun)
+{
+ uint64_t const fExtrn = ~pCtx->fExtrn & CPUMCTX_EXTRN_ALL;
+ Assert((~fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL);
+
+ /*
+ * Stuff that goes into kvm_run::s.regs.regs:
+ */
+ if (fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
+ {
+ if (fExtrn & CPUMCTX_EXTRN_RIP)
+ pRun->s.regs.regs.rip = pCtx->rip;
+ if (fExtrn & CPUMCTX_EXTRN_RFLAGS)
+ pRun->s.regs.regs.rflags = pCtx->rflags.u;
+
+ if (fExtrn & CPUMCTX_EXTRN_RAX)
+ pRun->s.regs.regs.rax = pCtx->rax;
+ if (fExtrn & CPUMCTX_EXTRN_RCX)
+ pRun->s.regs.regs.rcx = pCtx->rcx;
+ if (fExtrn & CPUMCTX_EXTRN_RDX)
+ pRun->s.regs.regs.rdx = pCtx->rdx;
+ if (fExtrn & CPUMCTX_EXTRN_RBX)
+ pRun->s.regs.regs.rbx = pCtx->rbx;
+ if (fExtrn & CPUMCTX_EXTRN_RSP)
+ pRun->s.regs.regs.rsp = pCtx->rsp;
+ if (fExtrn & CPUMCTX_EXTRN_RBP)
+ pRun->s.regs.regs.rbp = pCtx->rbp;
+ if (fExtrn & CPUMCTX_EXTRN_RSI)
+ pRun->s.regs.regs.rsi = pCtx->rsi;
+ if (fExtrn & CPUMCTX_EXTRN_RDI)
+ pRun->s.regs.regs.rdi = pCtx->rdi;
+ if (fExtrn & CPUMCTX_EXTRN_R8_R15)
+ {
+ pRun->s.regs.regs.r8 = pCtx->r8;
+ pRun->s.regs.regs.r9 = pCtx->r9;
+ pRun->s.regs.regs.r10 = pCtx->r10;
+ pRun->s.regs.regs.r11 = pCtx->r11;
+ pRun->s.regs.regs.r12 = pCtx->r12;
+ pRun->s.regs.regs.r13 = pCtx->r13;
+ pRun->s.regs.regs.r14 = pCtx->r14;
+ pRun->s.regs.regs.r15 = pCtx->r15;
+ }
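+ /* Setting the dirty flag makes KVM load s.regs.regs into the vCPU on the
+ next KVM_RUN (the synced-registers interface, KVM_CAP_SYNC_REGS). */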
+ pRun->kvm_dirty_regs |= KVM_SYNC_X86_REGS;
+ }
+
+ /*
+ * Stuff that goes into kvm_run::s.regs.sregs:
+ *
+ * The APIC base register updating is a little suboptimal... But at least
+ * VBox always has the right base register value, so it's one-directional.
+ */
+ uint64_t const uApicBase = APICGetBaseMsrNoCheck(pVCpu);
+ if ( (fExtrn & ( CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK
+ | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
+ || uApicBase != pVCpu->nem.s.uKvmApicBase)
+ {
+ if ((pVCpu->nem.s.uKvmApicBase ^ uApicBase) & MSR_IA32_APICBASE_EN)
+ Log(("NEM/%u: APICBASE_EN changed %#010RX64 -> %#010RX64\n", pVCpu->idCpu, pVCpu->nem.s.uKvmApicBase, uApicBase));
+ pRun->s.regs.sregs.apic_base = uApicBase;
+ pVCpu->nem.s.uKvmApicBase = uApicBase;
+
+ if (fExtrn & CPUMCTX_EXTRN_APIC_TPR)
+ pRun->s.regs.sregs.cr8 = CPUMGetGuestCR8(pVCpu);
+
+#define NEM_LNX_EXPORT_SEG(a_KvmSeg, a_CtxSeg) do { \
+ (a_KvmSeg).base = (a_CtxSeg).u64Base; \
+ (a_KvmSeg).limit = (a_CtxSeg).u32Limit; \
+ (a_KvmSeg).selector = (a_CtxSeg).Sel; \
+ (a_KvmSeg).type = (a_CtxSeg).Attr.n.u4Type; \
+ (a_KvmSeg).s = (a_CtxSeg).Attr.n.u1DescType; \
+ (a_KvmSeg).dpl = (a_CtxSeg).Attr.n.u2Dpl; \
+ (a_KvmSeg).present = (a_CtxSeg).Attr.n.u1Present; \
+ (a_KvmSeg).avl = (a_CtxSeg).Attr.n.u1Available; \
+ (a_KvmSeg).l = (a_CtxSeg).Attr.n.u1Long; \
+ (a_KvmSeg).db = (a_CtxSeg).Attr.n.u1DefBig; \
+ (a_KvmSeg).g = (a_CtxSeg).Attr.n.u1Granularity; \
+ (a_KvmSeg).unusable = (a_CtxSeg).Attr.n.u1Unusable; \
+ (a_KvmSeg).padding = 0; \
+ } while (0)
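+ /* Note: 'unusable' maps to VT-x's unusable segment attribute (bit 16 of
+ the access rights), which KVM exposes as a separate flag. */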
+
+ if (fExtrn & CPUMCTX_EXTRN_SREG_MASK)
+ {
+ if (fExtrn & CPUMCTX_EXTRN_ES)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.es, pCtx->es);
+ if (fExtrn & CPUMCTX_EXTRN_CS)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.cs, pCtx->cs);
+ if (fExtrn & CPUMCTX_EXTRN_SS)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ss, pCtx->ss);
+ if (fExtrn & CPUMCTX_EXTRN_DS)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ds, pCtx->ds);
+ if (fExtrn & CPUMCTX_EXTRN_FS)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.fs, pCtx->fs);
+ if (fExtrn & CPUMCTX_EXTRN_GS)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.gs, pCtx->gs);
+ }
+ if (fExtrn & CPUMCTX_EXTRN_TABLE_MASK)
+ {
+ if (fExtrn & CPUMCTX_EXTRN_GDTR)
+ {
+ pRun->s.regs.sregs.gdt.base = pCtx->gdtr.pGdt;
+ pRun->s.regs.sregs.gdt.limit = pCtx->gdtr.cbGdt;
+ pRun->s.regs.sregs.gdt.padding[0] = 0;
+ pRun->s.regs.sregs.gdt.padding[1] = 0;
+ pRun->s.regs.sregs.gdt.padding[2] = 0;
+ }
+ if (fExtrn & CPUMCTX_EXTRN_IDTR)
+ {
+ pRun->s.regs.sregs.idt.base = pCtx->idtr.pIdt;
+ pRun->s.regs.sregs.idt.limit = pCtx->idtr.cbIdt;
+ pRun->s.regs.sregs.idt.padding[0] = 0;
+ pRun->s.regs.sregs.idt.padding[1] = 0;
+ pRun->s.regs.sregs.idt.padding[2] = 0;
+ }
+ if (fExtrn & CPUMCTX_EXTRN_LDTR)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ldt, pCtx->ldtr);
+ if (fExtrn & CPUMCTX_EXTRN_TR)
+ NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.tr, pCtx->tr);
+ }
+ if (fExtrn & CPUMCTX_EXTRN_CR_MASK)
+ {
+ if (fExtrn & CPUMCTX_EXTRN_CR0)
+ pRun->s.regs.sregs.cr0 = pCtx->cr0;
+ if (fExtrn & CPUMCTX_EXTRN_CR2)
+ pRun->s.regs.sregs.cr2 = pCtx->cr2;
+ if (fExtrn & CPUMCTX_EXTRN_CR3)
+ pRun->s.regs.sregs.cr3 = pCtx->cr3;
+ if (fExtrn & CPUMCTX_EXTRN_CR4)
+ pRun->s.regs.sregs.cr4 = pCtx->cr4;
+ }
+ if (fExtrn & CPUMCTX_EXTRN_EFER)
+ pRun->s.regs.sregs.efer = pCtx->msrEFER;
+
+ RT_ZERO(pRun->s.regs.sregs.interrupt_bitmap); /* this is an alternative interrupt injection interface */
+
+ pRun->kvm_dirty_regs |= KVM_SYNC_X86_SREGS;
+ }
+
+ /*
+ * Debug registers.
+ */
+ if (fExtrn & CPUMCTX_EXTRN_DR_MASK)
+ {
+ struct kvm_debugregs DbgRegs = {{0}};
+
+ if ((fExtrn & CPUMCTX_EXTRN_DR_MASK) != CPUMCTX_EXTRN_DR_MASK)
+ {
+ /* Partial debug state, we must get DbgRegs first so we can merge: */
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_DEBUGREGS, &DbgRegs);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+ }
+
+ if (fExtrn & CPUMCTX_EXTRN_DR0_DR3)
+ {
+ DbgRegs.db[0] = pCtx->dr[0];
+ DbgRegs.db[1] = pCtx->dr[1];
+ DbgRegs.db[2] = pCtx->dr[2];
+ DbgRegs.db[3] = pCtx->dr[3];
+ }
+ if (fExtrn & CPUMCTX_EXTRN_DR6)
+ DbgRegs.dr6 = pCtx->dr[6];
+ if (fExtrn & CPUMCTX_EXTRN_DR7)
+ DbgRegs.dr7 = pCtx->dr[7];
+
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_DEBUGREGS, &DbgRegs);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+ }
+
+ /*
+ * FPU, SSE, AVX, ++.
+ */
+ if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
+ {
+ if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+ {
+ /** @todo could IEM just grab partial control of this state in some situations? */
+ Assert( (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+ == (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE)); /* no partial states */
+
+ AssertCompile(sizeof(pCtx->XState) >= sizeof(struct kvm_xsave));
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_XSAVE, &pCtx->XState);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+ }
+
+ if (fExtrn & CPUMCTX_EXTRN_XCRx)
+ {
+ struct kvm_xcrs Xcrs =
+ { /*.nr_xcrs = */ 2,
+ /*.flags = */ 0,
+ /*.xcrs= */ {
+ { /*.xcr =*/ 0, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[0] },
+ { /*.xcr =*/ 1, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[1] },
+ }
+ };
+
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_XCRS, &Xcrs);
+ AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+ }
+ }
+
+ /*
+ * MSRs.
+ */
+ if (fExtrn & ( CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS
+ | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
+ {
+ union
+ {
+ struct kvm_msrs Core;
+ uint64_t padding[2 + sizeof(struct kvm_msr_entry) * 32];
+ } uBuf;
+ uint32_t iMsr = 0;
+ PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+
+#define ADD_MSR(a_Msr, a_uValue) do { \
+ Assert(iMsr < 32); \
+ uBuf.Core.entries[iMsr].index = (a_Msr); \
+ uBuf.Core.entries[iMsr].reserved = 0; \
+ uBuf.Core.entries[iMsr].data = (a_uValue); \
+ iMsr += 1; \
+ } while (0)
+
+ if (fExtrn & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+ ADD_MSR(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
+ if (fExtrn & CPUMCTX_EXTRN_SYSCALL_MSRS)
+ {
+ ADD_MSR(MSR_K6_STAR, pCtx->msrSTAR);
+ ADD_MSR(MSR_K8_LSTAR, pCtx->msrLSTAR);
+ ADD_MSR(MSR_K8_CSTAR, pCtx->msrCSTAR);
+ ADD_MSR(MSR_K8_SF_MASK, pCtx->msrSFMASK);
+ }
+ if (fExtrn & CPUMCTX_EXTRN_SYSENTER_MSRS)
+ {
+ ADD_MSR(MSR_IA32_SYSENTER_CS, pCtx->SysEnter.cs);
+ ADD_MSR(MSR_IA32_SYSENTER_EIP, pCtx->SysEnter.eip);
+ ADD_MSR(MSR_IA32_SYSENTER_ESP, pCtx->SysEnter.esp);
+ }
+ if (fExtrn & CPUMCTX_EXTRN_TSC_AUX)
+ ADD_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+ if (fExtrn & CPUMCTX_EXTRN_OTHER_MSRS)
+ {
+ ADD_MSR(MSR_IA32_CR_PAT, pCtx->msrPAT);
+ /** @todo What do we _have_ to add here?
+ * We also have: Mtrr*, MiscEnable, FeatureControl. */
+ }
+
+ uBuf.Core.pad = 0;
+ uBuf.Core.nmsrs = iMsr;
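+ /* Note: KVM_SET_MSRS returns the number of MSRs successfully set, so any
+ return value other than iMsr means the kernel rejected an entry. */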
+ int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_MSRS, &uBuf);
+ AssertMsgReturn(rc == (int)iMsr,
+ ("rc=%d iMsr=%d (->%#x) errno=%d\n",
+ rc, iMsr, (uint32_t)rc < iMsr ? uBuf.Core.entries[rc].index : 0, errno),
+ VERR_NEM_IPE_3);
+ }
+
+ /*
+ * Interruptibility state.
+ *
+ * Note! This I/O control function sets most of the fields passed in, so when
+ * raising an interrupt, NMI, SMI or exception, this must be done by the
+ * code doing the raising or we'll overwrite it here.
+ */
+ if (fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+ {
+ Assert( (fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+ == (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI));
+
+ struct kvm_vcpu_events KvmEvents = {0};
+
+ KvmEvents.flags = KVM_VCPUEVENT_VALID_SHADOW;
+ if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
+ { /* probably likely */ }
+ else
+ KvmEvents.interrupt.shadow = (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx) ? KVM_X86_SHADOW_INT_MOV_SS : 0)
+ | (CPUMIsInInterruptShadowAfterSti(&pVCpu->cpum.GstCtx) ? KVM_X86_SHADOW_INT_STI : 0);
+
+ /* No flag - this is updated unconditionally. */
+ KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
+
+ if (TRPMHasTrap(pVCpu))
+ {
+ TRPMEVENT enmType = TRPM_32BIT_HACK;
+ uint8_t bTrapNo = 0;
+ TRPMQueryTrap(pVCpu, &bTrapNo, &enmType);
+ Log(("nemHCLnxExportState: Pending trap: bTrapNo=%#x enmType=%d\n", bTrapNo, enmType));
+ if ( enmType == TRPM_HARDWARE_INT
+ || enmType == TRPM_SOFTWARE_INT)
+ {
+ KvmEvents.interrupt.soft = enmType == TRPM_SOFTWARE_INT;
+ KvmEvents.interrupt.nr = bTrapNo;
+ KvmEvents.interrupt.injected = 1;
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExportPendingInterrupt);
+ TRPMResetTrap(pVCpu);
+ }
+ else
+ AssertFailed();
+ }
+
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_VCPU_EVENTS, &KvmEvents);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_3);
+ }
+
+ /*
+ * KVM now owns all the state.
+ */
+ pCtx->fExtrn = CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL;
+
+ RT_NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Query the CPU tick counter and optionally the TSC_AUX MSR value.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context CPU structure.
+ * @param pcTicks Where to return the CPU tick count.
+ * @param puAux Where to return the TSC_AUX register value.
+ */
+VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
+{
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
+ // KVM_GET_CLOCK?
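+ /* Untested sketch of one possible implementation: the guest TSC could
+ presumably be read back via KVM_GET_MSRS, along these lines (TSC_AUX
+ would need a second entry): */
+#if 0
+ union
+ {
+ struct kvm_msrs Core;
+ uint64_t padding[2 + sizeof(struct kvm_msr_entry)];
+ } uBuf;
+ RT_ZERO(uBuf);
+ uBuf.Core.nmsrs = 1;
+ uBuf.Core.entries[0].index = MSR_IA32_TSC;
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MSRS, &uBuf);
+ AssertLogRelMsgReturn(rcLnx == 1, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_3);
+ *pcTicks = uBuf.Core.entries[0].data;
+#endif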
+ RT_NOREF(pVCpu, pcTicks, puAux);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resumes CPU clock (TSC) on all virtual CPUs.
+ *
+ * This is called by TM when the VM is started, restored, resumed or similar.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context CPU structure of the calling EMT.
+ * @param uPausedTscValue The TSC value at the time of pausing.
+ */
+VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
+{
+ // KVM_SET_CLOCK?
+ RT_NOREF(pVM, pVCpu, uPausedTscValue);
+ return VINF_SUCCESS;
+}
+
+
+VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
+{
+ RT_NOREF(pVM);
+ return NEM_FEAT_F_NESTED_PAGING
+ | NEM_FEAT_F_FULL_GST_EXEC
+ | NEM_FEAT_F_XSAVE_XRSTOR;
+}
+
+
+
+/*********************************************************************************************************************************
+* Execution *
+*********************************************************************************************************************************/
+
+
+VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Only execute when the A20 gate is enabled as I cannot immediately
+ * spot any A20 support in KVM.
+ */
+ RT_NOREF(pVM);
+ Assert(VM_IS_NEM_ENABLED(pVM));
+ return PGMPhysIsA20Enabled(pVCpu);
+}
+
+
+bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+ NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
+ return false;
+}
+
+
+void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ int rc = RTThreadPoke(pVCpu->hThread);
+ LogFlow(("nemR3NativeNotifyFF: #%u -> %Rrc\n", pVCpu->idCpu, rc));
+ AssertRC(rc);
+ RT_NOREF(pVM, fFlags);
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
+{
+ RT_NOREF(pVM, fUseDebugLoop);
+ return false;
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
+{
+ RT_NOREF(pVM, pVCpu, fUseDebugLoop);
+ return false;
+}
+
+
+/**
+ * Deals with pending interrupt FFs prior to executing guest code.
+ */
+static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu, struct kvm_run *pRun)
+{
+ RT_NOREF_PV(pVM);
+
+ /*
+ * Do nothing if TRPM already has something pending, as we can only
+ * inject one event per KVM_RUN call. This can only happen if we come
+ * directly from the loop in EM, so the inhibit bits must be internal.
+ */
+ if (!TRPMHasTrap(pVCpu))
+ { /* semi likely */ }
+ else
+ {
+ Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
+ Log8(("nemHCLnxHandleInterruptFF: TRPM has an pending event already\n"));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * First update APIC. We ASSUME this won't need TPR/CR8.
+ */
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+ {
+ APICUpdatePendingInterrupts(pVCpu);
+ if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * We don't currently implement SMIs.
+ */
+ AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
+
+ /*
+ * In KVM the CPUMCTX_EXTRN_INHIBIT_INT and CPUMCTX_EXTRN_INHIBIT_NMI states
+ * are tied together with interrupt and NMI delivery, so we must get and
+ * synchronize these all in one go and set both CPUMCTX_EXTRN_INHIBIT_XXX flags.
+ * If we don't we may lose the interrupt/NMI we marked pending here when the
+ * state is exported again before execution.
+ */
+ struct kvm_vcpu_events KvmEvents = {0};
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_VCPU_EVENTS, &KvmEvents);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
+ pRun->s.regs.regs.rip = pVCpu->cpum.GstCtx.rip;
+
+ KvmEvents.flags |= KVM_VCPUEVENT_VALID_SHADOW;
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
+ KvmEvents.interrupt.shadow = !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx) ? 0
+ : (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx) ? KVM_X86_SHADOW_INT_MOV_SS : 0)
+ | (CPUMIsInInterruptShadowAfterSti(&pVCpu->cpum.GstCtx) ? KVM_X86_SHADOW_INT_STI : 0);
+ else
+ CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
+ RT_BOOL(KvmEvents.interrupt.shadow & KVM_X86_SHADOW_INT_MOV_SS),
+ RT_BOOL(KvmEvents.interrupt.shadow & KVM_X86_SHADOW_INT_STI),
+ pRun->s.regs.regs.rip);
+
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
+ KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
+ else
+ CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
+
+ /* KVM will own the INT + NMI inhibit state soon: */
+ pVCpu->cpum.GstCtx.fExtrn = (pVCpu->cpum.GstCtx.fExtrn & ~CPUMCTX_EXTRN_KEEPER_MASK)
+ | CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
+
+ /*
+ * NMI? Try deliver it first.
+ */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
+ {
+#if 0
+ int rcLnx = ioctl(pVCpu->nem.s.fdVm, KVM_NMI, 0UL);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+#else
+ KvmEvents.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+ KvmEvents.nmi.pending = 1;
+#endif
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+ Log8(("Queuing NMI on %u\n", pVCpu->idCpu));
+ }
+
+ /*
+ * APIC or PIC interrupt?
+ */
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+ {
+ if (pRun->s.regs.regs.rflags & X86_EFL_IF)
+ {
+ if (KvmEvents.interrupt.shadow == 0)
+ {
+ /*
+ * If CR8 is in KVM, update the VBox copy so PDMGetInterrupt will
+ * work correctly.
+ */
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_APIC_TPR)
+ APICSetTpr(pVCpu, (uint8_t)pRun->cr8 << 4);
+
+ uint8_t bInterrupt;
+ int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(KvmEvents.interrupt.injected == false);
+#if 0
+ int rcLnx = ioctl(pVCpu->nem.s.fdVm, KVM_INTERRUPT, (unsigned long)bInterrupt);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+#else
+ KvmEvents.interrupt.nr = bInterrupt;
+ KvmEvents.interrupt.soft = false;
+ KvmEvents.interrupt.injected = true;
+#endif
+ Log8(("Queuing interrupt %#x on %u: %04x:%08RX64 efl=%#x\n", bInterrupt, pVCpu->idCpu,
+ pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u));
+ }
+ else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) /** @todo this isn't extremely efficient if we get a lot of exits... */
+ Log8(("VERR_APIC_INTR_MASKED_BY_TPR\n")); /* We'll get a TRP exit - no interrupt window needed. */
+ else
+ Log8(("PDMGetInterrupt failed -> %Rrc\n", rc));
+ }
+ else
+ {
+ pRun->request_interrupt_window = 1;
+ Log8(("Interrupt window pending on %u (#2)\n", pVCpu->idCpu));
+ }
+ }
+ else
+ {
+ pRun->request_interrupt_window = 1;
+ Log8(("Interrupt window pending on %u (#1)\n", pVCpu->idCpu));
+ }
+ }
+
+ /*
+ * Now, update the state.
+ */
+ /** @todo skip when possible... */
+ rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_VCPU_EVENTS, &KvmEvents);
+ AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles KVM_EXIT_INTERNAL_ERROR.
+ */
+static VBOXSTRICTRC nemR3LnxHandleInternalError(PVMCPU pVCpu, struct kvm_run *pRun)
+{
+ Log(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%d) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror,
+ pRun->internal.suberror, pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
+
+ /*
+ * Deal with each suberror, returning if we don't want IEM to handle it.
+ */
+ switch (pRun->internal.suberror)
+ {
+ case KVM_INTERNAL_ERROR_EMULATION:
+ {
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_EMULATION),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorEmulation);
+ break;
+ }
+
+ case KVM_INTERNAL_ERROR_SIMUL_EX:
+ case KVM_INTERNAL_ERROR_DELIVERY_EV:
+ case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON:
+ default:
+ {
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_FATAL),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorFatal);
+ const char *pszName;
+ switch (pRun->internal.suberror)
+ {
+ case KVM_INTERNAL_ERROR_EMULATION: pszName = "KVM_INTERNAL_ERROR_EMULATION"; break;
+ case KVM_INTERNAL_ERROR_SIMUL_EX: pszName = "KVM_INTERNAL_ERROR_SIMUL_EX"; break;
+ case KVM_INTERNAL_ERROR_DELIVERY_EV: pszName = "KVM_INTERNAL_ERROR_DELIVERY_EV"; break;
+ case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON: pszName = "KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON"; break;
+ default: pszName = "unknown"; break;
+ }
+ LogRel(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%s) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror, pszName,
+ pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
+ return VERR_NEM_IPE_0;
+ }
+ }
+
+ /*
+ * Execute instruction in IEM and try get on with it.
+ */
+ Log2(("nemR3LnxHandleInternalError: Executing instruction at %04x:%08RX64 in IEM\n",
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip));
+ VBOXSTRICTRC rcStrict = nemHCLnxImportState(pVCpu,
+ IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT
+ | CPUMCTX_EXTRN_INHIBIT_NMI,
+ &pVCpu->cpum.GstCtx, pRun);
+ if (RT_SUCCESS(rcStrict))
+ rcStrict = IEMExecOne(pVCpu);
+ return rcStrict;
+}
+
+
+/**
+ * Handles KVM_EXIT_IO.
+ */
+static VBOXSTRICTRC nemHCLnxHandleExitIo(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
+{
+ /*
+ * Input validation.
+ */
+ Assert(pRun->io.count > 0);
+ Assert(pRun->io.size == 1 || pRun->io.size == 2 || pRun->io.size == 4);
+ Assert(pRun->io.direction == KVM_EXIT_IO_IN || pRun->io.direction == KVM_EXIT_IO_OUT);
+ Assert(pRun->io.data_offset < pVM->nem.s.cbVCpuMmap);
+ Assert(pRun->io.data_offset + pRun->io.size * pRun->io.count <= pVM->nem.s.cbVCpuMmap);
+
+ /*
+ * We cannot easily act on the exit history here, because the I/O port
+ * exit is stateful and the instruction will be completed in the next
+ * KVM_RUN call. There seems to be no way to avoid this.
+ */
+ EMHistoryAddExit(pVCpu,
+ pRun->io.count == 1
+ ? ( pRun->io.direction == KVM_EXIT_IO_IN
+ ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ)
+ : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE))
+ : ( pRun->io.direction == KVM_EXIT_IO_IN
+ ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)
+ : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+
+ /*
+ * Do the requested job.
+ */
+ VBOXSTRICTRC rcStrict;
+ RTPTRUNION uPtrData;
+ uPtrData.pu8 = (uint8_t *)pRun + pRun->io.data_offset;
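+ /* The data buffer lives inside the shared kvm_run mapping (hence the
+ data_offset bounds checks above); KVM picks the result up from there
+ when the instruction is completed on the next KVM_RUN. */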
+ if (pRun->io.count == 1)
+ {
+ if (pRun->io.direction == KVM_EXIT_IO_IN)
+ {
+ uint32_t uValue = 0;
+ rcStrict = IOMIOPortRead(pVM, pVCpu, pRun->io.port, &uValue, pRun->io.size);
+ Log4(("IOExit/%u: %04x:%08RX64: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->io.port, pRun->io.size, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
+ if (IOM_SUCCESS(rcStrict))
+ {
+ if (pRun->io.size == 4)
+ *uPtrData.pu32 = uValue;
+ else if (pRun->io.size == 2)
+ *uPtrData.pu16 = (uint16_t)uValue;
+ else
+ *uPtrData.pu8 = (uint8_t)uValue;
+ }
+ }
+ else
+ {
+ uint32_t const uValue = pRun->io.size == 4 ? *uPtrData.pu32
+ : pRun->io.size == 2 ? *uPtrData.pu16
+ : *uPtrData.pu8;
+ rcStrict = IOMIOPortWrite(pVM, pVCpu, pRun->io.port, uValue, pRun->io.size);
+ Log4(("IOExit/%u: %04x:%08RX64: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->io.port, uValue, pRun->io.size, VBOXSTRICTRC_VAL(rcStrict) ));
+ }
+ }
+ else
+ {
+ uint32_t cTransfers = pRun->io.count;
+ if (pRun->io.direction == KVM_EXIT_IO_IN)
+ {
+ rcStrict = IOMIOPortReadString(pVM, pVCpu, pRun->io.port, uPtrData.pv, &cTransfers, pRun->io.size);
+ Log4(("IOExit/%u: %04x:%08RX64: REP INS %#x LB %u * %#x times -> rcStrict=%Rrc cTransfers=%d\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->io.port, pRun->io.size, pRun->io.count, VBOXSTRICTRC_VAL(rcStrict), cTransfers ));
+ }
+ else
+ {
+ rcStrict = IOMIOPortWriteString(pVM, pVCpu, pRun->io.port, uPtrData.pv, &cTransfers, pRun->io.size);
+ Log4(("IOExit/%u: %04x:%08RX64: REP OUTS %#x LB %u * %#x times -> rcStrict=%Rrc cTransfers=%d\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->io.port, pRun->io.size, pRun->io.count, VBOXSTRICTRC_VAL(rcStrict), cTransfers ));
+ }
+ Assert(cTransfers == 0);
+ }
+ return rcStrict;
+}
+
+
+/**
+ * Handles KVM_EXIT_MMIO.
+ */
+static VBOXSTRICTRC nemHCLnxHandleExitMmio(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
+{
+ /*
+ * Input validation.
+ */
+ Assert(pRun->mmio.len <= sizeof(pRun->mmio.data));
+ Assert(pRun->mmio.is_write <= 1);
+
+ /*
+ * We cannot easily act on the exit history here, because the MMIO exit
+ * is stateful and the instruction will be completed in the next KVM_RUN
+ * call. There seems to be no way to circumvent this.
+ */
+ EMHistoryAddExit(pVCpu,
+ pRun->mmio.is_write
+ ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
+ : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+
+ /*
+ * Do the requested job.
+ */
+ VBOXSTRICTRC rcStrict;
+ if (pRun->mmio.is_write)
+ {
+ rcStrict = PGMPhysWrite(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
+ Log4(("MmioExit/%u: %04x:%08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
+ }
+ else
+ {
+ rcStrict = PGMPhysRead(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
+ Log4(("MmioExit/%u: %04x:%08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
+ }
+ return rcStrict;
+}
+
+
+/**
+ * Handles KVM_EXIT_RDMSR
+ */
+static VBOXSTRICTRC nemHCLnxHandleExitRdMsr(PVMCPUCC pVCpu, struct kvm_run *pRun)
+{
+ /*
+ * Input validation.
+ */
+ Assert( pRun->msr.reason == KVM_MSR_EXIT_REASON_INVAL
+ || pRun->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN
+ || pRun->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
+
+ /*
+ * We cannot easily act on the exit history here, because the MSR exit is
+ * stateful and the instruction will be completed in the next KVM_RUN call.
+ * There seems to be no way to circumvent this.
+ */
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+
+ /*
+ * Do the requested job.
+ */
+ uint64_t uValue = 0;
+ VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRun->msr.index, &uValue);
+ pRun->msr.data = uValue;
+ if (rcStrict != VERR_CPUM_RAISE_GP_0)
+ {
+ Log3(("MsrRead/%u: %04x:%08RX64: msr=%#010x (reason=%#x) -> %#RX64 rcStrict=%Rrc\n", pVCpu->idCpu,
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->msr.index, pRun->msr.reason, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
+ pRun->msr.error = 0;
+ }
+ else
+ {
+ Log3(("MsrRead/%u: %04x:%08RX64: msr=%#010x (reason%#x)-> %#RX64 rcStrict=#GP!\n", pVCpu->idCpu,
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->msr.index, pRun->msr.reason, uValue));
+ pRun->msr.error = 1;
+ rcStrict = VINF_SUCCESS;
+ }
+ return rcStrict;
+}
+
+
+/**
+ * Handles KVM_EXIT_WRMSR
+ */
+static VBOXSTRICTRC nemHCLnxHandleExitWrMsr(PVMCPUCC pVCpu, struct kvm_run *pRun)
+{
+ /*
+ * Input validation.
+ */
+ Assert( pRun->msr.reason == KVM_MSR_EXIT_REASON_INVAL
+ || pRun->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN
+ || pRun->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
+
+ /*
+ * We cannot easily act on the exit history here, because the MSR exit is
+ * stateful and the instruction will be completed in the next KVM_RUN call.
+ * There seems to be no way to circumvent this.
+ */
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+
+ /*
+ * Do the requested job.
+ */
+ VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRun->msr.index, pRun->msr.data);
+ if (rcStrict != VERR_CPUM_RAISE_GP_0)
+ {
+ Log3(("MsrWrite/%u: %04x:%08RX64: msr=%#010x := %#RX64 (reason=%#x) -> rcStrict=%Rrc\n", pVCpu->idCpu,
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->msr.index, pRun->msr.data, pRun->msr.reason, VBOXSTRICTRC_VAL(rcStrict) ));
+ pRun->msr.error = 0;
+ }
+ else
+ {
+ Log3(("MsrWrite/%u: %04x:%08RX64: msr=%#010x := %#RX64 (reason%#x)-> rcStrict=#GP!\n", pVCpu->idCpu,
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->msr.index, pRun->msr.data, pRun->msr.reason));
+ pRun->msr.error = 1;
+ rcStrict = VINF_SUCCESS;
+ }
+ return rcStrict;
+}
+
+
+
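+/**
+ * Handles an exit from KVM_RUN, dispatching on kvm_run::exit_reason.
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context CPU structure.
+ * @param pRun The KVM run record.
+ * @param pfStatefulExit Where to indicate whether the exit is stateful and
+ * must be flushed before returning to EM (I/O, MMIO and MSR exits).
+ */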
+static VBOXSTRICTRC nemHCLnxHandleExit(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun, bool *pfStatefulExit)
+{
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTotal);
+ switch (pRun->exit_reason)
+ {
+ case KVM_EXIT_EXCEPTION:
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_IO:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIo);
+ *pfStatefulExit = true;
+ return nemHCLnxHandleExitIo(pVM, pVCpu, pRun);
+
+ case KVM_EXIT_MMIO:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMmio);
+ *pfStatefulExit = true;
+ return nemHCLnxHandleExitMmio(pVM, pVCpu, pRun);
+
+ case KVM_EXIT_IRQ_WINDOW_OPEN:
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIrqWindowOpen);
+ Log5(("IrqWinOpen/%u: %d\n", pVCpu->idCpu, pRun->request_interrupt_window));
+ pRun->request_interrupt_window = 0;
+ return VINF_SUCCESS;
+
+ case KVM_EXIT_SET_TPR:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitSetTpr);
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_TPR_ACCESS:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTprAccess);
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_X86_RDMSR:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitRdMsr);
+ *pfStatefulExit = true;
+ return nemHCLnxHandleExitRdMsr(pVCpu, pRun);
+
+ case KVM_EXIT_X86_WRMSR:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitWrMsr);
+ *pfStatefulExit = true;
+ return nemHCLnxHandleExitWrMsr(pVCpu, pRun);
+
+ case KVM_EXIT_HLT:
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
+ Log5(("Halt/%u\n", pVCpu->idCpu));
+ return VINF_EM_HALT;
+
+ case KVM_EXIT_INTR: /* EINTR */
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERRUPTED),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIntr);
+ Log5(("Intr/%u\n", pVCpu->idCpu));
+ return VINF_SUCCESS;
+
+ case KVM_EXIT_HYPERCALL:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHypercall);
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_DEBUG:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitDebug);
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_SYSTEM_EVENT:
+ AssertFailed();
+ break;
+ case KVM_EXIT_IOAPIC_EOI:
+ AssertFailed();
+ break;
+ case KVM_EXIT_HYPERV:
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_DIRTY_RING_FULL:
+ AssertFailed();
+ break;
+ case KVM_EXIT_AP_RESET_HOLD:
+ AssertFailed();
+ break;
+ case KVM_EXIT_X86_BUS_LOCK:
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitBusLock);
+ AssertFailed();
+ break;
+
+
+ case KVM_EXIT_SHUTDOWN:
+ AssertFailed();
+ break;
+
+ case KVM_EXIT_FAIL_ENTRY:
+ LogRel(("NEM: KVM_EXIT_FAIL_ENTRY! hardware_entry_failure_reason=%#x cpu=%#x\n",
+ pRun->fail_entry.hardware_entry_failure_reason, pRun->fail_entry.cpu));
+ EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_FAILED_ENTRY),
+ pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+ return VERR_NEM_IPE_1;
+
+ case KVM_EXIT_INTERNAL_ERROR:
+ /* we're counting sub-reasons inside the function. */
+ return nemR3LnxHandleInternalError(pVCpu, pRun);
+
+ /*
+ * Foreign and unknowns.
+ */
+ case KVM_EXIT_NMI:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_NMI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_EPR:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_EPR on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_WATCHDOG:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_WATCHDOG on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_ARM_NISV:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_ARM_NISV on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_S390_STSI:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_STSI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_S390_TSCH:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_TSCH on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_OSI:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_OSI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_PAPR_HCALL:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_PAPR_HCALL on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_S390_UCONTROL:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_UCONTROL on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_DCR:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_DCR on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_S390_SIEIC:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_SIEIC on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_S390_RESET:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_RESET on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_UNKNOWN:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_UNKNOWN on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ case KVM_EXIT_XEN:
+ AssertLogRelMsgFailedReturn(("KVM_EXIT_XEN on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ default:
+ AssertLogRelMsgFailedReturn(("Unknown exit reason %u on VCpu #%u at %04x:%RX64!\n", pRun->exit_reason, pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
+ }
+
+ RT_NOREF(pVM, pVCpu, pRun);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
+{
+ /*
+ * Try switch to NEM runloop state.
+ */
+ if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
+ { /* likely */ }
+ else
+ {
+ VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
+ LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * The run loop.
+ */
+ struct kvm_run * const pRun = pVCpu->nem.s.pRun;
+ const bool fSingleStepping = DBGFIsStepping(pVCpu);
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+ bool fStatefulExit = false; /* For MMIO and IO exits. */
+ for (unsigned iLoop = 0;; iLoop++)
+ {
+ /*
+ * Pending interrupts or such? Need to check and deal with this prior
+ * to the state syncing.
+ */
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+ {
+ /* Try inject interrupt. */
+ rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu, pRun);
+ if (rcStrict == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: nemHCLnxHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
+ break;
+ }
+ }
+
+ /*
+ * Do not execute in KVM if the A20 isn't enabled.
+ */
+ if (PGMPhysIsA20Enabled(pVCpu))
+ { /* likely */ }
+ else
+ {
+ rcStrict = VINF_EM_RESCHEDULE_REM;
+ LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
+ break;
+ }
+
+ /*
+ * Ensure KVM has the whole state.
+ */
+ if ((pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL)
+ {
+ int rc2 = nemHCLnxExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, pRun);
+ AssertRCReturn(rc2, rc2);
+ }
+
+ /*
+ * Poll timers and run for a bit.
+ *
+ * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
+ * so we take the time of the next timer event and use that as a deadline.
+ * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
+ */
+ /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
+ * the whole polling job when timers have changed... */
+ uint64_t offDeltaIgnored;
+ uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
+ if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
+ && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+ {
+ if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
+ {
+ LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
+ pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+ !!(pRun->s.regs.regs.rflags & X86_EFL_IF), pRun->s.regs.regs.rflags,
+ pRun->s.regs.sregs.ss.selector, pRun->s.regs.regs.rsp, pRun->s.regs.sregs.cr0));
+ TMNotifyStartOfExecution(pVM, pVCpu);
+
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
+
+ VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
+ TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
+
+#ifdef LOG_ENABLED
+ if (LogIsFlowEnabled())
+ {
+ struct kvm_mp_state MpState = {UINT32_MAX};
+ ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MP_STATE, &MpState);
+ LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x %#lx\n", pVCpu->idCpu,
+ pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
+ pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
+ pRun->ready_for_interrupt_injection, pRun->flags, MpState.mp_state));
+ }
+#endif
+ fStatefulExit = false;
+ if (RT_LIKELY(rcLnx == 0 || errno == EINTR))
+ {
+ /*
+ * Deal with the exit.
+ */
+ rcStrict = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
+ if (rcStrict == VINF_SUCCESS)
+ { /* hopefully likely */ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: nemHCLnxHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
+ break;
+ }
+ }
+ else
+ {
+ int rc2 = RTErrConvertFromErrno(errno);
+ AssertLogRelMsgFailedReturn(("KVM_RUN failed: rcLnx=%d errno=%u rc=%Rrc\n", rcLnx, errno, rc2), rc2);
+ }
+
+ /*
+ * If no relevant FFs are pending, loop.
+ */
+ if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
+ && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
+ { /* likely */ }
+ else
+ {
+ /** @todo Try to handle pending flags here instead of just returning to
+ * the EM loops. Take care not to set important RCs here unless
+ * we've handled an exit. */
+ LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
+ pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
+ break;
+ }
+ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
+ break;
+ }
+ }
+ else
+ {
+ LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
+ break;
+ }
+ } /* the run loop */
+
+
+ /*
+ * If the last exit was stateful, commit the state we provided before
+ * returning to the EM loop so we have a consistent state and can safely
+ * be rescheduled and whatnot. This may require us to make multiple runs
+ * for larger MMIO and I/O operations. Sigh^3.
+ *
+ * Note! There is no way to reset the kernel-side completion callback
+ * for these stateful I/O exits. Very annoying interface.
+ */
+ /** @todo check how this works with string I/O and string MMIO. */
+ if (fStatefulExit && RT_SUCCESS(rcStrict))
+ {
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn);
+ uint32_t const uOrgExit = pRun->exit_reason;
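+ /* Note: with immediate_exit set, KVM_RUN completes any pending instruction
+ emulation and then returns -1/EINTR without entering the guest, which
+ is exactly the flush we need here. */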
+ for (uint32_t i = 0; ; i++)
+ {
+ pRun->immediate_exit = 1;
+ int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
+ Log(("NEM/%u: Flushed stateful exit -> %d/%d exit_reason=%d\n", pVCpu->idCpu, rcLnx, errno, pRun->exit_reason));
+ if (rcLnx == -1 && errno == EINTR)
+ {
+ switch (i)
+ {
+ case 0: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn1Loop); break;
+ case 1: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn2Loops); break;
+ case 2: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn3Loops); break;
+ default: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn4PlusLoops); break;
+ }
+ break;
+ }
+ AssertLogRelMsgBreakStmt(rcLnx == 0 && pRun->exit_reason == uOrgExit,
+ ("rcLnx=%d errno=%d exit_reason=%d uOrgExit=%d\n", rcLnx, errno, pRun->exit_reason, uOrgExit),
+ rcStrict = VERR_NEM_IPE_6);
+ VBOXSTRICTRC rcStrict2 = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
+ if (rcStrict2 == VINF_SUCCESS || rcStrict2 == rcStrict)
+ { /* likely */ }
+ else if (RT_FAILURE(rcStrict2))
+ {
+ rcStrict = rcStrict2;
+ break;
+ }
+ else
+ {
+ AssertLogRelMsgBreakStmt(rcStrict == VINF_SUCCESS,
+ ("rcStrict=%Rrc rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2)),
+ rcStrict = VERR_NEM_IPE_7);
+ rcStrict = rcStrict2;
+ }
+ }
+ pRun->immediate_exit = 0;
+ }
+
+ /*
+ * If the CPU is running, make sure to stop it before we try sync back the
+ * state and return to EM. We don't sync back the whole state if we can help it.
+ */
+ if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
+ VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
+
+ if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
+ {
+ /* Try anticipate what we might need. */
+ uint64_t fImport = CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI /* Required for processing APIC,PIC,NMI & SMI FFs. */
+ | IEM_CPUMCTX_EXTRN_MUST_MASK /*?*/;
+ if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
+ || RT_FAILURE(rcStrict))
+ fImport = CPUMCTX_EXTRN_ALL;
+# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
+ else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
+ || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
+ fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS;
+ else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
+ fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS;
+# endif
+ else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
+ | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+ fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
+
+ if (pVCpu->cpum.GstCtx.fExtrn & fImport)
+ {
+ int rc2 = nemHCLnxImportState(pVCpu, fImport, &pVCpu->cpum.GstCtx, pRun);
+ if (RT_SUCCESS(rc2))
+ pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
+ else if (RT_SUCCESS(rcStrict))
+ rcStrict = rc2;
+ if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
+ }
+ else
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
+ }
+ else
+ {
+ pVCpu->cpum.GstCtx.fExtrn = 0;
+ STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
+ }
+
+ LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+ pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
+ return rcStrict;
+}
+
+
+/** @page pg_nem_linux NEM/linux - Native Execution Manager, Linux.
+ *
+ * This backend is implemented on top of the Linux KVM API: guest state is
+ * exchanged through the shared kvm_run mapping and per-vCPU ioctls, and
+ * guest code is executed via KVM_RUN.
+ */
+
diff --git a/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp b/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
new file mode 100644
index 00000000..f3044e16
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
@@ -0,0 +1,3019 @@
+/* $Id: NEMR3Native-win.cpp $ */
+/** @file
+ * NEM - Native execution manager, native ring-3 Windows backend.
+ *
+ * Log group 2: Exit logging.
+ * Log group 3: Log context on exit.
+ * Log group 5: Ring-3 memory management
+ * Log group 6: Ring-0 memory management
+ * Log group 12: API intercepts.
+ */
+
+/*
+ * Copyright (C) 2018-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NEM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <iprt/nt/nt-and-windows.h>
+#include <iprt/nt/hyperv.h>
+#include <iprt/nt/vid.h>
+#include <WinHvPlatform.h>
+
+#ifndef _WIN32_WINNT_WIN10
+# error "Missing _WIN32_WINNT_WIN10"
+#endif
+#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
+# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
+#endif
+#include <sysinfoapi.h>
+#include <debugapi.h>
+#include <errhandlingapi.h>
+#include <fileapi.h>
+#include <winerror.h> /* no api header for this. */
+
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/dbgftrace.h>
+#include "NEMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <iprt/ldr.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+#include <iprt/system.h>
+#include <iprt/utf16.h>
+
+#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
+HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
+# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
+#endif
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#ifdef LOG_ENABLED
+# define NEM_WIN_INTERCEPT_NT_IO_CTLS
+#endif
+
+/** VID I/O control detection: Fake partition handle input. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE ((HANDLE)(uintptr_t)38479125)
+/** VID I/O control detection: Fake partition ID return. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID UINT64_C(0xfa1e000042424242)
+/** VID I/O control detection: The property we get via VidGetPartitionProperty. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_CODE HvPartitionPropertyProcessorVendor
+/** VID I/O control detection: Fake property value return. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_VALUE UINT64_C(0xf00dface01020304)
+/** VID I/O control detection: Fake CPU index input. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX UINT32_C(42)
+/** VID I/O control detection: Fake timeout input. */
+#define NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT UINT32_C(0x00080286)
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** @name APIs imported from WinHvPlatform.dll
+ * @{ */
+static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
+static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
+static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
+static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
+static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
+static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
+static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
+static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
+static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
+static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
+static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
+static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
+static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
+static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
+static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
+static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
+/** @} */
+
+/** @name APIs imported from Vid.dll
+ * @{ */
+static decltype(VidGetHvPartitionId) *g_pfnVidGetHvPartitionId;
+static decltype(VidGetPartitionProperty) *g_pfnVidGetPartitionProperty;
+#ifdef LOG_ENABLED
+static decltype(VidStartVirtualProcessor) *g_pfnVidStartVirtualProcessor;
+static decltype(VidStopVirtualProcessor) *g_pfnVidStopVirtualProcessor;
+static decltype(VidMessageSlotMap) *g_pfnVidMessageSlotMap;
+static decltype(VidMessageSlotHandleAndGetNext) *g_pfnVidMessageSlotHandleAndGetNext;
+static decltype(VidGetVirtualProcessorState) *g_pfnVidGetVirtualProcessorState;
+static decltype(VidSetVirtualProcessorState) *g_pfnVidSetVirtualProcessorState;
+static decltype(VidGetVirtualProcessorRunningStatus) *g_pfnVidGetVirtualProcessorRunningStatus;
+#endif
+/** @} */
+
+/** The Windows build number (defaults to 17134, i.e. Windows 10 version 1803). */
+static uint32_t g_uBuildNo = 17134;
+
+
+
+/**
+ * Import instructions.
+ */
+static const struct
+{
+ uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
+ bool fOptional; /**< Set if import is optional. */
+ PFNRT *ppfn; /**< The function pointer variable. */
+ const char *pszName; /**< The function name. */
+} g_aImports[] =
+{
+#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
+ NEM_WIN_IMPORT(0, false, WHvGetCapability),
+ NEM_WIN_IMPORT(0, false, WHvCreatePartition),
+ NEM_WIN_IMPORT(0, false, WHvSetupPartition),
+ NEM_WIN_IMPORT(0, false, WHvDeletePartition),
+ NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
+ NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
+ NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
+ NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
+ NEM_WIN_IMPORT(0, false, WHvTranslateGva),
+ NEM_WIN_IMPORT(0, true, WHvQueryGpaRangeDirtyBitmap),
+ NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
+ NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
+ NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
+ NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
+ NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
+ NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
+
+ NEM_WIN_IMPORT(1, true, VidGetHvPartitionId),
+ NEM_WIN_IMPORT(1, true, VidGetPartitionProperty),
+#ifdef LOG_ENABLED
+ NEM_WIN_IMPORT(1, false, VidMessageSlotMap),
+ NEM_WIN_IMPORT(1, false, VidMessageSlotHandleAndGetNext),
+ NEM_WIN_IMPORT(1, false, VidStartVirtualProcessor),
+ NEM_WIN_IMPORT(1, false, VidStopVirtualProcessor),
+ NEM_WIN_IMPORT(1, false, VidGetVirtualProcessorState),
+ NEM_WIN_IMPORT(1, false, VidSetVirtualProcessorState),
+ NEM_WIN_IMPORT(1, false, VidGetVirtualProcessorRunningStatus),
+#endif
+#undef NEM_WIN_IMPORT
+};
+
+
+/** The real NtDeviceIoControlFile API in NTDLL. */
+static decltype(NtDeviceIoControlFile) *g_pfnNtDeviceIoControlFile;
+/** Pointer to the NtDeviceIoControlFile import table entry. */
+static decltype(NtDeviceIoControlFile) **g_ppfnVidNtDeviceIoControlFile;
+#ifdef LOG_ENABLED
+/** Info about the VidGetHvPartitionId I/O control interface. */
+static NEMWINIOCTL g_IoCtlGetHvPartitionId;
+/** Info about the VidGetPartitionProperty I/O control interface. */
+static NEMWINIOCTL g_IoCtlGetPartitionProperty;
+/** Info about the VidStartVirtualProcessor I/O control interface. */
+static NEMWINIOCTL g_IoCtlStartVirtualProcessor;
+/** Info about the VidStopVirtualProcessor I/O control interface. */
+static NEMWINIOCTL g_IoCtlStopVirtualProcessor;
+/** Info about the VidMessageSlotHandleAndGetNext I/O control interface. */
+static NEMWINIOCTL g_IoCtlMessageSlotHandleAndGetNext;
+/** Info about the VidMessageSlotMap I/O control interface - for logging. */
+static NEMWINIOCTL g_IoCtlMessageSlotMap;
+/** Info about the VidGetVirtualProcessorState I/O control interface - for logging. */
+static NEMWINIOCTL g_IoCtlGetVirtualProcessorState;
+/** Info about the VidSetVirtualProcessorState I/O control interface - for logging. */
+static NEMWINIOCTL g_IoCtlSetVirtualProcessorState;
+/** Pointer to what nemR3WinIoctlDetector_ForLogging should fill in. */
+static NEMWINIOCTL *g_pIoCtlDetectForLogging;
+#endif
+
+#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
+/** Mapping slot for CPU #0.
+ * @{ */
+static VID_MESSAGE_MAPPING_HEADER *g_pMsgSlotMapping = NULL;
+static const HV_MESSAGE_HEADER *g_pHvMsgHdr;
+static const HV_X64_INTERCEPT_MESSAGE_HEADER *g_pX64MsgHdr;
+/** @} */
+#endif
+
+
+/*
+ * Let the preprocessor alias the APIs to import variables for better autocompletion.
+ */
+#ifndef IN_SLICKEDIT
+# define WHvGetCapability g_pfnWHvGetCapability
+# define WHvCreatePartition g_pfnWHvCreatePartition
+# define WHvSetupPartition g_pfnWHvSetupPartition
+# define WHvDeletePartition g_pfnWHvDeletePartition
+# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
+# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
+# define WHvMapGpaRange g_pfnWHvMapGpaRange
+# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
+# define WHvTranslateGva g_pfnWHvTranslateGva
+# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
+# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
+# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
+# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
+# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
+# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
+# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
+# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
+
+# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
+# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
+# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
+
+#endif
+
+/** WHV_MEMORY_ACCESS_TYPE names */
+static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
+
+/*
+ * Instantiate the code we used to share with ring-0.
+ */
+#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
+
+
+
+#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
+/**
+ * Wrapper that logs the call from VID.DLL.
+ *
+ * This is very handy for figuring out why an API call fails.
+ */
+static NTSTATUS WINAPI
+nemR3WinLogWrapper_NtDeviceIoControlFile(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+
+ char szFunction[32];
+ const char *pszFunction;
+ if (uFunction == g_IoCtlMessageSlotHandleAndGetNext.uFunction)
+ pszFunction = "VidMessageSlotHandleAndGetNext";
+ else if (uFunction == g_IoCtlStartVirtualProcessor.uFunction)
+ pszFunction = "VidStartVirtualProcessor";
+ else if (uFunction == g_IoCtlStopVirtualProcessor.uFunction)
+ pszFunction = "VidStopVirtualProcessor";
+ else if (uFunction == g_IoCtlMessageSlotMap.uFunction)
+ pszFunction = "VidMessageSlotMap";
+ else if (uFunction == g_IoCtlGetVirtualProcessorState.uFunction)
+ pszFunction = "VidGetVirtualProcessorState";
+ else if (uFunction == g_IoCtlSetVirtualProcessorState.uFunction)
+ pszFunction = "VidSetVirtualProcessorState";
+ else
+ {
+ RTStrPrintf(szFunction, sizeof(szFunction), "%#x", uFunction);
+ pszFunction = szFunction;
+ }
+
+ if (cbInput > 0 && pvInput)
+ Log12(("VID!NtDeviceIoControlFile: %s/input: %.*Rhxs\n", pszFunction, RT_MIN(cbInput, 32), pvInput));
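+    /* Forward to the real NTDLL API, then log: plain synchronous calls (no event or APC context) get the shorter line. */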
+ NTSTATUS rcNt = g_pfnNtDeviceIoControlFile(hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, uFunction,
+ pvInput, cbInput, pvOutput, cbOutput);
+ if (!hEvt && !pfnApcCallback && !pvApcCtx)
+ Log12(("VID!NtDeviceIoControlFile: hFile=%#zx pIos=%p->{s:%#x, i:%#zx} uFunction=%s Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
+ hFile, pIos, pIos->Status, pIos->Information, pszFunction, pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
+ else
+ Log12(("VID!NtDeviceIoControlFile: hFile=%#zx hEvt=%#zx Apc=%p/%p pIos=%p->{s:%#x, i:%#zx} uFunction=%s Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
+ hFile, hEvt, RT_CB_LOG_CAST(pfnApcCallback), pvApcCtx, pIos, pIos->Status, pIos->Information, pszFunction,
+ pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
+ if (cbOutput > 0 && pvOutput)
+ {
+ Log12(("VID!NtDeviceIoControlFile: %s/output: %.*Rhxs\n", pszFunction, RT_MIN(cbOutput, 32), pvOutput));
+ if (uFunction == 0x2210cc && g_pMsgSlotMapping == NULL && cbOutput >= sizeof(void *))
+ {
+ g_pMsgSlotMapping = *(VID_MESSAGE_MAPPING_HEADER **)pvOutput;
+ g_pHvMsgHdr = (const HV_MESSAGE_HEADER *)(g_pMsgSlotMapping + 1);
+ g_pX64MsgHdr = (const HV_X64_INTERCEPT_MESSAGE_HEADER *)(g_pHvMsgHdr + 1);
+ Log12(("VID!NtDeviceIoControlFile: Message slot mapping: %p\n", g_pMsgSlotMapping));
+ }
+ }
+ if ( g_pMsgSlotMapping
+ && ( uFunction == g_IoCtlMessageSlotHandleAndGetNext.uFunction
+ || uFunction == g_IoCtlStopVirtualProcessor.uFunction
+ || uFunction == g_IoCtlMessageSlotMap.uFunction
+ ))
+ Log12(("VID!NtDeviceIoControlFile: enmVidMsgType=%#x cb=%#x msg=%#x payload=%u cs:rip=%04x:%08RX64 (%s)\n",
+ g_pMsgSlotMapping->enmVidMsgType, g_pMsgSlotMapping->cbMessage,
+ g_pHvMsgHdr->MessageType, g_pHvMsgHdr->PayloadSize,
+ g_pX64MsgHdr->CsSegment.Selector, g_pX64MsgHdr->Rip, pszFunction));
+
+ return rcNt;
+}
+#endif /* NEM_WIN_INTERCEPT_NT_IO_CTLS */
+
+
+/**
+ * Patches the call table of VID.DLL so we can intercept NtDeviceIoControlFile.
+ *
+ * This is used to figure out the I/O control codes and, in logging builds, to
+ * log the API calls that WinHvPlatform.dll makes.
+ *
+ * @returns VBox status code.
+ * @param hLdrModVid The VID module handle.
+ * @param pErrInfo Where to return additional error information.
+ */
+static int nemR3WinInitVidIntercepts(RTLDRMOD hLdrModVid, PRTERRINFO pErrInfo)
+{
+ /*
+ * Locate the real API.
+ */
+ g_pfnNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) *)RTLdrGetSystemSymbol("NTDLL.DLL", "NtDeviceIoControlFile");
+ AssertReturn(g_pfnNtDeviceIoControlFile != NULL,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Failed to resolve NtDeviceIoControlFile from NTDLL.DLL"));
+
+ /*
+ * Locate the PE header and get what we need from it.
+ */
+ uint8_t const *pbImage = (uint8_t const *)RTLdrGetNativeHandle(hLdrModVid);
+ IMAGE_DOS_HEADER const *pMzHdr = (IMAGE_DOS_HEADER const *)pbImage;
+ AssertReturn(pMzHdr->e_magic == IMAGE_DOS_SIGNATURE,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL mapping doesn't start with MZ signature: %#x", pMzHdr->e_magic));
+ IMAGE_NT_HEADERS const *pNtHdrs = (IMAGE_NT_HEADERS const *)&pbImage[pMzHdr->e_lfanew];
+    AssertReturn(pNtHdrs->Signature == IMAGE_NT_SIGNATURE,
+                 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL has invalid PE signature: %#x @%#x",
+                               pNtHdrs->Signature, pMzHdr->e_lfanew));
+
+ uint32_t const cbImage = pNtHdrs->OptionalHeader.SizeOfImage;
+ IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
+
+ /*
+ * Walk the import descriptor table looking for NTDLL.DLL.
+ */
+ AssertReturn( ImportDir.Size > 0
+ && ImportDir.Size < cbImage,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory size: %#x", ImportDir.Size));
+ AssertReturn( ImportDir.VirtualAddress > 0
+ && ImportDir.VirtualAddress <= cbImage - ImportDir.Size,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory RVA: %#x", ImportDir.VirtualAddress));
+
+ for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
+ pImps->Name != 0 && pImps->FirstThunk != 0;
+ pImps++)
+ {
+ AssertReturn(pImps->Name < cbImage,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory entry name: %#x", pImps->Name));
+ const char *pszModName = (const char *)&pbImage[pImps->Name];
+ if (RTStrICmpAscii(pszModName, "ntdll.dll"))
+ continue;
+ AssertReturn(pImps->FirstThunk < cbImage,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad FirstThunk: %#x", pImps->FirstThunk));
+        AssertReturn(pImps->OriginalFirstThunk < cbImage,
+                     RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad OriginalFirstThunk: %#x", pImps->OriginalFirstThunk));
+
+ /*
+ * Walk the thunks table(s) looking for NtDeviceIoControlFile.
+ */
+ uintptr_t *puFirstThunk = (uintptr_t *)&pbImage[pImps->FirstThunk]; /* update this. */
+ if ( pImps->OriginalFirstThunk != 0
+ && pImps->OriginalFirstThunk != pImps->FirstThunk)
+ {
+ uintptr_t const *puOrgThunk = (uintptr_t const *)&pbImage[pImps->OriginalFirstThunk]; /* read from this. */
+ uintptr_t cLeft = (cbImage - (RT_MAX(pImps->FirstThunk, pImps->OriginalFirstThunk)))
+ / sizeof(*puFirstThunk);
+ while (cLeft-- > 0 && *puOrgThunk != 0)
+ {
+ if (!(*puOrgThunk & IMAGE_ORDINAL_FLAG64)) /* ASSUMES 64-bit */
+ {
+ AssertReturn(*puOrgThunk > 0 && *puOrgThunk < cbImage,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad thunk entry: %#x", *puOrgThunk));
+
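+                    /* The thunk holds an RVA of an IMAGE_IMPORT_BY_NAME: a 2-byte Hint followed by the zero-terminated symbol name, hence the +2 below. */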
+ const char *pszSymbol = (const char *)&pbImage[*puOrgThunk + 2];
+ if (strcmp(pszSymbol, "NtDeviceIoControlFile") == 0)
+ g_ppfnVidNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) **)puFirstThunk;
+ }
+
+ puOrgThunk++;
+ puFirstThunk++;
+ }
+ }
+ else
+ {
+ /* No original thunk table, so scan the resolved symbols for a match
+ with the NtDeviceIoControlFile address. */
+ uintptr_t const uNeedle = (uintptr_t)g_pfnNtDeviceIoControlFile;
+ uintptr_t cLeft = (cbImage - pImps->FirstThunk) / sizeof(*puFirstThunk);
+ while (cLeft-- > 0 && *puFirstThunk != 0)
+ {
+ if (*puFirstThunk == uNeedle)
+ g_ppfnVidNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) **)puFirstThunk;
+ puFirstThunk++;
+ }
+ }
+ }
+
+ if (g_ppfnVidNtDeviceIoControlFile != NULL)
+ {
+        /* Make the thunk writable so we can freely modify it. */
+ DWORD fOldProt = PAGE_READONLY;
+ VirtualProtect((void *)(uintptr_t)g_ppfnVidNtDeviceIoControlFile, sizeof(uintptr_t), PAGE_EXECUTE_READWRITE, &fOldProt);
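+        /* Note: the VirtualProtect status is not checked; if it failed, the write below would fault. */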
+
+#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinLogWrapper_NtDeviceIoControlFile;
+#endif
+ return VINF_SUCCESS;
+ }
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Failed to patch NtDeviceIoControlFile import in VID.DLL!");
+}
+
+
+/**
+ * Worker for nemR3NativeInit that probes and load the native API.
+ *
+ * @returns VBox status code.
+ * @param fForced Whether the HMForced flag is set and we should
+ * fail if we cannot initialize.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
+{
+ /*
+ * Check that the DLL files we need are present, but without loading them.
+ * We'd like to avoid loading them unnecessarily.
+ */
+ WCHAR wszPath[MAX_PATH + 64];
+ UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
+ if (cwcPath >= MAX_PATH || cwcPath < 2)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
+
+    if (wszPath[cwcPath - 1] != '\\' && wszPath[cwcPath - 1] != '/')
+ wszPath[cwcPath++] = '\\';
+ RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
+ if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
+
+ /*
+ * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
+ */
+ if (!ASMHasCpuId())
+ return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID support");
+ if (!RTX86IsValidStdRange(ASMCpuId_EAX(0)))
+ return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID leaf #1");
+ if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_HVP))
+ return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Not in a hypervisor partition (HVP=0)");
+
+ uint32_t cMaxHyperLeaf = 0;
+ uint32_t uEbx = 0;
+ uint32_t uEcx = 0;
+ uint32_t uEdx = 0;
+ ASMCpuIdExSlow(0x40000000, 0, 0, 0, &cMaxHyperLeaf, &uEbx, &uEcx, &uEdx);
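+    /* Hypervisor CPUID leaf 0x40000000: EAX returns the highest hypervisor leaf, EBX:ECX:EDX the vendor signature ("Microsoft Hv" for Hyper-V). */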
+ if (!RTX86IsValidHypervisorRange(cMaxHyperLeaf))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Invalid hypervisor CPUID range (%#x %#x %#x %#x)",
+ cMaxHyperLeaf, uEbx, uEcx, uEdx);
+ if ( uEbx != UINT32_C(0x7263694d) /* Micr */
+ || uEcx != UINT32_C(0x666f736f) /* osof */
+ || uEdx != UINT32_C(0x76482074) /* t Hv */)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
+ "Not Hyper-V CPUID signature: %#x %#x %#x (expected %#x %#x %#x)",
+ uEbx, uEcx, uEdx, UINT32_C(0x7263694d), UINT32_C(0x666f736f), UINT32_C(0x76482074));
+ if (cMaxHyperLeaf < UINT32_C(0x40000005))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Too narrow hypervisor CPUID range (%#x)", cMaxHyperLeaf);
+
+ /** @todo would be great if we could recognize a root partition from the
+ * CPUID info, but I currently don't dare do that. */
+
+ /*
+ * Now try load the DLLs and resolve the APIs.
+ */
+ static const char * const s_apszDllNames[2] = { "WinHvPlatform.dll", "vid.dll" };
+ RTLDRMOD ahMods[2] = { NIL_RTLDRMOD, NIL_RTLDRMOD };
+ int rc = VINF_SUCCESS;
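+    /* Note: fNoUnload=true keeps the modules resident after the RTLdrClose calls below; the raw function pointers (and the patched VID.DLL import) depend on that. */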
+ for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
+ {
+ int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
+ if (RT_FAILURE(rc2))
+ {
+ if (!RTErrInfoIsSet(pErrInfo))
+ RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
+ else
+ RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
+ ahMods[i] = NIL_RTLDRMOD;
+ rc = VERR_NEM_INIT_FAILED;
+ }
+ }
+ if (RT_SUCCESS(rc))
+ rc = nemR3WinInitVidIntercepts(ahMods[1], pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
+ {
+ int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
+ if (RT_SUCCESS(rc2))
+ {
+ if (g_aImports[i].fOptional)
+ LogRel(("NEM: info: Found optional import %s!%s.\n",
+ s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
+ }
+ else
+ {
+ *g_aImports[i].ppfn = NULL;
+
+                LogRel(("NEM: %s: Failed to import %s!%s: %Rrc\n",
+ g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
+ s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
+ if (!g_aImports[i].fOptional)
+ {
+ if (RTErrInfoIsSet(pErrInfo))
+ RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
+ s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
+ else
+ rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
+ s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
+ Assert(RT_FAILURE(rc));
+ }
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ Assert(!RTErrInfoIsSet(pErrInfo));
+ }
+ }
+
+ for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
+ RTLdrClose(ahMods[i]);
+ return rc;
+}
+
+
+/**
+ * Wrapper for different WHvGetCapability signatures.
+ */
+DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
+{
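+    /* The final parameter receives the number of bytes written; we have no use for it, so pass NULL. */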
+ return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
+}
+
+
+/**
+ * Worker for nemR3NativeInit that gets the hypervisor capabilities.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
+{
+#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
+#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
+#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
+
+ /*
+ * Is the hypervisor present with the desired capability?
+ *
+ * In build 17083 this translates into:
+ * - CPUID[0x00000001].HVP is set
+ * - CPUID[0x40000000] == "Microsoft Hv"
+ * - CPUID[0x40000001].eax == "Hv#1"
+ * - CPUID[0x40000003].ebx[12] is set.
+ * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
+ * a non-zero value.
+ */
+    /**
+     * @todo Someone at Microsoft please explain weird API design:
+     * 1. Pointless CapabilityCode duplication in the output;
+     * 2. No output size.
+     */
+ WHV_CAPABILITY Caps;
+ RT_ZERO(Caps);
+ SetLastError(0);
+ HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
+ DWORD rcWin = GetLastError();
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ if (!Caps.HypervisorPresent)
+ {
+ if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
+ "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
+ }
+ LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
+
+
+ /*
+ * Check what extended VM exits are supported.
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
+ pVM->nem.s.fExtendedMsrExit = RT_BOOL(Caps.ExtendedVmExits.X64MsrExit);
+ pVM->nem.s.fExtendedCpuIdExit = RT_BOOL(Caps.ExtendedVmExits.X64CpuidExit);
+ pVM->nem.s.fExtendedXcptExit = RT_BOOL(Caps.ExtendedVmExits.ExceptionExit);
+ NEM_LOG_REL_CAP_SUB("fExtendedMsrExit", pVM->nem.s.fExtendedMsrExit);
+ NEM_LOG_REL_CAP_SUB("fExtendedCpuIdExit", pVM->nem.s.fExtendedCpuIdExit);
+ NEM_LOG_REL_CAP_SUB("fExtendedXcptExit", pVM->nem.s.fExtendedXcptExit);
+ if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
+ LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
+ /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
+
+ /*
+ * Check features in case they end up defining any.
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ if (Caps.Features.AsUINT64 & ~(uint64_t)0)
+ LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
+ /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
+
+ /*
+ * Check supported exception exit bitmap bits.
+ * We don't currently require this, so we just log failure.
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExceptionExitBitmap, &Caps, sizeof(Caps));
+ if (SUCCEEDED(hrc))
+ LogRel(("NEM: Supported exception exit bitmap: %#RX64\n", Caps.ExceptionExitBitmap));
+ else
+        LogRel(("NEM: Warning! WHvGetCapability/WHvCapabilityCodeExceptionExitBitmap failed: %Rhrc (Last=%#x/%u)\n",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+
+ /*
+ * Check that the CPU vendor is supported.
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ switch (Caps.ProcessorVendor)
+ {
+ /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
+ case WHvProcessorVendorIntel:
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - Intel", Caps.ProcessorVendor);
+ pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_INTEL;
+ break;
+ case WHvProcessorVendorAmd:
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - AMD", Caps.ProcessorVendor);
+ pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_AMD;
+ break;
+ default:
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
+ }
+
+ /*
+ * CPU features, guessing these are virtual CPU features?
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
+#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
+ NEM_LOG_REL_CPU_FEATURE(Sse3Support);
+ NEM_LOG_REL_CPU_FEATURE(LahfSahfSupport);
+ NEM_LOG_REL_CPU_FEATURE(Ssse3Support);
+ NEM_LOG_REL_CPU_FEATURE(Sse4_1Support);
+ NEM_LOG_REL_CPU_FEATURE(Sse4_2Support);
+ NEM_LOG_REL_CPU_FEATURE(Sse4aSupport);
+ NEM_LOG_REL_CPU_FEATURE(XopSupport);
+ NEM_LOG_REL_CPU_FEATURE(PopCntSupport);
+ NEM_LOG_REL_CPU_FEATURE(Cmpxchg16bSupport);
+ NEM_LOG_REL_CPU_FEATURE(Altmovcr8Support);
+ NEM_LOG_REL_CPU_FEATURE(LzcntSupport);
+ NEM_LOG_REL_CPU_FEATURE(MisAlignSseSupport);
+ NEM_LOG_REL_CPU_FEATURE(MmxExtSupport);
+ NEM_LOG_REL_CPU_FEATURE(Amd3DNowSupport);
+ NEM_LOG_REL_CPU_FEATURE(ExtendedAmd3DNowSupport);
+ NEM_LOG_REL_CPU_FEATURE(Page1GbSupport);
+ NEM_LOG_REL_CPU_FEATURE(AesSupport);
+ NEM_LOG_REL_CPU_FEATURE(PclmulqdqSupport);
+ NEM_LOG_REL_CPU_FEATURE(PcidSupport);
+ NEM_LOG_REL_CPU_FEATURE(Fma4Support);
+ NEM_LOG_REL_CPU_FEATURE(F16CSupport);
+ NEM_LOG_REL_CPU_FEATURE(RdRandSupport);
+ NEM_LOG_REL_CPU_FEATURE(RdWrFsGsSupport);
+ NEM_LOG_REL_CPU_FEATURE(SmepSupport);
+ NEM_LOG_REL_CPU_FEATURE(EnhancedFastStringSupport);
+ NEM_LOG_REL_CPU_FEATURE(Bmi1Support);
+ NEM_LOG_REL_CPU_FEATURE(Bmi2Support);
+ /* two reserved bits here, see below */
+ NEM_LOG_REL_CPU_FEATURE(MovbeSupport);
+ NEM_LOG_REL_CPU_FEATURE(Npiep1Support);
+ NEM_LOG_REL_CPU_FEATURE(DepX87FPUSaveSupport);
+ NEM_LOG_REL_CPU_FEATURE(RdSeedSupport);
+ NEM_LOG_REL_CPU_FEATURE(AdxSupport);
+ NEM_LOG_REL_CPU_FEATURE(IntelPrefetchSupport);
+ NEM_LOG_REL_CPU_FEATURE(SmapSupport);
+ NEM_LOG_REL_CPU_FEATURE(HleSupport);
+ NEM_LOG_REL_CPU_FEATURE(RtmSupport);
+ NEM_LOG_REL_CPU_FEATURE(RdtscpSupport);
+ NEM_LOG_REL_CPU_FEATURE(ClflushoptSupport);
+ NEM_LOG_REL_CPU_FEATURE(ClwbSupport);
+ NEM_LOG_REL_CPU_FEATURE(ShaSupport);
+ NEM_LOG_REL_CPU_FEATURE(X87PointersSavedSupport);
+#undef NEM_LOG_REL_CPU_FEATURE
+ if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(43) - 1) | RT_BIT_64(27) | RT_BIT_64(28)))
+ LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
+ pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
+ /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
+
+ /*
+ * The cache line flush size.
+ */
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
+    if (Caps.ProcessorClFlushSize < 8 || Caps.ProcessorClFlushSize > 9)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
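+    /* The capability reports the shift count (log2 of the flush size), hence the 2^%u in the log line above. */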
+ pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
+
+ /*
+ * See if they've added more properties that we're not aware of.
+ */
+ /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
+ if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
+ {
+ static const struct
+ {
+            uint32_t iMin, iMax;
+        } s_aUnknowns[] =
+ {
+ { 0x0004, 0x000f },
+ { 0x1003, 0x100f },
+ { 0x2000, 0x200f },
+ { 0x3000, 0x300f },
+ { 0x4000, 0x400f },
+ };
+ for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
+ for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
+ {
+ RT_ZERO(Caps);
+ hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
+ if (SUCCEEDED(hrc))
+ LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
+ }
+ }
+
+ /*
+ * For proper operation, we require CPUID exits.
+ */
+ if (!pVM->nem.s.fExtendedCpuIdExit)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended CPUID exit support");
+ if (!pVM->nem.s.fExtendedMsrExit)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended MSR exit support");
+ if (!pVM->nem.s.fExtendedXcptExit)
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended exception exit support");
+
+#undef NEM_LOG_REL_CAP_EX
+#undef NEM_LOG_REL_CAP_SUB_EX
+#undef NEM_LOG_REL_CAP_SUB
+ return VINF_SUCCESS;
+}
+
+#ifdef LOG_ENABLED
+
+/**
+ * Used to fill in g_IoCtlGetHvPartitionId.
+ */
+static NTSTATUS WINAPI
+nemR3WinIoctlDetector_GetHvPartitionId(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
+ RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
+ AssertLogRelMsgReturn(cbInput == 0, ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
+ RT_NOREF(pvInput);
+
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvOutput), ("pvOutput=%p\n", pvOutput), STATUS_INVALID_PARAMETER_9);
+    AssertLogRelMsgReturn(cbOutput == sizeof(HV_PARTITION_ID), ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ *(HV_PARTITION_ID *)pvOutput = NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID;
+
+ g_IoCtlGetHvPartitionId.cbInput = cbInput;
+ g_IoCtlGetHvPartitionId.cbOutput = cbOutput;
+ g_IoCtlGetHvPartitionId.uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+
+/**
+ * Used to fill in g_IoCtlGetPartitionProperty.
+ */
+static NTSTATUS WINAPI
+nemR3WinIoctlDetector_GetPartitionProperty(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
+ RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
+ AssertLogRelMsgReturn(cbInput == sizeof(VID_PARTITION_PROPERTY_CODE), ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
+ AssertLogRelMsgReturn(*(VID_PARTITION_PROPERTY_CODE *)pvInput == NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_CODE,
+ ("*pvInput=%#x, expected %#x\n", *(HV_PARTITION_PROPERTY_CODE *)pvInput,
+ NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_CODE), STATUS_INVALID_PARAMETER_9);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvOutput), ("pvOutput=%p\n", pvOutput), STATUS_INVALID_PARAMETER_9);
+    AssertLogRelMsgReturn(cbOutput == sizeof(HV_PARTITION_PROPERTY), ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ *(HV_PARTITION_PROPERTY *)pvOutput = NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_VALUE;
+
+ g_IoCtlGetPartitionProperty.cbInput = cbInput;
+ g_IoCtlGetPartitionProperty.cbOutput = cbOutput;
+ g_IoCtlGetPartitionProperty.uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+
+/**
+ * Used to fill in g_IoCtlStartVirtualProcessor.
+ */
+static NTSTATUS WINAPI
+nemR3WinIoctlDetector_StartVirtualProcessor(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
+ RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
+ AssertLogRelMsgReturn(cbInput == sizeof(HV_VP_INDEX), ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
+ AssertLogRelMsgReturn(*(HV_VP_INDEX *)pvInput == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
+ ("*piCpu=%u\n", *(HV_VP_INDEX *)pvInput), STATUS_INVALID_PARAMETER_9);
+    AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ RT_NOREF(pvOutput);
+
+ g_IoCtlStartVirtualProcessor.cbInput = cbInput;
+ g_IoCtlStartVirtualProcessor.cbOutput = cbOutput;
+ g_IoCtlStartVirtualProcessor.uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+
+/**
+ * Used to fill in g_IoCtlStopVirtualProcessor.
+ */
+static NTSTATUS WINAPI
+nemR3WinIoctlDetector_StopVirtualProcessor(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
+ RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
+ AssertLogRelMsgReturn(cbInput == sizeof(HV_VP_INDEX), ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
+ AssertLogRelMsgReturn(*(HV_VP_INDEX *)pvInput == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
+ ("*piCpu=%u\n", *(HV_VP_INDEX *)pvInput), STATUS_INVALID_PARAMETER_9);
+    AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ RT_NOREF(pvOutput);
+
+ g_IoCtlStopVirtualProcessor.cbInput = cbInput;
+ g_IoCtlStopVirtualProcessor.cbOutput = cbOutput;
+ g_IoCtlStopVirtualProcessor.uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+
+/**
+ * Used to fill in g_IoCtlMessageSlotHandleAndGetNext.
+ */
+static NTSTATUS WINAPI
+nemR3WinIoctlDetector_MessageSlotHandleAndGetNext(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
+ RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
+
+ if (g_uBuildNo >= 17758)
+ {
+        /* No timeout since about build 17758; it's now always an infinite wait. So, a somewhat compatible change. */
+ AssertLogRelMsgReturn(cbInput == RT_UOFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
+ ("cbInput=%#x\n", cbInput),
+ STATUS_INVALID_PARAMETER_8);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
+ PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT pVidIn = (PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)pvInput;
+ AssertLogRelMsgReturn( pVidIn->iCpu == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX
+ && pVidIn->fFlags == VID_MSHAGN_F_HANDLE_MESSAGE,
+ ("iCpu=%u fFlags=%#x cMillies=%#x\n", pVidIn->iCpu, pVidIn->fFlags, pVidIn->cMillies),
+ STATUS_INVALID_PARAMETER_9);
+        AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ }
+ else
+ {
+ AssertLogRelMsgReturn(cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), ("cbInput=%#x\n", cbInput),
+ STATUS_INVALID_PARAMETER_8);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
+ PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT pVidIn = (PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)pvInput;
+ AssertLogRelMsgReturn( pVidIn->iCpu == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX
+ && pVidIn->fFlags == VID_MSHAGN_F_HANDLE_MESSAGE
+ && pVidIn->cMillies == NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT,
+ ("iCpu=%u fFlags=%#x cMillies=%#x\n", pVidIn->iCpu, pVidIn->fFlags, pVidIn->cMillies),
+ STATUS_INVALID_PARAMETER_9);
+        AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
+ RT_NOREF(pvOutput);
+ }
+
+ g_IoCtlMessageSlotHandleAndGetNext.cbInput = cbInput;
+ g_IoCtlMessageSlotHandleAndGetNext.cbOutput = cbOutput;
+ g_IoCtlMessageSlotHandleAndGetNext.uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+/**
+ * Used to fill in what g_pIoCtlDetectForLogging points to.
+ */
+static NTSTATUS WINAPI nemR3WinIoctlDetector_ForLogging(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
+ PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
+ PVOID pvOutput, ULONG cbOutput)
+{
+ RT_NOREF(hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pvInput, pvOutput);
+
+ g_pIoCtlDetectForLogging->cbInput = cbInput;
+ g_pIoCtlDetectForLogging->cbOutput = cbOutput;
+ g_pIoCtlDetectForLogging->uFunction = uFunction;
+
+ return STATUS_SUCCESS;
+}
+
+#endif /* LOG_ENABLED */
+
+/**
+ * Worker for nemR3NativeInit that detects the I/O control function numbers for VID.
+ *
+ * We use the function numbers directly in ring-0 and to name functions when
+ * logging NtDeviceIoControlFile calls.
+ *
+ * @note    We could alternatively do this by disassembling the respective
+ *          functions, but hooking NtDeviceIoControlFile and making fake calls
+ *          is the easier way to get the desired information.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure. Will set I/O
+ * control info members.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3WinInitDiscoverIoControlProperties(PVM pVM, PRTERRINFO pErrInfo)
+{
+ RT_NOREF(pVM, pErrInfo);
+
+    /*
+     * Probe the I/O control information for select VID APIs so we can use
+     * them directly from ring-0 and log them more usefully.
+     */
+#ifdef LOG_ENABLED
+ decltype(NtDeviceIoControlFile) * const pfnOrg = *g_ppfnVidNtDeviceIoControlFile;
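+    /* Each probe below swaps a detector into VID.DLL's import slot, makes one fake call, and restores the original pointer right away. */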
+
+ /* VidGetHvPartitionId - must work due to our memory management. */
+ BOOL fRet;
+ if (g_pfnVidGetHvPartitionId)
+ {
+ HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_GetHvPartitionId;
+ fRet = g_pfnVidGetHvPartitionId(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, &idHvPartition);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ AssertReturn(fRet && idHvPartition == NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID && g_IoCtlGetHvPartitionId.uFunction != 0,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "Problem figuring out VidGetHvPartitionId: fRet=%u idHvPartition=%#x dwErr=%u",
+ fRet, idHvPartition, GetLastError()) );
+ LogRel(("NEM: VidGetHvPartitionId -> fun:%#x in:%#x out:%#x\n",
+ g_IoCtlGetHvPartitionId.uFunction, g_IoCtlGetHvPartitionId.cbInput, g_IoCtlGetHvPartitionId.cbOutput));
+ }
+
+    /* VidGetPartitionProperty - must work as it's the fallback for VidGetHvPartitionId. */
+    if (g_pfnVidGetPartitionProperty)
+ {
+ HV_PARTITION_PROPERTY uPropValue = ~NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_VALUE;
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_GetPartitionProperty;
+ fRet = g_pfnVidGetPartitionProperty(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_CODE,
+ &uPropValue);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ AssertReturn( fRet
+ && uPropValue == NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_PROPERTY_VALUE
+                     && g_IoCtlGetPartitionProperty.uFunction != 0,
+ RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
+ "Problem figuring out VidGetPartitionProperty: fRet=%u uPropValue=%#x dwErr=%u",
+ fRet, uPropValue, GetLastError()) );
+ LogRel(("NEM: VidGetPartitionProperty -> fun:%#x in:%#x out:%#x\n",
+ g_IoCtlGetPartitionProperty.uFunction, g_IoCtlGetPartitionProperty.cbInput, g_IoCtlGetPartitionProperty.cbOutput));
+ }
+
+ /* VidStartVirtualProcessor */
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_StartVirtualProcessor;
+ fRet = g_pfnVidStartVirtualProcessor(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ AssertStmt(fRet && g_IoCtlStartVirtualProcessor.uFunction != 0,
+ RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
+ "Problem figuring out VidStartVirtualProcessor: fRet=%u dwErr=%u", fRet, GetLastError()) );
+ LogRel(("NEM: VidStartVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStartVirtualProcessor.uFunction,
+ g_IoCtlStartVirtualProcessor.cbInput, g_IoCtlStartVirtualProcessor.cbOutput));
+
+ /* VidStopVirtualProcessor */
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_StopVirtualProcessor;
+ fRet = g_pfnVidStopVirtualProcessor(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ AssertStmt(fRet && g_IoCtlStopVirtualProcessor.uFunction != 0,
+ RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
+ "Problem figuring out VidStopVirtualProcessor: fRet=%u dwErr=%u", fRet, GetLastError()) );
+ LogRel(("NEM: VidStopVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStopVirtualProcessor.uFunction,
+ g_IoCtlStopVirtualProcessor.cbInput, g_IoCtlStopVirtualProcessor.cbOutput));
+
+ /* VidMessageSlotHandleAndGetNext */
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_MessageSlotHandleAndGetNext;
+ fRet = g_pfnVidMessageSlotHandleAndGetNext(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE,
+ NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX, VID_MSHAGN_F_HANDLE_MESSAGE,
+ NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ AssertStmt(fRet && g_IoCtlMessageSlotHandleAndGetNext.uFunction != 0,
+ RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
+ "Problem figuring out VidMessageSlotHandleAndGetNext: fRet=%u dwErr=%u",
+ fRet, GetLastError()) );
+ LogRel(("NEM: VidMessageSlotHandleAndGetNext -> fun:%#x in:%#x out:%#x\n",
+ g_IoCtlMessageSlotHandleAndGetNext.uFunction, g_IoCtlMessageSlotHandleAndGetNext.cbInput,
+ g_IoCtlMessageSlotHandleAndGetNext.cbOutput));
+
+ /* The following are only for logging: */
+ union
+ {
+ VID_MAPPED_MESSAGE_SLOT MapSlot;
+ HV_REGISTER_NAME Name;
+ HV_REGISTER_VALUE Value;
+ } uBuf;
+
+ /* VidMessageSlotMap */
+ g_pIoCtlDetectForLogging = &g_IoCtlMessageSlotMap;
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
+ fRet = g_pfnVidMessageSlotMap(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, &uBuf.MapSlot, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ Assert(fRet);
+ LogRel(("NEM: VidMessageSlotMap -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
+ g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
+
+ /* VidGetVirtualProcessorState */
+ uBuf.Name = HvRegisterExplicitSuspend;
+ g_pIoCtlDetectForLogging = &g_IoCtlGetVirtualProcessorState;
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
+ fRet = g_pfnVidGetVirtualProcessorState(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
+ &uBuf.Name, 1, &uBuf.Value);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ Assert(fRet);
+ LogRel(("NEM: VidGetVirtualProcessorState -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
+ g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
+
+ /* VidSetVirtualProcessorState */
+ uBuf.Name = HvRegisterExplicitSuspend;
+ g_pIoCtlDetectForLogging = &g_IoCtlSetVirtualProcessorState;
+ *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
+ fRet = g_pfnVidSetVirtualProcessorState(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
+ &uBuf.Name, 1, &uBuf.Value);
+ *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
+ Assert(fRet);
+ LogRel(("NEM: VidSetVirtualProcessorState -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
+ g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
+
+ g_pIoCtlDetectForLogging = NULL;
+#endif /* LOG_ENABLED */
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates and sets up a Hyper-V (exo) partition.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pErrInfo Where to always return error info.
+ */
+static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
+{
+    AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
+    AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
+
+ /*
+ * Create the partition.
+ */
+ WHV_PARTITION_HANDLE hPartition;
+ HRESULT hrc = WHvCreatePartition(&hPartition);
+ if (FAILED(hrc))
+ return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+
+ int rc;
+
+ /*
+ * Set partition properties, most importantly the CPU count.
+ */
+    /**
+     * @todo Someone at Microsoft please explain another weird API:
+     *       - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
+     *         argument rather than as part of the struct. That is so weird if you've
+     *         used any other NT or Windows API, including WHvGetCapability().
+     *       - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
+     *         technically only need 9 bytes for setting/getting
+     *         WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
+ WHV_PARTITION_PROPERTY Property;
+ RT_ZERO(Property);
+ Property.ProcessorCount = pVM->cCpus;
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
+ if (SUCCEEDED(hrc))
+ {
+ RT_ZERO(Property);
+ Property.ExtendedVmExits.X64CpuidExit = pVM->nem.s.fExtendedCpuIdExit; /** @todo Register fixed results and restrict cpuid exits */
+ Property.ExtendedVmExits.X64MsrExit = pVM->nem.s.fExtendedMsrExit;
+ Property.ExtendedVmExits.ExceptionExit = pVM->nem.s.fExtendedXcptExit;
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
+ if (SUCCEEDED(hrc))
+ {
+ /*
+ * We'll continue setup in nemR3NativeInitAfterCPUM.
+ */
+ pVM->nem.s.fCreatedEmts = false;
+ pVM->nem.s.hPartition = hPartition;
+ LogRel(("NEM: Created partition %p.\n", hPartition));
+ return VINF_SUCCESS;
+ }
+
+ rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
+ "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
+ Property.ExtendedVmExits.AsUINT64, hrc);
+ }
+ else
+ rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
+ "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
+ pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+ WHvDeletePartition(hPartition);
+
+ Assert(!pVM->nem.s.hPartitionDevice);
+ Assert(!pVM->nem.s.hPartition);
+ return rc;
+}
+
+
+/**
+ * Makes sure APIC and firmware will not allow X2APIC mode.
+ *
+ * This is rather ugly.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ */
+static int nemR3WinDisableX2Apic(PVM pVM)
+{
+ /*
+ * First make sure the 'Mode' config value of the APIC isn't set to X2APIC.
+ * This defaults to APIC, so no need to change unless it's X2APIC.
+ */
+ PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/Devices/apic/0/Config");
+ if (pCfg)
+ {
+ uint8_t bMode = 0;
+ int rc = CFGMR3QueryU8(pCfg, "Mode", &bMode);
+ AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND, ("%Rrc\n", rc), rc);
+ if (RT_SUCCESS(rc) && bMode == PDMAPICMODE_X2APIC)
+ {
+ LogRel(("NEM: Adjusting APIC configuration from X2APIC to APIC max mode. X2APIC is not supported by the WinHvPlatform API!\n"));
+ LogRel(("NEM: Disable Hyper-V if you need X2APIC for your guests!\n"));
+ rc = CFGMR3RemoveValue(pCfg, "Mode");
+ rc = CFGMR3InsertInteger(pCfg, "Mode", PDMAPICMODE_APIC);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+
+ /*
+ * Now the firmwares.
+     * These also default to APIC and only need adjusting if configured to X2APIC (2).
+ */
+ static const char * const s_apszFirmwareConfigs[] =
+ {
+ "/Devices/efi/0/Config",
+ "/Devices/pcbios/0/Config",
+ };
+ for (unsigned i = 0; i < RT_ELEMENTS(s_apszFirmwareConfigs); i++)
+ {
+        pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), s_apszFirmwareConfigs[i]);
+ if (pCfg)
+ {
+ uint8_t bMode = 0;
+ int rc = CFGMR3QueryU8(pCfg, "APIC", &bMode);
+ AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND, ("%Rrc\n", rc), rc);
+ if (RT_SUCCESS(rc) && bMode == 2)
+ {
+                LogRel(("NEM: Adjusting %s/APIC from 2 (X2APIC) to 1 (APIC).\n", s_apszFirmwareConfigs[i]));
+ rc = CFGMR3RemoveValue(pCfg, "APIC");
+ rc = CFGMR3InsertInteger(pCfg, "APIC", 1);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Try initialize the native API.
+ *
+ * This may only do part of the job, more can be done in
+ * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFallback Whether we're in fallback mode or use-NEM mode. In
+ * the latter we'll fail if we cannot initialize.
+ * @param fForced Whether the HMForced flag is set and we should
+ * fail if we cannot initialize.
+ */
+int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
+{
+ g_uBuildNo = RTSystemGetNtBuildNo();
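+    /* Cache the NT build number; some VID interfaces changed shape around build 17758 (see nemR3WinIoctlDetector_MessageSlotHandleAndGetNext). */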
+
+ /*
+ * Some state init.
+ */
+#ifdef NEM_WIN_WITH_A20
+ pVM->nem.s.fA20Enabled = true;
+#endif
+#if 0
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
+ }
+#endif
+
+ /*
+ * Error state.
+ * The error message will be non-empty on failure and 'rc' will be set too.
+ */
+ RTERRINFOSTATIC ErrInfo;
+ PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
+ int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+         * Check the capabilities of the hypervisor, starting with whether it's present.
+ */
+ rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Discover the VID I/O control function numbers we need (for interception
+ * only these days).
+ */
+ rc = nemR3WinInitDiscoverIoControlProperties(pVM, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Create and initialize a partition.
+ */
+ rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Set ourselves as the execution engine and make config adjustments.
+ */
+ VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
+ Log(("NEM: Marked active!\n"));
+ nemR3WinDisableX2Apic(pVM);
+ nemR3DisableCpuIsaExt(pVM, "MONITOR"); /* MONITOR is not supported by Hyper-V (MWAIT is sometimes). */
+ PGMR3EnableNemMode(pVM);
+
+ /*
+ * Register release statistics
+ */
+                    STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+                                   "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number of guest pages currently mapped by the VM");
+ STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
+ STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
+ STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
+ STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
+ STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
+ STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
+ STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
+ STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
+ }
+
+ if (!SUPR3IsDriverless())
+ {
+ PUVM pUVM = pVM->pUVM;
+ STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
+ "/NEM/R0Stats/cPagesAvailable");
+ STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
+ "/NEM/R0Stats/cPagesInUse");
+ }
+
+ }
+ }
+ }
+ }
+
+ /*
+ * We only fail if in forced mode, otherwise just log the complaint and return.
+ */
+ Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
+ if ( (fForced || !fFallback)
+ && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
+ return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
+
+ if (RTErrInfoIsSet(pErrInfo))
+ LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This is called after CPUMR3Init is done.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ */
+int nemR3NativeInitAfterCPUM(PVM pVM)
+{
+ /*
+ * Validate sanity.
+ */
+ WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
+ AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
+ AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
+ AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
+ AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
+
+ /*
+ * Continue setting up the partition now that we've got most of the CPUID feature stuff.
+ */
+ WHV_PARTITION_PROPERTY Property;
+ HRESULT hrc;
+
+#if 0
+ /* Not sure if we really need to set the vendor.
+ Update: Apparently we don't. WHvPartitionPropertyCodeProcessorVendor was removed in 17110. */
+ RT_ZERO(Property);
+ Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
+ : WHvProcessorVendorIntel;
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));
+ if (FAILED(hrc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
+ Property.ProcessorVendor, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+#endif
+
+ /* Not sure if we really need to set the cache line flush size. */
+ RT_ZERO(Property);
+ Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
+ if (FAILED(hrc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
+ pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+
+ /* Intercept #DB, #BP and #UD exceptions. */
+ RT_ZERO(Property);
+ Property.ExceptionExitBitmap = RT_BIT_64(WHvX64ExceptionTypeDebugTrapOrFault)
+ | RT_BIT_64(WHvX64ExceptionTypeBreakpointTrap)
+ | RT_BIT_64(WHvX64ExceptionTypeInvalidOpcodeFault);
+
+    /* Intercept #GP to work around the buggy mesa vmwgfx driver. */
+ PVMCPU pVCpu = pVM->apCpusR3[0]; /** @todo In theory per vCPU, in practice same for all. */
+ if (pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv)
+ Property.ExceptionExitBitmap |= RT_BIT_64(WHvX64ExceptionTypeGeneralProtectionFault);
+
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExceptionExitBitmap, &Property, sizeof(Property));
+ if (FAILED(hrc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Failed to set WHvPartitionPropertyCodeExceptionExitBitmap to %#RX64: %Rhrc (Last=%#x/%u)",
+ Property.ExceptionExitBitmap, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+
+
+ /*
+ * Sync CPU features with CPUM.
+ */
+ /** @todo sync CPU features with CPUM. */
+
+ /* Set the partition property. */
+ RT_ZERO(Property);
+ Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
+ hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
+ if (FAILED(hrc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
+ pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+
+ /*
+ * Set up the partition.
+ *
+ * Seems like this is where the partition is actually instantiated and we get
+ * a handle to it.
+ */
+ hrc = WHvSetupPartition(hPartition);
+ if (FAILED(hrc))
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
+ hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
+
+ /*
+ * Hysterical raisins: Get the handle (could also fish this out by intercepting VID.DLL's NtDeviceIoControlFile calls).
+ */
+ HANDLE hPartitionDevice;
+ __try
+ {
+ hPartitionDevice = ((HANDLE *)hPartition)[1];
+ if (!hPartitionDevice)
+ hPartitionDevice = INVALID_HANDLE_VALUE;
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ hrc = GetExceptionCode();
+ hPartitionDevice = INVALID_HANDLE_VALUE;
+ }
+
+ /* Test the handle. */
+ HV_PARTITION_PROPERTY uValue = 0;
+ if ( g_pfnVidGetPartitionProperty
+ && hPartitionDevice != INVALID_HANDLE_VALUE
+ && !g_pfnVidGetPartitionProperty(hPartitionDevice, HvPartitionPropertyProcessorVendor, &uValue))
+ hPartitionDevice = INVALID_HANDLE_VALUE;
+ LogRel(("NEM: HvPartitionPropertyProcessorVendor=%#llx (%lld)\n", uValue, uValue));
+
+ /*
+ * More hysterical raisins: Get the partition ID if we can.
+ */
+ HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
+ if ( g_pfnVidGetHvPartitionId
+ && hPartitionDevice != INVALID_HANDLE_VALUE
+ && !g_pfnVidGetHvPartitionId(hPartitionDevice, &idHvPartition))
+ {
+ idHvPartition = HV_PARTITION_ID_INVALID;
+ Log(("NEM: VidGetHvPartitionId failed: %#x\n", GetLastError()));
+ }
+ pVM->nem.s.hPartitionDevice = hPartitionDevice;
+
+ /*
+ * Setup the EMTs.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ pVCpu = pVM->apCpusR3[idCpu];
+
+ hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
+ if (FAILED(hrc))
+ {
+ NTSTATUS const rcNtLast = RTNtLastStatusValue();
+ DWORD const dwErrLast = RTNtLastErrorValue();
+ while (idCpu-- > 0)
+ {
+ HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
+ AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
+ hPartition, idCpu, hrc2, RTNtLastStatusValue(),
+ RTNtLastErrorValue()));
+ }
+ return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+ "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
+ }
+ }
+ pVM->nem.s.fCreatedEmts = true;
+
+ LogRel(("NEM: Successfully set up partition (device handle %p, partition ID %#llx)\n", hPartitionDevice, idHvPartition));
+
+ /*
+ * Any Hyper-V statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
+ */
+ /** @todo stats */
+
+ /*
+ * Adjust features.
+ *
+ * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
+ * the first init call.
+ */
+
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
+ //AssertLogRel(fRet);
+
+ NOREF(pVM); NOREF(enmWhat);
+ return VINF_SUCCESS;
+}
+
+
+int nemR3NativeTerm(PVM pVM)
+{
+ /*
+ * Delete the partition.
+ */
+ WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
+ pVM->nem.s.hPartition = NULL;
+ pVM->nem.s.hPartitionDevice = NULL;
+ if (hPartition != NULL)
+ {
+ VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
+ LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
+ while (idCpu-- > 0)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->nem.s.pvMsgSlotMapping = NULL;
+ HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
+ AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
+ hPartition, idCpu, hrc, RTNtLastStatusValue(),
+ RTNtLastErrorValue()));
+ }
+ WHvDeletePartition(hPartition);
+ }
+ pVM->nem.s.fCreatedEmts = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * VM reset notification.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void nemR3NativeReset(PVM pVM)
+{
+#if 0
+ /* Unfix the A20 gate. */
+ pVM->nem.s.fA20Fixed = false;
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+/**
+ * Reset CPU due to INIT IPI or hot (un)plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the CPU being
+ * reset.
+ * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
+ */
+void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
+{
+#ifdef NEM_WIN_WITH_A20
+ /* Lock the A20 gate if INIT IPI, make sure it's enabled. */
+ if (fInitIpi && pVCpu->idCpu > 0)
+ {
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ if (!pVM->nem.s.fA20Enabled)
+ nemR3NativeNotifySetA20(pVCpu, true);
+ pVM->nem.s.fA20Enabled = true;
+ pVM->nem.s.fA20Fixed = true;
+ }
+#else
+ RT_NOREF(pVCpu, fInitIpi);
+#endif
+}
+
+
+VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
+{
+ return nemHCWinRunGC(pVM, pVCpu);
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
+{
+ Assert(VM_IS_NEM_ENABLED(pVM));
+
+#ifndef NEM_WIN_WITH_A20
+ /*
+ * Only execute when the A20 gate is enabled because this lovely Hyper-V
+ * blackbox does not seem to have any way to enable or disable A20.
+ */
+ RT_NOREF(pVM);
+ return PGMPhysIsA20Enabled(pVCpu);
+#else
+ RT_NOREF(pVM, pVCpu);
+ return true;
+#endif
+}
+
+
+bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+ NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
+ return false;
+}
+
+
+void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+ Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
+ HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
+ AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
+ RT_NOREF_PV(hrc);
+ RT_NOREF_PV(fFlags);
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
+{
+ RT_NOREF(pVM, fUseDebugLoop);
+ return false;
+}
+
+
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
+{
+ RT_NOREF(pVM, pVCpu, fUseDebugLoop);
+ return false;
+}
+
+
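+/**
+ * Worker for translating a guest physical address to a read-only ring-3 pointer.
+ *
+ * Note! The page mapping lock is released again before returning, so the
+ *       pointer is only dependable for as long as the page stays put.
+ */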
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
+{
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
+ if (RT_SUCCESS(rc))
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ return rc;
+}
+
+
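+/**
+ * Worker for translating a guest physical address to a writable ring-3 pointer.
+ *
+ * Note! Same transient-pointer caveat as nemR3NativeGCPhys2R3PtrReadOnly.
+ */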
+DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+{
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
+ if (RT_SUCCESS(rc))
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ return rc;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
+ GCPhys, cb, pvR3, pu2State, *pu2State, puNemRange, *puNemRange));
+
+ *pu2State = UINT8_MAX;
+ RT_NOREF(puNemRange);
+
+ if (pvR3)
+ {
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
+ HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
+ WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
+ if (SUCCEEDED(hrc))
+ *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
+ else
+ {
+ LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
+ return VERR_NEM_MAP_PAGES_FAILED;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
+{
+ RT_NOREF(pVM);
+ return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
+ RT_NOREF(puNemRange);
+
+ /*
+ * Unmap the RAM we're replacing.
+ */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
+ HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
+ if (SUCCEEDED(hrc))
+ { /* likely */ }
+ else if (pvMmio2)
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
+ GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ else
+ {
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
+ return VERR_NEM_UNMAP_PAGES_FAILED;
+ }
+ }
+
+ /*
+ * Map MMIO2 if any.
+ */
+ if (pvMmio2)
+ {
+ Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
+ WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
+ if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
+ fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
+ HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
+ if (SUCCEEDED(hrc))
+ *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
+ else
+ {
+ LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
+ return VERR_NEM_MAP_PAGES_FAILED;
+ }
+ }
+ else
+ {
+ Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
+ *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
+ }
+ RT_NOREF(pvRam);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
+ void *pvRam, void *pvMmio2, uint32_t *puNemRange)
+{
+ RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
+ void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ int rc = VINF_SUCCESS;
+ Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
+ GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
+
+ /*
+ * Unmap the MMIO2 pages.
+ */
+ /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
+ * we may have more stuff to unmap even in case of pure MMIO... */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
+ {
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
+ HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
+ if (FAILED(hrc))
+ {
+ LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
+ GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ rc = VERR_NEM_UNMAP_PAGES_FAILED;
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
+ }
+ }
+
+ /*
+ * Restore the RAM we replaced.
+ */
+ if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
+ {
+ AssertPtr(pvRam);
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
+ HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
+ WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
+ if (SUCCEEDED(hrc))
+ { /* likely */ }
+ else
+ {
+ LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ rc = VERR_NEM_MAP_PAGES_FAILED;
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
+ }
+ if (pu2State)
+ *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
+ }
+ /* Mark the pages as unmapped if relevant. */
+ else if (pu2State)
+ *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
+
+ RT_NOREF(pvMmio2, puNemRange);
+ return rc;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
+ void *pvBitmap, size_t cbBitmap)
+{
+ Assert(VM_IS_NEM_ENABLED(pVM));
+ AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
+ Assert(cbBitmap == (uint32_t)cbBitmap);
+ RT_NOREF(uNemRange);
+
+ /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
+ HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
+ if (SUCCEEDED(hrc))
+ return VINF_SUCCESS;
+
+ AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
+ uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
+ *pu2State = UINT8_MAX;
+ *puNemRange = 0;
+
+#if 0 /* Let's not do this after all. We'll get protection change notifications for each page and if not we'll map them lazily. */
+ RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
+ for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
+ {
+ const void *pvPage;
+ int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
+ if (RT_SUCCESS(rc))
+ {
+ HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
+ WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
+ if (SUCCEEDED(hrc))
+ { /* likely */ }
+ else
+ {
+ LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ return VERR_NEM_INIT_FAILED;
+ }
+ }
+ else
+ {
+ LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
+ return rc;
+ }
+ }
+ RT_NOREF_PV(fFlags);
+#else
+ RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
+#endif
+ return VINF_SUCCESS;
+}
+
+
+VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
+ uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
+{
+ Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
+ GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
+ *pu2State = UINT8_MAX;
+
+ /*
+ * (Re-)map readonly.
+ */
+ AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
+ STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
+ HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
+ STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
+ if (SUCCEEDED(hrc))
+ *pu2State = NEM_WIN_PAGE_STATE_READABLE;
+ else
+ {
+ LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
+ return VERR_NEM_MAP_PAGES_FAILED;
+ }
+ RT_NOREF(fFlags, puNemRange);
+ return VINF_SUCCESS;
+}
+
+#ifdef NEM_WIN_WITH_A20
+
+/**
+ * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
+ */
+static DECLCALLBACK(int) nemR3WinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
+ PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
+{
+ /* We'll just unmap the memory. */
+ if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
+ {
+ HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
+ if (SUCCEEDED(hrc))
+ {
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
+ uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+ Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
+ pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
+ }
+ else
+ {
+ STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
+ LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+ GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
+ return VERR_INTERNAL_ERROR_2;
+ }
+ }
+ RT_NOREF(pVCpu, pvUser);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
+ *
+ * @returns The PGMPhysNemQueryPageInfo result.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPhys The page to unmap.
+ */
+static int nemR3WinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
+{
+ PGMPHYSNEMPAGEINFO Info;
+ return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
+ nemR3WinUnsetForA20CheckerCallback, NULL);
+}
+
+#endif /* NEM_WIN_WITH_A20 */
+
+VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
+{
+ Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
+ Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
+#ifdef NEM_WIN_WITH_A20
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ if (!pVM->nem.s.fA20Fixed)
+ {
+ pVM->nem.s.fA20Enabled = fEnabled;
+ for (RTGCPHYS GCPhys = _1M; GCPhys < _1M + _64K; GCPhys += X86_PAGE_SIZE)
+ nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys);
+ }
+#else
+ RT_NOREF(pVCpu, fEnabled);
+#endif
+}
+
+
+/** @page pg_nem_win NEM/win - Native Execution Manager, Windows.
+ *
+ * On Windows the Hyper-V root partition (dom0 in Xen terminology) does not have
+ * nested VT-x or AMD-V capabilities. Early on raw-mode worked inside it, but
+ * for a while now we've been getting \#GPs when trying to modify CR4 in the
+ * world switcher. So, when Hyper-V is active on Windows we have little choice
+ * but to use Hyper-V to run our VMs.
+ *
+ *
+ * @section sub_nem_win_whv The WinHvPlatform API
+ *
+ * Since Windows 10 build 17083 there is a documented API for managing Hyper-V
+ * VMs: header file WinHvPlatform.h and implementation in WinHvPlatform.dll.
+ * This interface is a wrapper around the undocumented Virtualization
+ * Infrastructure Driver (VID) API - VID.DLL and VID.SYS. The wrapper is
+ * written in C++ and namespaced; early versions (at least) were using standard
+ * C++ container templates in several places.
+ *
+ * When creating a VM using WHvCreatePartition, it will only create the
+ * WinHvPlatform structures for it, to which you get an abstract pointer. The
+ * VID API that actually creates the partition is first engaged when you call
+ * WHvSetupPartition after first setting a lot of properties using
+ * WHvSetPartitionProperty. Since the VID API is just a very thin wrapper
+ * around CreateFile and NtDeviceIoControlFile, it returns an actual HANDLE for
+ * the partition to WinHvPlatform. We fish this HANDLE out of the WinHvPlatform
+ * partition structures because we need to talk directly to VID for reasons
+ * we'll get to in a bit. (Btw. we could also intercept the CreateFileW or
+ * NtDeviceIoControlFile calls from VID.DLL to get the HANDLE should fishing in
+ * the partition structures become difficult.)
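+ *
+ * As a minimal sketch of that calling order (error handling elided; the
+ * processor count here is just an illustrative value):
+ * @code
+ *  WHV_PARTITION_HANDLE hPartition = NULL;
+ *  HRESULT hrc = WHvCreatePartition(&hPartition);
+ *
+ *  WHV_PARTITION_PROPERTY Property;
+ *  RT_ZERO(Property);
+ *  Property.ProcessorCount = 2;
+ *  hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount,
+ *                                &Property, sizeof(Property));
+ *
+ *  hrc = WHvSetupPartition(hPartition); /* The VID partition gets created here. */
+ * @endcode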
+ *
+ * The WinHvPlatform API requires us to both set the number of guest CPUs before
+ * setting up the partition and call WHvCreateVirtualProcessor for each of them.
+ * The CPU creation function boils down to a VidMessageSlotMap call that sets up
+ * and maps a message buffer into ring-3 for async communication with Hyper-V
+ * and/or the VID.SYS thread actually running the CPU thru
+ * WinHvRunVpDispatchLoop(). When for instance a VMEXIT is encountered, Hyper-V
+ * sends a message that the WHvRunVirtualProcessor API retrieves (and later
+ * acknowledges) via VidMessageSlotHandleAndGetNext. Since or about build
+ * 17757 a register page is also mapped into user space when creating the
+ * virtual CPU. It should be noted that WHvDeleteVirtualProcessor doesn't do
+ * much, as there seems to be no partner function to VidMessageSlotMap that
+ * reverses what it did.
+ *
+ * Memory is managed thru calls to WHvMapGpaRange and WHvUnmapGpaRange (GPA does
+ * not mean grade point average here, but rather guest physical address space),
+ * which correspond to VidCreateVaGpaRangeSpecifyUserVa and VidDestroyGpaRange
+ * respectively. As 'UserVa' indicates, the functions work on user process
+ * memory. The mappings are also subject to quota restrictions, so the number
+ * of ranges is limited and probably their total size as well. Obviously
+ * VID.SYS keeps track of the ranges, but so does WinHvPlatform, which means
+ * there is a bit of overhead involved and quota restrictions make sense.
+ *
+ * Running guest code is done through the WHvRunVirtualProcessor function. It
+ * asynchronously starts or resumes Hyper-V CPU execution and then waits for a
+ * VMEXIT message. Hyper-V / VID.SYS will return information about the message
+ * in the message buffer mapping, and WHvRunVirtualProcessor will convert it
+ * into its own WHV_RUN_VP_EXIT_CONTEXT format.
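+ *
+ * A minimal sketch of a run loop iteration built on this API (a single
+ * illustrative exit case, error handling elided):
+ * @code
+ *  WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
+ *  RT_ZERO(ExitCtx);
+ *  HRESULT hrc = WHvRunVirtualProcessor(hPartition, idCpu, &ExitCtx, sizeof(ExitCtx));
+ *  if (SUCCEEDED(hrc) && ExitCtx.ExitReason == WHvRunVpExitReasonX64IoPortAccess)
+ *  {
+ *      /* Emulate the access using ExitCtx.IoPortAccess, then rerun. */
+ *  }
+ * @endcode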
+ *
+ * Other threads can interrupt the execution by using WHvCancelVirtualProcessor,
+ * which since or about build 17757 uses VidMessageSlotHandleAndGetNext to do
+ * the work (earlier builds would open the waiting thread, do a dummy
+ * QueueUserAPC on it, and let it upon return use VidStopVirtualProcessor to
+ * do the actual stopping). While there is certainly a race between cancelation
+ * and the CPU causing a natural VMEXIT, it is not known whether this still
+ * causes extra work on subsequent WHvRunVirtualProcessor calls (it did in
+ * 17134 and earlier).
+ *
+ * Registers are retrieved and set via WHvGetVirtualProcessorRegisters and
+ * WHvSetVirtualProcessorRegisters. In addition, several VMEXITs include
+ * essential register state in the exit context information, potentially making
+ * it possible to emulate the instruction causing the exit without involving
+ * WHvGetVirtualProcessorRegisters.
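+ *
+ * A sketch of reading a single register via the official API (vCPU index 0
+ * is just for illustration):
+ * @code
+ *  WHV_REGISTER_NAME  enmName = WHvX64RegisterRip;
+ *  WHV_REGISTER_VALUE Value;
+ *  HRESULT hrc = WHvGetVirtualProcessorRegisters(hPartition, 0 /*idCpu*/, &enmName, 1, &Value);
+ *  /* On success, Value.Reg64 holds the guest RIP. */
+ * @endcode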
+ *
+ *
+ * @subsection subsec_nem_win_whv_cons Issues & Feedback
+ *
+ * Here are some observations (mostly against build 17101):
+ *
+ * - The VMEXIT performance is dismal (build 17134).
+ *
+ * Our proof of concept implementation with a kernel runloop (i.e. not using
+ * WHvRunVirtualProcessor and friends, but calling VID.SYS fast I/O control
+ * entry point directly) delivers 9-10% of the port I/O performance and only
+ * 6-7% of the MMIO performance that we have with our own hypervisor.
+ *
+ * When using the official WinHvPlatform API, the numbers are 3% for port I/O
+ * and 5% for MMIO.
+ *
+ * While the tests we've done are using tight loops only doing port I/O
+ * and MMIO, the problem is clearly visible when running regular guest OSes.
+ * Anything that hammers the VGA device would be suffering, for example:
+ *
+ * - Windows 2000 boot screen animation overloads us with MMIO exits
+ * and won't even boot because all the time is spent in interrupt
+ * handlers and redrawing the screen.
+ *
+ * - DSL 4.4 and its bootmenu logo is slower than molasses in January.
+ *
+ * We have not found a workaround for this yet.
+ *
+ * Something that might improve the issue a little is to detect blocks with
+ * excessive MMIO and port I/O exits and emulate instructions to cover
+ * multiple exits before letting Hyper-V have a go at the guest execution
+ * again. This will only improve the situation under some circumstances,
+ * since emulating instructions without recompilation can be expensive, so
+ * there will only be real gains if the exiting instructions are tightly
+ * packed.
+ *
+ * Update: Security fixes during the summer of 2018 caused the performance to
+ * drop even more.
+ *
+ * Update [build 17757]: Some performance improvements here, but they don't
+ * yet make up for what was lost this summer.
+ *
+ *
+ * - We need a way to directly modify the TSC offset (or bias if you like).
+ *
+ * The current approach of setting the WHvX64RegisterTsc register one by one
+ * on each virtual CPU in sequence will introduce random inaccuracies,
+ * especially if the thread doing the job is rescheduled at a bad time.
+ *
+ *
+ * - Unable to access WHvX64RegisterMsrMtrrCap (build 17134).
+ *
+ *
+ * - On AMD Ryzen, grub/debian 9.0 ends up with an unrecoverable exception
+ * when IA32_MTRR_PHYSMASK0 is written.
+ *
+ *
+ * - The IA32_APIC_BASE register does not work right:
+ *
+ * - Attempts by the guest to clear bit 11 (EN) are ignored, both the
+ * guest and the VMM read back the old value.
+ *
+ * - Attempts to modify the base address (bits NN:12) seem to be ignored
+ * in the same way.
+ *
+ * - The VMM can modify both the base address as well as the EN and
+ * BSP bits; however, this is useless if we cannot intercept the WRMSR.
+ *
+ * - Attempts by the guest to set the EXTD bit (X2APIC) result in \#GP(0),
+ * while the VMM ends up with ERROR_HV_INVALID_PARAMETER. It seems
+ * there is no way to support X2APIC.
+ *
+ *
+ * - Not sure if this is a thing, but WHvCancelVirtualProcessor seems to
+ * cause a lot more spurious WHvRunVirtualProcessor returns than what we get
+ * with the replacement code. By spurious returns we mean that the
+ * subsequent call to WHvRunVirtualProcessor would return immediately.
+ *
+ * Update [build 17757]: New cancelation code might have addressed this, but
+ * haven't had time to test it yet.
+ *
+ *
+ * - There is no API for modifying protection of a page within a GPA range.
+ *
+ * From what we can tell, the only way to modify the protection (like readonly
+ * -> writable, or vice versa) is to first unmap the range and then remap it
+ * with the new protection, as sketched at the end of this item.
+ *
+ * We are for instance doing this quite a bit in order to track dirty VRAM
+ * pages. VRAM pages start out as readonly; when the guest writes to a page
+ * we take an exit, note down which page it is, make it writable and restart
+ * the instruction. After refreshing the display, we reset all the writable
+ * pages to readonly again, in bulk fashion.
+ *
+ * Now to work around this issue, we do page sized GPA ranges. In addition to
+ * adding a lot of tracking overhead to WinHvPlatform and VID.SYS, this also
+ * causes us to exceed our quota before we've even mapped a default sized
+ * (128MB) VRAM page-by-page. So, to work around this quota issue we have to
+ * lazily map pages and actively restrict the number of mappings.
+ *
+ * Our best workaround thus far is bypassing WinHvPlatform and VID entirely
+ * when it comes to guest memory management and instead use the underlying
+ * hypercalls (HvCallMapGpaPages, HvCallUnmapGpaPages) to do it ourselves.
+ * (This also maps a whole lot better into our own guest page management
+ * infrastructure.)
+ *
+ * Update [build 17757]: Introduces a KVM like dirty logging API which could
+ * help with tracking dirty VGA pages, while being useless for shadow ROM and
+ * devices trying to catch the guest updating descriptors and such.
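+ *
+ * A sketch of the resulting readonly -> writable transition for a single
+ * page (error handling elided):
+ * @code
+ *  WHvUnmapGpaRange(hPartition, GCPhysPage, X86_PAGE_SIZE);
+ *  WHvMapGpaRange(hPartition, pvPageR3, GCPhysPage, X86_PAGE_SIZE,
+ *                 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
+ * @endcode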
+ *
+ *
+ * - Observed problems doing WHvUnmapGpaRange immediately followed by
+ * WHvMapGpaRange.
+ *
+ * As mentioned above, we've been forced to use this sequence when modifying
+ * page protection. However, when transitioning from readonly to writable,
+ * we've ended up looping forever with the same write to readonly memory
+ * VMEXIT. We're wondering if this issue might be related to the lazy mapping
+ * logic in WinHvPlatform.
+ *
+ * Workaround: Insert a WHvRunVirtualProcessor call and make sure to get a GPA
+ * unmapped exit between the two calls. Not entirely great performance-wise
+ * (or for the sanity of our code).
+ *
+ *
+ * - Implementing A20 gate behavior is tedious, whereas correctly emulating the
+ * A20M# pin (present on 486 and later) is near impossible for SMP setups
+ * (e.g. the possibility of two CPUs with different A20 status).
+ *
+ * Workaround #1 (obsolete): Only do A20 on CPU 0, restricting the emulation
+ * to HMA. We unmap all pages related to HMA (0x100000..0x10ffff) when the A20
+ * state changes, lazily syncing the right pages back when accessed.
+ *
+ * Workaround #2 (used): Use IEM when the A20 gate is disabled.
+ *
+ *
+ * - WHvRunVirtualProcessor wastes time converting VID/Hyper-V messages to its
+ * own format (WHV_RUN_VP_EXIT_CONTEXT).
+ *
+ * We understand this might be because Microsoft wishes to remain free to
+ * modify the VID/Hyper-V messages, but it's still rather silly and does slow
+ * things down a little. We'd much rather just process the messages directly.
+ *
+ *
+ * - WHvRunVirtualProcessor would've benefited from using a callback interface:
+ *
+ * - The potential size changes of the exit context structure wouldn't be
+ * an issue, since the function could manage that itself.
+ *
+ * - State handling could probably be simplified (like cancelation).
+ *
+ *
+ * - WHvGetVirtualProcessorRegisters and WHvSetVirtualProcessorRegisters
+ * internally converts register names, probably using temporary heap buffers.
+ *
+ * From the looks of things, they are converting from WHV_REGISTER_NAME to
+ * HV_REGISTER_NAME as found in the "Virtual Processor Register Names" section in
+ * the "Hypervisor Top-Level Functional Specification" document. This feels
+ * like an awful waste of time.
+ *
+ * We simply cannot understand why HV_REGISTER_NAME isn't used directly here,
+ * or at least the same values, making any conversion redundant. Restricting
+ * access to certain registers could easily be implemented by scanning the
+ * inputs.
+ *
+ * To avoid the heap + conversion overhead, we're currently using the
+ * HvCallGetVpRegisters and HvCallSetVpRegisters calls directly, at least for
+ * the ring-0 code.
+ *
+ * Update [build 17757]: Register translation has been very cleverly
+ * optimized and made table driven (2 top level tables, 4 + 1 leaf tables).
+ * Register information consists of the 32-bit HV register name, register page
+ * offset, and flags (giving valid offset, size and more). Register
+ * getting/setting seems to be done by hoping that the register page provides
+ * it all, falling back on VidSetVirtualProcessorState if one or more
+ * registers are not available there.
+ *
+ * Note! We have currently not updated our ring-0 code to take the register
+ * page into account, so it's suffering a little compared to the ring-3 code
+ * that now uses the official APIs for registers.
+ *
+ *
+ * - The YMM and XCR0 registers are not yet named (17083). This probably
+ * wouldn't be a problem if HV_REGISTER_NAME was used, see previous point.
+ *
+ * Update [build 17757]: XCR0 is added. YMM register values seem to be put
+ * into a yet undocumented XsaveState interface. The approach is a little bulky,
+ * but saves on the number of enums and dispenses with register translation. Also,
+ * the underlying Vid setter API duplicates the input buffer on the heap,
+ * adding a 16 byte header.
+ *
+ *
+ * - Why does VID.SYS only query/set 32 registers at a time thru the
+ * HvCallGetVpRegisters and HvCallSetVpRegisters hypercalls?
+ *
+ * We've had no trouble getting/setting all the registers defined by
+ * WHV_REGISTER_NAME in one hypercall (around 80). Some kind of stack
+ * buffering or similar?
+ *
+ *
+ * - To handle the VMMCALL / VMCALL instructions, it seems we need to intercept
+ * \#UD exceptions and inspect the opcodes. A dedicated exit for hypercalls
+ * would be more efficient, esp. for guests using \#UD for other purposes.
+ *
+ *
+ * - Wrong instruction length in the VpContext with unmapped GPA memory exit
+ * contexts on 17115/AMD.
+ *
+ * One byte "PUSH CS" was reported as 2 bytes, while a two byte
+ * "MOV [EBX],EAX" was reported with a 1 byte instruction length. The problem
+ * is naturally present in the untranslated Hyper-V messages as well.
+ *
+ *
+ * - The I/O port exit context information seems to be missing the address size
+ * information needed for correct string I/O emulation.
+ *
+ * VT-x provides this information in bits 7:9 in the instruction information
+ * field on newer CPUs. AMD-V in bits 7:9 in the EXITINFO1 field in the VMCB.
+ *
+ * We can probably work around this by scanning the instruction bytes for
+ * address size prefixes; see the rough sketch below. We haven't investigated
+ * it any further yet.
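+ *
+ * A rough sketch of such a scan, assuming the instruction bytes are
+ * available (only a few common prefixes are recognized here):
+ * @code
+ *  bool fAddrSizePrefix = false;
+ *  for (uint32_t i = 0; i < cbInstr; i++)
+ *  {
+ *      uint8_t const bOpc = pbInstr[i];
+ *      if (bOpc == 0x67)                   /* address size override */
+ *          fAddrSizePrefix = true;
+ *      else if (   bOpc != 0x66 && bOpc != 0xf2 && bOpc != 0xf3 /* opsize, repne, rep */
+ *               && (bOpc & 0xf0) != 0x40)  /* REX (64-bit mode only) */
+ *          break;                          /* first opcode byte reached */
+ *  }
+ * @endcode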
+ *
+ *
+ * - Querying WHvCapabilityCodeExceptionExitBitmap returns zero even when
+ * intercepts demonstrably work (17134).
+ *
+ *
+ * - Querying HvPartitionPropertyDebugChannelId via HvCallGetPartitionProperty
+ * (hypercall) hangs the host (17134).
+ *
+ * - CommonUtilities::GuidToString needs a 'static' before the hex digit array,
+ * it looks pointless to re-init a stack copy of it for each call (novice mistake).
+ *
+ *
+ * Old concerns that have been addressed:
+ *
+ * - The WHvCancelVirtualProcessor API schedules a dummy usermode APC callback
+ * in order to cancel any current or future alertable wait in VID.SYS during
+ * the VidMessageSlotHandleAndGetNext call.
+ *
+ * IIRC this will make the kernel schedule the specified callback thru
+ * NTDLL!KiUserApcDispatcher by modifying the thread context and quite
+ * possibly the userland thread stack. When the APC callback returns to
+ * KiUserApcDispatcher, it will call NtContinue to restore the old thread
+ * context and resume execution from there. This naturally adds up to some
+ * CPU cycles; ring transitions aren't for free, especially after Spectre &
+ * Meltdown mitigations.
+ *
+ * Using an NtAlertThread call could do the same without the thread context
+ * modifications and the extra kernel call.
+ *
+ * Update: All concerns have been addressed in or about build 17757.
+ *
+ * The WHvCancelVirtualProcessor API is now implemented using a new
+ * VidMessageSlotHandleAndGetNext() flag (4). Codepath is slightly longer
+ * than NtAlertThread, but has the added benefit that spurious wakeups can be
+ * more easily reduced.
+ *
+ *
+ * - When WHvRunVirtualProcessor returns without a message, or on a terse
+ * VID message like HLT, it will make a kernel call to get some registers.
+ * This is potentially inefficient if the caller decides he needs more
+ * register state.
+ *
+ * It would be better to just return what's available and let the caller fetch
+ * what is missing from his point of view in a single kernel call.
+ *
+ * Update: All concerns have been addressed in or about build 17757. Selected
+ * registers are now available via shared memory and thus HLT should (not
+ * verified) no longer require a system call to compose the exit context data.
+ *
+ *
+ * - The WHvRunVirtualProcessor implementation does lazy GPA range mappings when
+ * an unmapped GPA message is received from Hyper-V.
+ *
+ * Since MMIO is currently realized as unmapped GPA, this will slow down all
+ * MMIO accesses a tiny little bit as WHvRunVirtualProcessor looks up the
+ * guest physical address to check if it is a pending lazy mapping.
+ *
+ * The lazy mapping feature makes no sense to us. We as API user have all the
+ * information and can do lazy mapping ourselves if we want/have to (see next
+ * point).
+ *
+ * Update: All concerns have been addressed in or about build 17757.
+ *
+ *
+ * - The WHvGetCapability function has a weird design:
+ * - The CapabilityCode parameter is pointlessly duplicated in the output
+ * structure (WHV_CAPABILITY).
+ *
+ * - API takes void pointer, but everyone will probably be using
+ * WHV_CAPABILITY due to WHV_CAPABILITY::CapabilityCode making it
+ * impractical to use anything else.
+ *
+ * - No output size.
+ *
+ * - See GetFileAttributesEx, GetFileInformationByHandleEx,
+ * FindFirstFileEx, and others for typical pattern for generic
+ * information getters.
+ *
+ * Update: All concerns have been addressed in build 17110.
+ *
+ *
+ * - The WHvGetPartitionProperty function uses the same weird design as
+ * WHvGetCapability, see above.
+ *
+ * Update: All concerns have been addressed in build 17110.
+ *
+ *
+ * - The WHvSetPartitionProperty function has a totally weird design too:
+ * - In contrast to its partner WHvGetPartitionProperty, the property code
+ * is not a separate input parameter here but part of the input
+ * structure.
+ *
+ * - The input structure is a void pointer rather than a pointer to
+ * WHV_PARTITION_PROPERTY which everyone probably will be using because
+ * of the WHV_PARTITION_PROPERTY::PropertyCode field.
+ *
+ * - Really, why use PVOID for the input when the function isn't accepting
+ * minimal sizes. E.g. WHvPartitionPropertyCodeProcessorClFlushSize only
+ * requires a 9 byte input, but the function insists on 16 bytes (17083).
+ *
+ * - See GetFileAttributesEx, SetFileInformationByHandle, FindFirstFileEx,
+ * and others for typical pattern for generic information setters and
+ * getters.
+ *
+ * Update: All concerns have been addressed in build 17110.
+ *
+ *
+ * @section sec_nem_win_large_pages Large Pages
+ *
+ * We've got a standalone memory allocation and access testcase bs3-memalloc-1
+ * which was run with 48GiB of guest RAM configured on a NUC 11 box running
+ * Windows 11 GA. In the simplified NEM memory mode no exits should be
+ * generated while the access tests are running.
+ *
+ * The bs3-memalloc-1 results kind of hint at some tiny speed-up if the guest
+ * RAM is allocated using the MEM_LARGE_PAGES flag, but only in the 3rd access
+ * check (typically 350 000 MiB/s without and around 400 000 MiB/s with). The
+ * result for the 2nd access varies a lot, perhaps hinting at some table
+ * optimizations going on.
+ *
+ * The initial access where the memory is locked/whatever has absolutely horrid
+ * results regardless of whether large pages are enabled or not. Typically
+ * bobbing close to 500 MiB/s, non-large pages a little faster.
+ *
+ * NEM w/ simplified memory and MEM_LARGE_PAGES:
+ * @verbatim
+bs3-memalloc-1: TESTING...
+bs3-memalloc-1: #0/0x0: 0x0000000000000000 LB 0x000000000009fc00 USABLE (1)
+bs3-memalloc-1: #1/0x1: 0x000000000009fc00 LB 0x0000000000000400 RESERVED (2)
+bs3-memalloc-1: #2/0x2: 0x00000000000f0000 LB 0x0000000000010000 RESERVED (2)
+bs3-memalloc-1: #3/0x3: 0x0000000000100000 LB 0x00000000dfef0000 USABLE (1)
+bs3-memalloc-1: #4/0x4: 0x00000000dfff0000 LB 0x0000000000010000 ACPI_RECLAIMABLE (3)
+bs3-memalloc-1: #5/0x5: 0x00000000fec00000 LB 0x0000000000001000 RESERVED (2)
+bs3-memalloc-1: #6/0x6: 0x00000000fee00000 LB 0x0000000000001000 RESERVED (2)
+bs3-memalloc-1: #7/0x7: 0x00000000fffc0000 LB 0x0000000000040000 RESERVED (2)
+bs3-memalloc-1: #8/0x9: 0x0000000100000000 LB 0x0000000b20000000 USABLE (1)
+bs3-memalloc-1: Found 1 interesting entries covering 0xb20000000 bytes (44 GB).
+bs3-memalloc-1: From 0x100000000 to 0xc20000000
+bs3-memalloc-1: INT15h/E820 : PASSED
+bs3-memalloc-1: Mapping memory above 4GB : PASSED
+bs3-memalloc-1: Pages : 11 665 408 pages
+bs3-memalloc-1: MiBs : 45 568 MB
+bs3-memalloc-1: Alloc elapsed : 90 925 263 996 ns
+bs3-memalloc-1: Alloc elapsed in ticks : 272 340 387 336 ticks
+bs3-memalloc-1: Page alloc time : 7 794 ns/page
+bs3-memalloc-1: Page alloc time in ticks : 23 345 ticks/page
+bs3-memalloc-1: Alloc thruput : 128 296 pages/s
+bs3-memalloc-1: Alloc thruput in MiBs : 501 MB/s
+bs3-memalloc-1: Allocation speed : PASSED
+bs3-memalloc-1: Access elapsed : 85 074 483 467 ns
+bs3-memalloc-1: Access elapsed in ticks : 254 816 088 412 ticks
+bs3-memalloc-1: Page access time : 7 292 ns/page
+bs3-memalloc-1: Page access time in ticks : 21 843 ticks/page
+bs3-memalloc-1: Access thruput : 137 119 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 535 MB/s
+bs3-memalloc-1: 2nd access : PASSED
+bs3-memalloc-1: Access elapsed : 112 963 925 ns
+bs3-memalloc-1: Access elapsed in ticks : 338 284 436 ticks
+bs3-memalloc-1: Page access time : 9 ns/page
+bs3-memalloc-1: Page access time in ticks : 28 ticks/page
+bs3-memalloc-1: Access thruput : 103 266 666 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 403 385 MB/s
+bs3-memalloc-1: 3rd access : PASSED
+bs3-memalloc-1: SUCCESS
+ * @endverbatim
+ *
+ * NEM w/ simplified memory but no MEM_LARGE_PAGES:
+ * @verbatim
+bs3-memalloc-1: From 0x100000000 to 0xc20000000
+bs3-memalloc-1: Pages : 11 665 408 pages
+bs3-memalloc-1: MiBs : 45 568 MB
+bs3-memalloc-1: Alloc elapsed : 90 062 027 900 ns
+bs3-memalloc-1: Alloc elapsed in ticks : 269 754 826 466 ticks
+bs3-memalloc-1: Page alloc time : 7 720 ns/page
+bs3-memalloc-1: Page alloc time in ticks : 23 124 ticks/page
+bs3-memalloc-1: Alloc thruput : 129 526 pages/s
+bs3-memalloc-1: Alloc thruput in MiBs : 505 MB/s
+bs3-memalloc-1: Allocation speed : PASSED
+bs3-memalloc-1: Access elapsed : 3 596 017 220 ns
+bs3-memalloc-1: Access elapsed in ticks : 10 770 732 620 ticks
+bs3-memalloc-1: Page access time : 308 ns/page
+bs3-memalloc-1: Page access time in ticks : 923 ticks/page
+bs3-memalloc-1: Access thruput : 3 243 980 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 12 671 MB/s
+bs3-memalloc-1: 2nd access : PASSED
+bs3-memalloc-1: Access elapsed : 133 060 160 ns
+bs3-memalloc-1: Access elapsed in ticks : 398 459 884 ticks
+bs3-memalloc-1: Page access time : 11 ns/page
+bs3-memalloc-1: Page access time in ticks : 34 ticks/page
+bs3-memalloc-1: Access thruput : 87 670 178 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 342 461 MB/s
+bs3-memalloc-1: 3rd access : PASSED
+ * @endverbatim
+ *
+ * Same everything but native VT-x and VBox (stripped output a little):
+ * @verbatim
+bs3-memalloc-1: From 0x100000000 to 0xc20000000
+bs3-memalloc-1: Pages : 11 665 408 pages
+bs3-memalloc-1: MiBs : 45 568 MB
+bs3-memalloc-1: Alloc elapsed : 776 111 427 ns
+bs3-memalloc-1: Alloc elapsed in ticks : 2 323 267 035 ticks
+bs3-memalloc-1: Page alloc time : 66 ns/page
+bs3-memalloc-1: Page alloc time in ticks : 199 ticks/page
+bs3-memalloc-1: Alloc thruput : 15 030 584 pages/s
+bs3-memalloc-1: Alloc thruput in MiBs : 58 713 MB/s
+bs3-memalloc-1: Allocation speed : PASSED
+bs3-memalloc-1: Access elapsed : 112 141 904 ns
+bs3-memalloc-1: Access elapsed in ticks : 335 751 077 ticks
+bs3-memalloc-1: Page access time : 9 ns/page
+bs3-memalloc-1: Page access time in ticks : 28 ticks/page
+bs3-memalloc-1: Access thruput : 104 023 630 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 406 342 MB/s
+bs3-memalloc-1: 2nd access : PASSED
+bs3-memalloc-1: Access elapsed : 112 023 049 ns
+bs3-memalloc-1: Access elapsed in ticks : 335 418 343 ticks
+bs3-memalloc-1: Page access time : 9 ns/page
+bs3-memalloc-1: Page access time in ticks : 28 ticks/page
+bs3-memalloc-1: Access thruput : 104 133 998 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 406 773 MB/s
+bs3-memalloc-1: 3rd access : PASSED
+ * @endverbatim
+ *
+ * VBox with large pages disabled:
+ * @verbatim
+bs3-memalloc-1: From 0x100000000 to 0xc20000000
+bs3-memalloc-1: Pages : 11 665 408 pages
+bs3-memalloc-1: MiBs : 45 568 MB
+bs3-memalloc-1: Alloc elapsed : 50 986 588 028 ns
+bs3-memalloc-1: Alloc elapsed in ticks : 152 714 862 044 ticks
+bs3-memalloc-1: Page alloc time : 4 370 ns/page
+bs3-memalloc-1: Page alloc time in ticks : 13 091 ticks/page
+bs3-memalloc-1: Alloc thruput : 228 793 pages/s
+bs3-memalloc-1: Alloc thruput in MiBs : 893 MB/s
+bs3-memalloc-1: Allocation speed : PASSED
+bs3-memalloc-1: Access elapsed : 2 849 641 741 ns
+bs3-memalloc-1: Access elapsed in ticks : 8 535 372 249 ticks
+bs3-memalloc-1: Page access time : 244 ns/page
+bs3-memalloc-1: Page access time in ticks : 731 ticks/page
+bs3-memalloc-1: Access thruput : 4 093 640 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 15 990 MB/s
+bs3-memalloc-1: 2nd access : PASSED
+bs3-memalloc-1: Access elapsed : 2 866 960 770 ns
+bs3-memalloc-1: Access elapsed in ticks : 8 587 097 799 ticks
+bs3-memalloc-1: Page access time : 245 ns/page
+bs3-memalloc-1: Page access time in ticks : 736 ticks/page
+bs3-memalloc-1: Access thruput : 4 068 910 pages/s
+bs3-memalloc-1: Access thruput in MiBs : 15 894 MB/s
+bs3-memalloc-1: 3rd access : PASSED
+ * @endverbatim
+ *
+ * Comparing large pages, there is an allocation speed difference of two orders
+ * of magnitude. When disabling large pages in VBox the allocation numbers are
+ * closer, and it is clear from the 2nd and 3rd access tests that VBox doesn't
+ * spend as much memory on nested page tables as Hyper-V does. The similar 2nd
+ * and 3rd access numbers of the two large page test runs seem to hint strongly at
+ * Hyper-V eventually getting the large pages in place too, only that it is a
+ * hundredfold slower in the setting up phase.
+ *
+ *
+ *
+ * @section sec_nem_win_impl Our implementation.
+ *
+ * We set out with the goal of wanting to run as much as possible in ring-0,
+ * reasoning that this would give us the best performance.
+ *
+ * This goal was approached gradually, starting out with a pure WinHvPlatform
+ * implementation, gradually replacing parts: register access, guest memory
+ * handling, running virtual processors. Then finally moving it all into
+ * ring-0, while keeping most of it configurable so that we could make
+ * comparisons (see NEMInternal.h and nemR3NativeRunGC()).
+ *
+ *
+ * @subsection subsect_nem_win_impl_ioctl VID.SYS I/O control calls
+ *
+ * To run things in ring-0 we need to talk directly to VID.SYS thru its I/O
+ * control interface. Looking at changes between builds like 17083 and 17101 (if
+ * memory serves), a set of the VID I/O control numbers shifted a little, which
+ * means we need to determine them dynamically. We currently do this by hooking
+ * the NtDeviceIoControlFile API call from VID.DLL and snooping up the
+ * parameters when making dummy calls to relevant APIs. (We could also
+ * disassemble the relevant APIs and try to fish out the information from that, but
+ * this is way simpler.)
+ *
+ * Issuing I/O control calls from ring-0 is facing a small challenge with
+ * respect to direct buffering. When using direct buffering the device will
+ * typically check that the buffer is actually in the user address space range
+ * and reject kernel addresses. Fortunately, we've got the cross context VM
+ * structure that is mapped into both kernel and user space; it's also locked
+ * and safe to access from kernel space. So, we place the I/O control buffers
+ * in the per-CPU part of it (NEMCPU::uIoCtlBuf) and give the driver the user
+ * address if direct access buffering is used, or the kernel address if not.
+ *
+ * The I/O control calls are 'abstracted' in the support driver, see
+ * SUPR0IoCtlSetupForHandle(), SUPR0IoCtlPerform() and SUPR0IoCtlCleanup().
+ *
+ *
+ * @subsection subsect_nem_win_impl_cpumctx CPUMCTX
+ *
+ * Since the CPU state needs to live in Hyper-V when executing, we probably
+ * should not transfer more than necessary when handling VMEXITs. To help us
+ * manage this, CPUMCTX got a new field, CPUMCTX::fExtrn, that indicates which
+ * part of the state is currently externalized (== in Hyper-V).
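+ *
+ * A sketch of the resulting pattern (CPUMCTX_EXTRN_RIP is a real flag bit,
+ * while the import helper named here is hypothetical):
+ * @code
+ *  if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
+ *      rc = nemImportGuestStateSketch(pVM, pVCpu, CPUMCTX_EXTRN_RIP); /* hypothetical helper */
+ * @endcode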
+ *
+ *
+ * @subsection sec_nem_win_benchmarks Benchmarks.
+ *
+ * @subsubsection subsect_nem_win_benchmarks_bs2t1 17134/2018-06-22: Bootsector2-test1
+ *
+ * This is ValidationKit/bootsectors/bootsector2-test1.asm as of 2018-06-22
+ * (internal r123172) running on a release build of VirtualBox from the same
+ * source, though with exit optimizations disabled. Host is an AMD Threadripper 1950X
+ * running an up-to-date 64-bit Windows 10 build 17134.
+ *
+ * The baseline column is using the official WinHv API for everything but physical
+ * memory mapping. The 2nd column is the default NEM/win configuration where we
+ * put the main execution loop in ring-0, using hypercalls when we can and VID for
+ * managing execution. The 3rd column is regular VirtualBox using AMD-V directly,
+ * with Hyper-V disabled and the main execution loop in ring-0.
+ *
+ * @verbatim
+TESTING... WinHv API Hypercalls + VID VirtualBox AMD-V
+ 32-bit paged protected mode, CPUID : 108 874 ins/sec 113% / 123 602 1198% / 1 305 113
+ 32-bit pae protected mode, CPUID : 106 722 ins/sec 115% / 122 740 1232% / 1 315 201
+ 64-bit long mode, CPUID : 106 798 ins/sec 114% / 122 111 1198% / 1 280 404
+ 16-bit unpaged protected mode, CPUID : 106 835 ins/sec 114% / 121 994 1216% / 1 299 665
+ 32-bit unpaged protected mode, CPUID : 105 257 ins/sec 115% / 121 772 1235% / 1 300 860
+ real mode, CPUID : 104 507 ins/sec 116% / 121 800 1228% / 1 283 848
+CPUID EAX=1 : PASSED
+ 32-bit paged protected mode, RDTSC : 99 581 834 ins/sec 100% / 100 323 307 93% / 93 473 299
+ 32-bit pae protected mode, RDTSC : 99 620 585 ins/sec 100% / 99 960 952 84% / 83 968 839
+ 64-bit long mode, RDTSC : 100 540 009 ins/sec 100% / 100 946 372 93% / 93 652 826
+ 16-bit unpaged protected mode, RDTSC : 99 688 473 ins/sec 100% / 100 097 751 76% / 76 281 287
+ 32-bit unpaged protected mode, RDTSC : 98 385 857 ins/sec 102% / 100 510 404 94% / 93 379 536
+ real mode, RDTSC : 100 087 967 ins/sec 101% / 101 386 138 93% / 93 234 999
+RDTSC : PASSED
+ 32-bit paged protected mode, Read CR4 : 2 156 102 ins/sec 98% / 2 121 967 17114% / 369 009 009
+ 32-bit pae protected mode, Read CR4 : 2 163 820 ins/sec 98% / 2 133 804 17469% / 377 999 261
+ 64-bit long mode, Read CR4 : 2 164 822 ins/sec 98% / 2 128 698 18875% / 408 619 313
+ 16-bit unpaged protected mode, Read CR4 : 2 162 367 ins/sec 100% / 2 168 508 17132% / 370 477 568
+ 32-bit unpaged protected mode, Read CR4 : 2 163 189 ins/sec 100% / 2 169 808 16768% / 362 734 679
+ real mode, Read CR4 : 2 162 436 ins/sec 100% / 2 164 914 15551% / 336 288 998
+Read CR4 : PASSED
+ real mode, 32-bit IN : 104 649 ins/sec 118% / 123 513 1028% / 1 075 831
+ real mode, 32-bit OUT : 107 102 ins/sec 115% / 123 660 982% / 1 052 259
+ real mode, 32-bit IN-to-ring-3 : 105 697 ins/sec 98% / 104 471 201% / 213 216
+ real mode, 32-bit OUT-to-ring-3 : 105 830 ins/sec 98% / 104 598 198% / 210 495
+ 16-bit unpaged protected mode, 32-bit IN : 104 855 ins/sec 117% / 123 174 1029% / 1 079 591
+ 16-bit unpaged protected mode, 32-bit OUT : 107 529 ins/sec 115% / 124 250 992% / 1 067 053
+ 16-bit unpaged protected mode, 32-bit IN-to-ring-3 : 106 337 ins/sec 103% / 109 565 196% / 209 367
+ 16-bit unpaged protected mode, 32-bit OUT-to-ring-3 : 107 558 ins/sec 100% / 108 237 191% / 206 387
+ 32-bit unpaged protected mode, 32-bit IN : 106 351 ins/sec 116% / 123 584 1016% / 1 081 325
+ 32-bit unpaged protected mode, 32-bit OUT : 106 424 ins/sec 116% / 124 252 995% / 1 059 408
+ 32-bit unpaged protected mode, 32-bit IN-to-ring-3 : 104 035 ins/sec 101% / 105 305 202% / 210 750
+ 32-bit unpaged protected mode, 32-bit OUT-to-ring-3 : 103 831 ins/sec 102% / 106 919 205% / 213 198
+ 32-bit paged protected mode, 32-bit IN : 103 356 ins/sec 119% / 123 870 1041% / 1 076 463
+ 32-bit paged protected mode, 32-bit OUT : 107 177 ins/sec 115% / 124 302 998% / 1 069 655
+ 32-bit paged protected mode, 32-bit IN-to-ring-3 : 104 491 ins/sec 100% / 104 744 200% / 209 264
+ 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 106 603 ins/sec 97% / 103 849 197% / 210 219
+ 32-bit pae protected mode, 32-bit IN : 105 923 ins/sec 115% / 122 759 1041% / 1 103 261
+ 32-bit pae protected mode, 32-bit OUT : 107 083 ins/sec 117% / 126 057 1024% / 1 096 667
+ 32-bit pae protected mode, 32-bit IN-to-ring-3 : 106 114 ins/sec 97% / 103 496 199% / 211 312
+ 32-bit pae protected mode, 32-bit OUT-to-ring-3 : 105 675 ins/sec 96% / 102 096 198% / 209 890
+ 64-bit long mode, 32-bit IN : 105 800 ins/sec 113% / 120 006 1013% / 1 072 116
+ 64-bit long mode, 32-bit OUT : 105 635 ins/sec 113% / 120 375 997% / 1 053 655
+ 64-bit long mode, 32-bit IN-to-ring-3 : 105 274 ins/sec 95% / 100 763 197% / 208 026
+ 64-bit long mode, 32-bit OUT-to-ring-3 : 106 262 ins/sec 94% / 100 749 196% / 209 288
+NOP I/O Port Access : PASSED
+ 32-bit paged protected mode, 32-bit read : 57 687 ins/sec 119% / 69 136 1197% / 690 548
+ 32-bit paged protected mode, 32-bit write : 57 957 ins/sec 118% / 68 935 1183% / 685 930
+ 32-bit paged protected mode, 32-bit read-to-ring-3 : 57 958 ins/sec 95% / 55 432 276% / 160 505
+ 32-bit paged protected mode, 32-bit write-to-ring-3 : 57 922 ins/sec 100% / 58 340 304% / 176 464
+ 32-bit pae protected mode, 32-bit read : 57 478 ins/sec 119% / 68 453 1141% / 656 159
+ 32-bit pae protected mode, 32-bit write : 57 226 ins/sec 118% / 68 097 1157% / 662 504
+ 32-bit pae protected mode, 32-bit read-to-ring-3 : 57 582 ins/sec 94% / 54 651 268% / 154 867
+ 32-bit pae protected mode, 32-bit write-to-ring-3 : 57 697 ins/sec 100% / 57 750 299% / 173 030
+ 64-bit long mode, 32-bit read : 57 128 ins/sec 118% / 67 779 1071% / 611 949
+ 64-bit long mode, 32-bit write : 57 127 ins/sec 118% / 67 632 1084% / 619 395
+ 64-bit long mode, 32-bit read-to-ring-3 : 57 181 ins/sec 94% / 54 123 265% / 151 937
+ 64-bit long mode, 32-bit write-to-ring-3 : 57 297 ins/sec 99% / 57 286 294% / 168 694
+ 16-bit unpaged protected mode, 32-bit read : 58 827 ins/sec 118% / 69 545 1185% / 697 602
+ 16-bit unpaged protected mode, 32-bit write : 58 678 ins/sec 118% / 69 442 1183% / 694 387
+ 16-bit unpaged protected mode, 32-bit read-to-ring-3 : 57 841 ins/sec 96% / 55 730 275% / 159 163
+ 16-bit unpaged protected mode, 32-bit write-to-ring-3 : 57 855 ins/sec 101% / 58 834 304% / 176 169
+ 32-bit unpaged protected mode, 32-bit read : 58 063 ins/sec 120% / 69 690 1233% / 716 444
+ 32-bit unpaged protected mode, 32-bit write : 57 936 ins/sec 120% / 69 633 1199% / 694 753
+ 32-bit unpaged protected mode, 32-bit read-to-ring-3 : 58 451 ins/sec 96% / 56 183 273% / 159 972
+ 32-bit unpaged protected mode, 32-bit write-to-ring-3 : 58 962 ins/sec 99% / 58 955 298% / 175 936
+ real mode, 32-bit read : 58 571 ins/sec 118% / 69 478 1160% / 679 917
+ real mode, 32-bit write : 58 418 ins/sec 118% / 69 320 1185% / 692 513
+ real mode, 32-bit read-to-ring-3 : 58 072 ins/sec 96% / 55 751 274% / 159 145
+ real mode, 32-bit write-to-ring-3 : 57 870 ins/sec 101% / 58 755 307% / 178 042
+NOP MMIO Access : PASSED
+SUCCESS
+ * @endverbatim
+ *
+ * What we see here is:
+ *
+ * - The WinHv API approach is 10 to 12 times slower for exits we can
+ * handle directly in ring-0 in the VBox AMD-V code.
+ *
+ * - The WinHv API approach is 2 to 3 times slower for exits we have to
+ * go to ring-3 to handle with the VBox AMD-V code.
+ *
+ * - By using hypercalls and VID.SYS from ring-0 we gain between
+ * 13% and 20% over the WinHv API on exits handled in ring-0.
+ *
+ * - Exits requiring ring-3 handling are between 6% slower and 3% faster
+ * than with the WinHv API.
+ *
+ *
+ * As a side note, it looks like Hyper-V doesn't let the guest read CR4 directly
+ * but triggers exits every time. This isn't all that important these days since
+ * OSes like Linux cache the CR4 value specifically to avoid these kinds of exits.
+ *
+ *
+ * @subsubsection subsect_nem_win_benchmarks_bs2t1u1 17134/2018-10-02: Bootsector2-test1
+ *
+ * Update on 17134. While expectantly testing a couple of newer builds (17758,
+ * 17763) hoping for some increases in performance, the numbers turned out
+ * altogether worse than the June test run. So, we went back to the 1803
+ * (17134) installation, made sure it was fully up to date (as per 2018-10-02)
+ * and re-tested.
+ *
+ * The numbers had somehow turned significantly worse over the last 3-4 months,
+ * dropping around 70% for the WinHv API test, more for Hypercalls + VID.
+ *
+ * @verbatim
+TESTING... WinHv API Hypercalls + VID VirtualBox AMD-V *
+ 32-bit paged protected mode, CPUID : 33 270 ins/sec 33 154
+ real mode, CPUID : 33 534 ins/sec 32 711
+ [snip]
+ 32-bit paged protected mode, RDTSC : 102 216 011 ins/sec 98 225 419
+ real mode, RDTSC : 102 492 243 ins/sec 98 225 419
+ [snip]
+ 32-bit paged protected mode, Read CR4 : 2 096 165 ins/sec 2 123 815
+ real mode, Read CR4 : 2 081 047 ins/sec 2 075 151
+ [snip]
+ 32-bit paged protected mode, 32-bit IN : 32 739 ins/sec 33 655
+ 32-bit paged protected mode, 32-bit OUT : 32 702 ins/sec 33 777
+ 32-bit paged protected mode, 32-bit IN-to-ring-3 : 32 579 ins/sec 29 985
+ 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 32 750 ins/sec 29 757
+ [snip]
+ 32-bit paged protected mode, 32-bit read : 20 042 ins/sec 21 489
+ 32-bit paged protected mode, 32-bit write : 20 036 ins/sec 21 493
+ 32-bit paged protected mode, 32-bit read-to-ring-3 : 19 985 ins/sec 19 143
+ 32-bit paged protected mode, 32-bit write-to-ring-3 : 19 972 ins/sec 19 595
+
+ * @endverbatim
+ *
+ * Suspects are security updates and/or microcode updates installed since then.
+ * Given that the RDTSC and CR4 numbers are reasonably unchanged, it seems that
+ * the Hyper-V core loop (in hvax64.exe) isn't affected. Our ring-0 runloop
+ * is affected just as much as the ring-3 based runloop, so it cannot be ring
+ * switching as such (unless the ring-0 loop is borked and we didn't notice yet).
+ *
+ * The issue is probably in the thread / process switching area, could be
+ * something special for Hyper-V interrupt delivery or worker thread switching.
+ *
+ * Really wish this thread ping-pong going on in VID.SYS could be eliminated!
+ *
+ *
+ * @subsubsection subsect_nem_win_benchmarks_bs2t1u2 17763: Bootsector2-test1
+ *
+ * Some preliminary numbers for build 17763 on the 3.4 GHz AMD 1950X; the second
+ * column should improve once we get time to have a look at the register page.
+ *
+ * There is a 50% performance loss here compared to the June numbers with
+ * build 17134. The RDTSC numbers hint that it isn't in the Hyper-V core
+ * (hvax64.exe), but in something on the NT side.
+ *
+ * Clearing bit 20 in nt!KiSpeculationFeatures speeds things up (i.e. changing
+ * the dword from 0x00300065 to 0x00200065 in windbg). This is checked by
+ * nt!KePrepareToDispatchVirtualProcessor, making it a no-op if the flag is
+ * clear. winhvr!WinHvpVpDispatchLoop calls that function before making
+ * hypercall 0xc2, which presumably does the heavy VCpu lifting in hvcax64.exe.
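+ *
+ * A minimal windbg sketch of that tweak (illustrative only; it assumes an
+ * active kernel debugging session and that the symbol resolves):
+ * @verbatim
+kd> dd nt!KiSpeculationFeatures L1
+kd> ed nt!KiSpeculationFeatures 0x00200065
+ * @endverbatim
+ *
+ * The third result column in the table below is with this tweak applied.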
+ *
+ * @verbatim
+TESTING... WinHv API Hypercalls + VID clr(bit-20) + WinHv API
+ 32-bit paged protected mode, CPUID : 54 145 ins/sec 51 436 130 076
+ real mode, CPUID : 54 178 ins/sec 51 713 130 449
+ [snip]
+ 32-bit paged protected mode, RDTSC : 98 927 639 ins/sec 100 254 552 100 549 882
+ real mode, RDTSC : 99 601 206 ins/sec 100 886 699 100 470 957
+ [snip]
+ 32-bit paged protected mode, 32-bit IN : 54 621 ins/sec 51 524 128 294
+ 32-bit paged protected mode, 32-bit OUT : 54 870 ins/sec 51 671 129 397
+ 32-bit paged protected mode, 32-bit IN-to-ring-3 : 54 624 ins/sec 43 964 127 874
+ 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 54 803 ins/sec 44 087 129 443
+ [snip]
+ 32-bit paged protected mode, 32-bit read : 28 230 ins/sec 34 042 48 113
+ 32-bit paged protected mode, 32-bit write : 27 962 ins/sec 34 050 48 069
+ 32-bit paged protected mode, 32-bit read-to-ring-3 : 27 841 ins/sec 28 397 48 146
+ 32-bit paged protected mode, 32-bit write-to-ring-3 : 27 896 ins/sec 29 455 47 970
+ * @endverbatim
+ *
+ *
+ * @subsubsection subsect_nem_win_benchmarks_w2k 17134/2018-06-22: Windows 2000 Boot & Shutdown
+ *
+ * Timing the startup and automatic shutdown of a Windows 2000 SP4 guest serves
+ * as a real world benchmark and example of why exit performance is important.
+ * When Windows 2000 boots up it does a lot of VGA redrawing of the boot
+ * animation, which is very costly. Not having installed guest additions leaves
+ * it in a VGA mode after the bootup sequence is done, keeping up the screen
+ * access expenses, though the graphics driver is more economical than the
+ * bootvid code.
+ *
+ * The VM was configured to automatically logon. A startup script was installed
+ * to perform the automatic shutting down and powering off of the VM (thru
+ * vts_shutdown.exe -f -p). An offline snapshot of the VM was taken and restored
+ * before each test run. The test run time is calculated from the monotonic
+ * VBox.log timestamps, starting with the state change to 'RUNNING' and stopping
+ * at 'POWERING_OFF'.
+ *
+ * The host OS and VirtualBox build is the same as for the bootsector2-test1
+ * scenario.
+ *
+ * Results:
+ *
+ * - WinHv API for all but physical page mappings:
+ * 32 min 12.19 seconds
+ *
+ * - The default NEM/win configuration where we put the main execution loop
+ * in ring-0, using hypercalls when we can and VID for managing execution:
+ * 3 min 23.18 seconds
+ *
+ * - Regular VirtualBox using AMD-V directly, hyper-V is disabled, main
+ * execution loop in ring-0:
+ * 58.09 seconds
+ *
+ * - WinHv API with exit history based optimizations:
+ * 58.66 seconds
+ *
+ * - Hypercall + VID.SYS with exit history base optimizations:
+ * 58.94 seconds
+ *
+ * A well above average machine needing over half an hour for booting a
+ * nearly 20 year old guest kind of says it all. The 13%-20% exit performance
+ * increase we get by using hypercalls and VID.SYS directly pays off a lot here.
+ * The 3m23s is almost acceptable in comparison to the half an hour.
+ *
+ * The similarity between the last three results strongly hints at Windows 2000
+ * doing a lot of waiting during boot and shutdown, making it a less than ideal
+ * testcase once a basic performance level is reached.
+ *
+ *
+ * @subsubsection subsection_iem_win_benchmarks_deb9_nat Debian 9 NAT performance
+ *
+ * This benchmark is about network performance over NAT from a 64-bit Debian 9
+ * VM with a single CPU. For network performance measurements, we use our own
+ * NetPerf tool (ValidationKit/utils/network/NetPerf.cpp) to measure latency
+ * and throughput.
+ *
+ * The setups, builds and configurations are as in the previous benchmarks
+ * (release r123172 on 1950X running 64-bit W10/17134, 2018-06-xx). Please note
+ * that the exit optimizations haven't yet been tuned with NetPerf in mind.
+ *
+ * The NAT network setup was selected here since it's the default one and the
+ * slowest one. There is quite a bit of IPC with worker threads and packet
+ * processing involved.
+ *
+ * Latency test is first up. This is a classic back and forth between the two
+ * NetPerf instances, where the key measurement is the roundtrip latency. The
+ * values here are the lowest result over 3-6 runs.
+ *
+ * Against host system:
+ * - 152 258 ns/roundtrip - 100% - regular VirtualBox SVM
+ * - 271 059 ns/roundtrip - 178% - Hypercalls + VID.SYS in ring-0 with exit optimizations.
+ * - 280 149 ns/roundtrip - 184% - Hypercalls + VID.SYS in ring-0
+ * - 317 735 ns/roundtrip - 209% - Win HV API with exit optimizations.
+ * - 342 440 ns/roundtrip - 225% - Win HV API
+ *
+ * Against a remote Windows 10 system over a 10Gbps link:
+ * - 243 969 ns/roundtrip - 100% - regular VirtualBox SVM
+ * - 384 427 ns/roundtrip - 158% - Win HV API with exit optimizations.
+ * - 402 411 ns/roundtrip - 165% - Hypercalls + VID.SYS in ring-0
+ * - 406 313 ns/roundtrip - 167% - Win HV API
+ * - 413 160 ns/roundtrip - 169% - Hypercalls + VID.SYS in ring-0 with exit optimizations.
+ *
+ * What we see here is:
+ *
+ *    - Consistent and significant latency increase using Hyper-V compared
+ * to directly harnessing AMD-V ourselves.
+ *
+ * - When talking to the host, it's clear that the hypercalls + VID.SYS
+ * in ring-0 method pays off.
+ *
+ * - When talking to a different host, the numbers are closer and it
+ *      is no longer clear which Hyper-V execution method is better.
+ *
+ *
+ * Throughput benchmarks are performed by one side pushing data full throttle
+ * for 10 seconds (minus 1 second at each end of the test), then reversing
+ * the roles and measuring it in the other direction. The tests ran 3-5 times
+ * and below are the highest and lowest results in each direction.
+ *
+ * Receiving from host system:
+ * - Regular VirtualBox SVM:
+ * Max: 96 907 549 bytes/s - 100%
+ * Min: 86 912 095 bytes/s - 100%
+ * - Hypercalls + VID.SYS in ring-0:
+ * Max: 84 036 544 bytes/s - 87%
+ * Min: 64 978 112 bytes/s - 75%
+ * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
+ * Max: 77 760 699 bytes/s - 80%
+ * Min: 72 677 171 bytes/s - 84%
+ * - Win HV API with exit optimizations:
+ * Max: 64 465 905 bytes/s - 67%
+ * Min: 62 286 369 bytes/s - 72%
+ * - Win HV API:
+ * Max: 62 466 631 bytes/s - 64%
+ * Min: 61 362 782 bytes/s - 70%
+ *
+ * Sending to the host system:
+ * - Regular VirtualBox SVM:
+ * Max: 87 728 652 bytes/s - 100%
+ * Min: 86 923 198 bytes/s - 100%
+ * - Hypercalls + VID.SYS in ring-0:
+ * Max: 84 280 749 bytes/s - 96%
+ * Min: 78 369 842 bytes/s - 90%
+ * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
+ * Max: 84 119 932 bytes/s - 96%
+ * Min: 77 396 811 bytes/s - 89%
+ * - Win HV API:
+ * Max: 81 714 377 bytes/s - 93%
+ * Min: 78 697 419 bytes/s - 91%
+ * - Win HV API with exit optimizations:
+ * Max: 80 502 488 bytes/s - 91%
+ * Min: 71 164 978 bytes/s - 82%
+ *
+ * Receiving from a remote Windows 10 system over a 10Gbps link:
+ * - Hypercalls + VID.SYS in ring-0:
+ * Max: 115 346 922 bytes/s - 136%
+ * Min: 112 912 035 bytes/s - 137%
+ * - Regular VirtualBox SVM:
+ * Max: 84 517 504 bytes/s - 100%
+ * Min: 82 597 049 bytes/s - 100%
+ * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
+ * Max: 77 736 251 bytes/s - 92%
+ * Min: 73 813 784 bytes/s - 89%
+ * - Win HV API with exit optimizations:
+ * Max: 63 035 587 bytes/s - 75%
+ * Min: 57 538 380 bytes/s - 70%
+ * - Win HV API:
+ * Max: 62 279 185 bytes/s - 74%
+ * Min: 56 813 866 bytes/s - 69%
+ *
+ * Sending to a remote Windows 10 system over a 10Gbps link:
+ * - Win HV API with exit optimizations:
+ * Max: 116 502 357 bytes/s - 103%
+ * Min: 49 046 550 bytes/s - 59%
+ * - Regular VirtualBox SVM:
+ * Max: 113 030 991 bytes/s - 100%
+ * Min: 83 059 511 bytes/s - 100%
+ * - Hypercalls + VID.SYS in ring-0:
+ * Max: 106 435 031 bytes/s - 94%
+ * Min: 47 253 510 bytes/s - 57%
+ * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
+ * Max: 94 842 287 bytes/s - 84%
+ * Min: 68 362 172 bytes/s - 82%
+ * - Win HV API:
+ * Max: 65 165 225 bytes/s - 58%
+ * Min: 47 246 573 bytes/s - 57%
+ *
+ * What we see here is:
+ *
+ * - Again consistent numbers when talking to the host. Showing that the
+ * ring-0 approach is preferable to the ring-3 one.
+ *
+ *    - Again, when talking to a remote host, things get more difficult to
+ *      make sense of. The spread is larger, and direct AMD-V gets beaten by
+ *      a different Hyper-V approach in each direction.
+ *
+ *    - However, if we treat the first entry in each list (remote host) as a
+ *      weird spike, the other entries are consistently worse compared to
+ *      direct AMD-V. For the send case we get really bad results for WinHv.
+ *
+ */
+
diff --git a/src/VBox/VMM/VMMR3/PDM.cpp b/src/VBox/VMM/VMMR3/PDM.cpp
new file mode 100644
index 00000000..1e56b66f
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDM.cpp
@@ -0,0 +1,3096 @@
+/* $Id: PDM.cpp $ */
+/** @file
+ * PDM - Pluggable Device Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_pdm PDM - The Pluggable Device & Driver Manager
+ *
+ * The PDM handles devices and their drivers in a flexible and dynamic manner.
+ *
+ * VirtualBox is designed to be very configurable, i.e. the ability to select
+ * virtual devices and configure them uniquely for a VM. For this reason
+ * virtual devices are not statically linked with the VMM but loaded, linked and
+ * instantiated at runtime by PDM using the information found in the
+ * Configuration Manager (CFGM).
+ *
+ * While the chief purpose of PDM is the management of devices and their
+ * drivers, it also serves as somewhere to put useful things like cross context
+ * queues, cross context synchronization (like critsect), VM centric thread
+ * management, an asynchronous I/O framework, and so on.
+ *
+ * @sa @ref grp_pdm
+ * @subpage pg_pdm_block_cache
+ * @subpage pg_pdm_audio
+ *
+ *
+ * @section sec_pdm_dev The Pluggable Devices
+ *
+ * Devices register themselves when the module containing them is loaded. PDM
+ * will call the entry point 'VBoxDevicesRegister' when loading a device module.
+ * The device module will then use the supplied callback table to check the VMM
+ * version and to register its devices. Each device has a unique name (within
+ * the VM configuration anyway). The name is not only used in PDM, but also in
+ * CFGM to organize device and device instance settings, and by anyone who wants
+ * to talk to a specific device instance.
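+ *
+ * A rough sketch of such an entry point (the exact version checking varies,
+ * and g_DeviceMyDev is a hypothetical PDMDEVREG instance):
+ * @code
+extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
+{
+    LogFlow(("VBoxDevicesRegister: u32Version=%#x\n", u32Version));
+    AssertLogRelMsgReturn(u32Version >= VBOX_VERSION, ("u32Version=%#x\n", u32Version),
+                          VERR_VERSION_MISMATCH);
+    return pCallbacks->pfnRegister(pCallbacks, &g_DeviceMyDev);
+}
+ @endcode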
+ *
+ * When all device modules have been successfully loaded PDM will instantiate
+ * those devices which are configured for the VM. Note that a device may have
+ * more than one instance, take network adaptors as an example. When
+ * instantiating a device PDM provides device instance memory and a callback
+ * table (aka Device Helpers / DevHlp) with the VM APIs which the device
+ * instance is trusted with.
+ *
+ * Some devices are trusted devices, most are not. The trusted devices are an
+ * integrated part of the VM and can obtain the VM handle, thus enabling them to
+ * call any VM API. Untrusted devices can only use the callbacks provided
+ * during device instantiation.
+ *
+ * The main purpose in having DevHlps rather than just giving all the devices
+ * the VM handle and let them call the internal VM APIs directly, is both to
+ * create a binary interface that can be supported across releases and to
+ * create a barrier between devices and the VM. (The trusted / untrusted bit
+ * hasn't turned out to be of much use btw., but it's easy to maintain so there
+ * isn't any point in removing it.)
+ *
+ * A device can provide a ring-0 and/or a raw-mode context extension to improve
+ * the VM performance by handling exits and traps (respectively) without
+ * requiring context switches (to ring-3). Callbacks for MMIO and I/O ports
+ * need to be registered specifically for the additional contexts for this to
+ * make sense. Also, the device has to be trusted to be loaded into R0/RC
+ * because of the extra privilege it entails. Note that raw-mode code and data
+ * will be subject to relocation.
+ *
+ *
+ * @subsection sec_pdm_dev_pci PCI Devices
+ *
+ * A PDM device usually registers one PCI device during its instantiation,
+ * legacy devices may register zero, while a few (currently none) more
+ * complicated devices may register multiple PCI functions or devices.
+ *
+ * The bus, device and function assignments can either be done explicitly via
+ * the configuration or the registration call, or it can be left up to the PCI
+ * bus. The typical VBox configuration construct (ConsoleImpl2.cpp) will do
+ * explicit assignments for all devices its BusAssignmentManager class knows
+ * about.
+ *
+ * For explicit CFGM style configuration, the "PCIBusNo", "PCIDeviceNo", and
+ * "PCIFunctionNo" values in the PDM device instance configuration (not the
+ * "config" subkey, but the top level one) will be picked up for the primary PCI
+ * device. The primary PCI configuration is by default the first one, but this
+ * can be controlled using the @a idxDevCfg parameter of the
+ * PDMDEVHLPR3::pfnPCIRegister method. For subsequent configuration (@a
+ * idxDevCfg > 0) the values are taken from the "PciDevNN" subkey, where "NN" is
+ * replaced by the @a idxDevCfg value.
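+ *
+ * As a sketch of what such an explicit placement could look like (the device
+ * name and numbers are made-up examples; the key names are the ones listed
+ * above):
+ * @verbatim
+Devices/mydev/0/PCIBusNo      = 0
+Devices/mydev/0/PCIDeviceNo   = 3
+Devices/mydev/0/PCIFunctionNo = 0
+ * @endverbatim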
+ *
+ * There's currently a limit of 256 PCI devices per PDM device.
+ *
+ *
+ * @subsection sec_pdm_dev_new New Style (6.1)
+ *
+ * VBox 6.1 changes the PDM interface for devices and they have to be converted
+ * to the new style to continue working (see @bugref{9218}).
+ *
+ * Steps for converting a PDM device to the new style:
+ *
+ * - State data needs to be split into shared, ring-3, ring-0 and raw-mode
+ *      structures. The shared structure shall contain absolutely no pointers.
+ *
+ * - Context specific typedefs ending in CC for the structure and pointer to
+ * it are required (copy & edit the PRTCSTATECC stuff).
+ * The pointer to a context specific structure is obtained using the
+ * PDMINS_2_DATA_CC macro. The PDMINS_2_DATA macro gets the shared one.
+ *
+ * - Update the registration structure with sizeof the new structures.
+ *
+ *    - MMIO handlers must be converted to the FNIOMMMIONEWREAD and
+ *      FNIOMMMIONEWWRITE form; take care renaming GCPhys to off and really
+ *      treating it as an offset. The return status is VBOXSTRICTRC, which
+ *      should be propagated to worker functions as far as possible. (See the
+ *      handler sketch after this list.)
+ *
+ *    - I/O port handlers must be converted to the FNIOMIOPORTNEWIN and
+ *      FNIOMIOPORTNEWOUT form; take care renaming uPort/Port to offPort and
+ *      really treating it as an offset. The return status is VBOXSTRICTRC,
+ *      which should be propagated to worker functions as far as possible.
+ *
+ * - MMIO and I/O port registration must be converted, handles stored in the shared structure.
+ *
+ * - PCI devices must also update the I/O region registration and corresponding
+ * mapping callback. The latter is generally not needed any more, as the PCI
+ * bus does the mapping and unmapping using the handle passed to it during registration.
+ *
+ * - If the device contains ring-0 or raw-mode optimizations:
+ * - Make sure to replace any R0Enabled, GCEnabled, and RZEnabled with
+ * pDevIns->fR0Enabled and pDevIns->fRCEnabled. Removing CFGM reading and
+ * validation of such options as well as state members for them.
+ *      - Callbacks for ring-0 and raw-mode are registered in a context constructor.
+ * Setting up of non-default critical section handling needs to be repeated
+ * in the ring-0/raw-mode context constructor too. See for instance
+ * e1kRZConstruct().
+ *
+ * - Convert all PDMCritSect calls to PDMDevHlpCritSect.
+ * Note! pDevIns should be passed as parameter rather than put in pThisCC.
+ *
+ * - Convert all timers to the handle based ones.
+ *
+ * - Convert all queues to the handle based ones or tasks.
+ *
+ * - Set the PDM_DEVREG_FLAGS_NEW_STYLE in the registration structure.
+ * (Functionally, this only makes a difference for PDMDevHlpSetDeviceCritSect
+ *      behavior, but it will become mandatory once all devices have been
+ * converted.)
+ *
+ * - Convert all CFGMR3Xxxx calls to pHlp->pfnCFGMXxxx.
+ *
+ * - Convert all SSMR3Xxxx calls to pHlp->pfnSSMXxxx.
+ *
+ * - Ensure that CFGM values and nodes are validated using PDMDEV_VALIDATE_CONFIG_RETURN()
+ *
+ * - Ensure that the first statement in the constructors is
+ * @code
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+ @endcode
+ *      There shall be absolutely nothing preceding that and it is mandatory.
+ *
+ * - Ensure that the first statement in the destructors is
+ * @code
+ PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
+ @endcode
+ *      There shall be absolutely nothing preceding that and it is mandatory.
+ *
+ * - Use 'nm -u' (tools/win.amd64/mingw-w64/r1/bin/nm.exe on windows) to check
+ *      for VBoxVMM and VMMR0 functions you forgot to convert to device helper
+ *      calls or that would need adding as device helpers or something.
+ *
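+ * A minimal sketch of such a new-style MMIO read handler (MYDEVSTATE and its
+ * register layout are hypothetical; see FNIOMMMIONEWREAD in iom.h for the
+ * authoritative prototype):
+ * @code
+static DECLCALLBACK(VBOXSTRICTRC) mydevMmioRead(PPDMDEVINS pDevIns, void *pvUser,
+                                                RTGCPHYS off, void *pv, uint32_t cb)
+{
+    PMYDEVSTATE pThis = PDMINS_2_DATA(pDevIns, PMYDEVSTATE); /* the shared state */
+    RT_NOREF(pvUser);
+    Assert(cb == sizeof(uint32_t)); /* assuming dword-only registers here */
+    switch (off) /* 'off' is an offset into the registered region, not a physical address */
+    {
+        case 0:  *(uint32_t *)pv = pThis->uStatusReg; break; /* hypothetical register */
+        default: *(uint32_t *)pv = UINT32_MAX;        break;
+    }
+    return VINF_SUCCESS;
+}
+ @endcode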
+ *
+ * @section sec_pdm_special_devs Special Devices
+ *
+ * Several kinds of devices interact with the VMM and/or other devices, and PDM
+ * will work like a mediator for these. The typical pattern is that the device
+ * calls a special registration device helper with a set of callbacks, and PDM
+ * responds by copying this and providing a pointer to a set of helper callbacks
+ * for that particular kind of device. Unlike interfaces where the callback
+ * table pointer is used as a 'this' pointer, these arrangements will use the
+ * device instance pointer (PPDMDEVINS) as a kind of 'this' pointer.
+ *
+ * For an example of this kind of setup, see the PIC. The PIC registers itself
+ * by calling PDMDEVHLPR3::pfnPICRegister. PDM saves the device instance,
+ * copies the callback tables (PDMPICREG), resolving the ring-0 and raw-mode
+ * addresses in the process, and hands back the pointer to a set of helper
+ * methods (PDMPICHLPR3). The PIC device then queries the ring-0 and raw-mode
+ * helpers using PDMPICHLPR3::pfnGetR0Helpers and PDMPICHLPR3::pfnGetRCHelpers.
+ * The PIC device repeats the pfnGetRCHelpers call in its relocation method
+ * since the address changes when RC is relocated.
+ *
+ * @see grp_pdm_device
+ *
+ * @section sec_pdm_usbdev The Pluggable USB Devices
+ *
+ * USB devices are handled a little bit differently than other devices. The
+ * general concepts wrt. pluggability are mostly the same, but the details
+ * vary. The registration entry point is 'VBoxUsbRegister', the device
+ * instance is PDMUSBINS and the helper callbacks are different. Also, USB
+ * devices are restricted to ring-3 and cannot have any ring-0 or raw-mode
+ * extensions (at least not yet).
+ *
+ * The way USB devices work differs greatly from other devices though, since
+ * they aren't attached directly to the PCI/ISA/whatever system buses but via a
+ * USB host controller (OHCI, UHCI or EHCI). USB devices handle USB requests
+ * (URBs) and do not register I/O ports, MMIO ranges or PCI bus
+ * devices/functions.
+ *
+ * @see grp_pdm_usbdev
+ *
+ *
+ * @section sec_pdm_drv The Pluggable Drivers
+ *
+ * The VM devices are often accessing host hardware or OS facilities. For most
+ * devices these facilities can be abstracted in one or more levels. These
+ * abstractions are called drivers.
+ *
+ * For instance take a DVD/CD drive. This can be connected to a SCSI
+ * controller, an ATA controller or a SATA controller. The basics of the DVD/CD
+ * drive implementation remains the same - eject, insert, read, seek, and such.
+ * (For the SCSI case, you might want to speak SCSI directly to it, but that
+ * can of course be fixed - see SCSI passthru.) So, it makes much sense to
+ * have a generic CD/DVD driver which implements this.
+ *
+ * Then the media 'inserted' into the DVD/CD drive can be an ISO image, or it can
+ * be read from a real CD or DVD drive (there are probably other custom formats
+ * someone could desire to read or construct too). So, it would make sense to
+ * have abstracted interfaces for dealing with this in a generic way so the
+ * cdrom unit doesn't have to implement it all. Thus we have created the
+ * CDROM/DVD media driver family.
+ *
+ * So, for this example the IDE controller #1 (i.e. secondary) will have
+ * the DVD/CD Driver attached to its LUN #0 (master). When a media is mounted
+ * the DVD/CD Driver will have an ISO, HostDVD or RAW (media) Driver attached.
+ *
+ * It is possible to configure many levels of drivers inserting filters, loggers,
+ * or whatever you desire into the chain. We're using this for network sniffing,
+ * for instance.
+ *
+ * The drivers are loaded in a similar manner to that of a device, namely by
+ * iterating a keyspace in CFGM, loading the modules listed there and calling
+ * 'VBoxDriversRegister' with a callback table.
+ *
+ * @see grp_pdm_driver
+ *
+ *
+ * @section sec_pdm_ifs Interfaces
+ *
+ * The pluggable drivers and devices expose one standard interface (callback
+ * table) which is used to construct, destruct, attach, detach, (++,) and query
+ * other interfaces. A device will query the interfaces required for its
+ * operation during init and hot-plug. PDM may query some interfaces during
+ * runtime mounting too.
+ *
+ * An interface here means a function table contained within the device or
+ * driver instance data. Its methods are invoked with the function table pointer
+ * as the first argument and they will calculate the address of the device or
+ * driver instance data from it. (This is one of the aspects which *might* have
+ * been better done in C++.)
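+ *
+ * A minimal sketch of the pattern, assuming a hypothetical MYDRVSTATE that
+ * exposes PDMIBASE as its only interface:
+ * @code
+typedef struct MYDRVSTATE
+{
+    PDMIBASE    IBase;      /* function table exposed to others */
+    uint32_t    uStuff;     /* ... rest of the instance data ... */
+} MYDRVSTATE;
+
+static DECLCALLBACK(void *) mydrvQueryInterface(PPDMIBASE pInterface, const char *pszIID)
+{
+    /* Recover the instance data from the interface (function table) pointer: */
+    MYDRVSTATE *pThis = RT_FROM_MEMBER(pInterface, MYDRVSTATE, IBase);
+    PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
+    return NULL;
+}
+ @endcode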
+ *
+ * @see grp_pdm_interfaces
+ *
+ *
+ * @section sec_pdm_utils Utilities
+ *
+ * As mentioned earlier, PDM is the location of any useful constructs that don't
+ * quite fit into IPRT. The next subsections will discuss these.
+ *
+ * One thing these APIs all have in common is that resources will be associated
+ * with a device / driver and automatically freed after it has been destroyed if
+ * the destructor didn't do this.
+ *
+ *
+ * @subsection sec_pdm_async_completion Async I/O
+ *
+ * The PDM Async I/O API provides a somewhat platform agnostic interface for
+ * asynchronous I/O. For reasons of performance and complexity this does not
+ * build upon any IPRT API.
+ *
+ * @todo more details.
+ *
+ * @see grp_pdm_async_completion
+ *
+ *
+ * @subsection sec_pdm_async_task Async Task - not implemented
+ *
+ * @todo implement and describe
+ *
+ * @see grp_pdm_async_task
+ *
+ *
+ * @subsection sec_pdm_critsect Critical Section
+ *
+ * The PDM Critical Section API is currently building on the IPRT API with the
+ * same name. It adds the possibility to use critical sections in ring-0 and
+ * raw-mode as well as in ring-3. There are certain restrictions on the RC and
+ * R0 usage though since we're not able to wait on it, nor wake up anyone that
+ * is waiting on it. These restrictions originate with the use of a ring-3 event
+ * semaphore. In a later incarnation we plan to replace the ring-3 event
+ * semaphore with a ring-0 one, thus enabling us to wake up waiters while
+ * executing in ring-0 and making the hardware assisted execution mode more
+ * efficient. (Raw-mode won't benefit much from this, naturally.)
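+ *
+ * A minimal ring-3 usage sketch (pThis and its CritSect member are
+ * hypothetical; the enter/leave pattern matches pdmR3PowerOnDev() below):
+ * @code
+    PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);
+    /* ... access state shared with ring-0 and/or other threads ... */
+    PDMCritSectLeave(pVM, &pThis->CritSect);
+ @endcode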
+ *
+ * @see grp_pdm_critsect
+ *
+ *
+ * @subsection sec_pdm_queue Queue
+ *
+ * The PDM Queue API is for queuing one or more tasks for later consumption in
+ * ring-3 by EMT, and optionally forcing a delayed or ASAP return to ring-3. The
+ * queues can also be run on a timer basis as an alternative to the ASAP thing.
+ * The queue will be flushed at forced action time.
+ *
+ * A queue can also be used by another thread (an I/O worker for instance) to
+ * send work / events over to the EMT.
+ *
+ * @see grp_pdm_queue
+ *
+ *
+ * @subsection sec_pdm_task Task - not implemented yet
+ *
+ * The PDM Task API is for flagging a task for execution at a later point when
+ * we're back in ring-3, optionally forcing the ring-3 return to happen ASAP.
+ * As you can see the concept is similar to queues only simpler.
+ *
+ * A task can also be scheduled by another thread (an I/O worker for instance)
+ * as a means of getting something done in EMT.
+ *
+ * @see grp_pdm_task
+ *
+ *
+ * @subsection sec_pdm_thread Thread
+ *
+ * The PDM Thread API is there to help devices and drivers manage their threads
+ * correctly wrt. power on, suspend, resume, power off and destruction.
+ *
+ * The general usage pattern for threads in the employ of devices and drivers is
+ * that they shuffle data or requests while the VM is running and stop doing
+ * this when the VM is paused or powered down. Rogue threads running while the
+ * VM is paused can cause the state to change during saving or have other
+ * unwanted side effects. The PDM Threads API ensures that this won't happen.
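+ *
+ * The typical thread body shape, sketched (mydevThread is hypothetical; the
+ * state checks are the part the PDM Thread API cares about):
+ * @code
+static DECLCALLBACK(int) mydevThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
+{
+    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
+        return VINF_SUCCESS; /* the first call is just for initialization */
+    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
+    {
+        /* ... shuffle data / process requests ... */
+    }
+    return VINF_SUCCESS;
+}
+ @endcode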
+ *
+ * @see grp_pdm_thread
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM
+#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/sup.h>
+
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/ctype.h>
+#include <iprt/ldr.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The PDM saved state version. */
+#define PDM_SAVED_STATE_VERSION 5
+/** Before the PDM audio architecture was introduced there was an "AudioSniffer"
+ * device which took care of multiplexing input/output audio data from/to various places.
+ * Thus this device is not needed/used anymore. */
+#define PDM_SAVED_STATE_VERSION_PRE_PDM_AUDIO 4
+#define PDM_SAVED_STATE_VERSION_PRE_NMI_FF 3
+
+/** The number of nanoseconds a suspend callback needs to take before
+ * PDMR3Suspend warns about it taking too long. */
+#define PDMSUSPEND_WARN_AT_NS UINT64_C(1200000000)
+
+/** The number of nanoseconds a suspend callback needs to take before
+ * PDMR3PowerOff warns about it taking too long. */
+#define PDMPOWEROFF_WARN_AT_NS UINT64_C( 900000000)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Statistics of asynchronous notification tasks - used by reset, suspend and
+ * power off.
+ */
+typedef struct PDMNOTIFYASYNCSTATS
+{
+ /** The start timestamp. */
+ uint64_t uStartNsTs;
+ /** When to log the next time. */
+ uint64_t cNsElapsedNextLog;
+ /** The loop counter. */
+ uint32_t cLoops;
+ /** The number of pending asynchronous notification tasks. */
+ uint32_t cAsync;
+ /** The name of the operation (log prefix). */
+ const char *pszOp;
+ /** The current list buffer position. */
+ size_t offList;
+ /** String containing a list of the pending tasks. */
+ char szList[1024];
+} PDMNOTIFYASYNCSTATS;
+/** Pointer to the stats of pending asynchronous notification tasks. */
+typedef PDMNOTIFYASYNCSTATS *PPDMNOTIFYASYNCSTATS;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) pdmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
+static DECLCALLBACK(int) pdmR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static DECLCALLBACK(int) pdmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
+
+static FNDBGFHANDLERINT pdmR3InfoTracingIds;
+
+
+/**
+ * Initializes the PDM part of the UVM.
+ *
+ * This doesn't really do much right now but has to be here for the sake
+ * of completeness.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3_INT_DECL(int) PDMR3InitUVM(PUVM pUVM)
+{
+ AssertCompile(sizeof(pUVM->pdm.s) <= sizeof(pUVM->pdm.padding));
+ AssertRelease(sizeof(pUVM->pdm.s) <= sizeof(pUVM->pdm.padding));
+ pUVM->pdm.s.pModules = NULL;
+ pUVM->pdm.s.pCritSects = NULL;
+ pUVM->pdm.s.pRwCritSects = NULL;
+ return RTCritSectInit(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Initializes the PDM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) PDMR3Init(PVM pVM)
+{
+ LogFlow(("PDMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertRelease(!(RT_UOFFSETOF(VM, pdm.s) & 31));
+ AssertRelease(sizeof(pVM->pdm.s) <= sizeof(pVM->pdm.padding));
+ AssertCompileMemberAlignment(PDM, CritSect, sizeof(uintptr_t));
+
+ /*
+ * Init the structure.
+ */
+ pVM->pdm.s.GCPhysVMMDevHeap = NIL_RTGCPHYS;
+ //pVM->pdm.s.idTracingDev = 0;
+ pVM->pdm.s.idTracingOther = 1024;
+
+ /*
+ * Initialize critical sections first.
+ */
+ int rc = pdmR3CritSectBothInitStatsAndInfo(pVM);
+ if (RT_SUCCESS(rc))
+ rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, RT_SRC_POS, "PDM");
+ if (RT_SUCCESS(rc))
+ {
+ rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.NopCritSect, RT_SRC_POS, "NOP");
+ if (RT_SUCCESS(rc))
+ pVM->pdm.s.NopCritSect.s.Core.fFlags |= RTCRITSECT_FLAGS_NOP;
+ }
+
+ /*
+ * Initialize sub components.
+ */
+ if (RT_SUCCESS(rc))
+ rc = pdmR3TaskInit(pVM);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3LdrInitU(pVM->pUVM);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ if (RT_SUCCESS(rc))
+ rc = pdmR3AsyncCompletionInit(pVM);
+#endif
+#ifdef VBOX_WITH_NETSHAPER
+ if (RT_SUCCESS(rc))
+ rc = pdmR3NetShaperInit(pVM);
+#endif
+ if (RT_SUCCESS(rc))
+ rc = pdmR3BlkCacheInit(pVM);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3DrvInit(pVM);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3DevInit(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Register the saved state data unit.
+ */
+ rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
+ NULL, pdmR3LiveExec, NULL,
+ NULL, pdmR3SaveExec, NULL,
+ pdmR3LoadPrep, pdmR3LoadExec, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Register the info handlers.
+ */
+ DBGFR3InfoRegisterInternal(pVM, "pdmtracingids",
+ "Displays the tracing IDs assigned by PDM to devices, USB device, drivers and more.",
+ pdmR3InfoTracingIds);
+
+ LogFlow(("PDM: Successfully initialized\n"));
+ return rc;
+ }
+ }
+
+ /*
+ * Cleanup and return failure.
+ */
+ PDMR3Term(pVM);
+ LogFlow(("PDMR3Init: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Init phase completed callback.
+ *
+ * We use this for calling PDMDEVREG::pfnInitComplete callback after everything
+ * else has been initialized.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat The phase that was completed.
+ */
+VMMR3_INT_DECL(int) PDMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ if (enmWhat == VMINITCOMPLETED_RING0)
+ return pdmR3DevInitComplete(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ * @remark The loader subcomponent is relocated by PDMR3LdrRelocate() very
+ * early in the relocation phase.
+ */
+VMMR3_INT_DECL(void) PDMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ LogFlow(("PDMR3Relocate\n"));
+ RT_NOREF(pVM, offDelta);
+
+#ifdef VBOX_WITH_RAW_MODE_KEEP /* needs fixing */
+ /*
+ * The registered PIC.
+ */
+ if (pVM->pdm.s.Pic.pDevInsRC)
+ {
+ pVM->pdm.s.Pic.pDevInsRC += offDelta;
+ pVM->pdm.s.Pic.pfnSetIrqRC += offDelta;
+ pVM->pdm.s.Pic.pfnGetInterruptRC += offDelta;
+ }
+
+ /*
+ * The registered APIC.
+ */
+ if (pVM->pdm.s.Apic.pDevInsRC)
+ pVM->pdm.s.Apic.pDevInsRC += offDelta;
+
+ /*
+ * The registered I/O APIC.
+ */
+ if (pVM->pdm.s.IoApic.pDevInsRC)
+ {
+ pVM->pdm.s.IoApic.pDevInsRC += offDelta;
+ pVM->pdm.s.IoApic.pfnSetIrqRC += offDelta;
+ if (pVM->pdm.s.IoApic.pfnSendMsiRC)
+ pVM->pdm.s.IoApic.pfnSendMsiRC += offDelta;
+ if (pVM->pdm.s.IoApic.pfnSetEoiRC)
+ pVM->pdm.s.IoApic.pfnSetEoiRC += offDelta;
+ }
+
+ /*
+ * Devices & Drivers.
+ */
+ int rc;
+ PCPDMDEVHLPRC pDevHlpRC = NIL_RTRCPTR;
+ if (VM_IS_RAW_MODE_ENABLED(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pDevHlpRC);
+ AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDevHlp\n", rc));
+ }
+
+ PCPDMDRVHLPRC pDrvHlpRC = NIL_RTRCPTR;
+ if (VM_IS_RAW_MODE_ENABLED(pVM))
+ {
+        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDrvHlp", &pDrvHlpRC);
+        AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDrvHlp\n", rc));
+ }
+
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ {
+ pDevIns->pHlpRC = pDevHlpRC;
+ pDevIns->pvInstanceDataRC = MMHyperR3ToRC(pVM, pDevIns->pvInstanceDataR3);
+ if (pDevIns->pCritSectRoR3)
+ pDevIns->pCritSectRoRC = MMHyperR3ToRC(pVM, pDevIns->pCritSectRoR3);
+ pDevIns->Internal.s.pVMRC = pVM->pVMRC;
+
+ PPDMPCIDEV pPciDev = pDevIns->Internal.s.pHeadPciDevR3;
+ if (pPciDev)
+ {
+ pDevIns->Internal.s.pHeadPciDevRC = MMHyperR3ToRC(pVM, pPciDev);
+ do
+ {
+ pPciDev->Int.s.pDevInsRC = MMHyperR3ToRC(pVM, pPciDev->Int.s.pDevInsR3);
+ pPciDev->Int.s.pPdmBusRC = MMHyperR3ToRC(pVM, pPciDev->Int.s.pPdmBusR3);
+ if (pPciDev->Int.s.pNextR3)
+ pPciDev->Int.s.pNextRC = MMHyperR3ToRC(pVM, pPciDev->Int.s.pNextR3);
+ pPciDev = pPciDev->Int.s.pNextR3;
+ } while (pPciDev);
+ }
+
+ if (pDevIns->pReg->pfnRelocate)
+ {
+ LogFlow(("PDMR3Relocate: Relocating device '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->pReg->pfnRelocate(pDevIns, offDelta);
+ }
+ }
+
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ {
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ {
+ if (pDrvIns->pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+ {
+ pDrvIns->pHlpRC = pDrvHlpRC;
+ pDrvIns->pvInstanceDataRC = MMHyperR3ToRC(pVM, pDrvIns->pvInstanceDataR3);
+ pDrvIns->Internal.s.pVMRC = pVM->pVMRC;
+ if (pDrvIns->pReg->pfnRelocate)
+ {
+ LogFlow(("PDMR3Relocate: Relocating driver '%s'/%u attached to '%s'/%d/%u\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance,
+ pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun));
+ pDrvIns->pReg->pfnRelocate(pDrvIns, offDelta);
+ }
+ }
+ }
+ }
+
+ }
+#endif /* VBOX_WITH_RAW_MODE_KEEP */
+}
+
+
+/**
+ * Worker for pdmR3Term that terminates a LUN chain.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pLun The head of the chain.
+ * @param pszDevice The name of the device (for logging).
+ * @param iInstance The device instance number (for logging).
+ */
+static void pdmR3TermLuns(PVM pVM, PPDMLUN pLun, const char *pszDevice, unsigned iInstance)
+{
+ RT_NOREF2(pszDevice, iInstance);
+
+ for (; pLun; pLun = pLun->pNext)
+ {
+ /*
+ * Destroy them one at a time from the bottom up.
+ * (The serial device/drivers depends on this - bad.)
+ */
+ PPDMDRVINS pDrvIns = pLun->pBottom;
+ pLun->pBottom = pLun->pTop = NULL;
+ while (pDrvIns)
+ {
+ PPDMDRVINS pDrvNext = pDrvIns->Internal.s.pUp;
+
+ if (pDrvIns->pReg->pfnDestruct)
+ {
+ LogFlow(("pdmR3DevTerm: Destroying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pLun->iLun, pszDevice, iInstance));
+ pDrvIns->pReg->pfnDestruct(pDrvIns);
+ }
+ pDrvIns->Internal.s.pDrv->cInstances--;
+
+ /* Order of resource freeing like in pdmR3DrvDestroyChain, but
+ * not all need to be done as they are done globally later. */
+ //PDMR3QueueDestroyDriver(pVM, pDrvIns);
+ TMR3TimerDestroyDriver(pVM, pDrvIns);
+ SSMR3DeregisterDriver(pVM, pDrvIns, NULL, 0);
+ //pdmR3ThreadDestroyDriver(pVM, pDrvIns);
+ //DBGFR3InfoDeregisterDriver(pVM, pDrvIns, NULL);
+ //pdmR3CritSectBothDeleteDriver(pVM, pDrvIns);
+ //PDMR3BlkCacheReleaseDriver(pVM, pDrvIns);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ //pdmR3AsyncCompletionTemplateDestroyDriver(pVM, pDrvIns);
+#endif
+
+            /* Clear the driver structure to catch sloppy code. */
+ ASMMemFill32(pDrvIns, RT_UOFFSETOF_DYN(PDMDRVINS, achInstanceData[pDrvIns->pReg->cbInstance]), 0xdeadd0d0);
+
+ pDrvIns = pDrvNext;
+ }
+ }
+}
+
+
+/**
+ * Terminates the PDM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) PDMR3Term(PVM pVM)
+{
+ LogFlow(("PDMR3Term:\n"));
+ AssertMsg(PDMCritSectIsInitialized(&pVM->pdm.s.CritSect), ("bad init order!\n"));
+
+ /*
+     * Iterate the device instances and attached drivers, doing
+ * relevant destruction processing.
+ *
+ * N.B. There is no need to mess around freeing memory allocated
+ * from any MM heap since MM will do that in its Term function.
+ */
+ /* usb ones first. */
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ pdmR3TermLuns(pVM, pUsbIns->Internal.s.pLuns, pUsbIns->pReg->szName, pUsbIns->iInstance);
+
+ /*
+ * Detach it from the HUB (if it's actually attached to one) so the HUB has
+ * a chance to stop accessing any data.
+ */
+ PPDMUSBHUB pHub = pUsbIns->Internal.s.pHub;
+ if (pHub)
+ {
+ int rc = pHub->Reg.pfnDetachDevice(pHub->pDrvIns, pUsbIns, pUsbIns->Internal.s.iPort);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDM: Failed to detach USB device '%s' instance %d from %p: %Rrc\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pHub, rc));
+ }
+ else
+ {
+ pHub->cAvailablePorts++;
+ Assert(pHub->cAvailablePorts > 0 && pHub->cAvailablePorts <= pHub->cPorts);
+ pUsbIns->Internal.s.pHub = NULL;
+ }
+ }
+
+ if (pUsbIns->pReg->pfnDestruct)
+ {
+ LogFlow(("pdmR3DevTerm: Destroying - device '%s'/%d\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->pReg->pfnDestruct(pUsbIns);
+ }
+
+ //TMR3TimerDestroyUsb(pVM, pUsbIns);
+ //SSMR3DeregisterUsb(pVM, pUsbIns, NULL, 0);
+ pdmR3ThreadDestroyUsb(pVM, pUsbIns);
+
+ if (pUsbIns->pszName)
+ {
+ RTStrFree(pUsbIns->pszName); /* See the RTStrDup() call in PDMUsb.cpp:pdmR3UsbCreateDevice. */
+ pUsbIns->pszName = NULL;
+ }
+ }
+
+ /* then the 'normal' ones. */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ pdmR3TermLuns(pVM, pDevIns->Internal.s.pLunsR3, pDevIns->pReg->szName, pDevIns->iInstance);
+
+ if (pDevIns->pReg->pfnDestruct)
+ {
+ LogFlow(("pdmR3DevTerm: Destroying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->pReg->pfnDestruct(pDevIns);
+ }
+
+ if (pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_CONTRUCT)
+ {
+ LogFlow(("pdmR3DevTerm: Destroying (ring-0) - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PDMDEVICEGENCALLREQ Req;
+ RT_ZERO(Req.Params);
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.enmCall = PDMDEVICEGENCALL_DESTRUCT;
+ Req.idxR0Device = pDevIns->Internal.s.idxR0Device;
+ Req.pDevInsR3 = pDevIns;
+ int rc2 = VMMR3CallR0(pVM, VMMR0_DO_PDM_DEVICE_GEN_CALL, 0, &Req.Hdr);
+ AssertRC(rc2);
+ }
+
+ if (pDevIns->Internal.s.paDbgfTraceTrack)
+ {
+ RTMemFree(pDevIns->Internal.s.paDbgfTraceTrack);
+ pDevIns->Internal.s.paDbgfTraceTrack = NULL;
+ }
+
+#ifdef VBOX_WITH_DBGF_TRACING
+ if (pDevIns->Internal.s.hDbgfTraceEvtSrc != NIL_DBGFTRACEREVTSRC)
+ {
+ DBGFR3TracerDeregisterEvtSrc(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc);
+ pDevIns->Internal.s.hDbgfTraceEvtSrc = NIL_DBGFTRACEREVTSRC;
+ }
+#endif
+
+ TMR3TimerDestroyDevice(pVM, pDevIns);
+ SSMR3DeregisterDevice(pVM, pDevIns, NULL, 0);
+ pdmR3CritSectBothDeleteDevice(pVM, pDevIns);
+ pdmR3ThreadDestroyDevice(pVM, pDevIns);
+ PDMR3QueueDestroyDevice(pVM, pDevIns);
+ PGMR3PhysMmio2Deregister(pVM, pDevIns, NIL_PGMMMIO2HANDLE);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ pdmR3AsyncCompletionTemplateDestroyDevice(pVM, pDevIns);
+#endif
+ DBGFR3InfoDeregisterDevice(pVM, pDevIns, NULL);
+ }
+
+ /*
+ * Destroy all threads.
+ */
+ pdmR3ThreadDestroyAll(pVM);
+
+ /*
+ * Destroy the block cache.
+ */
+ pdmR3BlkCacheTerm(pVM);
+
+#ifdef VBOX_WITH_NETSHAPER
+ /*
+ * Destroy network bandwidth groups.
+ */
+ pdmR3NetShaperTerm(pVM);
+#endif
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ /*
+ * Free async completion managers.
+ */
+ pdmR3AsyncCompletionTerm(pVM);
+#endif
+
+ /*
+ * Free modules.
+ */
+ pdmR3LdrTermU(pVM->pUVM, false /*fFinal*/);
+
+ /*
+ * Stop task threads.
+ */
+ pdmR3TaskTerm(pVM);
+
+ /*
+ * Cleanup any leftover queues.
+ */
+ pdmR3QueueTerm(pVM);
+
+ /*
+ * Destroy the PDM lock.
+ */
+ PDMR3CritSectDelete(pVM, &pVM->pdm.s.CritSect);
+ /* The MiscCritSect is deleted by PDMR3CritSectBothTerm later. */
+
+ LogFlow(("PDMR3Term: returns %Rrc\n", VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the PDM part of the UVM.
+ *
+ * This will unload any modules left behind.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3_INT_DECL(void) PDMR3TermUVM(PUVM pUVM)
+{
+ /*
+     * In the normal course of events we will now call pdmR3LdrTermU for
+     * the second time. In the case of init failure however, this might be
+     * the first time, which is why we do it.
+ */
+ pdmR3LdrTermU(pUVM, true /*fFinal*/);
+
+ Assert(pUVM->pdm.s.pCritSects == NULL);
+ Assert(pUVM->pdm.s.pRwCritSects == NULL);
+ RTCritSectDelete(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * For APIC assertions.
+ *
+ * @returns true if we've loaded state.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) PDMR3HasLoadedState(PVM pVM)
+{
+ return pVM->pdm.s.fStateLoaded;
+}
+
+
+/**
+ * Bits that are saved in pass 0 and in the final pass.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static void pdmR3SaveBoth(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Save the list of device instances so we can check that they're all still
+ * there when we load the state and that nothing new has been added.
+ */
+ uint32_t i = 0;
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3, i++)
+ {
+ SSMR3PutU32(pSSM, i);
+ SSMR3PutStrZ(pSSM, pDevIns->pReg->szName);
+ SSMR3PutU32(pSSM, pDevIns->iInstance);
+ }
+ SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
+}
+
+
+/**
+ * Live save.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uPass The pass.
+ */
+static DECLCALLBACK(int) pdmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ LogFlow(("pdmR3LiveExec:\n"));
+ AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
+ pdmR3SaveBoth(pVM, pSSM);
+ return VINF_SSM_DONT_CALL_AGAIN;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static DECLCALLBACK(int) pdmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("pdmR3SaveExec:\n"));
+
+ /*
+ * Save interrupt and DMA states.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
+ }
+ SSMR3PutU32(pSSM, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA));
+
+ pdmR3SaveBoth(pVM, pSSM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Prepare state load operation.
+ *
+ * This will dispatch pending operations and clear the FFs governed by PDM and its devices.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ */
+static DECLCALLBACK(int) pdmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("pdmR3LoadPrep: %s%s\n",
+ VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES) ? " VM_FF_PDM_QUEUES" : "",
+ VM_FF_IS_SET(pVM, VM_FF_PDM_DMA) ? " VM_FF_PDM_DMA" : ""));
+#ifdef LOG_ENABLED
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ LogFlow(("pdmR3LoadPrep: VCPU %u %s%s\n", idCpu,
+ VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
+ VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : ""));
+ }
+#endif
+ NOREF(pSSM);
+
+ /*
+ * In case there is work pending that will raise an interrupt,
+ * start a DMA transfer, or release a lock. (unlikely)
+ */
+ if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
+ PDMR3QueueFlushAll(pVM);
+
+ /* Clear the FFs. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_SMI);
+ }
+ VM_FF_CLEAR(pVM, VM_FF_PDM_DMA);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ int rc;
+
+ LogFlow(("pdmR3LoadExec: uPass=%#x\n", uPass));
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != PDM_SAVED_STATE_VERSION
+ && uVersion != PDM_SAVED_STATE_VERSION_PRE_NMI_FF
+ && uVersion != PDM_SAVED_STATE_VERSION_PRE_PDM_AUDIO)
+ {
+ AssertMsgFailed(("Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ if (uPass == SSM_PASS_FINAL)
+ {
+ /*
+ * Load the interrupt and DMA states.
+ *
+         * The APIC, PIC and DMA devices do not restore these, we do. In the
+         * APIC and PIC cases, it is possible that some devices are incorrectly
+         * setting IRQs during restore. We'll warn when this happens. (There
+ * are debug assertions in PDMDevMiscHlp.cpp and APICAll.cpp for
+ * catching the buggy device.)
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /* APIC interrupt */
+ uint32_t fInterruptPending = 0;
+ rc = SSMR3GetU32(pSSM, &fInterruptPending);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fInterruptPending & ~1)
+ {
+ AssertMsgFailed(("fInterruptPending=%#x (APIC)\n", fInterruptPending));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC),
+ ("VCPU%03u: VMCPU_FF_INTERRUPT_APIC set! Devices shouldn't set interrupts during state restore...\n", idCpu));
+ if (fInterruptPending)
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
+
+ /* PIC interrupt */
+ fInterruptPending = 0;
+ rc = SSMR3GetU32(pSSM, &fInterruptPending);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fInterruptPending & ~1)
+ {
+ AssertMsgFailed(("fInterruptPending=%#x (PIC)\n", fInterruptPending));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC),
+ ("VCPU%03u: VMCPU_FF_INTERRUPT_PIC set! Devices shouldn't set interrupts during state restore...\n", idCpu));
+ if (fInterruptPending)
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
+
+ if (uVersion > PDM_SAVED_STATE_VERSION_PRE_NMI_FF)
+ {
+ /* NMI interrupt */
+ fInterruptPending = 0;
+ rc = SSMR3GetU32(pSSM, &fInterruptPending);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fInterruptPending & ~1)
+ {
+ AssertMsgFailed(("fInterruptPending=%#x (NMI)\n", fInterruptPending));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI), ("VCPU%3u: VMCPU_FF_INTERRUPT_NMI set!\n", idCpu));
+ if (fInterruptPending)
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+
+ /* SMI interrupt */
+ fInterruptPending = 0;
+ rc = SSMR3GetU32(pSSM, &fInterruptPending);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fInterruptPending & ~1)
+ {
+ AssertMsgFailed(("fInterruptPending=%#x (SMI)\n", fInterruptPending));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), ("VCPU%3u: VMCPU_FF_INTERRUPT_SMI set!\n", idCpu));
+ if (fInterruptPending)
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
+ }
+ }
+
+ /* DMA pending */
+ uint32_t fDMAPending = 0;
+ rc = SSMR3GetU32(pSSM, &fDMAPending);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fDMAPending & ~1)
+ {
+ AssertMsgFailed(("fDMAPending=%#x\n", fDMAPending));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ if (fDMAPending)
+ VM_FF_SET(pVM, VM_FF_PDM_DMA);
+ Log(("pdmR3LoadExec: VM_FF_PDM_DMA=%RTbool\n", VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
+ }
+
+ /*
+ * Load the list of devices and verify that they are all there.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_FOUND;
+
+ for (uint32_t i = 0; ; i++)
+ {
+ /* Get the sequence number / terminator. */
+ uint32_t u32Sep;
+ rc = SSMR3GetU32(pSSM, &u32Sep);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32Sep == UINT32_MAX)
+ break;
+ if (u32Sep != i)
+ AssertMsgFailedReturn(("Out of sequence. u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /* Get the name and instance number. */
+ char szName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
+ rc = SSMR3GetStrZ(pSSM, szName, sizeof(szName));
+ if (RT_FAILURE(rc))
+ return rc;
+ uint32_t iInstance;
+ rc = SSMR3GetU32(pSSM, &iInstance);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Try locate it. */
+ PPDMDEVINS pDevIns;
+ for (pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ if ( !RTStrCmp(szName, pDevIns->pReg->szName)
+ && pDevIns->iInstance == iInstance)
+ {
+ AssertLogRelMsgReturn(!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_FOUND),
+ ("%s/#%u\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_FOUND;
+ break;
+ }
+
+ if (!pDevIns)
+ {
+ bool fSkip = false;
+
+ /* Skip the non-existing (deprecated) "AudioSniffer" device stored in the saved state. */
+ if ( uVersion <= PDM_SAVED_STATE_VERSION_PRE_PDM_AUDIO
+ && !RTStrCmp(szName, "AudioSniffer"))
+ fSkip = true;
+
+ if (!fSkip)
+ {
+ LogRel(("Device '%s'/%d not found in current config\n", szName, iInstance));
+ if (SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Device '%s'/%d not found in current config"), szName, iInstance);
+ }
+ }
+ }
+
+ /*
+ * Check that no additional devices were configured.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_FOUND))
+ {
+ LogRel(("Device '%s'/%d not found in the saved state\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ if (SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Device '%s'/%d not found in the saved state"),
+ pDevIns->pReg->szName, pDevIns->iInstance);
+ }
+
+
+ /*
+ * Indicate that we've been called (for assertions).
+ */
+ pVM->pdm.s.fStateLoaded = true;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3PowerOn that deals with one driver.
+ *
+ * @returns VBox status code.
+ * @param   pDrvIns             The driver instance.
+ * @param pszDevName The parent device name.
+ * @param iDevInstance The parent device instance number.
+ * @param iLun The parent LUN number.
+ */
+DECLINLINE(int) pdmR3PowerOnDrv(PPDMDRVINS pDrvIns, const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ Assert(pDrvIns->Internal.s.fVMSuspended);
+ if (pDrvIns->pReg->pfnPowerOn)
+ {
+ LogFlow(("PDMR3PowerOn: Notifying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ int rc = VINF_SUCCESS; pDrvIns->pReg->pfnPowerOn(pDrvIns);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3PowerOn: Driver '%s'/%d on LUN#%d of device '%s'/%d -> %Rrc\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance, rc));
+ return rc;
+ }
+ }
+ pDrvIns->Internal.s.fVMSuspended = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3PowerOn that deals with one USB device instance.
+ *
+ * @returns VBox status code.
+ * @param pUsbIns The USB device instance.
+ */
+DECLINLINE(int) pdmR3PowerOnUsb(PPDMUSBINS pUsbIns)
+{
+ Assert(pUsbIns->Internal.s.fVMSuspended);
+ if (pUsbIns->pReg->pfnVMPowerOn)
+ {
+ LogFlow(("PDMR3PowerOn: Notifying - device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ int rc = VINF_SUCCESS; pUsbIns->pReg->pfnVMPowerOn(pUsbIns);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3PowerOn: Device '%s'/%d -> %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+ }
+ }
+ pUsbIns->Internal.s.fVMSuspended = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3PowerOn that deals with one device instance.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ */
+DECLINLINE(int) pdmR3PowerOnDev(PVM pVM, PPDMDEVINS pDevIns)
+{
+ Assert(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_SUSPENDED);
+ if (pDevIns->pReg->pfnPowerOn)
+ {
+ LogFlow(("PDMR3PowerOn: Notifying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ int rc = VINF_SUCCESS; pDevIns->pReg->pfnPowerOn(pDevIns);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3PowerOn: Device '%s'/%d -> %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+ }
+ }
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This function will notify all the devices and their
+ * attached drivers about the VM now being powered on.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) PDMR3PowerOn(PVM pVM)
+{
+ LogFlow(("PDMR3PowerOn:\n"));
+
+ /*
+ * Iterate thru the device instances and USB device instances,
+ * processing the drivers associated with those.
+ */
+ int rc = VINF_SUCCESS;
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns && RT_SUCCESS(rc); pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun && RT_SUCCESS(rc); pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns && RT_SUCCESS(rc); pDrvIns = pDrvIns->Internal.s.pDown)
+ rc = pdmR3PowerOnDrv(pDrvIns, pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3PowerOnDev(pVM, pDevIns);
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns && RT_SUCCESS(rc); pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun && RT_SUCCESS(rc); pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns && RT_SUCCESS(rc); pDrvIns = pDrvIns->Internal.s.pDown)
+ rc = pdmR3PowerOnDrv(pDrvIns, pUsbIns->pReg->szName, pUsbIns->iInstance, pLun->iLun);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3PowerOnUsb(pUsbIns);
+ }
+#endif
+
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ pdmR3AsyncCompletionResume(pVM);
+#endif
+
+ /*
+ * Resume all threads.
+ */
+ if (RT_SUCCESS(rc))
+ pdmR3ThreadResumeAll(pVM);
+
+ /*
+ * On failure, clean up via PDMR3Suspend.
+ */
+ if (RT_FAILURE(rc))
+ PDMR3Suspend(pVM);
+
+ LogFlow(("PDMR3PowerOn: returns %Rrc\n", rc));
+ return /*rc*/;
+}
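+
+/*
+ * Editor's note: a minimal sketch of the device side of this contract; it is
+ * not part of the original source and the DEVFOO type is hypothetical.  A
+ * pfnPowerOn callback runs on an EMT with the device's critical section
+ * already entered by pdmR3PowerOnDev above:
+ *
+ *     static DECLCALLBACK(void) devFooPowerOn(PPDMDEVINS pDevIns)
+ *     {
+ *         PDEVFOO pThis = PDMDEVINS_2_DATA(pDevIns, PDEVFOO);
+ *         pThis->fPoweredOn = true;   // start accepting guest requests
+ *     }
+ */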
+
+
+/**
+ * Initializes the asynchronous notification stats structure.
+ *
+ * @param pThis The asynchronous notification stats.
+ * @param pszOp The name of the operation.
+ */
+static void pdmR3NotifyAsyncInit(PPDMNOTIFYASYNCSTATS pThis, const char *pszOp)
+{
+ pThis->uStartNsTs = RTTimeNanoTS();
+ pThis->cNsElapsedNextLog = 0;
+ pThis->cLoops = 0;
+ pThis->cAsync = 0;
+ pThis->pszOp = pszOp;
+ pThis->offList = 0;
+ pThis->szList[0] = '\0';
+}
+
+
+/**
+ * Begins a new loop and prepares to gather new stats.
+ *
+ * @param pThis The asynchronous notification stats.
+ */
+static void pdmR3NotifyAsyncBeginLoop(PPDMNOTIFYASYNCSTATS pThis)
+{
+ pThis->cLoops++;
+ pThis->cAsync = 0;
+ pThis->offList = 0;
+ pThis->szList[0] = '\0';
+}
+
+
+/**
+ * Records a device or USB device with a pending asynchronous notification.
+ *
+ * @param pThis The asynchronous notification stats.
+ * @param pszName The name of the thing.
+ * @param iInstance The instance number.
+ */
+static void pdmR3NotifyAsyncAdd(PPDMNOTIFYASYNCSTATS pThis, const char *pszName, uint32_t iInstance)
+{
+ pThis->cAsync++;
+ if (pThis->offList < sizeof(pThis->szList) - 4)
+ pThis->offList += RTStrPrintf(&pThis->szList[pThis->offList], sizeof(pThis->szList) - pThis->offList,
+ pThis->offList == 0 ? "%s/%u" : ", %s/%u",
+ pszName, iInstance);
+}
+
+
+/**
+ * Records the asynchronous completion of a reset, suspend or power off.
+ *
+ * @param pThis The asynchronous notification stats.
+ * @param pszDrvName The driver name.
+ * @param iDrvInstance The driver instance number.
+ * @param pszDevName The device or USB device name.
+ * @param iDevInstance The device or USB device instance number.
+ * @param iLun The LUN.
+ */
+static void pdmR3NotifyAsyncAddDrv(PPDMNOTIFYASYNCSTATS pThis, const char *pszDrvName, uint32_t iDrvInstance,
+ const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ pThis->cAsync++;
+ if (pThis->offList < sizeof(pThis->szList) - 8)
+ pThis->offList += RTStrPrintf(&pThis->szList[pThis->offList], sizeof(pThis->szList) - pThis->offList,
+ pThis->offList == 0 ? "%s/%u/%u/%s/%u" : ", %s/%u/%u/%s/%u",
+ pszDevName, iDevInstance, iLun, pszDrvName, iDrvInstance);
+}
+
+
+/**
+ * Log the stats.
+ *
+ * @param pThis The asynchronous notification stats.
+ */
+static void pdmR3NotifyAsyncLog(PPDMNOTIFYASYNCSTATS pThis)
+{
+ /*
+ * Return if we shouldn't log at this point.
+ * We log with an interval increasing from 0 sec to 60 sec.
+ */
+ if (!pThis->cAsync)
+ return;
+
+ uint64_t cNsElapsed = RTTimeNanoTS() - pThis->uStartNsTs;
+ if (cNsElapsed < pThis->cNsElapsedNextLog)
+ return;
+
+ if (pThis->cNsElapsedNextLog == 0)
+ pThis->cNsElapsedNextLog = RT_NS_1SEC;
+ else if (pThis->cNsElapsedNextLog >= RT_NS_1MIN / 2)
+ pThis->cNsElapsedNextLog = RT_NS_1MIN;
+ else
+ pThis->cNsElapsedNextLog *= 2;
+
+ /*
+ * Do the logging.
+ */
+ LogRel(("%s: after %5llu ms, %u loops: %u async tasks - %s\n",
+ pThis->pszOp, cNsElapsed / RT_NS_1MS, pThis->cLoops, pThis->cAsync, pThis->szList));
+}
+
+
+/**
+ * Wait for events and process pending requests.
+ *
+ * @param pThis The asynchronous notification stats.
+ * @param pVM The cross context VM structure.
+ */
+static void pdmR3NotifyAsyncWaitAndProcessRequests(PPDMNOTIFYASYNCSTATS pThis, PVM pVM)
+{
+ VM_ASSERT_EMT0(pVM);
+ int rc = VMR3AsyncPdmNotificationWaitU(&pVM->pUVM->aCpus[0]);
+ AssertReleaseMsg(rc == VINF_SUCCESS, ("%Rrc - %s - %s\n", rc, pThis->pszOp, pThis->szList));
+
+ rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
+ AssertReleaseMsg(rc == VINF_SUCCESS, ("%Rrc - %s - %s\n", rc, pThis->pszOp, pThis->szList));
+ rc = VMR3ReqProcessU(pVM->pUVM, 0/*idDstCpu*/, true /*fPriorityOnly*/);
+ AssertReleaseMsg(rc == VINF_SUCCESS, ("%Rrc - %s - %s\n", rc, pThis->pszOp, pThis->szList));
+}
+
+
+/**
+ * Worker for PDMR3Reset that deals with one driver.
+ *
+ * @returns true if the driver is done resetting, false if an asynchronous
+ * notification is pending (recorded in @a pAsync).
+ * @param pDrvIns The driver instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ * @param pszDevName The parent device name.
+ * @param iDevInstance The parent device instance number.
+ * @param iLun The parent LUN number.
+ */
+DECLINLINE(bool) pdmR3ResetDrv(PPDMDRVINS pDrvIns, PPDMNOTIFYASYNCSTATS pAsync,
+ const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ if (!pDrvIns->Internal.s.fVMReset)
+ {
+ pDrvIns->Internal.s.fVMReset = true;
+ if (pDrvIns->pReg->pfnReset)
+ {
+ if (!pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Reset: Notifying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->pReg->pfnReset(pDrvIns);
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Reset: Async notification started - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ }
+ else if (pDrvIns->Internal.s.pfnAsyncNotify(pDrvIns))
+ {
+ LogFlow(("PDMR3Reset: Async notification completed - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ pDrvIns->Internal.s.fVMReset = false;
+ pdmR3NotifyAsyncAddDrv(pAsync, pDrvIns->Internal.s.pDrv->pReg->szName, pDrvIns->iInstance,
+ pszDevName, iDevInstance, iLun);
+ return false;
+ }
+ }
+ }
+ return true;
+}
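+
+/*
+ * Editor's note: a hedged sketch (not from the original source) of how a
+ * driver cooperates with the loop above.  pfnReset either completes
+ * synchronously or arms an async callback; PDMR3Reset then polls the
+ * callback on every outer-loop iteration until it returns true.  The DRVFOO
+ * type and cPendingReqs counter are hypothetical; the helper is assumed to
+ * be PDMDrvHlpSetAsyncNotification from pdmdrv.h.
+ *
+ *     static DECLCALLBACK(bool) drvFooAsyncReset(PPDMDRVINS pDrvIns)
+ *     {
+ *         PDRVFOO pThis = PDMINS_2_DATA(pDrvIns, PDRVFOO);
+ *         return ASMAtomicReadU32(&pThis->cPendingReqs) == 0; // true = done
+ *     }
+ *
+ *     static DECLCALLBACK(void) drvFooReset(PPDMDRVINS pDrvIns)
+ *     {
+ *         PDRVFOO pThis = PDMINS_2_DATA(pDrvIns, PDRVFOO);
+ *         if (ASMAtomicReadU32(&pThis->cPendingReqs) != 0)
+ *             PDMDrvHlpSetAsyncNotification(pDrvIns, drvFooAsyncReset);
+ *     }
+ */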
+
+
+/**
+ * Worker for PDMR3Reset that deals with one USB device instance.
+ *
+ * @param pUsbIns The USB device instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ */
+DECLINLINE(void) pdmR3ResetUsb(PPDMUSBINS pUsbIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!pUsbIns->Internal.s.fVMReset)
+ {
+ pUsbIns->Internal.s.fVMReset = true;
+ if (pUsbIns->pReg->pfnVMReset)
+ {
+ if (!pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Reset: Notifying - device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->pReg->pfnVMReset(pUsbIns);
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Reset: Async notification started - device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ }
+ else if (pUsbIns->Internal.s.pfnAsyncNotify(pUsbIns))
+ {
+ LogFlow(("PDMR3Reset: Async notification completed - device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ pUsbIns->Internal.s.fVMReset = false;
+ pdmR3NotifyAsyncAdd(pAsync, pUsbIns->Internal.s.pUsbDev->pReg->szName, pUsbIns->iInstance);
+ }
+ }
+ }
+}
+
+
+/**
+ * Worker for PDMR3Reset that deals with one device instance.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ * @param pAsync The structure for recording asynchronous notification tasks.
+ */
+DECLINLINE(void) pdmR3ResetDev(PVM pVM, PPDMDEVINS pDevIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_RESET))
+ {
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_RESET;
+ if (pDevIns->pReg->pfnReset)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+
+ if (!pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Reset: Notifying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->pReg->pfnReset(pDevIns);
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Reset: Async notification started - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ }
+ else if (pDevIns->Internal.s.pfnAsyncNotify(pDevIns))
+ {
+ LogFlow(("PDMR3Reset: Async notification completed - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_RESET;
+ pdmR3NotifyAsyncAdd(pAsync, pDevIns->Internal.s.pDevR3->pReg->szName, pDevIns->iInstance);
+ }
+
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMSUSPEND_WARN_AT_NS)
+ LogRel(("PDMR3Reset: Device '%s'/%d took %'llu ns to reset\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cNsElapsed));
+ }
+ }
+}
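+
+/*
+ * Editor's note: devices that must stop issuing requests before their
+ * drivers are notified (the DevAHCI case described in PDMR3Suspend below)
+ * opt into the device-first ordering at registration time.  A sketch, not
+ * from the original source; PDM_DEVREG_FLAGS_DEFAULT_BITS is assumed:
+ *
+ *     pReg->fFlags = PDM_DEVREG_FLAGS_DEFAULT_BITS
+ *                  | PDM_DEVREG_FLAGS_FIRST_RESET_NOTIFICATION
+ *                  | PDM_DEVREG_FLAGS_FIRST_SUSPEND_NOTIFICATION
+ *                  | PDM_DEVREG_FLAGS_FIRST_POWEROFF_NOTIFICATION;
+ */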
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used by PDMR3Reset and CPU hot plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(void) PDMR3ResetCpu(PVMCPU pVCpu)
+{
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_SMI);
+}
+
+
+/**
+ * This function will notify all the devices and their attached drivers about
+ * the VM now being reset.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) PDMR3Reset(PVM pVM)
+{
+ LogFlow(("PDMR3Reset:\n"));
+
+ /*
+ * Clear all the reset flags.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_RESET;
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->Internal.s.fVMReset = false;
+ }
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ pUsbIns->Internal.s.fVMReset = false;
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->Internal.s.fVMReset = false;
+ }
+#endif
+
+ /*
+ * The outer loop repeats until there are no more async requests.
+ */
+ PDMNOTIFYASYNCSTATS Async;
+ pdmR3NotifyAsyncInit(&Async, "PDMR3Reset");
+ for (;;)
+ {
+ pdmR3NotifyAsyncBeginLoop(&Async);
+
+ /*
+ * Iterate thru the device instances and USB device instances,
+ * processing the drivers associated with those.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_RESET_NOTIFICATION)
+ pdmR3ResetDev(pVM, pDevIns, &Async);
+
+ if (Async.cAsync == cAsyncStart)
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3ResetDrv(pDrvIns, &Async, pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun))
+ break;
+
+ if ( Async.cAsync == cAsyncStart
+ && !(pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_RESET_NOTIFICATION))
+ pdmR3ResetDev(pVM, pDevIns, &Async);
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3ResetDrv(pDrvIns, &Async, pUsbIns->pReg->szName, pUsbIns->iInstance, pLun->iLun))
+ break;
+
+ if (Async.cAsync == cAsyncStart)
+ pdmR3ResetUsb(pUsbIns, &Async);
+ }
+#endif
+ if (!Async.cAsync)
+ break;
+ pdmR3NotifyAsyncLog(&Async);
+ pdmR3NotifyAsyncWaitAndProcessRequests(&Async, pVM);
+ }
+
+ /*
+ * Clear all pending interrupts and DMA operations.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ PDMR3ResetCpu(pVM->apCpusR3[idCpu]);
+ VM_FF_CLEAR(pVM, VM_FF_PDM_DMA);
+
+ LogFlow(("PDMR3Reset: returns void\n"));
+}
+
+
+/**
+ * This function will tell all the devices to set up their memory structures
+ * after VM construction and after VM reset.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fAtReset Indicates the context, after reset if @c true or after
+ * construction if @c false.
+ */
+VMMR3_INT_DECL(void) PDMR3MemSetup(PVM pVM, bool fAtReset)
+{
+ LogFlow(("PDMR3MemSetup: fAtReset=%RTbool\n", fAtReset));
+ PDMDEVMEMSETUPCTX const enmCtx = fAtReset ? PDMDEVMEMSETUPCTX_AFTER_RESET : PDMDEVMEMSETUPCTX_AFTER_CONSTRUCTION;
+
+ /*
+ * Iterate thru the device instances and work the callback.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ if (pDevIns->pReg->pfnMemSetup)
+ {
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ pDevIns->pReg->pfnMemSetup(pDevIns, enmCtx);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ }
+
+ LogFlow(("PDMR3MemSetup: returns void\n"));
+}
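+
+/*
+ * Editor's note: a minimal pfnMemSetup sketch distinguishing the two
+ * contexts passed above; the DEVFOO type and devFooReloadRom helper are
+ * hypothetical:
+ *
+ *     static DECLCALLBACK(void) devFooMemSetup(PPDMDEVINS pDevIns, PDMDEVMEMSETUPCTX enmCtx)
+ *     {
+ *         PDEVFOO pThis = PDMDEVINS_2_DATA(pDevIns, PDEVFOO);
+ *         if (enmCtx == PDMDEVMEMSETUPCTX_AFTER_CONSTRUCTION)
+ *             RT_ZERO(pThis->abShadow);     // one-time initialization
+ *         devFooReloadRom(pDevIns);         // redone after every reset
+ *     }
+ */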
+
+
+/**
+ * Retrieves and resets the info left behind by PDMDevHlpVMReset.
+ *
+ * @returns True if hard reset, false if soft reset.
+ * @param pVM The cross context VM structure.
+ * @param fOverride If non-zero, the override flags will be used instead
+ * of the reset flags kept by PDM. (For triple faults.)
+ * @param pfResetFlags Where to return the reset flags (PDMVMRESET_F_XXX).
+ * @thread EMT
+ */
+VMMR3_INT_DECL(bool) PDMR3GetResetInfo(PVM pVM, uint32_t fOverride, uint32_t *pfResetFlags)
+{
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Get the reset flags.
+ */
+ uint32_t fResetFlags;
+ fResetFlags = ASMAtomicXchgU32(&pVM->pdm.s.fResetFlags, 0);
+ if (fOverride)
+ fResetFlags = fOverride;
+ *pfResetFlags = fResetFlags;
+
+ /*
+ * To try to avoid trouble, we never ever do soft/warm resets on SMP systems
+ * with more than CPU #0 active. However, if only one CPU is active we
+ * will ask the firmware what it wants us to do (because the firmware may
+ * depend on the VMM doing a lot of what is normally its responsibility,
+ * like clearing memory).
+ */
+ bool fOtherCpusActive = false;
+ VMCPUID idCpu = pVM->cCpus;
+ while (idCpu-- > 1)
+ {
+ EMSTATE enmState = EMGetState(pVM->apCpusR3[idCpu]);
+ if ( enmState != EMSTATE_WAIT_SIPI
+ && enmState != EMSTATE_NONE)
+ {
+ fOtherCpusActive = true;
+ break;
+ }
+ }
+
+ bool fHardReset = fOtherCpusActive
+ || (fResetFlags & PDMVMRESET_F_SRC_MASK) < PDMVMRESET_F_LAST_ALWAYS_HARD
+ || !pVM->pdm.s.pFirmware
+ || pVM->pdm.s.pFirmware->Reg.pfnIsHardReset(pVM->pdm.s.pFirmware->pDevIns, fResetFlags);
+
+ Log(("PDMR3GetResetInfo: returns fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
+ return fHardReset;
+}
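+
+/*
+ * Editor's note: a simplified sketch of the expected caller pattern, pairing
+ * this with PDMR3SoftReset below (the real VMR3Reset flow does considerably
+ * more on the hard reset path):
+ *
+ *     uint32_t fResetFlags;
+ *     if (!PDMR3GetResetInfo(pVM, 0, &fResetFlags))   // 0 = no override
+ *         PDMR3SoftReset(pVM, fResetFlags);           // warm/soft reset
+ *     else
+ *         PDMR3Reset(pVM);                            // full hard reset
+ */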
+
+
+/**
+ * Performs a soft reset of devices.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fResetFlags PDMVMRESET_F_XXX.
+ */
+VMMR3_INT_DECL(void) PDMR3SoftReset(PVM pVM, uint32_t fResetFlags)
+{
+ LogFlow(("PDMR3SoftReset: fResetFlags=%#x\n", fResetFlags));
+
+ /*
+ * Iterate thru the device instances and work the callback.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ if (pDevIns->pReg->pfnSoftReset)
+ {
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ pDevIns->pReg->pfnSoftReset(pDevIns, fResetFlags);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ }
+
+ LogFlow(("PDMR3SoftReset: returns void\n"));
+}
+
+
+/**
+ * Worker for PDMR3Suspend that deals with one driver.
+ *
+ * @returns true if the driver is done suspending, false if an asynchronous
+ * notification is pending (recorded in @a pAsync).
+ * @param pDrvIns The driver instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ * @param pszDevName The parent device name.
+ * @param iDevInstance The parent device instance number.
+ * @param iLun The parent LUN number.
+ */
+DECLINLINE(bool) pdmR3SuspendDrv(PPDMDRVINS pDrvIns, PPDMNOTIFYASYNCSTATS pAsync,
+ const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ if (!pDrvIns->Internal.s.fVMSuspended)
+ {
+ pDrvIns->Internal.s.fVMSuspended = true;
+ if (pDrvIns->pReg->pfnSuspend)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ if (!pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Suspend: Notifying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->pReg->pfnSuspend(pDrvIns);
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Suspend: Async notification started - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ }
+ else if (pDrvIns->Internal.s.pfnAsyncNotify(pDrvIns))
+ {
+ LogFlow(("PDMR3Suspend: Async notification completed - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMSUSPEND_WARN_AT_NS)
+ LogRel(("PDMR3Suspend: Driver '%s'/%d on LUN#%d of device '%s'/%d took %'llu ns to suspend\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance, cNsElapsed));
+
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ pDrvIns->Internal.s.fVMSuspended = false;
+ pdmR3NotifyAsyncAddDrv(pAsync, pDrvIns->Internal.s.pDrv->pReg->szName, pDrvIns->iInstance, pszDevName, iDevInstance, iLun);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+/**
+ * Worker for PDMR3Suspend that deals with one USB device instance.
+ *
+ * @param pUsbIns The USB device instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ */
+DECLINLINE(void) pdmR3SuspendUsb(PPDMUSBINS pUsbIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!pUsbIns->Internal.s.fVMSuspended)
+ {
+ pUsbIns->Internal.s.fVMSuspended = true;
+ if (pUsbIns->pReg->pfnVMSuspend)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ if (!pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Suspend: Notifying - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->pReg->pfnVMSuspend(pUsbIns);
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Suspend: Async notification started - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ }
+ else if (pUsbIns->Internal.s.pfnAsyncNotify(pUsbIns))
+ {
+ LogFlow(("PDMR3Suspend: Async notification completed - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ pUsbIns->Internal.s.fVMSuspended = false;
+ pdmR3NotifyAsyncAdd(pAsync, pUsbIns->Internal.s.pUsbDev->pReg->szName, pUsbIns->iInstance);
+ }
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMSUSPEND_WARN_AT_NS)
+ LogRel(("PDMR3Suspend: USB device '%s'/%d took %'llu ns to suspend\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, cNsElapsed));
+ }
+ }
+}
+
+
+/**
+ * Worker for PDMR3Suspend that deals with one device instance.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ * @param pAsync The structure for recording asynchronous notification tasks.
+ */
+DECLINLINE(void) pdmR3SuspendDev(PVM pVM, PPDMDEVINS pDevIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_SUSPENDED))
+ {
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_SUSPENDED;
+ if (pDevIns->pReg->pfnSuspend)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+
+ if (!pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3Suspend: Notifying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->pReg->pfnSuspend(pDevIns);
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3Suspend: Async notification started - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ }
+ else if (pDevIns->Internal.s.pfnAsyncNotify(pDevIns))
+ {
+ LogFlow(("PDMR3Suspend: Async notification completed - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+ pdmR3NotifyAsyncAdd(pAsync, pDevIns->Internal.s.pDevR3->pReg->szName, pDevIns->iInstance);
+ }
+
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMSUSPEND_WARN_AT_NS)
+ LogRel(("PDMR3Suspend: Device '%s'/%d took %'llu ns to suspend\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cNsElapsed));
+ }
+ }
+}
+
+
+/**
+ * This function will notify all the devices and their attached drivers about
+ * the VM now being suspended.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(void) PDMR3Suspend(PVM pVM)
+{
+ LogFlow(("PDMR3Suspend:\n"));
+ VM_ASSERT_EMT0(pVM);
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ /*
+ * The outer loop repeats until there are no more async requests.
+ *
+ * Note! We depend on the suspended indicators to be in the desired state
+ * and we do not reset them before starting because this allows
+ * PDMR3PowerOn and PDMR3Resume to use PDMR3Suspend for cleaning up
+ * on failure.
+ */
+ PDMNOTIFYASYNCSTATS Async;
+ pdmR3NotifyAsyncInit(&Async, "PDMR3Suspend");
+ for (;;)
+ {
+ pdmR3NotifyAsyncBeginLoop(&Async);
+
+ /*
+ * Iterate thru the device instances and USB device instances,
+ * processing the drivers associated with those.
+ *
+ * The attached drivers are normally processed first. Some devices
+ * (like DevAHCI), though, need to be notified before their drivers so
+ * that they don't kick off any new requests after the drivers have
+ * stopped taking any. (DrvVD changes to read-only in this particular case.)
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_SUSPEND_NOTIFICATION)
+ pdmR3SuspendDev(pVM, pDevIns, &Async);
+
+ if (Async.cAsync == cAsyncStart)
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3SuspendDrv(pDrvIns, &Async, pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun))
+ break;
+
+ if ( Async.cAsync == cAsyncStart
+ && !(pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_SUSPEND_NOTIFICATION))
+ pdmR3SuspendDev(pVM, pDevIns, &Async);
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3SuspendDrv(pDrvIns, &Async, pUsbIns->pReg->szName, pUsbIns->iInstance, pLun->iLun))
+ break;
+
+ if (Async.cAsync == cAsyncStart)
+ pdmR3SuspendUsb(pUsbIns, &Async);
+ }
+#endif
+ if (!Async.cAsync)
+ break;
+ pdmR3NotifyAsyncLog(&Async);
+ pdmR3NotifyAsyncWaitAndProcessRequests(&Async, pVM);
+ }
+
+ /*
+ * Suspend all threads.
+ */
+ pdmR3ThreadSuspendAll(pVM);
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ LogRel(("PDMR3Suspend: %'llu ns run time\n", cNsElapsed));
+}
+
+
+/**
+ * Worker for PDMR3Resume that deals with one driver.
+ *
+ * @returns VBox status code.
+ * @param pDrvIns The driver instance.
+ * @param pszDevName The parent device name.
+ * @param iDevInstance The parent device instance number.
+ * @param iLun The parent LUN number.
+ */
+DECLINLINE(int) pdmR3ResumeDrv(PPDMDRVINS pDrvIns, const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ Assert(pDrvIns->Internal.s.fVMSuspended);
+ if (pDrvIns->pReg->pfnResume)
+ {
+ LogFlow(("PDMR3Resume: Notifying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ int rc = VINF_SUCCESS; pDrvIns->pReg->pfnResume(pDrvIns);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3Resume: Driver '%s'/%d on LUN#%d of device '%s'/%d -> %Rrc\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance, rc));
+ return rc;
+ }
+ }
+ pDrvIns->Internal.s.fVMSuspended = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3Resume that deals with one USB device instance.
+ *
+ * @returns VBox status code.
+ * @param pUsbIns The USB device instance.
+ */
+DECLINLINE(int) pdmR3ResumeUsb(PPDMUSBINS pUsbIns)
+{
+ if (pUsbIns->Internal.s.fVMSuspended)
+ {
+ if (pUsbIns->pReg->pfnVMResume)
+ {
+ LogFlow(("PDMR3Resume: Notifying - device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ int rc = VINF_SUCCESS; pUsbIns->pReg->pfnVMResume(pUsbIns);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3Resume: Device '%s'/%d -> %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+ }
+ }
+ pUsbIns->Internal.s.fVMSuspended = false;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3Resume that deals with one device instance.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ */
+DECLINLINE(int) pdmR3ResumeDev(PVM pVM, PPDMDEVINS pDevIns)
+{
+ Assert(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_SUSPENDED);
+ if (pDevIns->pReg->pfnResume)
+ {
+ LogFlow(("PDMR3Resume: Notifying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ int rc = VINF_SUCCESS; pDevIns->pReg->pfnResume(pDevIns);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMR3Resume: Device '%s'/%d -> %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+ }
+ }
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * This function will notify all the devices and their
+ * attached drivers about the VM now being resumed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) PDMR3Resume(PVM pVM)
+{
+ LogFlow(("PDMR3Resume:\n"));
+
+ /*
+ * Iterate thru the device instances and USB device instances,
+ * processing the drivers associated with those.
+ */
+ int rc = VINF_SUCCESS;
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns && RT_SUCCESS(rc); pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun && RT_SUCCESS(rc); pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns && RT_SUCCESS(rc); pDrvIns = pDrvIns->Internal.s.pDown)
+ rc = pdmR3ResumeDrv(pDrvIns, pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3ResumeDev(pVM, pDevIns);
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns && RT_SUCCESS(rc); pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun && RT_SUCCESS(rc); pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns && RT_SUCCESS(rc); pDrvIns = pDrvIns->Internal.s.pDown)
+ rc = pdmR3ResumeDrv(pDrvIns, pUsbIns->pReg->szName, pUsbIns->iInstance, pLun->iLun);
+ if (RT_SUCCESS(rc))
+ rc = pdmR3ResumeUsb(pUsbIns);
+ }
+#endif
+
+ /*
+ * Resume all threads.
+ */
+ if (RT_SUCCESS(rc))
+ pdmR3ThreadResumeAll(pVM);
+
+ /*
+ * Resume the block cache.
+ */
+ if (RT_SUCCESS(rc))
+ pdmR3BlkCacheResume(pVM);
+
+ /*
+ * On failure, clean up via PDMR3Suspend.
+ */
+ if (RT_FAILURE(rc))
+ PDMR3Suspend(pVM);
+
+ LogFlow(("PDMR3Resume: returns %Rrc\n", rc));
+ return /*rc*/;
+}
+
+
+/**
+ * Worker for PDMR3PowerOff that deals with one driver.
+ *
+ * @returns true if the driver is done powering off, false if an asynchronous
+ * notification is pending (recorded in @a pAsync).
+ * @param pDrvIns The driver instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ * @param pszDevName The parent device name.
+ * @param iDevInstance The parent device instance number.
+ * @param iLun The parent LUN number.
+ */
+DECLINLINE(bool) pdmR3PowerOffDrv(PPDMDRVINS pDrvIns, PPDMNOTIFYASYNCSTATS pAsync,
+ const char *pszDevName, uint32_t iDevInstance, uint32_t iLun)
+{
+ if (!pDrvIns->Internal.s.fVMSuspended)
+ {
+ pDrvIns->Internal.s.fVMSuspended = true;
+ if (pDrvIns->pReg->pfnPowerOff)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ if (!pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3PowerOff: Notifying - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->pReg->pfnPowerOff(pDrvIns);
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3PowerOff: Async notification started - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ }
+ else if (pDrvIns->Internal.s.pfnAsyncNotify(pDrvIns))
+ {
+ LogFlow(("PDMR3PowerOff: Async notification completed - driver '%s'/%d on LUN#%d of device '%s'/%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance));
+ pDrvIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMPOWEROFF_WARN_AT_NS)
+ LogRel(("PDMR3PowerOff: Driver '%s'/%d on LUN#%d of device '%s'/%d took %'llu ns to power off\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, iLun, pszDevName, iDevInstance, cNsElapsed));
+
+ if (pDrvIns->Internal.s.pfnAsyncNotify)
+ {
+ pDrvIns->Internal.s.fVMSuspended = false;
+ pdmR3NotifyAsyncAddDrv(pAsync, pDrvIns->Internal.s.pDrv->pReg->szName, pDrvIns->iInstance,
+ pszDevName, iDevInstance, iLun);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+/**
+ * Worker for PDMR3PowerOff that deals with one USB device instance.
+ *
+ * @param pUsbIns The USB device instance.
+ * @param pAsync The structure for recording asynchronous
+ * notification tasks.
+ */
+DECLINLINE(void) pdmR3PowerOffUsb(PPDMUSBINS pUsbIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!pUsbIns->Internal.s.fVMSuspended)
+ {
+ pUsbIns->Internal.s.fVMSuspended = true;
+ if (pUsbIns->pReg->pfnVMPowerOff)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ if (!pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3PowerOff: Notifying - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->pReg->pfnVMPowerOff(pUsbIns);
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3PowerOff: Async notification started - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ }
+ else if (pUsbIns->Internal.s.pfnAsyncNotify(pUsbIns))
+ {
+ LogFlow(("PDMR3PowerOff: Async notification completed - USB device '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pUsbIns->Internal.s.pfnAsyncNotify)
+ {
+ pUsbIns->Internal.s.fVMSuspended = false;
+ pdmR3NotifyAsyncAdd(pAsync, pUsbIns->Internal.s.pUsbDev->pReg->szName, pUsbIns->iInstance);
+ }
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMPOWEROFF_WARN_AT_NS)
+ LogRel(("PDMR3PowerOff: USB device '%s'/%d took %'llu ns to power off\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, cNsElapsed));
+ }
+ }
+}
+
+
+/**
+ * Worker for PDMR3PowerOff that deals with one device instance.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ * @param pAsync The structure for recording asynchronous notification tasks.
+ */
+DECLINLINE(void) pdmR3PowerOffDev(PVM pVM, PPDMDEVINS pDevIns, PPDMNOTIFYASYNCSTATS pAsync)
+{
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_SUSPENDED))
+ {
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_SUSPENDED;
+ if (pDevIns->pReg->pfnPowerOff)
+ {
+ uint64_t cNsElapsed = RTTimeNanoTS();
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+
+ if (!pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ LogFlow(("PDMR3PowerOff: Notifying - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->pReg->pfnPowerOff(pDevIns);
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ LogFlow(("PDMR3PowerOff: Async notification started - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ }
+ else if (pDevIns->Internal.s.pfnAsyncNotify(pDevIns))
+ {
+ LogFlow(("PDMR3PowerOff: Async notification completed - device '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pDevIns->Internal.s.pfnAsyncNotify = NULL;
+ }
+ if (pDevIns->Internal.s.pfnAsyncNotify)
+ {
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+ pdmR3NotifyAsyncAdd(pAsync, pDevIns->Internal.s.pDevR3->pReg->szName, pDevIns->iInstance);
+ }
+
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ if (cNsElapsed >= PDMPOWEROFF_WARN_AT_NS)
+ LogFlow(("PDMR3PowerOff: Device '%s'/%d took %'llu ns to power off\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cNsElapsed));
+ }
+ }
+}
+
+
+/**
+ * This function will notify all the devices and their
+ * attached drivers about the VM being powered off.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) PDMR3PowerOff(PVM pVM)
+{
+ LogFlow(("PDMR3PowerOff:\n"));
+ uint64_t cNsElapsed = RTTimeNanoTS();
+
+ /*
+ * Clear the suspended flags on all devices and drivers first because they
+ * might have been set during a suspend but the power off callbacks should
+ * be called in any case.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->Internal.s.fVMSuspended = false;
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ pUsbIns->Internal.s.fVMSuspended = false;
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->Internal.s.fVMSuspended = false;
+ }
+#endif
+
+ /*
+ * The outer loop repeats until there are no more async requests.
+ */
+ PDMNOTIFYASYNCSTATS Async;
+ pdmR3NotifyAsyncInit(&Async, "PDMR3PowerOff");
+ for (;;)
+ {
+ pdmR3NotifyAsyncBeginLoop(&Async);
+
+ /*
+ * Iterate thru the device instances and USB device instances,
+ * processing the drivers associated with those.
+ *
+ * The attached drivers are normally processed first. Some devices
+ * (like DevAHCI), though, need to be notified before their drivers so
+ * that they don't kick off any new requests after the drivers have
+ * stopped taking any. (DrvVD changes to read-only in this particular case.)
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_POWEROFF_NOTIFICATION)
+ pdmR3PowerOffDev(pVM, pDevIns, &Async);
+
+ if (Async.cAsync == cAsyncStart)
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3PowerOffDrv(pDrvIns, &Async, pDevIns->pReg->szName, pDevIns->iInstance, pLun->iLun))
+ break;
+
+ if ( Async.cAsync == cAsyncStart
+ && !(pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_FIRST_POWEROFF_NOTIFICATION))
+ pdmR3PowerOffDev(pVM, pDevIns, &Async);
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ unsigned const cAsyncStart = Async.cAsync;
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!pdmR3PowerOffDrv(pDrvIns, &Async, pUsbIns->pReg->szName, pUsbIns->iInstance, pLun->iLun))
+ break;
+
+ if (Async.cAsync == cAsyncStart)
+ pdmR3PowerOffUsb(pUsbIns, &Async);
+ }
+#endif
+ if (!Async.cAsync)
+ break;
+ pdmR3NotifyAsyncLog(&Async);
+ pdmR3NotifyAsyncWaitAndProcessRequests(&Async, pVM);
+ }
+
+ /*
+ * Suspend all threads.
+ */
+ pdmR3ThreadSuspendAll(pVM);
+
+ cNsElapsed = RTTimeNanoTS() - cNsElapsed;
+ LogRel(("PDMR3PowerOff: %'llu ns run time\n", cNsElapsed));
+}
+
+
+/**
+ * Queries the base interface of a device instance.
+ *
+ * The caller can use this to query other interfaces the device implements
+ * and use them to talk to the device.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param ppBase Where to store the pointer to the base device interface on success.
+ * @remark We're not doing any locking ATM, so don't try to call this at times
+ * when the device chain is known to be updated.
+ */
+VMMR3DECL(int) PDMR3QueryDevice(PUVM pUVM, const char *pszDevice, unsigned iInstance, PPDMIBASE *ppBase)
+{
+ LogFlow(("PDMR3DeviceQuery: pszDevice=%p:{%s} iInstance=%u ppBase=%p\n", pszDevice, pszDevice, iInstance, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Iterate registered devices looking for the device.
+ */
+ size_t cchDevice = strlen(pszDevice);
+ for (PPDMDEV pDev = pUVM->pVM->pdm.s.pDevs; pDev; pDev = pDev->pNext)
+ {
+ if ( pDev->cchName == cchDevice
+ && !memcmp(pDev->pReg->szName, pszDevice, cchDevice))
+ {
+ /*
+ * Iterate device instances.
+ */
+ for (PPDMDEVINS pDevIns = pDev->pInstances; pDevIns; pDevIns = pDevIns->Internal.s.pPerDeviceNextR3)
+ {
+ if (pDevIns->iInstance == iInstance)
+ {
+ if (pDevIns->IBase.pfnQueryInterface)
+ {
+ *ppBase = &pDevIns->IBase;
+ LogFlow(("PDMR3DeviceQuery: return VINF_SUCCESS and *ppBase=%p\n", *ppBase));
+ return VINF_SUCCESS;
+ }
+
+ LogFlow(("PDMR3DeviceQuery: returns VERR_PDM_DEVICE_INSTANCE_NO_IBASE\n"));
+ return VERR_PDM_DEVICE_INSTANCE_NO_IBASE;
+ }
+ }
+
+ LogFlow(("PDMR3DeviceQuery: returns VERR_PDM_DEVICE_INSTANCE_NOT_FOUND\n"));
+ return VERR_PDM_DEVICE_INSTANCE_NOT_FOUND;
+ }
+ }
+
+ LogFlow(("PDMR3QueryDevice: returns VERR_PDM_DEVICE_NOT_FOUND\n"));
+ return VERR_PDM_DEVICE_NOT_FOUND;
+}
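+
+/*
+ * Editor's note: typical usage sketch; the device name and queried interface
+ * are examples only, not mandated by this API:
+ *
+ *     PPDMIBASE pBase;
+ *     int rc = PDMR3QueryDevice(pUVM, "e1000", 0, &pBase);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         PPDMINETWORKCONFIG pCfg = PDMIBASE_QUERY_INTERFACE(pBase, PDMINETWORKCONFIG);
+ *         if (pCfg)
+ *             // ... talk to the device through pCfg ...
+ *     }
+ */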
+
+
+/**
+ * Queries the base interface of a device LUN.
+ *
+ * This differs from PDMR3QueryLun in that it returns the interface on the
+ * device and not on the top level driver.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param ppBase Where to store the base interface pointer.
+ * @remark We're not doing any locking ATM, so don't try to call this at times
+ * when the device chain is known to be updated.
+ */
+VMMR3DECL(int) PDMR3QueryDeviceLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+{
+ LogFlow(("PDMR3QueryDeviceLun: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Find the LUN.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pUVM->pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ *ppBase = pLun->pBase;
+ LogFlow(("PDMR3QueryDeviceLun: return VINF_SUCCESS and *ppBase=%p\n", *ppBase));
+ return VINF_SUCCESS;
+ }
+ LogFlow(("PDMR3QueryDeviceLun: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Query the interface of the top level driver on a LUN.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param ppBase Where to store the base interface pointer.
+ * @remark We're not doing any locking ATM, so don't try to call this at times
+ * when the device chain is known to be updated.
+ */
+VMMR3DECL(int) PDMR3QueryLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+{
+ LogFlow(("PDMR3QueryLun: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Find the LUN.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLun->pTop)
+ {
+ *ppBase = &pLun->pTop->IBase;
+ LogFlow(("PDMR3QueryLun: return %Rrc and *ppBase=%p\n", VINF_SUCCESS, *ppBase));
+ return VINF_SUCCESS;
+ }
+ rc = VERR_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+ LogFlow(("PDMR3QueryLun: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Query the interface of a named driver on a LUN.
+ *
+ * If the driver appears more than once in the driver chain, the first instance
+ * is returned.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param pszDriver The driver name.
+ * @param ppBase Where to store the base interface pointer.
+ *
+ * @remark We're not doing any locking ATM, so don't try to call this at times
+ * when the device chain is known to be updated.
+ */
+VMMR3DECL(int) PDMR3QueryDriverOnLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, const char *pszDriver, PPPDMIBASE ppBase)
+{
+ LogFlow(("PDMR3QueryDriverOnLun: pszDevice=%p:{%s} iInstance=%u iLun=%u pszDriver=%p:{%s} ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, pszDriver, pszDriver, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Find the LUN.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pUVM->pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLun->pTop)
+ {
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!strcmp(pDrvIns->pReg->szName, pszDriver))
+ {
+ *ppBase = &pDrvIns->IBase;
+ LogFlow(("PDMR3QueryDriverOnLun: return %Rrc and *ppBase=%p\n", VINF_SUCCESS, *ppBase));
+ return VINF_SUCCESS;
+ }
+ rc = VERR_PDM_DRIVER_NOT_FOUND;
+ }
+ else
+ rc = VERR_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+ LogFlow(("PDMR3QueryDriverOnLun: returns %Rrc\n", rc));
+ return rc;
+}
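+
+/*
+ * Editor's note: a sketch contrasting the three LUN/device queries above;
+ * "AHCI" / LUN 0 / driver "VD" are illustrative values:
+ *
+ *     PPDMIBASE pBase;
+ *     PDMR3QueryDeviceLun(pUVM, "AHCI", 0, 0, &pBase);          // device end of the LUN
+ *     PDMR3QueryLun(pUVM, "AHCI", 0, 0, &pBase);                // top driver on the LUN
+ *     PDMR3QueryDriverOnLun(pUVM, "AHCI", 0, 0, "VD", &pBase);  // first "VD" in the chain
+ *     PPDMIMEDIA pIMedia = PDMIBASE_QUERY_INTERFACE(pBase, PDMIMEDIA);
+ */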
+
+
+/**
+ * Executes pending DMA transfers.
+ * Forced Action handler.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) PDMR3DmaRun(PVM pVM)
+{
+ /* Note! Not really SMP safe; restrict it to VCPU 0. */
+ if (VMMGetCpuId(pVM) != 0)
+ return;
+
+ if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_PDM_DMA))
+ {
+ if (pVM->pdm.s.pDmac)
+ {
+ bool fMore = pVM->pdm.s.pDmac->Reg.pfnRun(pVM->pdm.s.pDmac->pDevIns);
+ if (fMore)
+ VM_FF_SET(pVM, VM_FF_PDM_DMA);
+ }
+ }
+}
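+
+/*
+ * Editor's note: the producer side of this forced action, sketched under the
+ * assumption of the standard device helpers: a DMA controller registers its
+ * pfnRun via PDMDevHlpDMACRegister, and peripherals queue work with
+ * PDMDevHlpDMASchedule, which raises VM_FF_PDM_DMA so that EMT(0) lands in
+ * PDMR3DmaRun above:
+ *
+ *     PDMDevHlpDMASchedule(pDevIns);  // e.g. from a floppy or SB16 device
+ */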
+
+
+/**
+ * Allocates memory from the VMM device heap.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cbSize Allocation size.
+ * @param pfnNotify Mapping/unmapping notification callback.
+ * @param ppv Ring-3 pointer. (out)
+ */
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapAlloc(PVM pVM, size_t cbSize, PFNPDMVMMDEVHEAPNOTIFY pfnNotify, RTR3PTR *ppv)
+{
+#ifdef DEBUG_bird
+ if (!cbSize || cbSize > pVM->pdm.s.cbVMMDevHeapLeft)
+ return VERR_NO_MEMORY;
+#else
+ AssertReturn(cbSize && cbSize <= pVM->pdm.s.cbVMMDevHeapLeft, VERR_NO_MEMORY);
+#endif
+
+ Log(("PDMR3VMMDevHeapAlloc: %#zx\n", cbSize));
+
+ /** @todo Not a real heap as there's currently only one user. */
+ *ppv = pVM->pdm.s.pvVMMDevHeap;
+ pVM->pdm.s.cbVMMDevHeapLeft = 0;
+ pVM->pdm.s.pfnVMMDevHeapNotify = pfnNotify;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees memory from the VMM device heap
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pv Ring-3 pointer.
+ */
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapFree(PVM pVM, RTR3PTR pv)
+{
+ Log(("PDMR3VmmDevHeapFree: %RHv\n", pv)); RT_NOREF_PV(pv);
+
+ /** @todo not a real heap as there's currently only one user. */
+ pVM->pdm.s.cbVMMDevHeapLeft = pVM->pdm.s.cbVMMDevHeap;
+ pVM->pdm.s.pfnVMMDevHeapNotify = NULL;
+ return VINF_SUCCESS;
+}
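+
+/*
+ * Editor's note: usage sketch; fooHeapNotify is a hypothetical
+ * PFNPDMVMMDEVHEAPNOTIFY callback.  With the current single-user
+ * implementation a second allocation fails until the first is freed:
+ *
+ *     RTR3PTR pv;
+ *     int rc = PDMR3VmmDevHeapAlloc(pVM, _64K, fooHeapNotify, &pv);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         // ... map it into the guest, use it ...
+ *         PDMR3VmmDevHeapFree(pVM, pv);
+ *     }
+ */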
+
+
+/**
+ * Worker for DBGFR3TraceConfig that checks if the given tracing group name
+ * matches a device or driver name and applies the tracing config change.
+ *
+ * @returns VINF_SUCCESS or VERR_NOT_FOUND.
+ * @param pVM The cross context VM structure.
+ * @param pszName The tracing config group name. This is NULL if
+ * the operation applies to every device and
+ * driver.
+ * @param cchName The length to match.
+ * @param fEnable Whether to enable or disable the corresponding
+ * trace points.
+ * @param fApply Whether to actually apply the changes or just do
+ * existence checks.
+ */
+VMMR3_INT_DECL(int) PDMR3TracingConfig(PVM pVM, const char *pszName, size_t cchName, bool fEnable, bool fApply)
+{
+ /** @todo This code is potentially racing driver attaching and detaching. */
+
+ /*
+ * Applies to all.
+ */
+ if (pszName == NULL)
+ {
+ AssertReturn(fApply, VINF_SUCCESS);
+
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ pDevIns->fTracing = fEnable;
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->fTracing = fEnable;
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ pUsbIns->fTracing = fEnable;
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ pDrvIns->fTracing = fEnable;
+
+ }
+#endif
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Specific devices, USB devices or drivers.
+ * Decode prefix to figure which of these it applies to.
+ */
+ if (cchName <= 3)
+ return VERR_NOT_FOUND;
+
+ uint32_t cMatches = 0;
+ if (!strncmp("dev", pszName, 3))
+ {
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ const char *pszDevName = pDevIns->Internal.s.pDevR3->pReg->szName;
+ size_t cchDevName = strlen(pszDevName);
+ if ( ( cchDevName == cchName
+ && !RTStrNICmp(pszName, pszDevName, cchDevName))
+ || ( cchDevName == cchName - 3
+ && !RTStrNICmp(pszName + 3, pszDevName, cchDevName)) )
+ {
+ cMatches++;
+ if (fApply)
+ pDevIns->fTracing = fEnable;
+ }
+ }
+ }
+ else if (!strncmp("usb", pszName, 3))
+ {
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ const char *pszUsbName = pUsbIns->Internal.s.pUsbDev->pReg->szName;
+ size_t cchUsbName = strlen(pszUsbName);
+ if ( ( cchUsbName == cchName
+ && !RTStrNICmp(pszName, pszUsbName, cchUsbName))
+ || ( cchUsbName == cchName - 3
+ && !RTStrNICmp(pszName + 3, pszUsbName, cchUsbName)) )
+ {
+ cMatches++;
+ if (fApply)
+ pUsbIns->fTracing = fEnable;
+ }
+ }
+ }
+ else if (!strncmp("drv", pszName, 3))
+ {
+ AssertReturn(fApply, VINF_SUCCESS);
+
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ {
+ const char *pszDrvName = pDrvIns->Internal.s.pDrv->pReg->szName;
+ size_t cchDrvName = strlen(pszDrvName);
+ if ( ( cchDrvName == cchName
+ && !RTStrNICmp(pszName, pszDrvName, cchDrvName))
+ || ( cchDrvName == cchName - 3
+ && !RTStrNICmp(pszName + 3, pszDrvName, cchDrvName)) )
+ {
+ cMatches++;
+ if (fApply)
+ pDrvIns->fTracing = fEnable;
+ }
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ {
+ const char *pszDrvName = pDrvIns->Internal.s.pDrv->pReg->szName;
+ size_t cchDrvName = strlen(pszDrvName);
+ if ( ( cchDrvName == cchName
+ && !RTStrNICmp(pszName, pszDrvName, cchDrvName))
+ || ( cchDrvName == cchName - 3
+ && !RTStrNICmp(pszName + 3, pszDrvName, cchDrvName)) )
+ {
+ cMatches++;
+ if (fApply)
+ pDrvIns->fTracing = fEnable;
+ }
+ }
+#endif
+ }
+ else
+ return VERR_NOT_FOUND;
+
+ return cMatches > 0 ? VINF_SUCCESS : VERR_NOT_FOUND;
+}
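+
+/*
+ * Editor's note: examples of accepted group names (matching is
+ * case-insensitive and the "dev"/"usb"/"drv" prefix is stripped before
+ * comparing against registered names; "VGA" and "NAT" are illustrative):
+ *
+ *     PDMR3TracingConfig(pVM, RT_STR_TUPLE("devVGA"), true, true);  // one device
+ *     PDMR3TracingConfig(pVM, RT_STR_TUPLE("drvNAT"), true, true);  // matching drivers
+ *     PDMR3TracingConfig(pVM, NULL, 0, false, true);                // everything off
+ */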
+
+
+/**
+ * Worker for DBGFR3TraceQueryConfig that checks whether all drivers, devices,
+ * and USB device have the same tracing settings.
+ *
+ * @returns true / false.
+ * @param pVM The cross context VM structure.
+ * @param fEnabled The tracing setting to check for.
+ */
+VMMR3_INT_DECL(bool) PDMR3TracingAreAll(PVM pVM, bool fEnabled)
+{
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ if (pDevIns->fTracing != (uint32_t)fEnabled)
+ return false;
+
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (pDrvIns->fTracing != (uint32_t)fEnabled)
+ return false;
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ if (pUsbIns->fTracing != (uint32_t)fEnabled)
+ return false;
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (pDrvIns->fTracing != (uint32_t)fEnabled)
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+
+/**
+ * Worker for PDMR3TracingQueryConfig that adds a prefixed name to the output
+ * string.
+ *
+ * @returns VINF_SUCCESS or VERR_BUFFER_OVERFLOW
+ * @param ppszDst The pointer to the output buffer pointer.
+ * @param pcbDst The pointer to the output buffer size.
+ * @param fSpace Whether to add a space before the name.
+ * @param pszPrefix The name prefix.
+ * @param pszName The name.
+ */
+static int pdmR3TracingAdd(char **ppszDst, size_t *pcbDst, bool fSpace, const char *pszPrefix, const char *pszName)
+{
+ size_t const cchPrefix = strlen(pszPrefix);
+ if (!RTStrNICmp(pszPrefix, pszName, cchPrefix))
+ pszName += cchPrefix;
+ size_t const cchName = strlen(pszName);
+
+ size_t const cchThis = cchName + cchPrefix + fSpace;
+ if (cchThis >= *pcbDst)
+ return VERR_BUFFER_OVERFLOW;
+ if (fSpace)
+ {
+ **ppszDst = ' ';
+ memcpy(*ppszDst + 1, pszPrefix, cchPrefix);
+ memcpy(*ppszDst + 1 + cchPrefix, pszName, cchName + 1);
+ }
+ else
+ {
+ memcpy(*ppszDst, pszPrefix, cchPrefix);
+ memcpy(*ppszDst + cchPrefix, pszName, cchName + 1);
+ }
+ *ppszDst += cchThis;
+ *pcbDst -= cchThis;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for DBGFR3TraceQueryConfig use when not everything is either enabled
+ * or disabled.
+ *
+ * @returns VINF_SUCCESS or VERR_BUFFER_OVERFLOW
+ * @param pVM The cross context VM structure.
+ * @param pszConfig Where to store the config spec.
+ * @param cbConfig The size of the output buffer.
+ */
+VMMR3_INT_DECL(int) PDMR3TracingQueryConfig(PVM pVM, char *pszConfig, size_t cbConfig)
+{
+ int rc;
+ char *pszDst = pszConfig;
+ size_t cbDst = cbConfig;
+
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ if (pDevIns->fTracing)
+ {
+ rc = pdmR3TracingAdd(&pszDst, &cbDst, pszDst != pszConfig, "dev", pDevIns->Internal.s.pDevR3->pReg->szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (pDrvIns->fTracing)
+ {
+ rc = pdmR3TracingAdd(&pszDst, &cbDst, pszDst != pszConfig, "drv", pDrvIns->Internal.s.pDrv->pReg->szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+
+#ifdef VBOX_WITH_USB
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ if (pUsbIns->fTracing)
+ {
+ rc = pdmR3TracingAdd(&pszDst, &cbDst, pszDst != pszConfig, "usb", pUsbIns->Internal.s.pUsbDev->pReg->szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (pDrvIns->fTracing)
+ {
+ rc = pdmR3TracingAdd(&pszDst, &cbDst, pszDst != pszConfig, "drv", pDrvIns->Internal.s.pDrv->pReg->szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks that a PDMDRVREG::szName, PDMDEVREG::szName or PDMUSBREG::szName
+ * field contains only a limited set of ASCII characters.
+ *
+ * @returns true / false.
+ * @param pszName The name to validate.
+ */
+bool pdmR3IsValidName(const char *pszName)
+{
+ char ch;
+ while ( (ch = *pszName) != '\0'
+ && ( RT_C_IS_ALNUM(ch)
+ || ch == '-'
+ || ch == ' ' /** @todo disallow this! */
+ || ch == '_') )
+ pszName++;
+ return ch == '\0';
+}
+
+
+/**
+ * Info handler for 'pdmtracingids'.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The output helpers.
+ * @param pszArgs The optional user arguments.
+ *
+ * @remarks Can be called on most threads.
+ */
+static DECLCALLBACK(void) pdmR3InfoTracingIds(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /*
+ * Parse the argument (optional).
+ */
+ if ( pszArgs
+ && *pszArgs
+ && strcmp(pszArgs, "all")
+ && strcmp(pszArgs, "devices")
+ && strcmp(pszArgs, "drivers")
+ && strcmp(pszArgs, "usb"))
+ {
+ pHlp->pfnPrintf(pHlp, "Unable to grok '%s'\n", pszArgs);
+ return;
+ }
+ bool fAll = !pszArgs || !*pszArgs || !strcmp(pszArgs, "all");
+ bool fDevices = fAll || !strcmp(pszArgs, "devices");
+ bool fUsbDevs = fAll || !strcmp(pszArgs, "usb");
+ bool fDrivers = fAll || !strcmp(pszArgs, "drivers");
+
+ /*
+ * Produce the requested output.
+ */
+/** @todo lock PDM lists! */
+ /* devices */
+ if (fDevices)
+ {
+ pHlp->pfnPrintf(pHlp, "Device tracing IDs:\n");
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ pHlp->pfnPrintf(pHlp, "%05u %s\n", pDevIns->idTracing, pDevIns->Internal.s.pDevR3->pReg->szName);
+ }
+
+ /* USB devices */
+ if (fUsbDevs)
+ {
+ pHlp->pfnPrintf(pHlp, "USB device tracing IDs:\n");
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ pHlp->pfnPrintf(pHlp, "%05u %s\n", pUsbIns->idTracing, pUsbIns->Internal.s.pUsbDev->pReg->szName);
+ }
+
+ /* Drivers */
+ if (fDrivers)
+ {
+ pHlp->pfnPrintf(pHlp, "Driver tracing IDs:\n");
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ {
+ uint32_t iLevel = 0;
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown, iLevel++)
+ pHlp->pfnPrintf(pHlp, "%05u %s (level %u, lun %u, dev %s)\n",
+ pDrvIns->idTracing, pDrvIns->Internal.s.pDrv->pReg->szName,
+ iLevel, pLun->iLun, pDevIns->Internal.s.pDevR3->pReg->szName);
+ }
+ }
+
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ {
+ uint32_t iLevel = 0;
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown, iLevel++)
+ pHlp->pfnPrintf(pHlp, "%05u %s (level %u, lun %u, dev %s)\n",
+ pDrvIns->idTracing, pDrvIns->Internal.s.pDrv->pReg->szName,
+ iLevel, pLun->iLun, pUsbIns->Internal.s.pUsbDev->pReg->szName);
+ }
+ }
+ }
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp
new file mode 100644
index 00000000..a9a16229
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp
@@ -0,0 +1,1807 @@
+/* $Id: PDMAsyncCompletion.cpp $ */
+/** @file
+ * PDM Async I/O - Transport data asynchronously in R3 using EMT.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/mem.h>
+#include <iprt/critsect.h>
+#include <iprt/tcp.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+
+#include <VBox/vmm/pdmasynccompletion.h>
+#include "PDMAsyncCompletionInternal.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Async I/O type.
+ */
+typedef enum PDMASYNCCOMPLETIONTEMPLATETYPE
+{
+ /** Device consumer. */
+ PDMASYNCCOMPLETIONTEMPLATETYPE_DEV = 1,
+ /** Driver consumer. */
+ PDMASYNCCOMPLETIONTEMPLATETYPE_DRV,
+ /** Internal consumer. */
+ PDMASYNCCOMPLETIONTEMPLATETYPE_INTERNAL,
+ /** USB consumer. */
+ PDMASYNCCOMPLETIONTEMPLATETYPE_USB
+} PDMASYNCTEMPLATETYPE;
+
+/**
+ * PDM Async I/O template.
+ */
+typedef struct PDMASYNCCOMPLETIONTEMPLATE
+{
+ /** Pointer to the next template in the list. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONTEMPLATE) pNext;
+ /** Pointer to the previous template in the list. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONTEMPLATE) pPrev;
+ /** Type specific data. */
+ union
+ {
+ /** PDMASYNCCOMPLETIONTEMPLATETYPE_DEV */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMASYNCCOMPLETEDEV) pfnCompleted;
+ /** Pointer to the device instance owning the template. */
+ R3PTRTYPE(PPDMDEVINS) pDevIns;
+ } Dev;
+ /** PDMASYNCCOMPLETIONTEMPLATETYPE_DRV */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMASYNCCOMPLETEDRV) pfnCompleted;
+ /** Pointer to the driver instance owning the template. */
+ R3PTRTYPE(PPDMDRVINS) pDrvIns;
+ /** User argument given during template creation.
+ * This is only here to make things much easier
+ * for DrvVD. */
+ void *pvTemplateUser;
+ } Drv;
+ /** PDMASYNCCOMPLETIONTEMPLATETYPE_INTERNAL */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMASYNCCOMPLETEINT) pfnCompleted;
+ /** Pointer to user data. */
+ R3PTRTYPE(void *) pvUser;
+ } Int;
+ /** PDMASYNCCOMPLETIONTEMPLATETYPE_USB */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMASYNCCOMPLETEUSB) pfnCompleted;
+ /** Pointer to the usb instance owning the template. */
+ R3PTRTYPE(PPDMUSBINS) pUsbIns;
+ } Usb;
+ } u;
+ /** Template type. */
+ PDMASYNCCOMPLETIONTEMPLATETYPE enmType;
+ /** Pointer to the VM. */
+ R3PTRTYPE(PVM) pVM;
+ /** Use count of the template. */
+ volatile uint32_t cUsed;
+} PDMASYNCCOMPLETIONTEMPLATE;
+
+/**
+ * Bandwidth control manager instance data.
+ */
+typedef struct PDMACBWMGR
+{
+ /** Pointer to the next manager in the list. */
+ struct PDMACBWMGR *pNext;
+ /** Pointer to the endpoint class the manager belongs to. */
+ PPDMASYNCCOMPLETIONEPCLASS pEpClass;
+ /** Identifier of the manager. */
+ char *pszId;
+ /** Maximum number of bytes the endpoints are allowed to transfer (Max is 4GB/s currently) */
+ volatile uint32_t cbTransferPerSecMax;
+ /** Number of bytes per second the manager starts with (ramped up by the step value). */
+ volatile uint32_t cbTransferPerSecStart;
+ /** Step the per-second allowance is increased by after each update. */
+ volatile uint32_t cbTransferPerSecStep;
+ /** Number of bytes we are allowed to transfer till the next update.
+ * Reset by the refresh timer. */
+ volatile uint32_t cbTransferAllowed;
+ /** Timestamp of the last update. */
+ volatile uint64_t tsUpdatedLast;
+ /** Reference counter - How many endpoints are associated with this manager. */
+ volatile uint32_t cRefs;
+} PDMACBWMGR;
+/** Pointer to a bandwidth control manager pointer. */
+typedef PPDMACBWMGR *PPPDMACBWMGR;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void pdmR3AsyncCompletionPutTask(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, PPDMASYNCCOMPLETIONTASK pTask);
+
+
+/**
+ * Internal worker for the creation APIs.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppTemplate Where to store the template handle.
+ * @param enmType Async completion template type (dev, drv, usb, int).
+ */
+static int pdmR3AsyncCompletionTemplateCreate(PVM pVM, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PDMASYNCCOMPLETIONTEMPLATETYPE enmType)
+{
+ PUVM pUVM = pVM->pUVM;
+
+ AssertPtrReturn(ppTemplate, VERR_INVALID_POINTER);
+
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_ASYNC_COMPLETION, sizeof(PDMASYNCCOMPLETIONTEMPLATE), (void **)&pTemplate);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Initialize fields.
+ */
+ pTemplate->pVM = pVM;
+ pTemplate->cUsed = 0;
+ pTemplate->enmType = enmType;
+
+ /*
+ * Add template to the global VM template list.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pTemplate->pNext = pUVM->pdm.s.pAsyncCompletionTemplates;
+ if (pUVM->pdm.s.pAsyncCompletionTemplates)
+ pUVM->pdm.s.pAsyncCompletionTemplates->pPrev = pTemplate;
+ pUVM->pdm.s.pAsyncCompletionTemplates = pTemplate;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ *ppTemplate = pTemplate;
+ return VINF_SUCCESS;
+}
+
+
+#ifdef SOME_UNUSED_FUNCTION
+/**
+ * Creates an async completion template for a device instance.
+ *
+ * The template is used when creating new completion tasks.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ * @param ppTemplate Where to store the template pointer on success.
+ * @param pfnCompleted The completion callback routine.
+ * @param pszDesc Description.
+ */
+int pdmR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDEV pfnCompleted, const char *pszDesc)
+{
+ LogFlow(("%s: pDevIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n",
+ __FUNCTION__, pDevIns, ppTemplate, pfnCompleted, pszDesc));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT(pVM);
+ AssertPtrReturn(pfnCompleted, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTemplate, VERR_INVALID_POINTER);
+
+ /*
+ * Create the template.
+ */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = pdmR3AsyncCompletionTemplateCreate(pVM, &pTemplate, PDMASYNCCOMPLETIONTEMPLATETYPE_DEV);
+ if (RT_SUCCESS(rc))
+ {
+ pTemplate->u.Dev.pDevIns = pDevIns;
+ pTemplate->u.Dev.pfnCompleted = pfnCompleted;
+
+ *ppTemplate = pTemplate;
+ Log(("PDM: Created device template %p: pfnCompleted=%p pDevIns=%p\n",
+ pTemplate, pfnCompleted, pDevIns));
+ }
+
+ return rc;
+}
+#endif /* SOME_UNUSED_FUNCTION */
+
+
+/**
+ * Creates an async completion template for a driver instance.
+ *
+ * The template is used when creating new completion tasks.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ * @param ppTemplate Where to store the template pointer on success.
+ * @param pfnCompleted The completion callback routine.
+ * @param pvTemplateUser Template user argument.
+ * @param pszDesc Description.
+ */
+int pdmR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser,
+ const char *pszDesc)
+{
+ LogFlow(("PDMR3AsyncCompletionTemplateCreateDriver: pDrvIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n",
+ pDrvIns, ppTemplate, pfnCompleted, pszDesc));
+ RT_NOREF_PV(pszDesc); /** @todo async template description */
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnCompleted, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTemplate, VERR_INVALID_POINTER);
+
+ /*
+ * Create the template.
+ */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = pdmR3AsyncCompletionTemplateCreate(pVM, &pTemplate, PDMASYNCCOMPLETIONTEMPLATETYPE_DRV);
+ if (RT_SUCCESS(rc))
+ {
+ pTemplate->u.Drv.pDrvIns = pDrvIns;
+ pTemplate->u.Drv.pfnCompleted = pfnCompleted;
+ pTemplate->u.Drv.pvTemplateUser = pvTemplateUser;
+
+ *ppTemplate = pTemplate;
+ Log(("PDM: Created driver template %p: pfnCompleted=%p pDrvIns=%p\n",
+ pTemplate, pfnCompleted, pDrvIns));
+ }
+
+ return rc;
+}
+
+
+#ifdef SOME_UNUSED_FUNCTION
+/**
+ * Creates an async completion template for a USB device instance.
+ *
+ * The template is used when creating new completion tasks.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance.
+ * @param ppTemplate Where to store the template pointer on success.
+ * @param pfnCompleted The completion callback routine.
+ * @param pszDesc Description.
+ */
+int pdmR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEUSB pfnCompleted, const char *pszDesc)
+{
+ LogFlow(("pdmR3AsyncCompletionTemplateCreateUsb: pUsbIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n", pUsbIns, ppTemplate, pfnCompleted, pszDesc));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT(pVM);
+ AssertPtrReturn(pfnCompleted, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTemplate, VERR_INVALID_POINTER);
+
+ /*
+ * Create the template.
+ */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = pdmR3AsyncCompletionTemplateCreate(pVM, &pTemplate, PDMASYNCCOMPLETIONTEMPLATETYPE_USB);
+ if (RT_SUCCESS(rc))
+ {
+ pTemplate->u.Usb.pUsbIns = pUsbIns;
+ pTemplate->u.Usb.pfnCompleted = pfnCompleted;
+
+ *ppTemplate = pTemplate;
+ Log(("PDM: Created usb template %p: pfnCompleted=%p pDevIns=%p\n",
+ pTemplate, pfnCompleted, pUsbIns));
+ }
+
+ return rc;
+}
+#endif
+
+
+/**
+ * Creates an async completion template for internal use by the VMM.
+ *
+ * The template is used when creating new completion tasks.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppTemplate Where to store the template pointer on success.
+ * @param pfnCompleted The completion callback routine.
+ * @param pvUser2 The 2nd user argument for the callback.
+ * @param pszDesc Description.
+ * @internal
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateInternal(PVM pVM, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEINT pfnCompleted, void *pvUser2, const char *pszDesc)
+{
+ LogFlow(("PDMR3AsyncCompletionTemplateCreateInternal: ppTemplate=%p pfnCompleted=%p pvUser2=%p pszDesc=%s\n",
+ ppTemplate, pfnCompleted, pvUser2, pszDesc));
+ RT_NOREF_PV(pszDesc); /** @todo async template description */
+
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT(pVM);
+ AssertPtrReturn(pfnCompleted, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTemplate, VERR_INVALID_POINTER);
+
+ /*
+ * Create the template.
+ */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = pdmR3AsyncCompletionTemplateCreate(pVM, &pTemplate, PDMASYNCCOMPLETIONTEMPLATETYPE_INTERNAL);
+ if (RT_SUCCESS(rc))
+ {
+ pTemplate->u.Int.pvUser = pvUser2;
+ pTemplate->u.Int.pfnCompleted = pfnCompleted;
+
+ *ppTemplate = pTemplate;
+ Log(("PDM: Created internal template %p: pfnCompleted=%p pvUser2=%p\n",
+ pTemplate, pfnCompleted, pvUser2));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Destroys the specified async completion template.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PDM_ASYNC_TEMPLATE_BUSY if the template is still in use.
+ *
+ * @param pTemplate The template in question.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroy(PPDMASYNCCOMPLETIONTEMPLATE pTemplate)
+{
+ LogFlow(("%s: pTemplate=%p\n", __FUNCTION__, pTemplate));
+
+ if (!pTemplate)
+ {
+ AssertMsgFailed(("pTemplate is NULL!\n"));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Check if the template is still used.
+ */
+ if (pTemplate->cUsed > 0)
+ {
+ AssertMsgFailed(("Template is still in use\n"));
+ return VERR_PDM_ASYNC_TEMPLATE_BUSY;
+ }
+
+ /*
+ * Unlink the template from the list.
+ */
+ PUVM pUVM = pTemplate->pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ PPDMASYNCCOMPLETIONTEMPLATE pPrev = pTemplate->pPrev;
+ PPDMASYNCCOMPLETIONTEMPLATE pNext = pTemplate->pNext;
+
+ if (pPrev)
+ pPrev->pNext = pNext;
+ else
+ pUVM->pdm.s.pAsyncCompletionTemplates = pNext;
+
+ if (pNext)
+ pNext->pPrev = pPrev;
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ /*
+ * Free the template.
+ */
+ MMR3HeapFree(pTemplate);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys all the specified async completion templates for the given device instance.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PDM_ASYNC_TEMPLATE_BUSY if one or more of the templates are still in use.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ */
+int pdmR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyDevice: pDevIns=%p\n", pDevIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pDevIns)
+ return VERR_INVALID_PARAMETER;
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Unlink it.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate = pUVM->pdm.s.pAsyncCompletionTemplates;
+ while (pTemplate)
+ {
+ if ( pTemplate->enmType == PDMASYNCCOMPLETIONTEMPLATETYPE_DEV
+ && pTemplate->u.Dev.pDevIns == pDevIns)
+ {
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplateDestroy = pTemplate;
+ pTemplate = pTemplate->pNext;
+ int rc = PDMR3AsyncCompletionTemplateDestroy(pTemplateDestroy);
+ if (RT_FAILURE(rc))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+ }
+ else
+ pTemplate = pTemplate->pNext;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys all the specified async completion templates for the given driver instance.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PDM_ASYNC_TEMPLATE_BUSY if one or more of the templates are still in use.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ */
+int pdmR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyDriver: pDevIns=%p\n", pDrvIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pDrvIns)
+ return VERR_INVALID_PARAMETER;
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Unlink it.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate = pUVM->pdm.s.pAsyncCompletionTemplates;
+ while (pTemplate)
+ {
+ if ( pTemplate->enmType == PDMASYNCCOMPLETIONTEMPLATETYPE_DRV
+ && pTemplate->u.Drv.pDrvIns == pDrvIns)
+ {
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplateDestroy = pTemplate;
+ pTemplate = pTemplate->pNext;
+ int rc = PDMR3AsyncCompletionTemplateDestroy(pTemplateDestroy);
+ if (RT_FAILURE(rc))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+ }
+ else
+ pTemplate = pTemplate->pNext;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys all the specified async completion templates for the given USB device instance.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PDM_ASYNC_TEMPLATE_BUSY if one or more of the templates are still in use.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance.
+ */
+int pdmR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns)
+{
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyUsb: pUsbIns=%p\n", pUsbIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pUsbIns)
+ return VERR_INVALID_PARAMETER;
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Unlink it.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate = pUVM->pdm.s.pAsyncCompletionTemplates;
+ while (pTemplate)
+ {
+ if ( pTemplate->enmType == PDMASYNCCOMPLETIONTEMPLATETYPE_USB
+ && pTemplate->u.Usb.pUsbIns == pUsbIns)
+ {
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplateDestroy = pTemplate;
+ pTemplate = pTemplate->pNext;
+ int rc = PDMR3AsyncCompletionTemplateDestroy(pTemplateDestroy);
+ if (RT_FAILURE(rc))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+ }
+ else
+ pTemplate = pTemplate->pNext;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
+
+/** Looks up the bandwidth manager with the given identifier, returning NULL if not found. */
+static PPDMACBWMGR pdmacBwMgrFindById(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pszId)
+{
+ PPDMACBWMGR pBwMgr = NULL;
+
+ if (pszId)
+ {
+ int rc = RTCritSectEnter(&pEpClass->CritSect); AssertRC(rc);
+
+ pBwMgr = pEpClass->pBwMgrsHead;
+ while ( pBwMgr
+ && RTStrCmp(pBwMgr->pszId, pszId))
+ pBwMgr = pBwMgr->pNext;
+
+ rc = RTCritSectLeave(&pEpClass->CritSect); AssertRC(rc);
+ }
+
+ return pBwMgr;
+}
+
+
+/** Links the bandwidth manager into the list of its endpoint class. */
+static void pdmacBwMgrLink(PPDMACBWMGR pBwMgr)
+{
+ PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
+ int rc = RTCritSectEnter(&pEpClass->CritSect); AssertRC(rc);
+
+ pBwMgr->pNext = pEpClass->pBwMgrsHead;
+ pEpClass->pBwMgrsHead = pBwMgr;
+
+ rc = RTCritSectLeave(&pEpClass->CritSect); AssertRC(rc);
+}
+
+
+#ifdef SOME_UNUSED_FUNCTION
+/** Unlinks the bandwidth manager from the list of its endpoint class. */
+static void pdmacBwMgrUnlink(PPDMACBWMGR pBwMgr)
+{
+ PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
+ int rc = RTCritSectEnter(&pEpClass->CritSect); AssertRC(rc);
+
+ if (pBwMgr == pEpClass->pBwMgrsHead)
+ pEpClass->pBwMgrsHead = pBwMgr->pNext;
+ else
+ {
+ PPDMACBWMGR pPrev = pEpClass->pBwMgrsHead;
+ while ( pPrev
+ && pPrev->pNext != pBwMgr)
+ pPrev = pPrev->pNext;
+
+ AssertPtr(pPrev);
+ pPrev->pNext = pBwMgr->pNext;
+ }
+
+ rc = RTCritSectLeave(&pEpClass->CritSect); AssertRC(rc);
+}
+#endif /* SOME_UNUSED_FUNCTION */
+
+
+/** Creates a new bandwidth manager with the given identifier and limits, failing if the identifier is already taken. */
+static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pszBwMgr, uint32_t cbTransferPerSecMax,
+ uint32_t cbTransferPerSecStart, uint32_t cbTransferPerSecStep)
+{
+ LogFlowFunc(("pEpClass=%#p pszBwMgr=%#p{%s} cbTransferPerSecMax=%u cbTransferPerSecStart=%u cbTransferPerSecStep=%u\n",
+ pEpClass, pszBwMgr, pszBwMgr, cbTransferPerSecMax, cbTransferPerSecStart, cbTransferPerSecStep));
+
+ AssertPtrReturn(pEpClass, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszBwMgr, VERR_INVALID_POINTER);
+ AssertReturn(*pszBwMgr != '\0', VERR_INVALID_PARAMETER);
+
+ int rc;
+ PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pszBwMgr);
+ if (!pBwMgr)
+ {
+ rc = MMR3HeapAllocZEx(pEpClass->pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+ sizeof(PDMACBWMGR),
+ (void **)&pBwMgr);
+ if (RT_SUCCESS(rc))
+ {
+ pBwMgr->pszId = RTStrDup(pszBwMgr);
+ if (pBwMgr->pszId)
+ {
+ pBwMgr->pEpClass = pEpClass;
+ pBwMgr->cRefs = 0;
+
+ /* Init I/O flow control. */
+ pBwMgr->cbTransferPerSecMax = cbTransferPerSecMax;
+ pBwMgr->cbTransferPerSecStart = cbTransferPerSecStart;
+ pBwMgr->cbTransferPerSecStep = cbTransferPerSecStep;
+
+ pBwMgr->cbTransferAllowed = pBwMgr->cbTransferPerSecStart;
+ pBwMgr->tsUpdatedLast = RTTimeSystemNanoTS();
+
+ pdmacBwMgrLink(pBwMgr);
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ rc = VERR_NO_MEMORY;
+ MMR3HeapFree(pBwMgr);
+ }
+ }
+ }
+ else
+ rc = VERR_ALREADY_EXISTS;
+
+ LogFlowFunc(("returns rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/** Retains a reference to the bandwidth manager. */
+DECLINLINE(void) pdmacBwMgrRetain(PPDMACBWMGR pBwMgr)
+{
+ ASMAtomicIncU32(&pBwMgr->cRefs);
+}
+
+
+/** Releases a reference to the bandwidth manager. */
+DECLINLINE(void) pdmacBwMgrRelease(PPDMACBWMGR pBwMgr)
+{
+ Assert(pBwMgr->cRefs > 0);
+ ASMAtomicDecU32(&pBwMgr->cRefs);
+}
+
+
+/**
+ * Checks if the endpoint is allowed to transfer the given amount of bytes.
+ *
+ * @returns true if the endpoint is allowed to transfer the data,
+ * false otherwise.
+ * @param pEndpoint The endpoint.
+ * @param cbTransfer The number of bytes to transfer.
+ * @param pmsWhenNext Where to store the number of milliseconds
+ * until the bandwidth is refreshed.
+ * Only set if false is returned.
+ */
+bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext)
+{
+ bool fAllowed = true;
+ PPDMACBWMGR pBwMgr = ASMAtomicReadPtrT(&pEndpoint->pBwMgr, PPDMACBWMGR);
+
+ LogFlowFunc(("pEndpoint=%p pBwMgr=%p cbTransfer=%u\n", pEndpoint, pBwMgr, cbTransfer));
+
+ if (pBwMgr)
+ {
+ uint32_t cbOld = ASMAtomicSubU32(&pBwMgr->cbTransferAllowed, cbTransfer);
+ if (RT_LIKELY(cbOld >= cbTransfer))
+ fAllowed = true;
+ else
+ {
+ fAllowed = false;
+
+ /* We are out of resources. Check if we can update again. */
+ uint64_t tsNow = RTTimeSystemNanoTS();
+ uint64_t tsUpdatedLast = ASMAtomicUoReadU64(&pBwMgr->tsUpdatedLast);
+
+ if (tsNow - tsUpdatedLast >= (1000*1000*1000))
+ {
+ if (ASMAtomicCmpXchgU64(&pBwMgr->tsUpdatedLast, tsNow, tsUpdatedLast))
+ {
+ if (pBwMgr->cbTransferPerSecStart < pBwMgr->cbTransferPerSecMax)
+ {
+ pBwMgr->cbTransferPerSecStart = RT_MIN(pBwMgr->cbTransferPerSecMax, pBwMgr->cbTransferPerSecStart + pBwMgr->cbTransferPerSecStep);
+ LogFlow(("AIOMgr: Increasing maximum bandwidth to %u bytes/sec\n", pBwMgr->cbTransferPerSecStart));
+ }
+
+ /* Update */
+ uint32_t cbTransferAllowedNew = pBwMgr->cbTransferPerSecStart > cbTransfer
+ ? pBwMgr->cbTransferPerSecStart - cbTransfer
+ : 0;
+ ASMAtomicWriteU32(&pBwMgr->cbTransferAllowed, cbTransferAllowedNew);
+ fAllowed = true;
+ LogFlow(("AIOMgr: Refreshed bandwidth\n"));
+ }
+ }
+ else
+ {
+ ASMAtomicAddU32(&pBwMgr->cbTransferAllowed, cbTransfer);
+ *pmsWhenNext = ((1000*1000*1000) - (tsNow - tsUpdatedLast)) / (1000*1000);
+ }
+ }
+ }
+
+ LogFlowFunc(("fAllowed=%RTbool\n", fAllowed));
+ return fAllowed;
+}
+
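+/*
+ * Illustrative caller pattern (a sketch, not lifted from one of the I/O
+ * managers; pdmacWorkerArmRetryTimer is a hypothetical helper name): a
+ * worker honouring the bandwidth manager defers a request when the transfer
+ * is rejected and retries once the allowance has been refreshed:
+ *
+ * RTMSINTERVAL msWhenNext = 0;
+ * if (!pdmacEpIsTransferAllowed(pEndpoint, (uint32_t)cbTransfer, &msWhenNext))
+ * {
+ * // Out of budget: park the request and retry in msWhenNext milliseconds.
+ * pdmacWorkerArmRetryTimer(pAioMgr, msWhenNext);
+ * return;
+ * }
+ * // Budget granted: submit the request to the host.
+ */
+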
+
+/**
+ * Called by the endpoint if a task has finished.
+ *
+ * @param pTask Pointer to the finished task.
+ * @param rc Status code of the completed request.
+ * @param fCallCompletionHandler Flag whether the completion handler should be called to
+ * inform the owner of the task that it has completed.
+ */
+void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler)
+{
+ LogFlow(("%s: pTask=%#p fCallCompletionHandler=%RTbool\n", __FUNCTION__, pTask, fCallCompletionHandler));
+
+ if (fCallCompletionHandler)
+ {
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate = pTask->pEndpoint->pTemplate;
+
+ switch (pTemplate->enmType)
+ {
+ case PDMASYNCCOMPLETIONTEMPLATETYPE_DEV:
+ pTemplate->u.Dev.pfnCompleted(pTemplate->u.Dev.pDevIns, pTask->pvUser, rc);
+ break;
+
+ case PDMASYNCCOMPLETIONTEMPLATETYPE_DRV:
+ pTemplate->u.Drv.pfnCompleted(pTemplate->u.Drv.pDrvIns, pTemplate->u.Drv.pvTemplateUser, pTask->pvUser, rc);
+ break;
+
+ case PDMASYNCCOMPLETIONTEMPLATETYPE_USB:
+ pTemplate->u.Usb.pfnCompleted(pTemplate->u.Usb.pUsbIns, pTask->pvUser, rc);
+ break;
+
+ case PDMASYNCCOMPLETIONTEMPLATETYPE_INTERNAL:
+ pTemplate->u.Int.pfnCompleted(pTemplate->pVM, pTask->pvUser, pTemplate->u.Int.pvUser, rc);
+ break;
+
+ default:
+ AssertMsgFailed(("Unknown template type!\n"));
+ }
+ }
+
+ pdmR3AsyncCompletionPutTask(pTask->pEndpoint, pTask);
+}
+
+
+/**
+ * Worker initializing an endpoint class.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pEpClassOps Pointer to the endpoint class structure.
+ * @param pCfgHandle Pointer to the CFGM tree.
+ */
+int pdmR3AsyncCompletionEpClassInit(PVM pVM, PCPDMASYNCCOMPLETIONEPCLASSOPS pEpClassOps, PCFGMNODE pCfgHandle)
+{
+ /* Validate input. */
+ AssertPtrReturn(pEpClassOps, VERR_INVALID_POINTER);
+ AssertReturn(pEpClassOps->u32Version == PDMAC_EPCLASS_OPS_VERSION, VERR_VERSION_MISMATCH);
+ AssertReturn(pEpClassOps->u32VersionEnd == PDMAC_EPCLASS_OPS_VERSION, VERR_VERSION_MISMATCH);
+
+ LogFlow(("pdmR3AsyncCompletionEpClassInit: pVM=%p pEpClassOps=%p{%s}\n", pVM, pEpClassOps, pEpClassOps->pszName));
+
+ /* Allocate global class data. */
+ PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = NULL;
+
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+ pEpClassOps->cbEndpointClassGlobal,
+ (void **)&pEndpointClass);
+ if (RT_SUCCESS(rc))
+ {
+ /* Initialize common data. */
+ pEndpointClass->pVM = pVM;
+ pEndpointClass->pEndpointOps = pEpClassOps;
+
+ rc = RTCritSectInit(&pEndpointClass->CritSect);
+ if (RT_SUCCESS(rc))
+ {
+ PCFGMNODE pCfgNodeClass = CFGMR3GetChild(pCfgHandle, pEpClassOps->pszName);
+
+ /* Create task cache */
+ rc = RTMemCacheCreate(&pEndpointClass->hMemCacheTasks, pEpClassOps->cbTask,
+ 0, UINT32_MAX, NULL, NULL, NULL, 0);
+ if (RT_SUCCESS(rc))
+ {
+ /* Call the specific endpoint class initializer. */
+ rc = pEpClassOps->pfnInitialize(pEndpointClass, pCfgNodeClass);
+ if (RT_SUCCESS(rc))
+ {
+ /* Create all bandwidth groups for resource control. */
+ PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNodeClass, "BwGroups");
+ if (pCfgBwGrp)
+ {
+ for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ size_t cbName = CFGMR3GetNameLen(pCur) + 1;
+ char *pszBwGrpId = (char *)RTMemAllocZ(cbName);
+ if (pszBwGrpId)
+ {
+ rc = CFGMR3GetName(pCur, pszBwGrpId, cbName);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t cbMax;
+ rc = CFGMR3QueryU32(pCur, "Max", &cbMax);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t cbStart;
+ rc = CFGMR3QueryU32Def(pCur, "Start", &cbStart, cbMax);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t cbStep;
+ rc = CFGMR3QueryU32Def(pCur, "Step", &cbStep, 0);
+ if (RT_SUCCESS(rc))
+ rc = pdmacAsyncCompletionBwMgrCreate(pEndpointClass, pszBwGrpId,
+ cbMax, cbStart, cbStep);
+ }
+ }
+ }
+ RTMemFree(pszBwGrpId);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ if (RT_FAILURE(rc))
+ break;
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ PUVM pUVM = pVM->pUVM;
+ AssertMsg(!pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType],
+ ("Endpoint class was already initialized\n"));
+
+#ifdef VBOX_WITH_STATISTICS
+ CFGMR3QueryBoolDef(pCfgNodeClass, "AdvancedStatistics", &pEndpointClass->fGatherAdvancedStatistics, true);
+#else
+ CFGMR3QueryBoolDef(pCfgNodeClass, "AdvancedStatistics", &pEndpointClass->fGatherAdvancedStatistics, false);
+#endif
+
+ pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType] = pEndpointClass;
+ LogFlowFunc((": Initialized endpoint class \"%s\" rc=%Rrc\n", pEpClassOps->pszName, rc));
+ return VINF_SUCCESS;
+ }
+ }
+ RTMemCacheDestroy(pEndpointClass->hMemCacheTasks);
+ }
+ RTCritSectDelete(&pEndpointClass->CritSect);
+ }
+ MMR3HeapFree(pEndpointClass);
+ }
+
+ LogFlowFunc((": Failed to initialize endpoint class rc=%Rrc\n", rc));
+
+ return rc;
+}
+
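+/*
+ * Sketch of the configuration tree shape consumed above ("Disks" is an
+ * arbitrary user chosen group identifier; the "File" level is the pszName of
+ * the endpoint class, and the root comes from pdmR3AsyncCompletionInit which
+ * passes the PDM/AsyncCompletion node):
+ *
+ * PDM/AsyncCompletion/File/BwGroups/Disks/Max = 10485760 ; bytes/s ceiling
+ * PDM/AsyncCompletion/File/BwGroups/Disks/Start = 5242880 ; initial rate, defaults to Max
+ * PDM/AsyncCompletion/File/BwGroups/Disks/Step = 1048576 ; ramp-up per refresh, defaults to 0
+ * PDM/AsyncCompletion/File/AdvancedStatistics = 1
+ */
+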
+
+/**
+ * Worker for terminating an endpoint class.
+ *
+ * @param pEndpointClass Pointer to the endpoint class to terminate.
+ *
+ * @remarks This method ensures that any still open endpoint is closed.
+ */
+static void pdmR3AsyncCompletionEpClassTerminate(PPDMASYNCCOMPLETIONEPCLASS pEndpointClass)
+{
+ PVM pVM = pEndpointClass->pVM;
+
+ /* Close all still open endpoints. */
+ while (pEndpointClass->pEndpointsHead)
+ PDMR3AsyncCompletionEpClose(pEndpointClass->pEndpointsHead);
+
+ /* Destroy the bandwidth managers. */
+ PPDMACBWMGR pBwMgr = pEndpointClass->pBwMgrsHead;
+ while (pBwMgr)
+ {
+ PPDMACBWMGR pFree = pBwMgr;
+ pBwMgr = pBwMgr->pNext;
+ MMR3HeapFree(pFree);
+ }
+
+ /* Call the termination callback of the class. */
+ pEndpointClass->pEndpointOps->pfnTerminate(pEndpointClass);
+
+ RTMemCacheDestroy(pEndpointClass->hMemCacheTasks);
+ RTCritSectDelete(&pEndpointClass->CritSect);
+
+ /* Free the memory of the class finally and clear the entry in the class array. */
+ pVM->pUVM->pdm.s.apAsyncCompletionEndpointClass[pEndpointClass->pEndpointOps->enmClassType] = NULL;
+ MMR3HeapFree(pEndpointClass);
+}
+
+
+/**
+ * Records the size of the request in the statistics.
+ *
+ * @param pEndpoint The endpoint to register the request size for.
+ * @param cbReq Size of the request.
+ */
+static void pdmR3AsyncCompletionStatisticsRecordSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, size_t cbReq)
+{
+ if (cbReq < 512)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSizeSmaller512);
+ else if (cbReq < _1K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize512To1K);
+ else if (cbReq < _2K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize1KTo2K);
+ else if (cbReq < _4K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize2KTo4K);
+ else if (cbReq < _8K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize4KTo8K);
+ else if (cbReq < _16K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize8KTo16K);
+ else if (cbReq < _32K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize16KTo32K);
+ else if (cbReq < _64K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize32KTo64K);
+ else if (cbReq < _128K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize64KTo128K);
+ else if (cbReq < _256K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize128KTo256K);
+ else if (cbReq < _512K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize256KTo512K);
+ else
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSizeOver512K);
+
+ if (cbReq & ((size_t)512 - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned512);
+ else if (cbReq & ((size_t)_4K - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned4K);
+ else if (cbReq & ((size_t)_8K - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned8K);
+}
+
+
+/**
+ * Records the required processing time of a request.
+ *
+ * @param pEndpoint The endpoint.
+ * @param cNsRun The request time in nanoseconds.
+ */
+static void pdmR3AsyncCompletionStatisticsRecordCompletionTime(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t cNsRun)
+{
+ PSTAMCOUNTER pStatCounter;
+ if (cNsRun < RT_NS_1US)
+ pStatCounter = &pEndpoint->StatTaskRunTimesNs[cNsRun / (RT_NS_1US / 10)];
+ else if (cNsRun < RT_NS_1MS)
+ pStatCounter = &pEndpoint->StatTaskRunTimesUs[cNsRun / (RT_NS_1MS / 10)];
+ else if (cNsRun < RT_NS_1SEC)
+ pStatCounter = &pEndpoint->StatTaskRunTimesMs[cNsRun / (RT_NS_1SEC / 10)];
+ else if (cNsRun < RT_NS_1SEC_64*100)
+ pStatCounter = &pEndpoint->StatTaskRunTimesSec[cNsRun / (RT_NS_1SEC_64*100 / 10)];
+ else
+ pStatCounter = &pEndpoint->StatTaskRunOver100Sec;
+ STAM_REL_COUNTER_INC(pStatCounter);
+
+ STAM_REL_COUNTER_INC(&pEndpoint->StatIoOpsCompleted);
+ pEndpoint->cIoOpsCompleted++;
+ uint64_t tsMsCur = RTTimeMilliTS();
+ uint64_t tsInterval = tsMsCur - pEndpoint->tsIntervalStartMs;
+ if (tsInterval >= 1000)
+ {
+ pEndpoint->StatIoOpsPerSec.c = pEndpoint->cIoOpsCompleted / (tsInterval / 1000);
+ pEndpoint->tsIntervalStartMs = tsMsCur;
+ pEndpoint->cIoOpsCompleted = 0;
+ }
+}
+
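+/*
+ * Worked example for the bucketing above: a request that ran for 550 ns takes
+ * the first branch and increments StatTaskRunTimesNs[550 / 100], i.e. the
+ * 500-599 ns bucket; one that ran 750 us (750000 ns) takes the second branch
+ * and increments StatTaskRunTimesUs[7], the 700-799 us bucket.
+ */
+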
+
+/**
+ * Registers advanced statistics for the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The endpoint to register the advanced statistics for.
+ */
+static int pdmR3AsyncCompletionStatisticsRegister(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ int rc = VINF_SUCCESS;
+ PVM pVM = pEndpoint->pEpClass->pVM;
+
+ pEndpoint->tsIntervalStartMs = RTTimeMilliTS();
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesNs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesNs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Nanosecond resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/%d/TaskRun1Ns-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId, i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesUs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesUs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Microsecond resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/%d/TaskRun2MicroSec-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId, i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesMs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Milliseconds resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/%d/TaskRun3Ms-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId, i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesSec[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Second resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/%d/TaskRun4Sec-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId, i*10, i*10+10-1);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunOver100Sec, STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Tasks which ran more than 100sec",
+ "/PDM/AsyncCompletion/File/%s/%d/TaskRunSecGreater100Sec",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsPerSec, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Processed I/O operations per second",
+ "/PDM/AsyncCompletion/File/%s/%d/IoOpsPerSec",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsStarted, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Started I/O operations for this endpoint",
+ "/PDM/AsyncCompletion/File/%s/%d/IoOpsStarted",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsCompleted, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Completed I/O operations for this endpoint",
+ "/PDM/AsyncCompletion/File/%s/%d/IoOpsCompleted",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSizeSmaller512, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size smaller than 512 bytes",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSizeSmaller512",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize512To1K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 512 bytes and 1KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize512To1K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize1KTo2K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 1KB and 2KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize1KTo2K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize2KTo4K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 2KB and 4KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize2KTo4K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize4KTo8K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 4KB and 8KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize4KTo8K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize8KTo16K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 8KB and 16KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize8KTo16K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize16KTo32K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 16KB and 32KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize16KTo32K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize32KTo64K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 32KB and 64KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize32KTo64K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize64KTo128K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 64KB and 128KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize64KTo128K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize128KTo256K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 128KB and 256KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize128KTo256K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize256KTo512K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 256KB and 512KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSize256KTo512K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSizeOver512K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size over 512KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqSizeOver512K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned512, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 512 bytes",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqsUnaligned512",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned4K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 4KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqsUnaligned4K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned8K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 8KB",
+ "/PDM/AsyncCompletion/File/%s/%d/ReqsUnaligned8K",
+ RTPathFilename(pEndpoint->pszUri), pEndpoint->iStatId);
+
+ return rc;
+}
+
+
+/**
+ * Deregisters advanced statistics for one endpoint.
+ *
+ * @param pEndpoint The endpoint to deregister the advanced statistics for.
+ */
+static void pdmR3AsyncCompletionStatisticsDeregister(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ /* I hope this doesn't remove too much... */
+ STAMR3DeregisterF(pEndpoint->pEpClass->pVM->pUVM, "/PDM/AsyncCompletion/File/%s/*", RTPathFilename(pEndpoint->pszUri));
+}
+
+
+/**
+ * Initialize the async completion manager.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3AsyncCompletionInit(PVM pVM)
+{
+ LogFlowFunc((": pVM=%p\n", pVM));
+
+ VM_ASSERT_EMT(pVM);
+
+ PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
+ PCFGMNODE pCfgAsyncCompletion = CFGMR3GetChild(CFGMR3GetChild(pCfgRoot, "PDM"), "AsyncCompletion");
+
+ int rc = pdmR3AsyncCompletionEpClassInit(pVM, &g_PDMAsyncCompletionEndpointClassFile, pCfgAsyncCompletion);
+ LogFlowFunc((": pVM=%p rc=%Rrc\n", pVM, rc));
+ return rc;
+}
+
+
+/**
+ * Terminates the async completion manager.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3AsyncCompletionTerm(PVM pVM)
+{
+ LogFlowFunc((": pVM=%p\n", pVM));
+ PUVM pUVM = pVM->pUVM;
+
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->pdm.s.apAsyncCompletionEndpointClass); i++)
+ if (pUVM->pdm.s.apAsyncCompletionEndpointClass[i])
+ pdmR3AsyncCompletionEpClassTerminate(pUVM->pdm.s.apAsyncCompletionEndpointClass[i]);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resume worker for the async completion manager.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pdmR3AsyncCompletionResume(PVM pVM)
+{
+ LogFlowFunc((": pVM=%p\n", pVM));
+ PUVM pUVM = pVM->pUVM;
+
+ /* Log the bandwidth groups and all assigned endpoints. */
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->pdm.s.apAsyncCompletionEndpointClass); i++)
+ if (pUVM->pdm.s.apAsyncCompletionEndpointClass[i])
+ {
+ PPDMASYNCCOMPLETIONEPCLASS pEpClass = pUVM->pdm.s.apAsyncCompletionEndpointClass[i];
+ PPDMACBWMGR pBwMgr = pEpClass->pBwMgrsHead;
+ PPDMASYNCCOMPLETIONENDPOINT pEp;
+
+ if (pBwMgr)
+ LogRel(("AIOMgr: Bandwidth groups for class '%s'\n", i == PDMASYNCCOMPLETIONEPCLASSTYPE_FILE
+ ? "File" : "<Unknown>"));
+
+ while (pBwMgr)
+ {
+ LogRel(("AIOMgr: Id: %s\n", pBwMgr->pszId));
+ LogRel(("AIOMgr: Max: %u B/s\n", pBwMgr->cbTransferPerSecMax));
+ LogRel(("AIOMgr: Start: %u B/s\n", pBwMgr->cbTransferPerSecStart));
+ LogRel(("AIOMgr: Step: %u B/s\n", pBwMgr->cbTransferPerSecStep));
+ LogRel(("AIOMgr: Endpoints:\n"));
+
+ pEp = pEpClass->pEndpointsHead;
+ while (pEp)
+ {
+ if (pEp->pBwMgr == pBwMgr)
+ LogRel(("AIOMgr: %s\n", pEp->pszUri));
+
+ pEp = pEp->pNext;
+ }
+
+ pBwMgr = pBwMgr->pNext;
+ }
+
+ /* Print all endpoints without assigned bandwidth groups. */
+ pEp = pEpClass->pEndpointsHead;
+ if (pEp)
+ LogRel(("AIOMgr: Endpoints without assigned bandwidth groups:\n"));
+
+ while (pEp)
+ {
+ if (!pEp->pBwMgr)
+ LogRel(("AIOMgr: %s\n", pEp->pszUri));
+
+ pEp = pEp->pNext;
+ }
+ }
+}
+
+
+/**
+ * Tries to get a free task from the endpoint or class cache,
+ * allocating the task if it fails.
+ *
+ * @returns Pointer to a new and initialized task or NULL
+ * @param pEndpoint The endpoint the task is for.
+ * @param pvUser Opaque user data for the task.
+ */
+static PPDMASYNCCOMPLETIONTASK pdmR3AsyncCompletionGetTask(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, void *pvUser)
+{
+ PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = pEndpoint->pEpClass;
+ PPDMASYNCCOMPLETIONTASK pTask = (PPDMASYNCCOMPLETIONTASK)RTMemCacheAlloc(pEndpointClass->hMemCacheTasks);
+ if (RT_LIKELY(pTask))
+ {
+ /* Initialize common parts. */
+ pTask->pvUser = pvUser;
+ pTask->pEndpoint = pEndpoint;
+ /* Clear list pointers for safety. */
+ pTask->pPrev = NULL;
+ pTask->pNext = NULL;
+ pTask->tsNsStart = RTTimeNanoTS();
+ STAM_REL_COUNTER_INC(&pEndpoint->StatIoOpsStarted);
+ }
+
+ return pTask;
+}
+
+
+/**
+ * Puts a task in one of the caches.
+ *
+ * @param pEndpoint The endpoint the task belongs to.
+ * @param pTask The task to cache.
+ */
+static void pdmR3AsyncCompletionPutTask(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, PPDMASYNCCOMPLETIONTASK pTask)
+{
+ PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = pEndpoint->pEpClass;
+ uint64_t cNsRun = RTTimeNanoTS() - pTask->tsNsStart;
+
+ if (RT_UNLIKELY(cNsRun >= RT_NS_10SEC))
+ LogRel(("AsyncCompletion: Task %#p completed after %llu seconds\n", pTask, cNsRun / RT_NS_1SEC));
+
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordCompletionTime(pEndpoint, cNsRun);
+
+ RTMemCacheFree(pEndpointClass->hMemCacheTasks, pTask);
+}
+
+
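+/**
+ * Computes the next unused statistics id for the given URI so that multiple
+ * endpoints opened for the same file name get distinct statistics paths.
+ */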
+static unsigned
+pdmR3AsyncCompletionGetStatId(PPDMASYNCCOMPLETIONEPCLASS pEndpointClass, const char *pszUri)
+{
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint = pEndpointClass->pEndpointsHead;
+ const char *pszFilename = RTPathFilename(pszUri);
+ unsigned iStatId = 0;
+
+ while (pEndpoint)
+ {
+ if ( !RTStrCmp(RTPathFilename(pEndpoint->pszUri), pszFilename)
+ && pEndpoint->iStatId >= iStatId)
+ iStatId = pEndpoint->iStatId + 1;
+
+ pEndpoint = pEndpoint->pNext;
+ }
+
+ return iStatId;
+}
+
+/**
+ * Opens a file as an async completion endpoint.
+ *
+ * @returns VBox status code.
+ * @param ppEndpoint Where to store the opaque endpoint handle on success.
+ * @param pszFilename Path to the file which is to be opened. (UTF-8)
+ * @param fFlags Open flags, see grp_pdmacep_file_flags.
+ * @param pTemplate Handle to the completion callback template to use
+ * for this end point.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpCreateForFile(PPPDMASYNCCOMPLETIONENDPOINT ppEndpoint,
+ const char *pszFilename, uint32_t fFlags,
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate)
+{
+ LogFlowFunc((": ppEndpoint=%p pszFilename=%p{%s} fFlags=%u pTemplate=%p\n",
+ ppEndpoint, pszFilename, pszFilename, fFlags, pTemplate));
+
+ /* Sanity checks. */
+ AssertPtrReturn(ppEndpoint, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
+ AssertPtrReturn(pTemplate, VERR_INVALID_POINTER);
+
+ /* Check that the flags are valid. */
+ AssertReturn(((~(PDMACEP_FILE_FLAGS_READ_ONLY | PDMACEP_FILE_FLAGS_DONT_LOCK | PDMACEP_FILE_FLAGS_HOST_CACHE_ENABLED) & fFlags) == 0),
+ VERR_INVALID_PARAMETER);
+
+ PVM pVM = pTemplate->pVM;
+ PUVM pUVM = pVM->pUVM;
+ PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint = NULL;
+
+ AssertMsg(pEndpointClass, ("File endpoint class was not initialized\n"));
+
+ /* Create an endpoint. */
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+ pEndpointClass->pEndpointOps->cbEndpoint,
+ (void **)&pEndpoint);
+ if (RT_SUCCESS(rc))
+ {
+ /* Initialize common parts. */
+ pEndpoint->pNext = NULL;
+ pEndpoint->pPrev = NULL;
+ pEndpoint->pEpClass = pEndpointClass;
+ pEndpoint->pTemplate = pTemplate;
+ pEndpoint->pszUri = RTStrDup(pszFilename);
+ pEndpoint->iStatId = pdmR3AsyncCompletionGetStatId(pEndpointClass, pszFilename);
+ pEndpoint->pBwMgr = NULL;
+
+ if ( pEndpoint->pszUri
+ && RT_SUCCESS(rc))
+ {
+ /* Call the initializer for the endpoint. */
+ rc = pEndpointClass->pEndpointOps->pfnEpInitialize(pEndpoint, pszFilename, fFlags);
+ if (RT_SUCCESS(rc))
+ {
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ rc = pdmR3AsyncCompletionStatisticsRegister(pEndpoint);
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Link it into the list of endpoints. */
+ rc = RTCritSectEnter(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
+
+ pEndpoint->pNext = pEndpointClass->pEndpointsHead;
+ if (pEndpointClass->pEndpointsHead)
+ pEndpointClass->pEndpointsHead->pPrev = pEndpoint;
+
+ pEndpointClass->pEndpointsHead = pEndpoint;
+ pEndpointClass->cEndpoints++;
+
+ rc = RTCritSectLeave(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to leave critical section rc=%Rrc\n", rc));
+
+ /* Reference the template. */
+ ASMAtomicIncU32(&pTemplate->cUsed);
+
+ *ppEndpoint = pEndpoint;
+ LogFlowFunc((": Created endpoint for %s\n", pszFilename));
+ return VINF_SUCCESS;
+ }
+ else
+ pEndpointClass->pEndpointOps->pfnEpClose(pEndpoint);
+
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsDeregister(pEndpoint);
+ }
+ RTStrFree(pEndpoint->pszUri);
+ }
+ MMR3HeapFree(pEndpoint);
+ }
+
+ LogFlowFunc((": Creation of endpoint for %s failed: rc=%Rrc\n", pszFilename, rc));
+ return rc;
+}
+
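+/*
+ * Minimal end-to-end usage sketch (illustrative only; the callback, function
+ * and file names below are made up for the example and are not part of the
+ * API):
+ */
+#if 0
+/** Example internal completion callback (FNPDMASYNCCOMPLETEINT shape). */
+static DECLCALLBACK(void) exampleTaskCompleted(PVM pVM, void *pvUser, void *pvUser2, int rcReq)
+{
+ RT_NOREF(pVM, pvUser2);
+ LogRel(("example: request %p completed with %Rrc\n", pvUser, rcReq));
+}
+
+/** Example: read the first sector of a file through the async completion API. */
+static int exampleReadFirstSector(PVM pVM)
+{
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ int rc = PDMR3AsyncCompletionTemplateCreateInternal(pVM, &pTemplate, exampleTaskCompleted,
+ NULL /*pvUser2*/, "example");
+ if (RT_SUCCESS(rc))
+ {
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint;
+ rc = PDMR3AsyncCompletionEpCreateForFile(&pEndpoint, "/tmp/example.img",
+ PDMACEP_FILE_FLAGS_READ_ONLY, pTemplate);
+ if (RT_SUCCESS(rc))
+ {
+ static uint8_t s_abSector[512];
+ RTSGSEG Seg = { s_abSector, sizeof(s_abSector) };
+ PPDMASYNCCOMPLETIONTASK pTask;
+ rc = PDMR3AsyncCompletionEpRead(pEndpoint, 0 /*off*/, &Seg, 1 /*cSegments*/,
+ sizeof(s_abSector), NULL /*pvUser*/, &pTask);
+ /* The callback fires once the read finishes; EpClose below waits
+ for still pending tasks before tearing the endpoint down. */
+ PDMR3AsyncCompletionEpClose(pEndpoint);
+ }
+ PDMR3AsyncCompletionTemplateDestroy(pTemplate);
+ }
+ return rc;
+}
+#endif
+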
+
+/**
+ * Closes an endpoint, waiting for any pending tasks to finish.
+ *
+ * @param pEndpoint Handle of the endpoint.
+ */
+VMMR3DECL(void) PDMR3AsyncCompletionEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ LogFlowFunc((": pEndpoint=%p\n", pEndpoint));
+
+ /* Sanity checks. */
+ AssertReturnVoid(RT_VALID_PTR(pEndpoint));
+
+ PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = pEndpoint->pEpClass;
+ pEndpointClass->pEndpointOps->pfnEpClose(pEndpoint);
+
+ /* Drop reference from the template. */
+ ASMAtomicDecU32(&pEndpoint->pTemplate->cUsed);
+
+ /* Unlink the endpoint from the list. */
+ int rc = RTCritSectEnter(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
+
+ PPDMASYNCCOMPLETIONENDPOINT pEndpointNext = pEndpoint->pNext;
+ PPDMASYNCCOMPLETIONENDPOINT pEndpointPrev = pEndpoint->pPrev;
+
+ if (pEndpointPrev)
+ pEndpointPrev->pNext = pEndpointNext;
+ else
+ pEndpointClass->pEndpointsHead = pEndpointNext;
+ if (pEndpointNext)
+ pEndpointNext->pPrev = pEndpointPrev;
+
+ pEndpointClass->cEndpoints--;
+
+ rc = RTCritSectLeave(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to leave critical section rc=%Rrc\n", rc));
+
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsDeregister(pEndpoint);
+
+ RTStrFree(pEndpoint->pszUri);
+ MMR3HeapFree(pEndpoint);
+}
+
+
+/**
+ * Creates a read task on the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to read from.
+ * @param off Where to start reading from.
+ * @param paSegments Scatter gather list to store the data in.
+ * @param cSegments Number of segments in the list.
+ * @param cbRead The overall number of bytes to read.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpRead(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, unsigned cSegments,
+ size_t cbRead, void *pvUser,
+ PPPDMASYNCCOMPLETIONTASK ppTask)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+ AssertPtrReturn(paSegments, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTask, VERR_INVALID_POINTER);
+ AssertReturn(cSegments > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(cbRead > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(off >= 0, VERR_INVALID_PARAMETER);
+
+ PPDMASYNCCOMPLETIONTASK pTask;
+
+ pTask = pdmR3AsyncCompletionGetTask(pEndpoint, pvUser);
+ if (!pTask)
+ return VERR_NO_MEMORY;
+
+ int rc = pEndpoint->pEpClass->pEndpointOps->pfnEpRead(pTask, pEndpoint, off,
+ paSegments, cSegments, cbRead);
+ if (RT_SUCCESS(rc))
+ {
+ if (pEndpoint->pEpClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordSize(pEndpoint, cbRead);
+
+ *ppTask = pTask;
+ }
+ else
+ pdmR3AsyncCompletionPutTask(pEndpoint, pTask);
+
+ return rc;
+}
+
+
+/**
+ * Creates a write task on the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to write to.
+ * @param off Where to start writing at.
+ * @param paSegments Scatter gather list of the data to write.
+ * @param cSegments Number of segments in the list.
+ * @param cbWrite The overall number of bytes to write.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpWrite(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, unsigned cSegments,
+ size_t cbWrite, void *pvUser,
+ PPPDMASYNCCOMPLETIONTASK ppTask)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+ AssertPtrReturn(paSegments, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTask, VERR_INVALID_POINTER);
+ AssertReturn(cSegments > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(cbWrite > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(off >= 0, VERR_INVALID_PARAMETER);
+
+ PPDMASYNCCOMPLETIONTASK pTask;
+
+ pTask = pdmR3AsyncCompletionGetTask(pEndpoint, pvUser);
+ if (!pTask)
+ return VERR_NO_MEMORY;
+
+ int rc = pEndpoint->pEpClass->pEndpointOps->pfnEpWrite(pTask, pEndpoint, off,
+ paSegments, cSegments, cbWrite);
+ if (RT_SUCCESS(rc))
+ {
+ if (pEndpoint->pEpClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordSize(pEndpoint, cbWrite);
+
+ *ppTask = pTask;
+ }
+ else
+ pdmR3AsyncCompletionPutTask(pEndpoint, pTask);
+
+ return rc;
+}
+
+
+/**
+ * Creates a flush task on the given endpoint.
+ *
+ * All read and write tasks initiated before the flush task are guaranteed
+ * to have finished when this task completes.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to flush.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpFlush(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, void *pvUser, PPPDMASYNCCOMPLETIONTASK ppTask)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+ AssertPtrReturn(ppTask, VERR_INVALID_POINTER);
+
+ PPDMASYNCCOMPLETIONTASK pTask;
+
+ pTask = pdmR3AsyncCompletionGetTask(pEndpoint, pvUser);
+ if (!pTask)
+ return VERR_NO_MEMORY;
+
+ int rc = pEndpoint->pEpClass->pEndpointOps->pfnEpFlush(pTask, pEndpoint);
+ if (RT_SUCCESS(rc))
+ *ppTask = pTask;
+ else
+ pdmR3AsyncCompletionPutTask(pEndpoint, pTask);
+
+ return rc;
+}
+
+
+/**
+ * Queries the size of an endpoint.
+ *
+ * Note that some endpoints may not support this and will return an error
+ * (sockets for example).
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_SUPPORTED if the endpoint does not support this operation.
+ * @param pEndpoint The file endpoint.
+ * @param pcbSize Where to store the size of the endpoint.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpGetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+ uint64_t *pcbSize)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+ AssertPtrReturn(pcbSize, VERR_INVALID_POINTER);
+
+ if (pEndpoint->pEpClass->pEndpointOps->pfnEpGetSize)
+ return pEndpoint->pEpClass->pEndpointOps->pfnEpGetSize(pEndpoint, pcbSize);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Sets the size of an endpoint.
+ *
+ * Note that some endpoints may not support this and will return an error
+ * (sockets for example).
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_SUPPORTED if the endpoint does not support this operation.
+ * @param pEndpoint The file endpoint.
+ * @param cbSize The size to set.
+ *
+ * @note PDMR3AsyncCompletionEpFlush should be called before this operation is executed.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpSetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t cbSize)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+
+ if (pEndpoint->pEpClass->pEndpointOps->pfnEpSetSize)
+ return pEndpoint->pEpClass->pEndpointOps->pfnEpSetSize(pEndpoint, cbSize);
+ return VERR_NOT_SUPPORTED;
+}
+
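+/*
+ * Usage sketch for the note above (illustrative; cbSizeNew stands in for the
+ * new size): flush first so no request is in flight, then resize:
+ *
+ * PPDMASYNCCOMPLETIONTASK pTask;
+ * int rc = PDMR3AsyncCompletionEpFlush(pEndpoint, NULL, &pTask);
+ * // ... wait for the flush task to complete ...
+ * if (RT_SUCCESS(rc))
+ * rc = PDMR3AsyncCompletionEpSetSize(pEndpoint, cbSizeNew);
+ */
+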
+
+/**
+ * Assigns or removes a bandwidth control manager to/from the endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The endpoint.
+ * @param pszBwMgr The identifier of the new bandwidth manager to assign
+ * or NULL to remove the current one.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpSetBwMgr(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, const char *pszBwMgr)
+{
+ AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
+ PPDMACBWMGR pBwMgrOld = NULL;
+ PPDMACBWMGR pBwMgrNew = NULL;
+
+ int rc = VINF_SUCCESS;
+ if (pszBwMgr)
+ {
+ pBwMgrNew = pdmacBwMgrFindById(pEndpoint->pEpClass, pszBwMgr);
+ if (pBwMgrNew)
+ pdmacBwMgrRetain(pBwMgrNew);
+ else
+ rc = VERR_NOT_FOUND;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ pBwMgrOld = ASMAtomicXchgPtrT(&pEndpoint->pBwMgr, pBwMgrNew, PPDMACBWMGR);
+ if (pBwMgrOld)
+ pdmacBwMgrRelease(pBwMgrOld);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Cancels an async completion task.
+ *
+ * If you want to use this method, you have to take great care to make sure
+ * you never attempt to cancel a task which has already completed. Since there
+ * is no reference counting or anything on the task itself, you have to
+ * serialize the cancellation and completion paths such that they aren't
+ * racing one another.
+ *
+ * @returns VBox status code
+ * @param pTask The Task to cancel.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionTaskCancel(PPDMASYNCCOMPLETIONTASK pTask)
+{
+ NOREF(pTask);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Changes the limit of a bandwidth manager for file endpoints to the given value.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszBwMgr The identifier of the bandwidth manager to change.
+ * @param cbMaxNew The new maximum for the bandwidth manager in bytes/sec.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionBwMgrSetMaxForFile(PUVM pUVM, const char *pszBwMgr, uint32_t cbMaxNew)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszBwMgr, VERR_INVALID_POINTER);
+
+ int rc = VINF_SUCCESS;
+ PPDMASYNCCOMPLETIONEPCLASS pEpClass = pVM->pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+ PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pszBwMgr);
+ if (pBwMgr)
+ {
+ /*
+ * Set the new value for the start and max value to let the manager pick up
+ * the new limit immediately.
+ */
+ ASMAtomicWriteU32(&pBwMgr->cbTransferPerSecMax, cbMaxNew);
+ ASMAtomicWriteU32(&pBwMgr->cbTransferPerSecStart, cbMaxNew);
+ }
+ else
+ rc = VERR_NOT_FOUND;
+
+ return rc;
+}
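+
+/*
+ * Illustrative call (assuming a bandwidth group "Disks" was configured for
+ * the VM): cap all file endpoints in that group at 5 MiB/s at runtime:
+ *
+ * int rc = PDMR3AsyncCompletionBwMgrSetMaxForFile(pUVM, "Disks", 5 * _1M);
+ */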
+
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
new file mode 100644
index 00000000..118eced4
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
@@ -0,0 +1,1305 @@
+/* $Id: PDMAsyncCompletionFile.cpp $ */
+/** @file
+ * PDM Async I/O - Transport data asynchronously in R3 using EMT.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/dbg.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/tm.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/critsect.h>
+#include <iprt/env.h>
+#include <iprt/file.h>
+#include <iprt/mem.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/path.h>
+#include <iprt/rand.h>
+
+#include "PDMAsyncCompletionFileInternal.h"
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_DEBUGGER
+static FNDBGCCMD pdmacEpFileErrorInject;
+# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+static FNDBGCCMD pdmacEpFileDelayInject;
+# endif
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_DEBUGGER
+static const DBGCVARDESC g_aInjectErrorArgs[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "direction", "write/read." },
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "filename", "Filename." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "errcode", "VBox status code." },
+};
+
+# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+static const DBGCVARDESC g_aInjectDelayArgs[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "direction", "write|read|flush|any." },
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "filename", "Filename." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "delay", "Delay in milliseconds." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "jitter", "Jitter of the delay." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "reqs", "Number of requests to delay." }
+};
+# endif
+
+/** Command descriptors. */
+static const DBGCCMD g_aCmds[] =
+{
+    /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
+ { "injecterror", 3, 3, &g_aInjectErrorArgs[0], 3, 0, pdmacEpFileErrorInject, "", "Inject error into I/O subsystem." }
+# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ ,{ "injectdelay", 3, 5, &g_aInjectDelayArgs[0], RT_ELEMENTS(g_aInjectDelayArgs), 0, pdmacEpFileDelayInject, "", "Inject a delay of a request." }
+# endif
+};
+#endif
+
+
+/**
+ * Frees a task.
+ *
+ * @param pEndpoint Pointer to the endpoint the segment was for.
+ * @param pTask The task to free.
+ */
+void pdmacFileTaskFree(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
+{
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+
+ LogFlowFunc((": pEndpoint=%p pTask=%p\n", pEndpoint, pTask));
+
+ /* Try the per endpoint cache first. */
+ if (pEndpoint->cTasksCached < pEpClass->cTasksCacheMax)
+ {
+ /* Add it to the list. */
+ pEndpoint->pTasksFreeTail->pNext = pTask;
+ pEndpoint->pTasksFreeTail = pTask;
+ ASMAtomicIncU32(&pEndpoint->cTasksCached);
+ }
+ else
+ {
+ Log(("Freeing task %p because all caches are full\n", pTask));
+ MMR3HeapFree(pTask);
+ }
+}
+
+/**
+ * Allocates a task segment.
+ *
+ * @returns Pointer to the new task segment or NULL if out of memory.
+ * @param pEndpoint Pointer to the endpoint.
+ */
+PPDMACTASKFILE pdmacFileTaskAlloc(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ PPDMACTASKFILE pTask = NULL;
+
+ /* Try the small per endpoint cache first. */
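+    /* Note: the free list always keeps one sentinel element around (see the
+     * endpoint initialization), so head == tail means the cache is empty. */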
+ if (pEndpoint->pTasksFreeHead == pEndpoint->pTasksFreeTail)
+ {
+        /* The per-endpoint cache is empty; fall back to the heap. */
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEndpointClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+
+ /*
+ * Allocate completely new.
+ * If this fails we return NULL.
+ */
+ int rc = MMR3HeapAllocZEx(pEndpointClass->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+ sizeof(PDMACTASKFILE),
+ (void **)&pTask);
+ if (RT_FAILURE(rc))
+ pTask = NULL;
+
+ LogFlow(("Allocated task %p -> %Rrc\n", pTask, rc));
+ }
+ else
+ {
+ /* Grab a free task from the head. */
+ AssertMsg(pEndpoint->cTasksCached > 0, ("No tasks cached but list contains more than one element\n"));
+
+ pTask = pEndpoint->pTasksFreeHead;
+ pEndpoint->pTasksFreeHead = pTask->pNext;
+ ASMAtomicDecU32(&pEndpoint->cTasksCached);
+ pTask->pNext = NULL;
+ }
+
+ return pTask;
+}
+
+PPDMACTASKFILE pdmacFileEpGetNewTasks(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ /*
+ * Get pending tasks.
+ */
+ PPDMACTASKFILE pTasks = ASMAtomicXchgPtrT(&pEndpoint->pTasksNewHead, NULL, PPDMACTASKFILE);
+
+ /* Reverse the list to process in FIFO order. */
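+    /* pdmacFileEpAddTask pushes new tasks onto the head with a CAS, so the
+     * list we just grabbed is in LIFO order. */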
+ if (pTasks)
+ {
+ PPDMACTASKFILE pTask = pTasks;
+
+ pTasks = NULL;
+
+ while (pTask)
+ {
+ PPDMACTASKFILE pCur = pTask;
+ pTask = pTask->pNext;
+ pCur->pNext = pTasks;
+ pTasks = pCur;
+ }
+ }
+
+ return pTasks;
+}
+
+static void pdmacFileAioMgrWakeup(PPDMACEPFILEMGR pAioMgr)
+{
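+    /* Coalesce wakeups: only the first caller after the manager clears
+     * fWokenUp does any work, and the event semaphore is signalled only if
+     * the manager is actually blocked on it. */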
+ bool fWokenUp = ASMAtomicXchgBool(&pAioMgr->fWokenUp, true);
+ if (!fWokenUp)
+ {
+ bool fWaitingEventSem = ASMAtomicReadBool(&pAioMgr->fWaitingEventSem);
+ if (fWaitingEventSem)
+ {
+ int rc = RTSemEventSignal(pAioMgr->EventSem);
+ AssertRC(rc);
+ }
+ }
+}
+
+static int pdmacFileAioMgrWaitForBlockingEvent(PPDMACEPFILEMGR pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT enmEvent)
+{
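+    /* Publish the event type and the pending flag, wake the manager up, and
+     * block on EventSemBlock until it has processed the event. */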
+ ASMAtomicWriteU32((volatile uint32_t *)&pAioMgr->enmBlockingEvent, enmEvent);
+ Assert(!pAioMgr->fBlockingEventPending);
+ ASMAtomicXchgBool(&pAioMgr->fBlockingEventPending, true);
+
+    /* Wake up the async I/O manager. */
+ pdmacFileAioMgrWakeup(pAioMgr);
+
+ /* Wait for completion. */
+ int rc = RTSemEventWait(pAioMgr->EventSemBlock, RT_INDEFINITE_WAIT);
+ AssertRC(rc);
+
+ ASMAtomicXchgBool(&pAioMgr->fBlockingEventPending, false);
+ ASMAtomicWriteU32((volatile uint32_t *)&pAioMgr->enmBlockingEvent, PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID);
+
+ return rc;
+}
+
+int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p{%s}\n", pAioMgr, pEndpoint, pEndpoint->Core.pszUri));
+
+ /* Update the assigned I/O manager. */
+ ASMAtomicWritePtr(&pEndpoint->pAioMgr, pAioMgr);
+
+ int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
+ AssertRCReturn(rc, rc);
+
+ ASMAtomicWritePtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, pEndpoint);
+ rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT);
+ ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);
+
+ RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
+
+ return rc;
+}
+
+#ifdef SOME_UNUSED_FUNCTION
+static int pdmacFileAioMgrRemoveEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
+ AssertRCReturn(rc, rc);
+
+ ASMAtomicWritePtr(&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint, pEndpoint);
+ rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT);
+ ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint);
+
+ RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
+
+ return rc;
+}
+#endif
+
+static int pdmacFileAioMgrCloseEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
+ AssertRCReturn(rc, rc);
+
+ ASMAtomicWritePtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, pEndpoint);
+ rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT);
+ ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);
+
+ RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
+
+ return rc;
+}
+
+static int pdmacFileAioMgrShutdown(PPDMACEPFILEMGR pAioMgr)
+{
+ int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
+ AssertRCReturn(rc, rc);
+
+ rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN);
+
+ RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
+
+ return rc;
+}
+
+int pdmacFileEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
+{
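+    /* Lock-free push onto the endpoint's new-task list: retry the CAS until
+     * no other producer has raced us on the head pointer. */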
+ PPDMACTASKFILE pNext;
+ do
+ {
+ pNext = pEndpoint->pTasksNewHead;
+ pTask->pNext = pNext;
+ } while (!ASMAtomicCmpXchgPtr(&pEndpoint->pTasksNewHead, pTask, pNext));
+
+ pdmacFileAioMgrWakeup(ASMAtomicReadPtrT(&pEndpoint->pAioMgr, PPDMACEPFILEMGR));
+
+ return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(void) pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
+{
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;
+
+ LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc));
+
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
+ pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true);
+ else
+ {
+ Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0);
+ uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg);
+
+ /* The first error will be returned. */
+ if (RT_FAILURE(rc))
+ ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
+#ifdef VBOX_WITH_DEBUGGER
+ else
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;
+
+ /* Overwrite with injected error code. */
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
+ rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS);
+ else
+ rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS);
+
+ if (RT_FAILURE(rc))
+ ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
+ }
+#endif
+
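+        /* uOld holds the counter value before our subtraction, so this was
+         * the last outstanding segment iff uOld - cbSeg is zero; the xchg on
+         * fCompleted guards against completing the request twice. */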
+ if (!(uOld - pTask->DataSeg.cbSeg)
+ && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
+ {
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEpFile->Core.pEpClass;
+
+ /* Check if we should delay completion of the request. */
+ if ( ASMAtomicReadU32(&pEpFile->msDelay) > 0
+ && ASMAtomicReadU32(&pEpFile->cReqsDelay) > 0)
+ {
+ uint64_t tsDelay = pEpFile->msDelay;
+
+ if (pEpFile->msJitter)
+ tsDelay = (RTRandU32() % 100) > 50 ? pEpFile->msDelay + (RTRandU32() % pEpFile->msJitter)
+ : pEpFile->msDelay - (RTRandU32() % pEpFile->msJitter);
+ ASMAtomicDecU32(&pEpFile->cReqsDelay);
+
+ /* Arm the delay. */
+ pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + tsDelay;
+
+ /* Append to the list. */
+ PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
+ do
+ {
+ pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
+ pTaskFile->pDelayedNext = pHead;
+ } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTaskFile, pHead));
+
+ if (tsDelay < pEpClassFile->cMilliesNext)
+ {
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, tsDelay);
+                TMTimerSetMillies(pEpClassFile->Core.pVM, pEpClassFile->hTimer, tsDelay);
+ }
+
+ LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, tsDelay));
+ }
+ else
+#endif
+ pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
+ }
+ }
+}
+
+DECLINLINE(void) pdmacFileEpTaskInit(PPDMASYNCCOMPLETIONTASK pTask, size_t cbTransfer)
+{
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pTask;
+
+ Assert((uint32_t)cbTransfer == cbTransfer && (int32_t)cbTransfer >= 0);
+ ASMAtomicWriteS32(&pTaskFile->cbTransferLeft, (int32_t)cbTransfer);
+ ASMAtomicWriteBool(&pTaskFile->fCompleted, false);
+ ASMAtomicWriteS32(&pTaskFile->rc, VINF_SUCCESS);
+}
+
+int pdmacFileEpTaskInitiate(PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbTransfer, PDMACTASKFILETRANSFER enmTransfer)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pTask;
+
+ Assert( (enmTransfer == PDMACTASKFILETRANSFER_READ)
+ || (enmTransfer == PDMACTASKFILETRANSFER_WRITE));
+
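+    /* Split the request into one I/O task per scatter/gather segment and
+     * hand each off to the assigned I/O manager. */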
+ for (size_t i = 0; i < cSegments; i++)
+ {
+ PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEpFile);
+ AssertPtr(pIoTask);
+
+ pIoTask->pEndpoint = pEpFile;
+ pIoTask->enmTransferType = enmTransfer;
+ pIoTask->Off = off;
+ pIoTask->DataSeg.cbSeg = paSegments[i].cbSeg;
+ pIoTask->DataSeg.pvSeg = paSegments[i].pvSeg;
+ pIoTask->pvUser = pTaskFile;
+ pIoTask->pfnCompleted = pdmacFileEpTaskCompleted;
+
+ /* Send it off to the I/O manager. */
+ pdmacFileEpAddTask(pEpFile, pIoTask);
+ off += paSegments[i].cbSeg;
+ cbTransfer -= paSegments[i].cbSeg;
+ }
+
+ AssertMsg(!cbTransfer, ("Incomplete transfer %u bytes left\n", cbTransfer));
+
+ return VINF_AIO_TASK_PENDING;
+}
+
+/**
+ * Creates a new async I/O manager.
+ *
+ * @returns VBox status code.
+ * @param pEpClass Pointer to the endpoint class data.
+ * @param ppAioMgr Where to store the pointer to the new async I/O manager on success.
+ * @param enmMgrType Wanted manager type - can be overwritten by the global override.
+ */
+int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr,
+ PDMACEPFILEMGRTYPE enmMgrType)
+{
+ LogFlowFunc((": Entered\n"));
+
+ PPDMACEPFILEMGR pAioMgrNew;
+ int rc = MMR3HeapAllocZEx(pEpClass->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION, sizeof(PDMACEPFILEMGR), (void **)&pAioMgrNew);
+ if (RT_SUCCESS(rc))
+ {
+ if (enmMgrType < pEpClass->enmMgrTypeOverride)
+ pAioMgrNew->enmMgrType = enmMgrType;
+ else
+ pAioMgrNew->enmMgrType = pEpClass->enmMgrTypeOverride;
+
+ pAioMgrNew->msBwLimitExpired = RT_INDEFINITE_WAIT;
+
+ rc = RTSemEventCreate(&pAioMgrNew->EventSem);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemEventCreate(&pAioMgrNew->EventSemBlock);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTCritSectInit(&pAioMgrNew->CritSectBlockingEvent);
+ if (RT_SUCCESS(rc))
+ {
+ /* Init the rest of the manager. */
+ if (pAioMgrNew->enmMgrType != PDMACEPFILEMGRTYPE_SIMPLE)
+ rc = pdmacFileAioMgrNormalInit(pAioMgrNew);
+
+ if (RT_SUCCESS(rc))
+ {
+ pAioMgrNew->enmState = PDMACEPFILEMGRSTATE_RUNNING;
+
+ rc = RTThreadCreateF(&pAioMgrNew->Thread,
+ pAioMgrNew->enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE
+ ? pdmacFileAioMgrFailsafe
+ : pdmacFileAioMgrNormal,
+ pAioMgrNew,
+ 0,
+ RTTHREADTYPE_IO,
+ 0,
+ "AioMgr%d-%s", pEpClass->cAioMgrs,
+ pAioMgrNew->enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE
+ ? "F"
+ : "N");
+ if (RT_SUCCESS(rc))
+ {
+ /* Link it into the list. */
+ RTCritSectEnter(&pEpClass->CritSect);
+ pAioMgrNew->pNext = pEpClass->pAioMgrHead;
+ if (pEpClass->pAioMgrHead)
+ pEpClass->pAioMgrHead->pPrev = pAioMgrNew;
+ pEpClass->pAioMgrHead = pAioMgrNew;
+ pEpClass->cAioMgrs++;
+ RTCritSectLeave(&pEpClass->CritSect);
+
+ *ppAioMgr = pAioMgrNew;
+
+ Log(("PDMAC: Successfully created new file AIO Mgr {%s}\n", RTThreadGetName(pAioMgrNew->Thread)));
+ return VINF_SUCCESS;
+ }
+ pdmacFileAioMgrNormalDestroy(pAioMgrNew);
+ }
+ RTCritSectDelete(&pAioMgrNew->CritSectBlockingEvent);
+ }
+ RTSemEventDestroy(pAioMgrNew->EventSem);
+ }
+ RTSemEventDestroy(pAioMgrNew->EventSemBlock);
+ }
+ MMR3HeapFree(pAioMgrNew);
+ }
+
+ LogFlowFunc((": Leave rc=%Rrc\n", rc));
+
+ return rc;
+}
+
+/**
+ * Destroys an async I/O manager.
+ *
+ * @param pEpClassFile Pointer to globals for the file endpoint class.
+ * @param pAioMgr The async I/O manager to destroy.
+ */
+static void pdmacFileAioMgrDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile, PPDMACEPFILEMGR pAioMgr)
+{
+ int rc = pdmacFileAioMgrShutdown(pAioMgr);
+ AssertRC(rc);
+
+ /* Unlink from the list. */
+ rc = RTCritSectEnter(&pEpClassFile->CritSect);
+ AssertRC(rc);
+
+ PPDMACEPFILEMGR pPrev = pAioMgr->pPrev;
+ PPDMACEPFILEMGR pNext = pAioMgr->pNext;
+
+ if (pPrev)
+ pPrev->pNext = pNext;
+ else
+ pEpClassFile->pAioMgrHead = pNext;
+
+ if (pNext)
+ pNext->pPrev = pPrev;
+
+ pEpClassFile->cAioMgrs--;
+ rc = RTCritSectLeave(&pEpClassFile->CritSect);
+ AssertRC(rc);
+
+ /* Free the resources. */
+ RTCritSectDelete(&pAioMgr->CritSectBlockingEvent);
+ RTSemEventDestroy(pAioMgr->EventSem);
+ RTSemEventDestroy(pAioMgr->EventSemBlock);
+ if (pAioMgr->enmMgrType != PDMACEPFILEMGRTYPE_SIMPLE)
+ pdmacFileAioMgrNormalDestroy(pAioMgr);
+
+ MMR3HeapFree(pAioMgr);
+}
+
+static int pdmacFileMgrTypeFromName(const char *pszVal, PPDMACEPFILEMGRTYPE penmMgrType)
+{
+ int rc = VINF_SUCCESS;
+
+ if (!RTStrCmp(pszVal, "Simple"))
+ *penmMgrType = PDMACEPFILEMGRTYPE_SIMPLE;
+ else if (!RTStrCmp(pszVal, "Async"))
+ *penmMgrType = PDMACEPFILEMGRTYPE_ASYNC;
+ else
+ rc = VERR_CFGM_CONFIG_UNKNOWN_VALUE;
+
+ return rc;
+}
+
+static const char *pdmacFileMgrTypeToName(PDMACEPFILEMGRTYPE enmMgrType)
+{
+ if (enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE)
+ return "Simple";
+ if (enmMgrType == PDMACEPFILEMGRTYPE_ASYNC)
+ return "Async";
+
+ return NULL;
+}
+
+static int pdmacFileBackendTypeFromName(const char *pszVal, PPDMACFILEEPBACKEND penmBackendType)
+{
+ int rc = VINF_SUCCESS;
+
+ if (!RTStrCmp(pszVal, "Buffered"))
+ *penmBackendType = PDMACFILEEPBACKEND_BUFFERED;
+ else if (!RTStrCmp(pszVal, "NonBuffered"))
+ *penmBackendType = PDMACFILEEPBACKEND_NON_BUFFERED;
+ else
+ rc = VERR_CFGM_CONFIG_UNKNOWN_VALUE;
+
+ return rc;
+}
+
+static const char *pdmacFileBackendTypeToName(PDMACFILEEPBACKEND enmBackendType)
+{
+ if (enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
+ return "Buffered";
+ if (enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
+ return "NonBuffered";
+
+ return NULL;
+}
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.injecterror' command.}
+ */
+static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR pArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs == 3);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, pArgs[0].enmType == DBGCVAR_TYPE_STRING);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, pArgs[1].enmType == DBGCVAR_TYPE_STRING);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 2, pArgs[2].enmType == DBGCVAR_TYPE_NUMBER);
+
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile;
+ pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+
+ /* Syntax is "read|write <filename> <status code>" */
+ bool fWrite;
+ if (!RTStrCmp(pArgs[0].u.pszString, "read"))
+ fWrite = false;
+ else if (!RTStrCmp(pArgs[0].u.pszString, "write"))
+ fWrite = true;
+ else
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "invalid transfer direction '%s'", pArgs[0].u.pszString);
+
+ int32_t rcToInject = (int32_t)pArgs[2].u.u64Number;
+ if ((uint64_t)rcToInject != pArgs[2].u.u64Number)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "The status code '%lld' is out of range", pArgs[0].u.u64Number);
+
+ /*
+ * Search for the matching endpoint.
+ */
+ RTCritSectEnter(&pEpClassFile->Core.CritSect);
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;
+ while (pEpFile)
+ {
+ if (!RTStrCmp(pArgs[1].u.pszString, RTPathFilename(pEpFile->Core.pszUri)))
+ break;
+ pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
+ }
+
+ if (pEpFile)
+ {
+ /*
+ * Do the job.
+ */
+ if (fWrite)
+ ASMAtomicXchgS32(&pEpFile->rcReqWrite, rcToInject);
+ else
+ ASMAtomicXchgS32(&pEpFile->rcReqRead, rcToInject);
+
+ DBGCCmdHlpPrintf(pCmdHlp, "Injected %Rrc into '%s' for %s\n",
+ (int)rcToInject, pArgs[1].u.pszString, pArgs[0].u.pszString);
+ }
+
+ RTCritSectLeave(&pEpClassFile->Core.CritSect);
+
+ if (!pEpFile)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "No file with name '%s' found", pArgs[1].u.pszString);
+ return VINF_SUCCESS;
+}
+
+# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.injectdelay' command.}
+ */
+static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR pArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs >= 3);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, pArgs[0].enmType == DBGCVAR_TYPE_STRING);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, pArgs[1].enmType == DBGCVAR_TYPE_STRING);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 2, pArgs[2].enmType == DBGCVAR_TYPE_NUMBER);
+
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile;
+ pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+
+ /* Syntax is "read|write|flush|any <filename> <delay> [reqs]" */
+ PDMACFILEREQTYPEDELAY enmDelayType = PDMACFILEREQTYPEDELAY_ANY;
+ if (!RTStrCmp(pArgs[0].u.pszString, "read"))
+ enmDelayType = PDMACFILEREQTYPEDELAY_READ;
+ else if (!RTStrCmp(pArgs[0].u.pszString, "write"))
+ enmDelayType = PDMACFILEREQTYPEDELAY_WRITE;
+ else if (!RTStrCmp(pArgs[0].u.pszString, "flush"))
+ enmDelayType = PDMACFILEREQTYPEDELAY_FLUSH;
+ else if (!RTStrCmp(pArgs[0].u.pszString, "any"))
+ enmDelayType = PDMACFILEREQTYPEDELAY_ANY;
+ else
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "invalid transfer direction '%s'", pArgs[0].u.pszString);
+
+ uint32_t msDelay = (uint32_t)pArgs[2].u.u64Number;
+ if ((uint64_t)msDelay != pArgs[2].u.u64Number)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "The delay '%lld' is out of range", pArgs[0].u.u64Number);
+
+ uint32_t cReqsDelay = 1;
+ uint32_t msJitter = 0;
+ if (cArgs >= 4)
+ msJitter = (uint32_t)pArgs[3].u.u64Number;
+ if (cArgs == 5)
+ cReqsDelay = (uint32_t)pArgs[4].u.u64Number;
+
+ /*
+ * Search for the matching endpoint.
+ */
+ RTCritSectEnter(&pEpClassFile->Core.CritSect);
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;
+ while (pEpFile)
+ {
+ if (!RTStrCmp(pArgs[1].u.pszString, RTPathFilename(pEpFile->Core.pszUri)))
+ break;
+ pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
+ }
+
+ if (pEpFile)
+ {
+ ASMAtomicWriteSize(&pEpFile->enmTypeDelay, enmDelayType);
+ ASMAtomicWriteU32(&pEpFile->msDelay, msDelay);
+ ASMAtomicWriteU32(&pEpFile->msJitter, msJitter);
+ ASMAtomicWriteU32(&pEpFile->cReqsDelay, cReqsDelay);
+
+ DBGCCmdHlpPrintf(pCmdHlp, "Injected delay for the next %u requests of %u ms into '%s' for %s\n",
+ cReqsDelay, msDelay, pArgs[1].u.pszString, pArgs[0].u.pszString);
+ }
+
+ RTCritSectLeave(&pEpClassFile->Core.CritSect);
+
+ if (!pEpFile)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "No file with name '%s' found", pArgs[1].u.pszString);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNTMTIMERINT}
+ */
+static DECLCALLBACK(void) pdmacR3TimerCallback(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;
+    Assert(hTimer == pEpClassFile->hTimer);
+    uint64_t tsCur = RTTimeProgramMilliTS();
+    uint64_t cMilliesNext = UINT64_MAX;
+
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, UINT64_MAX);
+
+ /* Go through all endpoints and check for expired requests. */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;
+
+ while (pEpFile)
+ {
+ /* Check for an expired delay. */
+ if (pEpFile->pDelayedHead != NULL)
+ {
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE);
+
+ while (pTaskFile)
+ {
+ PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile;
+ pTaskFile = pTaskFile->pDelayedNext;
+
+ if (tsCur >= pTmp->tsDelayEnd)
+ {
+ LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp));
+ pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true);
+ }
+ else
+ {
+ /* Prepend to the delayed list again. */
+ PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
+
+ if (pTmp->tsDelayEnd - tsCur < cMilliesNext)
+ cMilliesNext = pTmp->tsDelayEnd - tsCur;
+
+ do
+ {
+ pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
+ pTmp->pDelayedNext = pHead;
+ } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead));
+ }
+ }
+ }
+
+ pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
+ }
+
+ if (cMilliesNext < pEpClassFile->cMilliesNext)
+ {
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, cMilliesNext);
+ TMTimerSetMillies(pVM, hTimer, cMilliesNext);
+ }
+}
+
+# endif /* PDM_ASYNC_COMPLETION_FILE_WITH_DELAY */
+
+#endif /* VBOX_WITH_DEBUGGER */
+
+static DECLCALLBACK(int) pdmacFileInitialize(PPDMASYNCCOMPLETIONEPCLASS pClassGlobals, PCFGMNODE pCfgNode)
+{
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pClassGlobals;
+    RTFILEAIOLIMITS AioLimits; /**< Async I/O limitations. */
+
+ int rc = RTFileAioGetLimits(&AioLimits);
+#ifdef DEBUG
+ if (RT_SUCCESS(rc) && RTEnvExist("VBOX_ASYNC_IO_FAILBACK"))
+ rc = VERR_ENV_VAR_NOT_FOUND;
+#endif
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("AIO: Async I/O manager not supported (rc=%Rrc). Falling back to simple manager\n", rc));
+ pEpClassFile->enmMgrTypeOverride = PDMACEPFILEMGRTYPE_SIMPLE;
+ pEpClassFile->enmEpBackendDefault = PDMACFILEEPBACKEND_BUFFERED;
+ }
+ else
+ {
+ pEpClassFile->uBitmaskAlignment = AioLimits.cbBufferAlignment ? ~((RTR3UINTPTR)AioLimits.cbBufferAlignment - 1) : RTR3UINTPTR_MAX;
+ pEpClassFile->cReqsOutstandingMax = AioLimits.cReqsOutstandingMax;
+
+ if (pCfgNode)
+ {
+ /* Query the default manager type */
+ char *pszVal = NULL;
+ rc = CFGMR3QueryStringAllocDef(pCfgNode, "IoMgr", &pszVal, "Async");
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = pdmacFileMgrTypeFromName(pszVal, &pEpClassFile->enmMgrTypeOverride);
+ MMR3HeapFree(pszVal);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ LogRel(("AIOMgr: Default manager type is '%s'\n", pdmacFileMgrTypeToName(pEpClassFile->enmMgrTypeOverride)));
+
+ /* Query default backend type */
+ rc = CFGMR3QueryStringAllocDef(pCfgNode, "FileBackend", &pszVal, "NonBuffered");
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = pdmacFileBackendTypeFromName(pszVal, &pEpClassFile->enmEpBackendDefault);
+ MMR3HeapFree(pszVal);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ LogRel(("AIOMgr: Default file backend is '%s'\n", pdmacFileBackendTypeToName(pEpClassFile->enmEpBackendDefault)));
+
+#ifdef RT_OS_LINUX
+ if ( pEpClassFile->enmMgrTypeOverride == PDMACEPFILEMGRTYPE_ASYNC
+ && pEpClassFile->enmEpBackendDefault == PDMACFILEEPBACKEND_BUFFERED)
+ {
+ LogRel(("AIOMgr: Linux does not support buffered async I/O, changing to non buffered\n"));
+ pEpClassFile->enmEpBackendDefault = PDMACFILEEPBACKEND_NON_BUFFERED;
+ }
+#endif
+ }
+ else
+ {
+ /* No configuration supplied, set defaults */
+ pEpClassFile->enmEpBackendDefault = PDMACFILEEPBACKEND_NON_BUFFERED;
+ pEpClassFile->enmMgrTypeOverride = PDMACEPFILEMGRTYPE_ASYNC;
+ }
+ }
+
+ /* Init critical section. */
+ rc = RTCritSectInit(&pEpClassFile->CritSect);
+
+#ifdef VBOX_WITH_DEBUGGER
+ /* Install the error injection handler. */
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ AssertRC(rc);
+ }
+
+# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ rc = TMR3TimerCreate(pEpClassFile->Core.pVM, TMCLOCK_REAL, pdmacR3TimerCallback, pEpClassFile,
+ TMTIMER_FLAGS_NO_RING0, "AC Delay", &pEpClassFile->hTimer);
+ AssertRC(rc);
+ pEpClassFile->cMilliesNext = UINT64_MAX;
+# endif
+#endif
+
+ return rc;
+}
+
+static DECLCALLBACK(void) pdmacFileTerminate(PPDMASYNCCOMPLETIONEPCLASS pClassGlobals)
+{
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pClassGlobals;
+
+ /* All endpoints should be closed at this point. */
+ AssertMsg(!pEpClassFile->Core.pEndpointsHead, ("There are still endpoints left\n"));
+
+ /* Destroy all left async I/O managers. */
+ while (pEpClassFile->pAioMgrHead)
+ pdmacFileAioMgrDestroy(pEpClassFile, pEpClassFile->pAioMgrHead);
+
+ RTCritSectDelete(&pEpClassFile->CritSect);
+}
+
+static DECLCALLBACK(int) pdmacFileEpInitialize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+ const char *pszUri, uint32_t fFlags)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->pEpClass;
+ PDMACEPFILEMGRTYPE enmMgrType = pEpClassFile->enmMgrTypeOverride;
+ PDMACFILEEPBACKEND enmEpBackend = pEpClassFile->enmEpBackendDefault;
+
+ AssertMsgReturn((fFlags & ~(PDMACEP_FILE_FLAGS_READ_ONLY | PDMACEP_FILE_FLAGS_DONT_LOCK | PDMACEP_FILE_FLAGS_HOST_CACHE_ENABLED)) == 0,
+ ("PDMAsyncCompletion: Invalid flag specified\n"), VERR_INVALID_PARAMETER);
+
+ unsigned fFileFlags = RTFILE_O_OPEN;
+
+ /*
+ * Revert to the simple manager and the buffered backend if
+ * the host cache should be enabled.
+ */
+ if (fFlags & PDMACEP_FILE_FLAGS_HOST_CACHE_ENABLED)
+ {
+ enmMgrType = PDMACEPFILEMGRTYPE_SIMPLE;
+ enmEpBackend = PDMACFILEEPBACKEND_BUFFERED;
+ }
+
+ if (fFlags & PDMACEP_FILE_FLAGS_READ_ONLY)
+ fFileFlags |= RTFILE_O_READ | RTFILE_O_DENY_NONE;
+ else
+ {
+ fFileFlags |= RTFILE_O_READWRITE;
+
+ /*
+ * Opened in read/write mode. Check whether the caller wants to
+ * avoid the lock. Return an error in case caching is enabled
+ * because this can lead to data corruption.
+ */
+ if (fFlags & PDMACEP_FILE_FLAGS_DONT_LOCK)
+ fFileFlags |= RTFILE_O_DENY_NONE;
+ else
+ fFileFlags |= RTFILE_O_DENY_WRITE;
+ }
+
+ if (enmMgrType == PDMACEPFILEMGRTYPE_ASYNC)
+ fFileFlags |= RTFILE_O_ASYNC_IO;
+
+ int rc;
+ if (enmEpBackend == PDMACFILEEPBACKEND_NON_BUFFERED)
+ {
+ /*
+ * We only disable the cache if the size of the file is a multiple of 512.
+ * Certain hosts like Windows, Linux and Solaris require that transfer sizes
+ * are aligned to the volume sector size.
+ * If not we just make sure that the data is written to disk with RTFILE_O_WRITE_THROUGH
+ * which will trash the host cache but ensures that the host cache will not
+ * contain dirty buffers.
+ */
+ RTFILE hFile;
+ rc = RTFileOpen(&hFile, pszUri, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
+ if (RT_SUCCESS(rc))
+ {
+ uint64_t cbSize;
+
+ rc = RTFileQuerySize(hFile, &cbSize);
+
+ if (RT_SUCCESS(rc) && ((cbSize % 512) == 0))
+ fFileFlags |= RTFILE_O_NO_CACHE;
+ else
+ {
+ /* Downgrade to the buffered backend */
+ enmEpBackend = PDMACFILEEPBACKEND_BUFFERED;
+
+#ifdef RT_OS_LINUX
+ fFileFlags &= ~RTFILE_O_ASYNC_IO;
+ enmMgrType = PDMACEPFILEMGRTYPE_SIMPLE;
+#endif
+ }
+ RTFileClose(hFile);
+ }
+ }
+
+ /* Open with final flags. */
+ rc = RTFileOpen(&pEpFile->hFile, pszUri, fFileFlags);
+ if ( rc == VERR_INVALID_FUNCTION
+ || rc == VERR_INVALID_PARAMETER)
+ {
+ LogRel(("AIOMgr: pdmacFileEpInitialize: RTFileOpen %s / %08x failed with %Rrc\n",
+ pszUri, fFileFlags, rc));
+ /*
+ * Solaris doesn't support directio on ZFS so far. :-\
+ * Trying to enable it returns VERR_INVALID_FUNCTION
+ * (ENOTTY). Remove it and hope for the best.
+ * ZFS supports write throttling in case applications
+ * write more data than can be synced to the disk
+ * without blocking the whole application.
+ *
+ * On Linux we have the same problem with cifs.
+ * Have to disable async I/O here too because it requires O_DIRECT.
+ */
+ fFileFlags &= ~RTFILE_O_NO_CACHE;
+ enmEpBackend = PDMACFILEEPBACKEND_BUFFERED;
+
+#ifdef RT_OS_LINUX
+ fFileFlags &= ~RTFILE_O_ASYNC_IO;
+ enmMgrType = PDMACEPFILEMGRTYPE_SIMPLE;
+#endif
+
+ /* Open again. */
+ rc = RTFileOpen(&pEpFile->hFile, pszUri, fFileFlags);
+
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("AIOMgr: pdmacFileEpInitialize: RTFileOpen %s / %08x failed AGAIN(!) with %Rrc\n",
+ pszUri, fFileFlags, rc));
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ pEpFile->fFlags = fFileFlags;
+
+ rc = RTFileQuerySize(pEpFile->hFile, (uint64_t *)&pEpFile->cbFile);
+ if (RT_SUCCESS(rc))
+ {
+ /* Initialize the segment cache */
+ rc = MMR3HeapAllocZEx(pEpClassFile->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+ sizeof(PDMACTASKFILE),
+ (void **)&pEpFile->pTasksFreeHead);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMACEPFILEMGR pAioMgr = NULL;
+
+ pEpFile->pTasksFreeTail = pEpFile->pTasksFreeHead;
+ pEpFile->cTasksCached = 0;
+ pEpFile->enmBackendType = enmEpBackend;
+ /*
+ * Disable async flushes on Solaris for now.
+ * They cause weird hangs which needs more investigations.
+ */
+#ifndef RT_OS_SOLARIS
+ pEpFile->fAsyncFlushSupported = true;
+#else
+ pEpFile->fAsyncFlushSupported = false;
+#endif
+
+ if (enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE)
+ {
+ /* Simple mode. Every file has its own async I/O manager. */
+ rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgr, PDMACEPFILEMGRTYPE_SIMPLE);
+ }
+ else
+ {
+ pAioMgr = pEpClassFile->pAioMgrHead;
+
+ /* Check for an idling manager of the same type */
+ while (pAioMgr)
+ {
+ if (pAioMgr->enmMgrType == enmMgrType)
+ break;
+ pAioMgr = pAioMgr->pNext;
+ }
+
+ if (!pAioMgr)
+ rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgr, enmMgrType);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ pEpFile->AioMgr.pTreeRangesLocked = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
+ if (!pEpFile->AioMgr.pTreeRangesLocked)
+ rc = VERR_NO_MEMORY;
+ else
+ {
+ pEpFile->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;
+
+ /* Assign the endpoint to the thread. */
+ rc = pdmacFileAioMgrAddEndpoint(pAioMgr, pEpFile);
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pEpFile->AioMgr.pTreeRangesLocked);
+ MMR3HeapFree(pEpFile->pTasksFreeHead);
+ }
+ }
+ }
+ else if (rc == VERR_FILE_AIO_INSUFFICIENT_EVENTS)
+ {
+ PUVM pUVM = VMR3GetUVM(pEpClassFile->Core.pVM);
+#if defined(RT_OS_LINUX)
+ rc = VMR3SetError(pUVM, rc, RT_SRC_POS,
+ N_("Failed to create I/O manager for VM due to insufficient resources on the host. "
+ "Either increase the amount of allowed events in /proc/sys/fs/aio-max-nr or enable "
+ "the host I/O cache"));
+#else
+ rc = VMR3SetError(pUVM, rc, RT_SRC_POS,
+ N_("Failed to create I/O manager for VM due to insufficient resources on the host. "
+ "Enable the host I/O cache"));
+#endif
+ }
+ else
+ {
+ PUVM pUVM = VMR3GetUVM(pEpClassFile->Core.pVM);
+ rc = VMR3SetError(pUVM, rc, RT_SRC_POS,
+ N_("Failed to create I/O manager for VM due to an unknown error"));
+ }
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ RTFileClose(pEpFile->hFile);
+ }
+
+#ifdef VBOX_WITH_STATISTICS
+ if (RT_SUCCESS(rc))
+ {
+ STAMR3RegisterF(pEpClassFile->Core.pVM, &pEpFile->StatRead,
+ STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_TICKS_PER_CALL, "Time taken to read from the endpoint",
+ "/PDM/AsyncCompletion/File/%s/%d/Read", RTPathFilename(pEpFile->Core.pszUri), pEpFile->Core.iStatId);
+
+ STAMR3RegisterF(pEpClassFile->Core.pVM, &pEpFile->StatWrite,
+ STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_TICKS_PER_CALL, "Time taken to write to the endpoint",
+ "/PDM/AsyncCompletion/File/%s/%d/Write", RTPathFilename(pEpFile->Core.pszUri), pEpFile->Core.iStatId);
+ }
+#endif
+
+ if (RT_SUCCESS(rc))
+ LogRel(("AIOMgr: Endpoint for file '%s' (flags %08x) created successfully\n", pszUri, pEpFile->fFlags));
+
+ return rc;
+}
+
+static DECLCALLBACK(int) pdmacFileEpRangesLockedDestroy(PAVLRFOFFNODECORE pNode, void *pvUser)
+{
+ NOREF(pNode); NOREF(pvUser);
+ AssertMsgFailed(("The locked ranges tree should be empty at that point\n"));
+ return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(int) pdmacFileEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->pEpClass;
+
+ /* Make sure that all tasks finished for this endpoint. */
+ int rc = pdmacFileAioMgrCloseEndpoint(pEpFile->pAioMgr, pEpFile);
+ AssertRC(rc);
+
+ /*
+     * If the async I/O manager is in failsafe mode, this is the only endpoint
+     * it processes and it can thus be destroyed now.
+ */
+ if (pEpFile->pAioMgr->enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE)
+ pdmacFileAioMgrDestroy(pEpClassFile, pEpFile->pAioMgr);
+
+ /* Free cached tasks. */
+ PPDMACTASKFILE pTask = pEpFile->pTasksFreeHead;
+
+ while (pTask)
+ {
+ PPDMACTASKFILE pTaskFree = pTask;
+ pTask = pTask->pNext;
+ MMR3HeapFree(pTaskFree);
+ }
+
+ /* Destroy the locked ranges tree now. */
+ RTAvlrFileOffsetDestroy(pEpFile->AioMgr.pTreeRangesLocked, pdmacFileEpRangesLockedDestroy, NULL);
+ RTMemFree(pEpFile->AioMgr.pTreeRangesLocked);
+ pEpFile->AioMgr.pTreeRangesLocked = NULL;
+
+ RTFileClose(pEpFile->hFile);
+
+#ifdef VBOX_WITH_STATISTICS
+    /* This may be redundant with the deregistration done in pdmR3AsyncCompletionStatisticsDeregister. */
+ STAMR3DeregisterF(pEpClassFile->Core.pVM->pUVM, "/PDM/AsyncCompletion/File/%s/*", RTPathFilename(pEpFile->Core.pszUri));
+#endif
+
+ return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(int) pdmacFileEpRead(PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbRead)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+
+ LogFlowFunc(("pTask=%#p pEndpoint=%#p off=%RTfoff paSegments=%#p cSegments=%zu cbRead=%zu\n",
+ pTask, pEndpoint, off, paSegments, cSegments, cbRead));
+
+ if (RT_UNLIKELY((uint64_t)off + cbRead > pEpFile->cbFile))
+ return VERR_EOF;
+
+ STAM_PROFILE_ADV_START(&pEpFile->StatRead, Read);
+ pdmacFileEpTaskInit(pTask, cbRead);
+ int rc = pdmacFileEpTaskInitiate(pTask, pEndpoint, off, paSegments, cSegments, cbRead,
+ PDMACTASKFILETRANSFER_READ);
+ STAM_PROFILE_ADV_STOP(&pEpFile->StatRead, Read);
+
+ return rc;
+}
+
+static DECLCALLBACK(int) pdmacFileEpWrite(PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbWrite)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+
+ if (RT_UNLIKELY(pEpFile->fReadonly))
+ return VERR_NOT_SUPPORTED;
+
+ STAM_PROFILE_ADV_START(&pEpFile->StatWrite, Write);
+
+ pdmacFileEpTaskInit(pTask, cbWrite);
+
+ int rc = pdmacFileEpTaskInitiate(pTask, pEndpoint, off, paSegments, cSegments, cbWrite,
+ PDMACTASKFILETRANSFER_WRITE);
+
+ STAM_PROFILE_ADV_STOP(&pEpFile->StatWrite, Write);
+
+ return rc;
+}
+
+static DECLCALLBACK(int) pdmacFileEpFlush(PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pTask;
+
+ if (RT_UNLIKELY(pEpFile->fReadonly))
+ return VERR_NOT_SUPPORTED;
+
+ pdmacFileEpTaskInit(pTask, 0);
+
+ PPDMACTASKFILE pIoTask = pdmacFileTaskAlloc(pEpFile);
+ if (RT_UNLIKELY(!pIoTask))
+ return VERR_NO_MEMORY;
+
+ pIoTask->pEndpoint = pEpFile;
+ pIoTask->enmTransferType = PDMACTASKFILETRANSFER_FLUSH;
+ pIoTask->pvUser = pTaskFile;
+ pIoTask->pfnCompleted = pdmacFileEpTaskCompleted;
+ pdmacFileEpAddTask(pEpFile, pIoTask);
+
+ return VINF_AIO_TASK_PENDING;
+}
+
+static DECLCALLBACK(int) pdmacFileEpGetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t *pcbSize)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+
+ *pcbSize = ASMAtomicReadU64(&pEpFile->cbFile);
+
+ return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(int) pdmacFileEpSetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t cbSize)
+{
+ int rc;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+
+ rc = RTFileSetSize(pEpFile->hFile, cbSize);
+ if (RT_SUCCESS(rc))
+ ASMAtomicWriteU64(&pEpFile->cbFile, cbSize);
+
+ return rc;
+}
+
+const PDMASYNCCOMPLETIONEPCLASSOPS g_PDMAsyncCompletionEndpointClassFile =
+{
+ /* u32Version */
+ PDMAC_EPCLASS_OPS_VERSION,
+ /* pcszName */
+ "File",
+ /* enmClassType */
+ PDMASYNCCOMPLETIONEPCLASSTYPE_FILE,
+ /* cbEndpointClassGlobal */
+ sizeof(PDMASYNCCOMPLETIONEPCLASSFILE),
+ /* cbEndpoint */
+ sizeof(PDMASYNCCOMPLETIONENDPOINTFILE),
+ /* cbTask */
+ sizeof(PDMASYNCCOMPLETIONTASKFILE),
+ /* pfnInitialize */
+ pdmacFileInitialize,
+ /* pfnTerminate */
+ pdmacFileTerminate,
+ /* pfnEpInitialize. */
+ pdmacFileEpInitialize,
+ /* pfnEpClose */
+ pdmacFileEpClose,
+ /* pfnEpRead */
+ pdmacFileEpRead,
+ /* pfnEpWrite */
+ pdmacFileEpWrite,
+ /* pfnEpFlush */
+ pdmacFileEpFlush,
+ /* pfnEpGetSize */
+ pdmacFileEpGetSize,
+ /* pfnEpSetSize */
+ pdmacFileEpSetSize,
+ /* u32VersionEnd */
+ PDMAC_EPCLASS_OPS_VERSION
+};
+
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp
new file mode 100644
index 00000000..b3a49090
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp
@@ -0,0 +1,280 @@
+/* $Id: PDMAsyncCompletionFileFailsafe.cpp $ */
+/** @file
+ * PDM Async I/O - Transport data asynchronously in R3 using EMT.
+ * Simple File I/O manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <VBox/log.h>
+
+#include "PDMAsyncCompletionFileInternal.h"
+
+
+
+/**
+ * Put a list of tasks in the pending request list of an endpoint.
+ */
+DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
+{
+    /* Link the task list into the pending list. */
+ if (!pEndpoint->AioMgr.pReqsPendingHead)
+ {
+ Assert(!pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
+ }
+ else
+ {
+ Assert(pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
+ }
+
+ /* Update the tail. */
+ while (pTaskHead->pNext)
+ pTaskHead = pTaskHead->pNext;
+
+ pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
+ pTaskHead->pNext = NULL;
+}
+
+/**
+ * Processes the task list assigned to the given endpoint.
+ */
+static int pdmacFileAioMgrFailsafeProcessEndpointTaskList(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACTASKFILE pTasks)
+{
+ int rc = VINF_SUCCESS;
+
+ while (pTasks)
+ {
+ RTMSINTERVAL msWhenNext;
+ PPDMACTASKFILE pCurr = pTasks;
+
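+        /* Honor the bandwidth limit: if this transfer is not allowed yet,
+         * remember the earliest retry time and leave the remaining tasks
+         * queued for later. */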
+ if (!pdmacEpIsTransferAllowed(&pEndpoint->Core, (uint32_t)pCurr->DataSeg.cbSeg, &msWhenNext))
+ {
+ pAioMgr->msBwLimitExpired = RT_MIN(pAioMgr->msBwLimitExpired, msWhenNext);
+ break;
+ }
+
+ pTasks = pTasks->pNext;
+
+ switch (pCurr->enmTransferType)
+ {
+ case PDMACTASKFILETRANSFER_FLUSH:
+ {
+ rc = RTFileFlush(pEndpoint->hFile);
+ break;
+ }
+ case PDMACTASKFILETRANSFER_READ:
+ case PDMACTASKFILETRANSFER_WRITE:
+ {
+ if (pCurr->enmTransferType == PDMACTASKFILETRANSFER_READ)
+ {
+ rc = RTFileReadAt(pEndpoint->hFile, pCurr->Off,
+ pCurr->DataSeg.pvSeg,
+ pCurr->DataSeg.cbSeg,
+ NULL);
+ }
+ else
+ {
+ if (RT_UNLIKELY((uint64_t)pCurr->Off + pCurr->DataSeg.cbSeg > pEndpoint->cbFile))
+ {
+ ASMAtomicWriteU64(&pEndpoint->cbFile, pCurr->Off + pCurr->DataSeg.cbSeg);
+ RTFileSetSize(pEndpoint->hFile, pCurr->Off + pCurr->DataSeg.cbSeg);
+ }
+
+ rc = RTFileWriteAt(pEndpoint->hFile, pCurr->Off,
+ pCurr->DataSeg.pvSeg,
+ pCurr->DataSeg.cbSeg,
+ NULL);
+ }
+
+ break;
+ }
+ default:
+ AssertMsgFailed(("Invalid transfer type %d\n", pTasks->enmTransferType));
+ }
+
+ pCurr->pfnCompleted(pCurr, pCurr->pvUser, rc);
+ pdmacFileTaskFree(pEndpoint, pCurr);
+ }
+
+ if (pTasks)
+ {
+ /* Add the rest of the tasks to the pending list */
+ pdmacFileAioMgrEpAddTaskList(pEndpoint, pTasks);
+ }
+
+ return VINF_SUCCESS;
+}
+
+static int pdmacFileAioMgrFailsafeProcessEndpoint(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ int rc = VINF_SUCCESS;
+ PPDMACTASKFILE pTasks = pEndpoint->AioMgr.pReqsPendingHead;
+
+ pEndpoint->AioMgr.pReqsPendingHead = NULL;
+ pEndpoint->AioMgr.pReqsPendingTail = NULL;
+
+ /* Process the request pending list first in case the endpoint was migrated due to an error. */
+ if (pTasks)
+ rc = pdmacFileAioMgrFailsafeProcessEndpointTaskList(pAioMgr, pEndpoint, pTasks);
+
+ if (RT_SUCCESS(rc))
+ {
+ pTasks = pdmacFileEpGetNewTasks(pEndpoint);
+
+ if (pTasks)
+ rc = pdmacFileAioMgrFailsafeProcessEndpointTaskList(pAioMgr, pEndpoint, pTasks);
+ }
+
+ return rc;
+}
+
+/**
+ * A fallback method in case something goes wrong with the normal
+ * I/O manager.
+ */
+DECLCALLBACK(int) pdmacFileAioMgrFailsafe(RTTHREAD hThreadSelf, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
+ NOREF(hThreadSelf);
+
+ while ( (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
+ || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING))
+ {
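+        /* Sleep protocol paired with pdmacFileAioMgrWakeup: announce that we
+         * are about to wait so that wakeups signal the semaphore, and skip
+         * the wait entirely if a wakeup already arrived. */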
+ ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
+ if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
+ rc = RTSemEventWait(pAioMgr->EventSem, pAioMgr->msBwLimitExpired);
+ ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
+ Assert(RT_SUCCESS(rc) || rc == VERR_TIMEOUT);
+
+ LogFlow(("Got woken up\n"));
+ ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
+
+ /* Process endpoint events first. */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
+ while (pEndpoint)
+ {
+ pAioMgr->msBwLimitExpired = RT_INDEFINITE_WAIT;
+ rc = pdmacFileAioMgrFailsafeProcessEndpoint(pAioMgr, pEndpoint);
+ AssertRC(rc);
+ pEndpoint = pEndpoint->AioMgr.pEndpointNext;
+ }
+
+ /* Now check for an external blocking event. */
+ if (pAioMgr->fBlockingEventPending)
+ {
+ switch (pAioMgr->enmBlockingEvent)
+ {
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = pAioMgr->BlockingEventData.AddEndpoint.pEndpoint;
+                    AssertMsg(RT_VALID_PTR(pEndpointNew), ("Adding endpoint event without an endpoint to add\n"));
+
+ pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;
+
+ pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
+ pEndpointNew->AioMgr.pEndpointPrev = NULL;
+ if (pAioMgr->pEndpointsHead)
+ pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
+ pAioMgr->pEndpointsHead = pEndpointNew;
+
+ pAioMgr->cEndpoints++;
+
+ /*
+ * Process the task list the first time. There might be pending requests
+                     * if the endpoint was migrated from another manager.
+ */
+ rc = pdmacFileAioMgrFailsafeProcessEndpoint(pAioMgr, pEndpointNew);
+ AssertRC(rc);
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint;
+                    AssertMsg(RT_VALID_PTR(pEndpointRemove), ("Removing endpoint event without an endpoint to remove\n"));
+
+ pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
+
+ if (pPrev)
+ pPrev->AioMgr.pEndpointNext = pNext;
+ else
+ pAioMgr->pEndpointsHead = pNext;
+
+ if (pNext)
+ pNext->AioMgr.pEndpointPrev = pPrev;
+
+ pAioMgr->cEndpoints--;
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint;
+                    AssertMsg(RT_VALID_PTR(pEndpointClose), ("Close endpoint event without an endpoint to close\n"));
+
+ pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
+
+ /* Make sure all tasks finished. */
+ rc = pdmacFileAioMgrFailsafeProcessEndpoint(pAioMgr, pEndpointClose);
+ AssertRC(rc);
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
+ break;
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
+ break;
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
+ break;
+ default:
+ AssertMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
+ }
+
+ ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
+ pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;
+
+ /* Release the waiting thread. */
+ rc = RTSemEventSignal(pAioMgr->EventSemBlock);
+ AssertRC(rc);
+ }
+ }
+
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
new file mode 100644
index 00000000..f6fa9d85
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
@@ -0,0 +1,1744 @@
+/* $Id: PDMAsyncCompletionFileNormal.cpp $ */
+/** @file
+ * PDM Async I/O - Async File I/O manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+#include <iprt/types.h>
+#include <iprt/asm.h>
+#include <iprt/file.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/assert.h>
+#include <VBox/log.h>
+
+#include "PDMAsyncCompletionFileInternal.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The update period for the I/O load statistics in ms. */
+#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
+/** Allocation step size for the maximum number of requests a manager will handle. */
+#define PDMACEPFILEMGR_REQS_STEP 64
+
+
+/*********************************************************************************************************************************
+* Internal functions *
+*********************************************************************************************************************************/
+static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
+ PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+
+static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACFILERANGELOCK pRangeLock);
+
+static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
+ int rc, size_t cbTransfered);
+
+
+int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
+{
+ pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
+
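+    /* Prefer an unlimited AIO context; hosts that enforce a cap make this
+     * fail with VERR_OUT_OF_RANGE, in which case we retry with our limit. */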
+ int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
+ if (rc == VERR_OUT_OF_RANGE)
+ rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Initialize request handle array. */
+ pAioMgr->iFreeEntry = 0;
+ pAioMgr->cReqEntries = pAioMgr->cRequestsActiveMax;
+ pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
+
+ if (pAioMgr->pahReqsFree)
+ {
+ /* Create the range lock memcache. */
+ rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
+ 0, UINT32_MAX, NULL, NULL, NULL, 0);
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+
+ RTMemFree(pAioMgr->pahReqsFree);
+ }
+ else
+ {
+ RTFileAioCtxDestroy(pAioMgr->hAioCtx);
+ rc = VERR_NO_MEMORY;
+ }
+ }
+
+ return rc;
+}
+
+void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
+{
+ RTFileAioCtxDestroy(pAioMgr->hAioCtx);
+
+ while (pAioMgr->iFreeEntry > 0)
+ {
+ pAioMgr->iFreeEntry--;
+ Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] != NIL_RTFILEAIOREQ);
+ RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry]);
+ }
+
+ RTMemFree(pAioMgr->pahReqsFree);
+ RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
+}
+
+#if 0 /* currently unused */
+/**
+ * Sorts the endpoint list with insertion sort.
+ */
+static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;
+
+ pEpPrev = pAioMgr->pEndpointsHead;
+ pEpCurr = pEpPrev->AioMgr.pEndpointNext;
+
+ while (pEpCurr)
+ {
+ /* Remember the next element to sort because the list might change. */
+ pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;
+
+ /* Unlink the current element from the list. */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
+
+ if (pPrev)
+ pPrev->AioMgr.pEndpointNext = pNext;
+ else
+ pAioMgr->pEndpointsHead = pNext;
+
+ if (pNext)
+ pNext->AioMgr.pEndpointPrev = pPrev;
+
+        /* Go back until we reach the place to insert the current endpoint. */
+ while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
+ pEpPrev = pEpPrev->AioMgr.pEndpointPrev;
+
+ /* Link the endpoint into the list. */
+ if (pEpPrev)
+ pNext = pEpPrev->AioMgr.pEndpointNext;
+ else
+ pNext = pAioMgr->pEndpointsHead;
+
+ pEpCurr->AioMgr.pEndpointNext = pNext;
+ pEpCurr->AioMgr.pEndpointPrev = pEpPrev;
+
+ if (pNext)
+ pNext->AioMgr.pEndpointPrev = pEpCurr;
+
+ if (pEpPrev)
+ pEpPrev->AioMgr.pEndpointNext = pEpCurr;
+ else
+ pAioMgr->pEndpointsHead = pEpCurr;
+
+ pEpCurr = pEpNextToSort;
+ }
+
+#ifdef DEBUG
+ /* Validate sorting algorithm */
+ unsigned cEndpoints = 0;
+ pEpCurr = pAioMgr->pEndpointsHead;
+
+ AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
+ AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));
+
+ while (pEpCurr)
+ {
+ cEndpoints++;
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
+
+ Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
+ Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);
+
+ pEpCurr = pNext;
+ }
+
+ AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));
+
+#endif
+}
+#endif /* currently unused */
+
+/**
+ * Removes an endpoint from the currently assigned manager.
+ *
+ * @returns TRUE if there are still requests pending on the current manager for this endpoint.
+ * FALSE otherwise.
+ * @param pEndpointRemove The endpoint to remove.
+ */
+static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
+{
+ PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
+ PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
+
+ pAioMgr->cEndpoints--;
+
+ if (pPrev)
+ pPrev->AioMgr.pEndpointNext = pNext;
+ else
+ pAioMgr->pEndpointsHead = pNext;
+
+ if (pNext)
+ pNext->AioMgr.pEndpointPrev = pPrev;
+
+ /* Make sure that there is no request pending on this manager for the endpoint. */
+ if (!pEndpointRemove->AioMgr.cRequestsActive)
+ {
+ Assert(!pEndpointRemove->pFlushReq);
+
+ /* Reopen the file so that the new endpoint can re-associate with the file */
+ RTFileClose(pEndpointRemove->hFile);
+ int rc = RTFileOpen(&pEndpointRemove->hFile, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
+ AssertRC(rc);
+ return false;
+ }
+
+ return true;
+}
+
+#if 0 /* currently unused */
+
+static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
+{
+ /* Balancing doesn't make sense with only one endpoint. */
+ if (pAioMgr->cEndpoints == 1)
+ return false;
+
+ /* It doesn't make sense to move endpoints if only one produces the whole load. */
+ unsigned cEndpointsWithLoad = 0;
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
+
+ while (pCurr)
+ {
+ if (pCurr->AioMgr.cReqsPerSec)
+ cEndpointsWithLoad++;
+
+ pCurr = pCurr->AioMgr.pEndpointNext;
+ }
+
+ return (cEndpointsWithLoad > 1);
+}
+
+/**
+ * Creates a new I/O manager and spreads the I/O load of the endpoints
+ * between the given I/O manager and the new one.
+ *
+ * @param pAioMgr The I/O manager with high I/O load.
+ */
+static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
+{
+ /*
+ * Check if balancing would improve the situation.
+ */
+ if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
+ {
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
+ PPDMACEPFILEMGR pAioMgrNew = NULL;
+
+ int rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
+ if (RT_SUCCESS(rc))
+ {
+ /* We will sort the list by request count per second. */
+ pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
+
+ /* Now move some endpoints to the new manager. */
+ unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
+ unsigned cReqsOther = 0;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
+
+ while (pCurr)
+ {
+ if (cReqsHere <= cReqsOther)
+ {
+ /*
+ * The other manager has more requests to handle now.
+ * We will keep the current endpoint.
+ */
+ Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
+ cReqsHere += pCurr->AioMgr.cReqsPerSec;
+ pCurr = pCurr->AioMgr.pEndpointNext;
+ }
+ else
+ {
+ /* Move it to the other manager. */
+ Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
+ cReqsOther += pCurr->AioMgr.cReqsPerSec;
+
+ PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
+
+ pCurr = pCurr->AioMgr.pEndpointNext;
+
+ bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
+
+ if (fReqsPending)
+ {
+ pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
+ pMove->AioMgr.fMoving = true;
+ pMove->AioMgr.pAioMgrDst = pAioMgrNew;
+ }
+ else
+ {
+ pMove->AioMgr.fMoving = false;
+ pMove->AioMgr.pAioMgrDst = NULL;
+ pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Don't process further but leave a log entry about reduced performance. */
+ LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
+ }
+ }
+ else
+ Log(("AIOMgr: Load balancing would not improve anything\n"));
+}
+
+#endif /* unused */
+
+/**
+ * Increase the maximum number of active requests for the given I/O manager.
+ *
+ * @returns VBox status code.
+ * @param pAioMgr The I/O manager to grow.
+ */
+static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
+{
+ LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));
+
+ AssertMsg( pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING
+ && !pAioMgr->cRequestsActive,
+ ("Invalid state of the I/O manager\n"));
+
+#ifdef RT_OS_WINDOWS
+ /*
+ * Reopen the files of all assigned endpoints first so we can assign them to the new
+ * I/O context.
+ */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
+
+ while (pCurr)
+ {
+ RTFileClose(pCurr->hFile);
+ int rc2 = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags); AssertRC(rc2);
+
+ pCurr = pCurr->AioMgr.pEndpointNext;
+ }
+#endif
+
+ /* Create the new bigger context. */
+ pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;
+
+ RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
+ int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
+ if (rc == VERR_OUT_OF_RANGE)
+ rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Close the old context. */
+ rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
+ AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */
+
+ pAioMgr->hAioCtx = hAioCtxNew;
+
+ /* Create a new I/O task handle array */
+ uint32_t cReqEntriesNew = pAioMgr->cRequestsActiveMax + 1;
+ RTFILEAIOREQ *pahReqNew = (RTFILEAIOREQ *)RTMemAllocZ(cReqEntriesNew * sizeof(RTFILEAIOREQ));
+
+ if (pahReqNew)
+ {
+ /* Copy the cached request handles. */
+ for (uint32_t iReq = 0; iReq < pAioMgr->cReqEntries; iReq++)
+ pahReqNew[iReq] = pAioMgr->pahReqsFree[iReq];
+
+ RTMemFree(pAioMgr->pahReqsFree);
+ pAioMgr->pahReqsFree = pahReqNew;
+ pAioMgr->cReqEntries = cReqEntriesNew;
+ LogFlowFunc(("I/O manager increased to handle a maximum of %u requests\n",
+ pAioMgr->cRequestsActiveMax));
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+#ifdef RT_OS_WINDOWS
+ /* Assign the file to the new context. */
+ pCurr = pAioMgr->pEndpointsHead;
+ while (pCurr)
+ {
+ rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->hFile);
+ AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */
+
+ pCurr = pCurr->AioMgr.pEndpointNext;
+ }
+#endif
+
+ if (RT_FAILURE(rc))
+ {
+ LogFlow(("Increasing size of the I/O manager failed with rc=%Rrc\n", rc));
+ pAioMgr->cRequestsActiveMax -= PDMACEPFILEMGR_REQS_STEP;
+ }
+
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
+ LogFlowFunc(("returns rc=%Rrc\n", rc));
+
+ return rc;
+}
+
+/**
+ * Checks if a given status code is fatal.
+ * Non-fatal errors can be fixed by migrating the endpoint to a
+ * failsafe manager.
+ *
+ * @returns true if the error is fatal and migrating to a failsafe manager doesn't help,
+ * false if the error can be fixed by a migration (image on an NFS disk, for example).
+ * @param rcReq The status code to check.
+ */
+DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
+{
+ return rcReq == VERR_DEV_IO_ERROR
+ || rcReq == VERR_FILE_IO_ERROR
+ || rcReq == VERR_DISK_IO_ERROR
+ || rcReq == VERR_DISK_FULL
+ || rcReq == VERR_FILE_TOO_BIG;
+}
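+
+/*
+ * Example of the intended split (illustrative): a write failing with
+ * VERR_DISK_FULL is fatal and gets reported to the guest as-is, whereas a
+ * failure caused by async and direct I/O being unsupported in combination on
+ * an NFS mount is non-fatal; the endpoint is then reopened without
+ * RTFILE_O_ASYNC_IO and RTFILE_O_NO_CACHE and migrated to a failsafe manager
+ * (see pdmacFileAioMgrNormalReqCompleteRc() below).
+ */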
+
+/**
+ * Error handler which will create the failsafe managers and destroy the failed I/O manager.
+ *
+ * @returns VBox status code
+ * @param pAioMgr The I/O manager the error occurred on.
+ * @param rc The error code.
+ * @param SRC_POS The source location of the error (use RT_SRC_POS).
+ */
+static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
+{
+ LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
+ pAioMgr, rc));
+ LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
+ LogRel(("AIOMgr: Please contact the product vendor\n"));
+
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
+
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
+ ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);
+
+ AssertMsgFailed(("Implement\n"));
+ return VINF_SUCCESS;
+}
+
+/**
+ * Put a list of tasks in the pending request list of an endpoint.
+ */
+DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
+{
+ /* Add the rest of the tasks to the pending list */
+ if (!pEndpoint->AioMgr.pReqsPendingHead)
+ {
+ Assert(!pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
+ }
+ else
+ {
+ Assert(pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
+ }
+
+ /* Update the tail. */
+ while (pTaskHead->pNext)
+ pTaskHead = pTaskHead->pNext;
+
+ pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
+ pTaskHead->pNext = NULL;
+}
+
+/**
+ * Put one task in the pending request list of an endpoint.
+ */
+DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
+{
+ /* Add the rest of the tasks to the pending list */
+ if (!pEndpoint->AioMgr.pReqsPendingHead)
+ {
+ Assert(!pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingHead = pTask;
+ }
+ else
+ {
+ Assert(pEndpoint->AioMgr.pReqsPendingTail);
+ pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
+ }
+
+ pEndpoint->AioMgr.pReqsPendingTail = pTask;
+ pTask->pNext = NULL;
+}
+
+/**
+ * Allocates an async I/O request.
+ *
+ * @returns Handle to the request.
+ * @param pAioMgr The I/O manager.
+ */
+static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
+{
+ /* Get a request handle. */
+ RTFILEAIOREQ hReq;
+ if (pAioMgr->iFreeEntry > 0)
+ {
+ pAioMgr->iFreeEntry--;
+ hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeEntry];
+ pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = NIL_RTFILEAIOREQ;
+ Assert(hReq != NIL_RTFILEAIOREQ);
+ }
+ else
+ {
+ int rc = RTFileAioReqCreate(&hReq);
+ AssertRCReturn(rc, NIL_RTFILEAIOREQ);
+ }
+
+ return hReq;
+}
+
+/**
+ * Frees an async I/O request handle.
+ *
+ * @param pAioMgr The I/O manager.
+ * @param hReq The I/O request handle to free.
+ */
+static void pdmacFileAioMgrNormalRequestFree(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
+{
+ Assert(pAioMgr->iFreeEntry < pAioMgr->cReqEntries);
+ Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] == NIL_RTFILEAIOREQ);
+
+ pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = hReq;
+ pAioMgr->iFreeEntry++;
+}
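+
+/*
+ * Usage sketch for the handle cache (hypothetical caller): handles are
+ * recycled instead of being created and destroyed for every transfer.
+ *
+ *     RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ *     if (hReq != NIL_RTFILEAIOREQ)
+ *     {
+ *         // ... RTFileAioReqPrepareRead/Write() and submit; on completion
+ *         // or on preparation failure the handle goes back to the cache:
+ *         pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
+ *     }
+ */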
+
+/**
+ * Wrapper around RTFileAioCtxSubmit() which also does the error handling.
+ */
+static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PRTFILEAIOREQ pahReqs, unsigned cReqs)
+{
+ pAioMgr->cRequestsActive += cReqs;
+ pEndpoint->AioMgr.cRequestsActive += cReqs;
+
+ LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
+ LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));
+
+ int rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
+ if (RT_FAILURE(rc))
+ {
+ if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
+ {
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+
+ /* Append any tasks which were not submitted to the waiting list. */
+ for (size_t i = 0; i < cReqs; i++)
+ {
+ int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
+
+ if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
+ {
+ PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
+
+ Assert(pTask->hReq == pahReqs[i]);
+ pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
+ pAioMgr->cRequestsActive--;
+ pEndpoint->AioMgr.cRequestsActive--;
+
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
+ {
+ /* Clear the pending flush */
+ Assert(pEndpoint->pFlushReq == pTask);
+ pEndpoint->pFlushReq = NULL;
+ }
+ }
+ }
+
+ pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;
+
+ /* Print an entry in the release log */
+ if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
+ {
+ pEpClass->fOutOfResourcesWarningPrinted = true;
+ LogRel(("AIOMgr: Host limits number of active IO requests to %u. Expect a performance impact.\n",
+ pAioMgr->cRequestsActive));
+ }
+
+ LogFlow(("Removed requests. I/O manager has a total of %u active requests now\n", pAioMgr->cRequestsActive));
+ LogFlow(("Endpoint has a total of %u active requests now\n", pEndpoint->AioMgr.cRequestsActive));
+ rc = VINF_SUCCESS;
+ }
+ else /* Another kind of error happened (full disk, ...) */
+ {
+ /* An error happened. Find out which request caused it and resubmit all others. */
+ for (size_t i = 0; i < cReqs; i++)
+ {
+ int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
+
+ if (rcReq == VERR_FILE_AIO_NOT_SUBMITTED)
+ {
+ /* We call ourselves again to do any error handling which might come up now. */
+ rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &pahReqs[i], 1);
+ AssertRC(rc);
+ }
+ else if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
+ pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, pahReqs[i], rcReq, 0);
+ }
+
+
+ if ( pEndpoint->pFlushReq
+ && !pAioMgr->cRequestsActive
+ && !pEndpoint->fAsyncFlushSupported)
+ {
+ /*
+ * Complete a pending flush if we don't have requests enqueued and the host doesn't support
+ * the async flush API.
+ * This happens only if we just noticed that async flushes are not supported
+ * and the only active request was a flush.
+ */
+ PPDMACTASKFILE pFlush = pEndpoint->pFlushReq;
+ pEndpoint->pFlushReq = NULL;
+ pFlush->pfnCompleted(pFlush, pFlush->pvUser, VINF_SUCCESS);
+ pdmacFileTaskFree(pEndpoint, pFlush);
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ RTFOFF offStart, size_t cbRange,
+ PPDMACTASKFILE pTask, bool fAlignedReq)
+{
+ AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
+ || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
+ ("Invalid task type %d\n", pTask->enmTransferType));
+
+ /*
+ * If there is no unaligned request active and the current one is aligned
+ * just pass it through.
+ */
+ if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
+ return false;
+
+ PPDMACFILERANGELOCK pRangeLock;
+ pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
+ if (!pRangeLock)
+ {
+ pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
+ /* Check if we intersect with the range. */
+ if ( !pRangeLock
+ || !( (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
+ && (pRangeLock->Core.KeyLast) >= offStart))
+ {
+ pRangeLock = NULL; /* False alarm */
+ }
+ }
+
+ /* Check whether we have one of the situations explained below */
+ if (pRangeLock)
+ {
+ /* Add to the list. */
+ pTask->pNext = NULL;
+
+ if (!pRangeLock->pWaitingTasksHead)
+ {
+ Assert(!pRangeLock->pWaitingTasksTail);
+ pRangeLock->pWaitingTasksHead = pTask;
+ pRangeLock->pWaitingTasksTail = pTask;
+ }
+ else
+ {
+ AssertPtr(pRangeLock->pWaitingTasksTail);
+ pRangeLock->pWaitingTasksTail->pNext = pTask;
+ pRangeLock->pWaitingTasksTail = pTask;
+ }
+ return true;
+ }
+
+ return false;
+}
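+
+/*
+ * Worked example for the intersection test above (illustrative numbers): an
+ * in-flight unaligned write holds the range lock [Key=0, KeyLast=4095]. A new
+ * read with offStart=512 and cbRange=512 is checked against it:
+ *     Key (0) <= offStart + cbRange - 1 (1023) and KeyLast (4095) >= offStart (512)
+ * Both conditions hold, so the read intersects the locked range and is
+ * appended to the lock's waiting list instead of being submitted.
+ */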
+
+static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ RTFOFF offStart, size_t cbRange,
+ PPDMACTASKFILE pTask, bool fAlignedReq)
+{
+ LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p offStart=%RTfoff cbRange=%zu pTask=%#p\n",
+ pAioMgr, pEndpoint, offStart, cbRange, pTask));
+
+ AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask, fAlignedReq),
+ ("Range is already locked offStart=%RTfoff cbRange=%u\n",
+ offStart, cbRange));
+
+ /*
+ * If there is no unaligned request active and the current one is aligned
+ * just don't use the lock.
+ */
+ if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
+ {
+ pTask->pRangeLock = NULL;
+ return VINF_SUCCESS;
+ }
+
+ PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
+ if (!pRangeLock)
+ return VERR_NO_MEMORY;
+
+ /* Init the lock. */
+ pRangeLock->Core.Key = offStart;
+ pRangeLock->Core.KeyLast = offStart + cbRange - 1;
+ pRangeLock->cRefs = 1;
+ pRangeLock->fReadLock = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
+ pRangeLock->pWaitingTasksHead = NULL;
+ pRangeLock->pWaitingTasksTail = NULL;
+
+ bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
+ AssertMsg(fInserted, ("Range lock was not inserted!\n")); NOREF(fInserted);
+
+ /* Let the task point to its lock. */
+ pTask->pRangeLock = pRangeLock;
+ pEndpoint->AioMgr.cLockedReqsActive++;
+
+ return VINF_SUCCESS;
+}
+
+static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACFILERANGELOCK pRangeLock)
+{
+ PPDMACTASKFILE pTasksWaitingHead;
+
+ LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p pRangeLock=%#p\n",
+ pAioMgr, pEndpoint, pRangeLock));
+
+ /* pRangeLock can be NULL if there was no lock assigned with the task. */
+ if (!pRangeLock)
+ return NULL;
+
+ Assert(pRangeLock->cRefs == 1);
+
+ RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
+ pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
+ pRangeLock->pWaitingTasksHead = NULL;
+ pRangeLock->pWaitingTasksTail = NULL;
+ RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
+ pEndpoint->AioMgr.cLockedReqsActive--;
+
+ return pTasksWaitingHead;
+}
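+
+/*
+ * Lock/free pairing (sketch): pdmacFileAioMgrNormalRangeLock() is taken when a
+ * request is prepared, and on completion
+ * pdmacFileAioMgrNormalRangeLockFree() hands back the head of the waiting
+ * list, which the completion path feeds straight into
+ * pdmacFileAioMgrNormalProcessTaskList() to resubmit the deferred tasks.
+ */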
+
+static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
+{
+ AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
+ || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
+ ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
+ pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
+
+ pTask->fPrefetch = false;
+ pTask->cbBounceBuffer = 0;
+
+ /*
+ * Before we start to set up the request we have to check whether a task
+ * is already active whose range intersects with ours. We have to defer execution
+ * of this task in two cases:
+ * - The pending task is a write and the current one is either a read or a write.
+ * - The pending task is a read and the current one is a write.
+ *
+ * To check whether a range is currently "locked" we use the AVL tree where every pending task
+ * is stored by its file offset range. The current task will be added to the waiting list
+ * of the range lock and will be executed when the active one completes. (The method below
+ * which checks whether a range is already in use will add the task.)
+ *
+ * This is necessary because of the requirement to align all requests to a 512 byte boundary
+ * which is enforced by the host OS (Linux and Windows atm). It is possible that
+ * we have to process unaligned tasks and need to align them using bounce buffers.
+ * While the data is fetched from the file another request might arrive writing to
+ * the same range. This would result in data corruption if both were executed concurrently.
+ */
+ int rc = VINF_SUCCESS;
+ bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask,
+ true /* fAlignedReq */);
+ if (!fLocked)
+ {
+ /* Get a request handle. */
+ RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
+
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
+ {
+ /* Grow the file if needed. */
+ if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
+ {
+ ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
+ RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
+ }
+
+ rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
+ pTask->Off, pTask->DataSeg.pvSeg,
+ pTask->DataSeg.cbSeg, pTask);
+ }
+ else
+ rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile,
+ pTask->Off, pTask->DataSeg.pvSeg,
+ pTask->DataSeg.cbSeg, pTask);
+ AssertRC(rc);
+
+ rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
+ pTask->DataSeg.cbSeg,
+ pTask, true /* fAlignedReq */);
+
+ if (RT_SUCCESS(rc))
+ {
+ pTask->hReq = hReq;
+ *phReq = hReq;
+ }
+ }
+ else
+ LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
+
+ return rc;
+}
+
+static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
+{
+ /*
+ * Check if the alignment requirements are met.
+ * Offset, transfer size and buffer address
+ * need to be on a 512 boundary.
+ */
+ RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
+ size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
+ PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;
+ bool fAlignedReq = cbToTransfer == pTask->DataSeg.cbSeg
+ && offStart == pTask->Off;
+
+ AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
+ || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
+ ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
+ offStart, cbToTransfer, pEndpoint->cbFile));
+
+ pTask->fPrefetch = false;
+
+ /*
+ * Before we start to set up the request we have to check whether a task
+ * is already active whose range intersects with ours. We have to defer execution
+ * of this task in two cases:
+ * - The pending task is a write and the current one is either a read or a write.
+ * - The pending task is a read and the current one is a write.
+ *
+ * To check whether a range is currently "locked" we use the AVL tree where every pending task
+ * is stored by its file offset range. The current task will be added to the waiting list
+ * of the range lock and will be executed when the active one completes. (The method below
+ * which checks whether a range is already in use will add the task.)
+ *
+ * This is necessary because of the requirement to align all requests to a 512 byte boundary
+ * which is enforced by the host OS (Linux and Windows atm). It is possible that
+ * we have to process unaligned tasks and need to align them using bounce buffers.
+ * While the data is fetched from the file another request might arrive writing to
+ * the same range. This would result in data corruption if both were executed concurrently.
+ */
+ int rc = VINF_SUCCESS;
+ bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
+ if (!fLocked)
+ {
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+ void *pvBuf = pTask->DataSeg.pvSeg;
+
+ /* Get a request handle. */
+ RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
+
+ if ( !fAlignedReq
+ || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
+ {
+ LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
+ pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));
+
+ /* Create bounce buffer. */
+ pTask->cbBounceBuffer = cbToTransfer;
+
+ AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
+ pTask->Off, offStart));
+ pTask->offBounceBuffer = pTask->Off - offStart;
+
+ /** @todo I think we need something like a RTMemAllocAligned method here.
+ * Current assumption is that the maximum alignment is 4096 bytes
+ * (GPT disk on Windows)
+ * so we can use RTMemPageAlloc here.
+ */
+ pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
+ if (RT_LIKELY(pTask->pvBounceBuffer))
+ {
+ pvBuf = pTask->pvBounceBuffer;
+
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
+ {
+ if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
+ || RT_UNLIKELY(offStart != pTask->Off))
+ {
+ /* We have to fill the buffer first before we can update the data. */
+ LogFlow(("Prefetching data for task %#p\n", pTask));
+ pTask->fPrefetch = true;
+ enmTransferType = PDMACTASKFILETRANSFER_READ;
+ }
+ else
+ memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
+ }
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ pTask->cbBounceBuffer = 0;
+
+ if (RT_SUCCESS(rc))
+ {
+ AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
+ ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));
+
+ if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
+ {
+ /* Grow the file if needed. */
+ if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
+ {
+ ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
+ RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
+ }
+
+ rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
+ offStart, pvBuf, cbToTransfer, pTask);
+ }
+ else
+ rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile,
+ offStart, pvBuf, cbToTransfer, pTask);
+ AssertRC(rc);
+
+ rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
+ if (RT_SUCCESS(rc))
+ {
+ pTask->hReq = hReq;
+ *phReq = hReq;
+ }
+ else
+ {
+ /* Cleanup */
+ if (pTask->cbBounceBuffer)
+ RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
+ }
+ }
+ }
+ else
+ LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
+
+ return rc;
+}
+
+static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
+ PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ RTFILEAIOREQ apReqs[20];
+ unsigned cRequests = 0;
+ int rc = VINF_SUCCESS;
+
+ AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
+ ("Trying to process request lists of a non active endpoint!\n"));
+
+ /* Go through the list and queue the requests until we get a flush request */
+ while ( pTaskHead
+ && !pEndpoint->pFlushReq
+ && (pAioMgr->cRequestsActive + cRequests < pAioMgr->cRequestsActiveMax)
+ && RT_SUCCESS(rc))
+ {
+ RTMSINTERVAL msWhenNext;
+ PPDMACTASKFILE pCurr = pTaskHead;
+
+ if (!pdmacEpIsTransferAllowed(&pEndpoint->Core, (uint32_t)pCurr->DataSeg.cbSeg, &msWhenNext))
+ {
+ pAioMgr->msBwLimitExpired = RT_MIN(pAioMgr->msBwLimitExpired, msWhenNext);
+ break;
+ }
+
+ pTaskHead = pTaskHead->pNext;
+
+ pCurr->pNext = NULL;
+
+ AssertMsg(RT_VALID_PTR(pCurr->pEndpoint) && pCurr->pEndpoint == pEndpoint,
+ ("Endpoints do not match\n"));
+
+ switch (pCurr->enmTransferType)
+ {
+ case PDMACTASKFILETRANSFER_FLUSH:
+ {
+ /* If there is no data transfer request pending, this flush request finishes immediately. */
+ if (pEndpoint->fAsyncFlushSupported)
+ {
+ /* Issue a flush to the host. */
+ RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
+
+ LogFlow(("Flush request %#p\n", hReq));
+
+ rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->hFile, pCurr);
+ if (RT_FAILURE(rc))
+ {
+ if (rc == VERR_NOT_SUPPORTED)
+ LogRel(("AIOMgr: Async flushes not supported\n"));
+ else
+ LogRel(("AIOMgr: Preparing flush failed with %Rrc, disabling async flushes\n", rc));
+ pEndpoint->fAsyncFlushSupported = false;
+ pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
+ rc = VINF_SUCCESS; /* Fake success */
+ }
+ else
+ {
+ pCurr->hReq = hReq;
+ apReqs[cRequests] = hReq;
+ pEndpoint->AioMgr.cReqsProcessed++;
+ cRequests++;
+ }
+ }
+
+ if ( !pEndpoint->AioMgr.cRequestsActive
+ && !pEndpoint->fAsyncFlushSupported)
+ {
+ pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
+ pdmacFileTaskFree(pEndpoint, pCurr);
+ }
+ else
+ {
+ Assert(!pEndpoint->pFlushReq);
+ pEndpoint->pFlushReq = pCurr;
+ }
+ break;
+ }
+ case PDMACTASKFILETRANSFER_READ:
+ case PDMACTASKFILETRANSFER_WRITE:
+ {
+ RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
+
+ if (pCurr->hReq == NIL_RTFILEAIOREQ)
+ {
+ if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
+ rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
+ else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
+ rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
+ else
+ AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));
+
+ AssertRC(rc);
+ }
+ else
+ {
+ LogFlow(("Task %#p has I/O request %#p already\n", pCurr, pCurr->hReq));
+ hReq = pCurr->hReq;
+ }
+
+ LogFlow(("Read/Write request %#p\n", hReq));
+
+ if (hReq != NIL_RTFILEAIOREQ)
+ {
+ apReqs[cRequests] = hReq;
+ cRequests++;
+ }
+ break;
+ }
+ default:
+ AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
+ } /* switch transfer type */
+
+ /* Queue the requests if the array is full. */
+ if (cRequests == RT_ELEMENTS(apReqs))
+ {
+ rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
+ cRequests = 0;
+ AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
+ ("Unexpected return code\n"));
+ }
+ }
+
+ if (cRequests)
+ {
+ rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
+ AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
+ ("Unexpected return code rc=%Rrc\n", rc));
+ }
+
+ if (pTaskHead)
+ {
+ /* Add the rest of the tasks to the pending list */
+ pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);
+
+ if (RT_UNLIKELY( pAioMgr->cRequestsActiveMax == pAioMgr->cRequestsActive
+ && !pEndpoint->pFlushReq))
+ {
+#if 0
+ /*
+ * The I/O manager has no room left for more requests
+ * but there are still requests to process.
+ * Create a new I/O manager and let it handle some endpoints.
+ */
+ pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
+#else
+ /* Grow the I/O manager */
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_GROWING;
+#endif
+ }
+ }
+
+ /* Insufficient resources are not fatal. */
+ if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
+ rc = VINF_SUCCESS;
+
+ return rc;
+}
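+
+/*
+ * Submission batching (sketch): with RT_ELEMENTS(apReqs) == 20, a list of,
+ * say, 45 ready tasks is handed to pdmacFileAioMgrNormalReqsEnqueue() in
+ * batches of 20 + 20 + 5; the partial last batch is flushed by the trailing
+ * enqueue call above.
+ */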
+
+/**
+ * Adds all pending requests for the given endpoint
+ * until a flush request is encountered or no
+ * requests are left.
+ *
+ * @returns VBox status code.
+ * @param pAioMgr The async I/O manager for the endpoint
+ * @param pEndpoint The endpoint to get the requests from.
+ */
+static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+{
+ int rc = VINF_SUCCESS;
+ PPDMACTASKFILE pTasksHead = NULL;
+
+ AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
+ ("Trying to process request lists of a non active endpoint!\n"));
+
+ Assert(!pEndpoint->pFlushReq);
+
+ /* Check the pending list first */
+ if (pEndpoint->AioMgr.pReqsPendingHead)
+ {
+ LogFlow(("Queuing pending requests first\n"));
+
+ pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
+ /*
+ * Clear the list as the processing routine will insert them into the list
+ * again if it gets a flush request.
+ */
+ pEndpoint->AioMgr.pReqsPendingHead = NULL;
+ pEndpoint->AioMgr.pReqsPendingTail = NULL;
+ rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
+ AssertRC(rc); /** @todo r=bird: status code potentially overwritten. */
+ }
+
+ if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
+ {
+ /* Now the request queue. */
+ pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
+ if (pTasksHead)
+ {
+ rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
+ AssertRC(rc);
+ }
+ }
+
+ return rc;
+}
+
+static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
+{
+ int rc = VINF_SUCCESS;
+ bool fNotifyWaiter = false;
+
+ LogFlowFunc((": Enter\n"));
+
+ Assert(pAioMgr->fBlockingEventPending);
+
+ switch (pAioMgr->enmBlockingEvent)
+ {
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
+ AssertMsg(RT_VALID_PTR(pEndpointNew), ("Adding endpoint event without an endpoint to add\n"));
+
+ pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;
+
+ pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
+ pEndpointNew->AioMgr.pEndpointPrev = NULL;
+ if (pAioMgr->pEndpointsHead)
+ pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
+ pAioMgr->pEndpointsHead = pEndpointNew;
+
+ /* Assign the completion point to this file. */
+ rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->hFile);
+ fNotifyWaiter = true;
+ pAioMgr->cEndpoints++;
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
+ AssertMsg(RT_VALID_PTR(pEndpointRemove), ("Removing endpoint event without an endpoint to remove\n"));
+
+ pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
+ fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
+ AssertMsg(RT_VALID_PTR(pEndpointClose), ("Close endpoint event without an endpoint to close\n"));
+
+ if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
+ {
+ LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));
+
+ /* Make sure all tasks finished. Process the queues one last time first. */
+ rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
+ AssertRC(rc);
+
+ pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
+ fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
+ }
+ else if ( (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
+ && (!pEndpointClose->AioMgr.cRequestsActive))
+ fNotifyWaiter = true;
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
+ {
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
+ if (!pAioMgr->cRequestsActive)
+ fNotifyWaiter = true;
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
+ {
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
+ break;
+ }
+ case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
+ {
+ pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
+ fNotifyWaiter = true;
+ break;
+ }
+ default:
+ AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
+ }
+
+ if (fNotifyWaiter)
+ {
+ ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
+ pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;
+
+ /* Release the waiting thread. */
+ LogFlow(("Signalling waiter\n"));
+ rc = RTSemEventSignal(pAioMgr->EventSemBlock);
+ AssertRC(rc);
+ }
+
+ LogFlowFunc((": Leave\n"));
+ return rc;
+}
+
+/**
+ * Checks all endpoints for pending events or new requests.
+ *
+ * @returns VBox status code.
+ * @param pAioMgr The I/O manager handle.
+ */
+static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
+{
+ /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
+ int rc = VINF_SUCCESS;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
+
+ pAioMgr->msBwLimitExpired = RT_INDEFINITE_WAIT;
+
+ while (pEndpoint)
+ {
+ if (!pEndpoint->pFlushReq
+ && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
+ && !pEndpoint->AioMgr.fMoving)
+ {
+ rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ else if ( !pEndpoint->AioMgr.cRequestsActive
+ && pEndpoint->enmState != PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
+ {
+ /* Reopen the file so that the new endpoint can re-associate with the file */
+ RTFileClose(pEndpoint->hFile);
+ rc = RTFileOpen(&pEndpoint->hFile, pEndpoint->Core.pszUri, pEndpoint->fFlags);
+ AssertRC(rc);
+
+ if (pEndpoint->AioMgr.fMoving)
+ {
+ pEndpoint->AioMgr.fMoving = false;
+ pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
+ }
+ else
+ {
+ Assert(pAioMgr->fBlockingEventPending);
+ ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
+
+ /* Release the waiting thread. */
+ LogFlow(("Signalling waiter\n"));
+ rc = RTSemEventSignal(pAioMgr->EventSemBlock);
+ AssertRC(rc);
+ }
+ }
+
+ pEndpoint = pEndpoint->AioMgr.pEndpointNext;
+ }
+
+ return rc;
+}
+
+/**
+ * Wrapper around pdmacFileAioMgrNormalReqCompleteRc().
+ */
+static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
+{
+ size_t cbTransfered = 0;
+ int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
+
+ pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, hReq, rcReq, cbTransfered);
+}
+
+static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
+ int rcReq, size_t cbTransfered)
+{
+ int rc = VINF_SUCCESS;
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
+ PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
+ PPDMACTASKFILE pTasksWaiting;
+
+ LogFlowFunc(("pAioMgr=%#p hReq=%#p\n", pAioMgr, hReq));
+
+ pEndpoint = pTask->pEndpoint;
+
+ pTask->hReq = NIL_RTFILEAIOREQ;
+
+ pAioMgr->cRequestsActive--;
+ pEndpoint->AioMgr.cRequestsActive--;
+ pEndpoint->AioMgr.cReqsProcessed++;
+
+ /*
+ * It is possible that the request failed on Linux with kernels < 2.6.23
+ * if the passed buffer was allocated with remap_pfn_range or if the file
+ * is on an NFS endpoint which does not support async and direct I/O at the same time.
+ * The endpoint will be migrated to a failsafe manager in case a request fails.
+ */
+ if (RT_FAILURE(rcReq))
+ {
+ /* Free bounce buffers and the IPRT request. */
+ pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
+
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
+ {
+ LogRel(("AIOMgr: Flush failed with %Rrc, disabling async flushes\n", rcReq));
+ pEndpoint->fAsyncFlushSupported = false;
+ AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
+ /* The other method will take over now. */
+
+ pEndpoint->pFlushReq = NULL;
+ /* Call completion callback */
+ LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, VINF_SUCCESS));
+ pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
+ pdmacFileTaskFree(pEndpoint, pTask);
+ }
+ else
+ {
+ /* Free the lock and process pending tasks if necessary */
+ pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
+ rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
+ AssertRC(rc);
+
+ if (pTask->cbBounceBuffer)
+ RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
+
+ /*
+ * Fatal errors are reported to the guest and non-fatal errors
+ * will cause a migration to the failsafe manager in the hope
+ * that the error disappears.
+ */
+ if (!pdmacFileAioMgrNormalRcIsFatal(rcReq))
+ {
+ /* Queue the request on the pending list. */
+ pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
+ pEndpoint->AioMgr.pReqsPendingHead = pTask;
+
+ /* Create a new failsafe manager if necessary. */
+ if (!pEndpoint->AioMgr.fMoving)
+ {
+ PPDMACEPFILEMGR pAioMgrFailsafe;
+
+ LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
+ RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));
+
+ pEndpoint->AioMgr.fMoving = true;
+
+ rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
+ &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
+ AssertRC(rc);
+
+ pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;
+
+ /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
+ pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
+ }
+
+ /* If this was the last request for the endpoint migrate it to the new manager. */
+ if (!pEndpoint->AioMgr.cRequestsActive)
+ {
+ bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
+ Assert(!fReqsPending); NOREF(fReqsPending);
+
+ rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
+ AssertRC(rc);
+ }
+ }
+ else
+ {
+ pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
+ pdmacFileTaskFree(pEndpoint, pTask);
+ }
+ }
+ }
+ else
+ {
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
+ {
+ /* Clear pending flush */
+ AssertMsg(pEndpoint->pFlushReq == pTask, ("Completed flush request doesn't match active one\n"));
+ pEndpoint->pFlushReq = NULL;
+ pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
+
+ /* Call completion callback */
+ LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, rcReq));
+ pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
+ pdmacFileTaskFree(pEndpoint, pTask);
+ }
+ else
+ {
+ /*
+ * Restart an incomplete transfer.
+ * This usually means that the request will return an error now
+ * but to get the cause of the error (disk full, file too big, I/O error, ...)
+ * the transfer needs to be continued.
+ */
+ pTask->cbTransfered += cbTransfered;
+
+ if (RT_UNLIKELY( pTask->cbTransfered < pTask->DataSeg.cbSeg
+ || ( pTask->cbBounceBuffer
+ && pTask->cbTransfered < pTask->cbBounceBuffer)))
+ {
+ RTFOFF offStart;
+ size_t cbToTransfer;
+ uint8_t *pbBuf = NULL;
+
+ LogFlow(("Restarting incomplete transfer %#p (%zu bytes transferred)\n",
+ pTask, cbTransfered));
+ Assert(cbTransfered % 512 == 0);
+
+ if (pTask->cbBounceBuffer)
+ {
+ AssertPtr(pTask->pvBounceBuffer);
+ offStart = (pTask->Off & ~((RTFOFF)512-1)) + pTask->cbTransfered;
+ cbToTransfer = pTask->cbBounceBuffer - pTask->cbTransfered;
+ pbBuf = (uint8_t *)pTask->pvBounceBuffer + pTask->cbTransfered;
+ }
+ else
+ {
+ Assert(!pTask->pvBounceBuffer);
+ offStart = pTask->Off + pTask->cbTransfered;
+ cbToTransfer = pTask->DataSeg.cbSeg - pTask->cbTransfered;
+ pbBuf = (uint8_t *)pTask->DataSeg.pvSeg + pTask->cbTransfered;
+ }
+
+ if (pTask->fPrefetch || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
+ {
+ rc = RTFileAioReqPrepareRead(hReq, pEndpoint->hFile, offStart,
+ pbBuf, cbToTransfer, pTask);
+ }
+ else
+ {
+ AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE,
+ ("Invalid transfer type\n"));
+ rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile, offStart,
+ pbBuf, cbToTransfer, pTask);
+ }
+ AssertRC(rc);
+
+ pTask->hReq = hReq;
+ rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
+ AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
+ ("Unexpected return code rc=%Rrc\n", rc));
+ }
+ else if (pTask->fPrefetch)
+ {
+ Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
+ Assert(pTask->cbBounceBuffer);
+
+ memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
+ pTask->DataSeg.pvSeg,
+ pTask->DataSeg.cbSeg);
+
+ /* Write it now. */
+ pTask->fPrefetch = false;
+ RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
+ size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
+
+ pTask->cbTransfered = 0;
+
+ /* Grow the file if needed. */
+ if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
+ {
+ ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
+ RTFileSetSize(pEndpoint->hFile, pTask->Off + pTask->DataSeg.cbSeg);
+ }
+
+ rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->hFile,
+ offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
+ AssertRC(rc);
+ pTask->hReq = hReq;
+ rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
+ AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
+ ("Unexpected return code rc=%Rrc\n", rc));
+ }
+ else
+ {
+ if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
+ {
+ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
+ memcpy(pTask->DataSeg.pvSeg,
+ ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
+ pTask->DataSeg.cbSeg);
+
+ RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
+ }
+
+ pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
+
+ /* Free the lock and process pending tasks if necessary */
+ pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
+ if (pTasksWaiting)
+ {
+ rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
+ AssertRC(rc);
+ }
+
+ /* Call completion callback */
+ LogFlow(("Task=%#p completed with %Rrc\n", pTask, rcReq));
+ pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
+ pdmacFileTaskFree(pEndpoint, pTask);
+
+ /*
+ * If there are no requests left on the endpoint but a flush request is set,
+ * it has completed now and we notify the owner.
+ * Furthermore we look for new requests and continue.
+ */
+ if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
+ {
+ /* Call completion callback */
+ pTask = pEndpoint->pFlushReq;
+ pEndpoint->pFlushReq = NULL;
+
+ AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));
+
+ pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
+ pdmacFileTaskFree(pEndpoint, pTask);
+ }
+ else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
+ {
+ /* If the endpoint is about to be migrated do it now. */
+ bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
+ Assert(!fReqsPending); NOREF(fReqsPending);
+
+ rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
+ AssertRC(rc);
+ }
+ }
+ } /* Not a flush request */
+ } /* request completed successfully */
+}
+
+/** Helper macro for checking for error codes. */
+#define CHECK_RC(pAioMgr, rc) \
+ if (RT_FAILURE(rc)) \
+ {\
+ int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS);\
+ return rc2;\
+ }
+
+/**
+ * The normal I/O manager using the RTFileAio* API
+ *
+ * @returns VBox status code.
+ * @param hThreadSelf Handle of the thread.
+ * @param pvUser Opaque user data.
+ */
+DECLCALLBACK(int) pdmacFileAioMgrNormal(RTTHREAD hThreadSelf, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
+ uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
+ NOREF(hThreadSelf);
+
+ while ( pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
+ || pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING
+ || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
+ {
+ if (!pAioMgr->cRequestsActive)
+ {
+ ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
+ if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
+ rc = RTSemEventWait(pAioMgr->EventSem, pAioMgr->msBwLimitExpired);
+ ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
+ Assert(RT_SUCCESS(rc) || rc == VERR_TIMEOUT);
+
+ LogFlow(("Got woken up\n"));
+ ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
+ }
+
+ /* Check for an external blocking event first. */
+ if (pAioMgr->fBlockingEventPending)
+ {
+ rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
+ CHECK_RC(pAioMgr, rc);
+ }
+
+ if (RT_LIKELY( pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
+ || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
+ {
+ /* We got woken up because an endpoint issued new requests. Queue them. */
+ rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
+ CHECK_RC(pAioMgr, rc);
+
+ while (pAioMgr->cRequestsActive)
+ {
+ RTFILEAIOREQ apReqs[20];
+ uint32_t cReqsCompleted = 0;
+ size_t cReqsWait;
+
+ if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
+ cReqsWait = RT_ELEMENTS(apReqs);
+ else
+ cReqsWait = pAioMgr->cRequestsActive;
+
+ LogFlow(("Waiting for %d of %d tasks to complete\n", 1, cReqsWait));
+
+ rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
+ 1,
+ RT_INDEFINITE_WAIT, apReqs,
+ cReqsWait, &cReqsCompleted);
+ if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
+ CHECK_RC(pAioMgr, rc);
+
+ LogFlow(("%d tasks completed\n", cReqsCompleted));
+
+ for (uint32_t i = 0; i < cReqsCompleted; i++)
+ pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);
+
+ /* Check for an external blocking event before we go to sleep again. */
+ if (pAioMgr->fBlockingEventPending)
+ {
+ rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
+ CHECK_RC(pAioMgr, rc);
+ }
+
+ /* Update load statistics. */
+ uint64_t uMillisCurr = RTTimeMilliTS();
+ if (uMillisCurr > uMillisEnd)
+ {
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;
+
+ /* Calculate timespan. */
+ uMillisCurr -= uMillisEnd;
+
+ while (pEndpointCurr)
+ {
+ pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
+ pEndpointCurr->AioMgr.cReqsProcessed = 0;
+ pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
+ }
+
+ /* Set new update interval */
+ uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
+ }
+
+ /* Check endpoints for new requests. */
+ if (pAioMgr->enmState != PDMACEPFILEMGRSTATE_GROWING)
+ {
+ rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
+ CHECK_RC(pAioMgr, rc);
+ }
+ } /* while requests are active. */
+
+ if (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
+ {
+ rc = pdmacFileAioMgrNormalGrow(pAioMgr);
+ AssertRC(rc);
+ Assert(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING);
+
+ rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
+ CHECK_RC(pAioMgr, rc);
+ }
+ } /* if still running */
+ } /* while running */
+
+ LogFlowFunc(("rc=%Rrc\n", rc));
+ return rc;
+}
+
+#undef CHECK_RC
+
diff --git a/src/VBox/VMM/VMMR3/PDMBlkCache.cpp b/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
new file mode 100644
index 00000000..98073f31
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
@@ -0,0 +1,2802 @@
+/* $Id: PDMBlkCache.cpp $ */
+/** @file
+ * PDM Block Cache.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_pdm_block_cache PDM Block Cache - The I/O cache
+ * This component implements an I/O cache based on the 2Q cache algorithm.
+ */
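+
+/*
+ * Terminology note (based on the standard 2Q scheme of Johnson & Shasha; the
+ * mapping to the list names used below is an interpretation):
+ * LruRecentlyUsedIn plays the role of the A1in queue for first-time accesses,
+ * LruRecentlyUsedOut the A1out ghost queue keeping only metadata of evicted
+ * entries, and LruFrequentlyUsed the Am queue for entries which are accessed
+ * again.
+ */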
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_BLK_CACHE
+#include "PDMInternal.h"
+#include <iprt/asm.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+#include <iprt/trace.h>
+#include <VBox/log.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
+
+#include "PDMBlkCacheInternal.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#ifdef VBOX_STRICT
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) \
+ do \
+ { \
+ AssertMsg(RTCritSectIsOwner(&Cache->CritSect), \
+ ("Thread does not own critical section\n"));\
+ } while (0)
+
+# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) \
+ do \
+ { \
+ AssertMsg(RTSemRWIsWriteOwner(pEpCache->SemRWEntries), \
+ ("Thread is not exclusive owner of the per endpoint RW semaphore\n")); \
+ } while (0)
+
+# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) \
+ do \
+ { \
+ AssertMsg(RTSemRWIsReadOwner(pEpCache->SemRWEntries), \
+ ("Thread is not read owner of the per endpoint RW semaphore\n")); \
+ } while (0)
+
+#else
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while (0)
+# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) do { } while (0)
+# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) do { } while (0)
+#endif
+
+#define PDM_BLK_CACHE_SAVED_STATE_VERSION 1
+
+/* Define this to enable some tracing in the block cache code for investigating issues. */
+/*#define VBOX_BLKCACHE_TRACING 1*/
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+static PPDMBLKCACHEENTRY pdmBlkCacheEntryAlloc(PPDMBLKCACHE pBlkCache,
+ uint64_t off, size_t cbData, uint8_t *pbBuffer);
+static bool pdmBlkCacheAddDirtyEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry);
+
+
+/**
+ * Add message to the VM trace buffer.
+ *
+ * @param pBlkCache The block cache.
+ * @param pszFmt The format string.
+ * @param ... Additional parameters for the string formatter.
+ */
+DECLINLINE(void) pdmBlkCacheR3TraceMsgF(PPDMBLKCACHE pBlkCache, const char *pszFmt, ...)
+{
+#if defined(VBOX_BLKCACHE_TRACING)
+ va_list va;
+ va_start(va, pszFmt);
+ RTTraceBufAddMsgV(pBlkCache->pCache->pVM->CTX_SUFF(hTraceBuf), pszFmt, va);
+ va_end(va);
+#else
+ RT_NOREF2(pBlkCache, pszFmt);
+#endif
+}
+
+/**
+ * Decrement the reference counter of the given cache entry.
+ *
+ * @param pEntry The entry to release.
+ */
+DECLINLINE(void) pdmBlkCacheEntryRelease(PPDMBLKCACHEENTRY pEntry)
+{
+ AssertMsg(pEntry->cRefs > 0, ("Trying to release an entry which is not referenced\n"));
+ ASMAtomicDecU32(&pEntry->cRefs);
+}
+
+/**
+ * Increment the reference counter of the given cache entry.
+ *
+ * @param pEntry The entry to reference.
+ */
+DECLINLINE(void) pdmBlkCacheEntryRef(PPDMBLKCACHEENTRY pEntry)
+{
+ ASMAtomicIncU32(&pEntry->cRefs);
+}
+
+#ifdef VBOX_STRICT
+static void pdmBlkCacheValidate(PPDMBLKCACHEGLOBAL pCache)
+{
+ /* Amount of cached data should never exceed the maximum amount. */
+ AssertMsg(pCache->cbCached <= pCache->cbMax,
+ ("Current amount of cached data exceeds maximum\n"));
+
+ /* The amount of cached data in the LRU and FRU list should match cbCached */
+ AssertMsg(pCache->LruRecentlyUsedIn.cbCached + pCache->LruFrequentlyUsed.cbCached == pCache->cbCached,
+ ("Amount of cached data doesn't match\n"));
+
+ AssertMsg(pCache->LruRecentlyUsedOut.cbCached <= pCache->cbRecentlyUsedOutMax,
+ ("Paged out list exceeds maximum\n"));
+}
+#endif
+
+DECLINLINE(void) pdmBlkCacheLockEnter(PPDMBLKCACHEGLOBAL pCache)
+{
+ RTCritSectEnter(&pCache->CritSect);
+#ifdef VBOX_STRICT
+ pdmBlkCacheValidate(pCache);
+#endif
+}
+
+DECLINLINE(void) pdmBlkCacheLockLeave(PPDMBLKCACHEGLOBAL pCache)
+{
+#ifdef VBOX_STRICT
+ pdmBlkCacheValidate(pCache);
+#endif
+ RTCritSectLeave(&pCache->CritSect);
+}
+
+DECLINLINE(void) pdmBlkCacheSub(PPDMBLKCACHEGLOBAL pCache, uint32_t cbAmount)
+{
+ PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+ pCache->cbCached -= cbAmount;
+}
+
+DECLINLINE(void) pdmBlkCacheAdd(PPDMBLKCACHEGLOBAL pCache, uint32_t cbAmount)
+{
+ PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+ pCache->cbCached += cbAmount;
+}
+
+DECLINLINE(void) pdmBlkCacheListAdd(PPDMBLKLRULIST pList, uint32_t cbAmount)
+{
+ pList->cbCached += cbAmount;
+}
+
+DECLINLINE(void) pdmBlkCacheListSub(PPDMBLKLRULIST pList, uint32_t cbAmount)
+{
+ pList->cbCached -= cbAmount;
+}
+
+#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
+/**
+ * Checks the consistency of an LRU list.
+ *
+ * @param pList The LRU list to check.
+ * @param pNotInList Element which is not allowed to occur in the list.
+ */
+static void pdmBlkCacheCheckList(PPDMBLKLRULIST pList, PPDMBLKCACHEENTRY pNotInList)
+{
+ PPDMBLKCACHEENTRY pCurr = pList->pHead;
+
+ /* Check that there are no duplicate entries and no cycles in the list. */
+ while (pCurr)
+ {
+ PPDMBLKCACHEENTRY pNext = pCurr->pNext;
+
+ while (pNext)
+ {
+ AssertMsg(pCurr != pNext,
+ ("Entry %#p is at least two times in list %#p or there is a cycle in the list\n",
+ pCurr, pList));
+ pNext = pNext->pNext;
+ }
+
+ AssertMsg(pCurr != pNotInList, ("Not allowed entry %#p is in list\n", pCurr));
+
+ if (!pCurr->pNext)
+ AssertMsg(pCurr == pList->pTail, ("End of list reached but last element is not list tail\n"));
+
+ pCurr = pCurr->pNext;
+ }
+}
+#endif
+
+/**
+ * Unlinks a cache entry from the LRU list it is assigned to.
+ *
+ * @param pEntry The entry to unlink.
+ */
+static void pdmBlkCacheEntryRemoveFromList(PPDMBLKCACHEENTRY pEntry)
+{
+ PPDMBLKLRULIST pList = pEntry->pList;
+ PPDMBLKCACHEENTRY pPrev, pNext;
+
+ LogFlowFunc((": Deleting entry %#p from list %#p\n", pEntry, pList));
+
+ AssertPtr(pList);
+
+#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
+ pdmBlkCacheCheckList(pList, NULL);
+#endif
+
+ pPrev = pEntry->pPrev;
+ pNext = pEntry->pNext;
+
+ AssertMsg(pEntry != pPrev, ("Entry links to itself as previous element\n"));
+ AssertMsg(pEntry != pNext, ("Entry links to itself as next element\n"));
+
+ if (pPrev)
+ pPrev->pNext = pNext;
+ else
+ {
+ pList->pHead = pNext;
+
+ if (pNext)
+ pNext->pPrev = NULL;
+ }
+
+ if (pNext)
+ pNext->pPrev = pPrev;
+ else
+ {
+ pList->pTail = pPrev;
+
+ if (pPrev)
+ pPrev->pNext = NULL;
+ }
+
+ pEntry->pList = NULL;
+ pEntry->pPrev = NULL;
+ pEntry->pNext = NULL;
+ pdmBlkCacheListSub(pList, pEntry->cbData);
+#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
+ pdmBlkCacheCheckList(pList, pEntry);
+#endif
+}
+
+/**
+ * Adds a cache entry to the given LRU list unlinking it from the currently
+ * assigned list if needed.
+ *
+ * @param pList The list to add the entry to.
+ * @param pEntry Entry to add.
+ */
+static void pdmBlkCacheEntryAddToList(PPDMBLKLRULIST pList, PPDMBLKCACHEENTRY pEntry)
+{
+ LogFlowFunc((": Adding entry %#p to list %#p\n", pEntry, pList));
+#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
+ pdmBlkCacheCheckList(pList, NULL);
+#endif
+
+ /* Remove from old list if needed */
+ if (pEntry->pList)
+ pdmBlkCacheEntryRemoveFromList(pEntry);
+
+ pEntry->pNext = pList->pHead;
+ if (pList->pHead)
+ pList->pHead->pPrev = pEntry;
+ else
+ {
+ Assert(!pList->pTail);
+ pList->pTail = pEntry;
+ }
+
+ pEntry->pPrev = NULL;
+ pList->pHead = pEntry;
+ pdmBlkCacheListAdd(pList, pEntry->cbData);
+ pEntry->pList = pList;
+#ifdef PDMACFILECACHE_WITH_LRULIST_CHECKS
+ pdmBlkCacheCheckList(pList, NULL);
+#endif
+}
+
+/**
+ * Destroys a LRU list freeing all entries.
+ *
+ * @param pList Pointer to the LRU list to destroy.
+ *
+ * @note The caller must own the critical section of the cache.
+ */
+static void pdmBlkCacheDestroyList(PPDMBLKLRULIST pList)
+{
+ while (pList->pHead)
+ {
+ PPDMBLKCACHEENTRY pEntry = pList->pHead;
+
+ pList->pHead = pEntry->pNext;
+
+ AssertMsg(!(pEntry->fFlags & (PDMBLKCACHE_ENTRY_IO_IN_PROGRESS | PDMBLKCACHE_ENTRY_IS_DIRTY)),
+ ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
+
+ RTMemPageFree(pEntry->pbData, pEntry->cbData);
+ RTMemFree(pEntry);
+ }
+}
+
+/**
+ * Tries to remove the given number of bytes from the given list in the cache,
+ * moving evicted entries to the given ghost list.
+ *
+ * @returns Amount of data which could be freed.
+ * @param pCache Pointer to the global cache data.
+ * @param cbData The number of bytes to free.
+ * @param pListSrc The source list to evict data from.
+ * @param pGhostListDst Where the ghost list removed entries should be
+ * moved to, NULL if the entry should be freed.
+ * @param fReuseBuffer Flag whether a buffer should be reused if it has
+ * the same size
+ * @param ppbBuffer Where to store the address of the buffer if an
+ * entry with the same size was found and
+ * fReuseBuffer is true.
+ *
+ * @note This function may return fewer bytes than requested because entries
+ * may be marked as non-evictable while they are used for I/O.
+ */
+static size_t pdmBlkCacheEvictPagesFrom(PPDMBLKCACHEGLOBAL pCache, size_t cbData,
+ PPDMBLKLRULIST pListSrc, PPDMBLKLRULIST pGhostListDst,
+ bool fReuseBuffer, uint8_t **ppbBuffer)
+{
+ size_t cbEvicted = 0;
+
+ PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+
+ AssertMsg(cbData > 0, ("Evicting 0 bytes not possible\n"));
+ AssertMsg( !pGhostListDst
+ || (pGhostListDst == &pCache->LruRecentlyUsedOut),
+ ("Destination list must be NULL or the recently used but paged out list\n"));
+
+ if (fReuseBuffer)
+ {
+ AssertPtr(ppbBuffer);
+ *ppbBuffer = NULL;
+ }
+
+ /* Start deleting from the tail. */
+ PPDMBLKCACHEENTRY pEntry = pListSrc->pTail;
+
+ while ((cbEvicted < cbData) && pEntry)
+ {
+ PPDMBLKCACHEENTRY pCurr = pEntry;
+
+ pEntry = pEntry->pPrev;
+
+ /* We can't evict entries which are still referenced or marked non-evictable (dirty or with I/O in progress). */
+ if ( !(pCurr->fFlags & PDMBLKCACHE_NOT_EVICTABLE)
+ && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
+ {
+ /* Ok eviction candidate. Grab the endpoint semaphore and check again
+ * because somebody else might have raced us. */
+ PPDMBLKCACHE pBlkCache = pCurr->pBlkCache;
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+
+ if (!(pCurr->fFlags & PDMBLKCACHE_NOT_EVICTABLE)
+ && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
+ {
+ LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
+
+ if (fReuseBuffer && pCurr->cbData == cbData)
+ {
+ STAM_COUNTER_INC(&pCache->StatBuffersReused);
+ *ppbBuffer = pCurr->pbData;
+ }
+ else if (pCurr->pbData)
+ RTMemPageFree(pCurr->pbData, pCurr->cbData);
+
+ pCurr->pbData = NULL;
+ cbEvicted += pCurr->cbData;
+
+ pdmBlkCacheEntryRemoveFromList(pCurr);
+ pdmBlkCacheSub(pCache, pCurr->cbData);
+
+ if (pGhostListDst)
+ {
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ PPDMBLKCACHEENTRY pGhostEntFree = pGhostListDst->pTail;
+
+ /* We have to remove the last entries from the paged out list. */
+ while ( pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax
+ && pGhostEntFree)
+ {
+ PPDMBLKCACHEENTRY pFree = pGhostEntFree;
+ PPDMBLKCACHE pBlkCacheFree = pFree->pBlkCache;
+
+ pGhostEntFree = pGhostEntFree->pPrev;
+
+ RTSemRWRequestWrite(pBlkCacheFree->SemRWEntries, RT_INDEFINITE_WAIT);
+
+ if (ASMAtomicReadU32(&pFree->cRefs) == 0)
+ {
+ pdmBlkCacheEntryRemoveFromList(pFree);
+
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCacheFree->pTree, pFree->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+
+ RTMemFree(pFree);
+ }
+
+ RTSemRWReleaseWrite(pBlkCacheFree->SemRWEntries);
+ }
+
+ if (pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax)
+ {
+ /* Couldn't remove enough entries; delete this one outright. */
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pCurr->pBlkCache->pTree, pCurr->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+
+ RTMemFree(pCurr);
+ }
+ else
+ pdmBlkCacheEntryAddToList(pGhostListDst, pCurr);
+ }
+ else
+ {
+ /* Delete the entry from the AVL tree it is assigned to. */
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pCurr->pBlkCache->pTree, pCurr->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ RTMemFree(pCurr);
+ }
+ }
+ else
+ {
+ LogFlow(("Someone raced us, entry %#p (%u bytes) cannot be evicted any more (fFlags=%#x cRefs=%#x)\n",
+ pCurr, pCurr->cbData, pCurr->fFlags, pCurr->cRefs));
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ }
+
+ }
+ else
+ LogFlow(("Entry %#p (%u bytes) is still in progress and can't be evicted\n", pCurr, pCurr->cbData));
+ }
+
+ return cbEvicted;
+}
+
+static bool pdmBlkCacheReclaim(PPDMBLKCACHEGLOBAL pCache, size_t cbData, bool fReuseBuffer, uint8_t **ppbBuffer)
+{
+ size_t cbRemoved = 0;
+
+ if ((pCache->cbCached + cbData) < pCache->cbMax)
+ return true;
+ else if ((pCache->LruRecentlyUsedIn.cbCached + cbData) > pCache->cbRecentlyUsedInMax)
+ {
+ /* Try to evict as many bytes as possible from A1in */
+ cbRemoved = pdmBlkCacheEvictPagesFrom(pCache, cbData, &pCache->LruRecentlyUsedIn,
+ &pCache->LruRecentlyUsedOut, fReuseBuffer, ppbBuffer);
+
+ /*
+ * If it was not possible to remove enough entries
+ * try the frequently accessed cache.
+ */
+ if (cbRemoved < cbData)
+ {
+ Assert(!fReuseBuffer || !*ppbBuffer); /* It is not possible that we got a buffer with the correct size but didn't free enough data. */
+
+ /*
+ * If we already removed something we can't pass the reuse buffer flag on
+ * because the remaining amount to evict no longer matches the buffer size.
+ */
+ if (!cbRemoved)
+ cbRemoved += pdmBlkCacheEvictPagesFrom(pCache, cbData, &pCache->LruFrequentlyUsed,
+ NULL, fReuseBuffer, ppbBuffer);
+ else
+ cbRemoved += pdmBlkCacheEvictPagesFrom(pCache, cbData - cbRemoved, &pCache->LruFrequentlyUsed,
+ NULL, false, NULL);
+ }
+ }
+ else
+ {
+ /* We have to remove entries from the frequently accessed list. */
+ cbRemoved = pdmBlkCacheEvictPagesFrom(pCache, cbData, &pCache->LruFrequentlyUsed,
+ NULL, fReuseBuffer, ppbBuffer);
+ }
+
+ LogFlowFunc((": removed %u bytes, requested %u\n", cbRemoved, cbData));
+ return (cbRemoved >= cbData);
+}
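+
+/*
+ * Worked example for the reclaim logic above, using the defaults configured
+ * in pdmR3BlkCacheInit() below: cbMax = 5 MB, so the recently-used-in list
+ * (A1in) may hold 25% = 1.25 MB and the ghost list (A1out) 50% = 2.5 MB.
+ * A new 64 KB entry with the cache full and A1in over its limit first evicts
+ * from the tail of A1in into the ghost list (the data buffers are freed or
+ * reused, only the metadata moves); anything still missing is then evicted
+ * from the frequently-used list (Am) and freed outright.
+ */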
+
+DECLINLINE(int) pdmBlkCacheEnqueue(PPDMBLKCACHE pBlkCache, uint64_t off, size_t cbXfer, PPDMBLKCACHEIOXFER pIoXfer)
+{
+ int rc = VINF_SUCCESS;
+
+ LogFlowFunc(("%s: Enqueuing hIoXfer=%#p enmXferDir=%d\n",
+ __FUNCTION__, pIoXfer, pIoXfer->enmXferDir));
+
+ ASMAtomicIncU32(&pBlkCache->cIoXfersActive);
+ pdmBlkCacheR3TraceMsgF(pBlkCache, "BlkCache: I/O req %#p (%RTbool , %d) queued (%u now active)",
+ pIoXfer, pIoXfer->fIoCache, pIoXfer->enmXferDir, pBlkCache->cIoXfersActive);
+
+ switch (pBlkCache->enmType)
+ {
+ case PDMBLKCACHETYPE_DEV:
+ {
+ rc = pBlkCache->u.Dev.pfnXferEnqueue(pBlkCache->u.Dev.pDevIns,
+ pIoXfer->enmXferDir,
+ off, cbXfer,
+ &pIoXfer->SgBuf, pIoXfer);
+ break;
+ }
+ case PDMBLKCACHETYPE_DRV:
+ {
+ rc = pBlkCache->u.Drv.pfnXferEnqueue(pBlkCache->u.Drv.pDrvIns,
+ pIoXfer->enmXferDir,
+ off, cbXfer,
+ &pIoXfer->SgBuf, pIoXfer);
+ break;
+ }
+ case PDMBLKCACHETYPE_USB:
+ {
+ rc = pBlkCache->u.Usb.pfnXferEnqueue(pBlkCache->u.Usb.pUsbIns,
+ pIoXfer->enmXferDir,
+ off, cbXfer,
+ &pIoXfer->SgBuf, pIoXfer);
+ break;
+ }
+ case PDMBLKCACHETYPE_INTERNAL:
+ {
+ rc = pBlkCache->u.Int.pfnXferEnqueue(pBlkCache->u.Int.pvUser,
+ pIoXfer->enmXferDir,
+ off, cbXfer,
+ &pIoXfer->SgBuf, pIoXfer);
+ break;
+ }
+ default:
+ AssertMsgFailed(("Unknown block cache type!\n"));
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ pdmBlkCacheR3TraceMsgF(pBlkCache, "BlkCache: Queueing I/O req %#p failed %Rrc", pIoXfer, rc);
+ ASMAtomicDecU32(&pBlkCache->cIoXfersActive);
+ }
+
+ LogFlowFunc(("%s: returns rc=%Rrc\n", __FUNCTION__, rc));
+ return rc;
+}
+
+/**
+ * Initiates a read I/O task for the given entry.
+ *
+ * @returns VBox status code.
+ * @param pEntry The entry to fetch the data to.
+ */
+static int pdmBlkCacheEntryReadFromMedium(PPDMBLKCACHEENTRY pEntry)
+{
+ PPDMBLKCACHE pBlkCache = pEntry->pBlkCache;
+ LogFlowFunc((": Reading data into cache entry %#p\n", pEntry));
+
+ /* Make sure no one evicts the entry while it is accessed. */
+ pEntry->fFlags |= PDMBLKCACHE_ENTRY_IO_IN_PROGRESS;
+
+ PPDMBLKCACHEIOXFER pIoXfer = (PPDMBLKCACHEIOXFER)RTMemAllocZ(sizeof(PDMBLKCACHEIOXFER));
+ if (RT_UNLIKELY(!pIoXfer))
+ return VERR_NO_MEMORY;
+
+ AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
+
+ pIoXfer->fIoCache = true;
+ pIoXfer->pEntry = pEntry;
+ pIoXfer->SgSeg.pvSeg = pEntry->pbData;
+ pIoXfer->SgSeg.cbSeg = pEntry->cbData;
+ pIoXfer->enmXferDir = PDMBLKCACHEXFERDIR_READ;
+ RTSgBufInit(&pIoXfer->SgBuf, &pIoXfer->SgSeg, 1);
+
+ return pdmBlkCacheEnqueue(pBlkCache, pEntry->Core.Key, pEntry->cbData, pIoXfer);
+}
+
+/**
+ * Initiates a write I/O task for the given entry.
+ *
+ * @returns VBox status code.
+ * @param pEntry The entry to read the data from.
+ */
+static int pdmBlkCacheEntryWriteToMedium(PPDMBLKCACHEENTRY pEntry)
+{
+ PPDMBLKCACHE pBlkCache = pEntry->pBlkCache;
+ LogFlowFunc((": Writing data from cache entry %#p\n", pEntry));
+
+ /* Make sure no one evicts the entry while it is accessed. */
+ pEntry->fFlags |= PDMBLKCACHE_ENTRY_IO_IN_PROGRESS;
+
+ PPDMBLKCACHEIOXFER pIoXfer = (PPDMBLKCACHEIOXFER)RTMemAllocZ(sizeof(PDMBLKCACHEIOXFER));
+ if (RT_UNLIKELY(!pIoXfer))
+ return VERR_NO_MEMORY;
+
+ AssertMsg(pEntry->pbData, ("Entry is in ghost state\n"));
+
+ pIoXfer->fIoCache = true;
+ pIoXfer->pEntry = pEntry;
+ pIoXfer->SgSeg.pvSeg = pEntry->pbData;
+ pIoXfer->SgSeg.cbSeg = pEntry->cbData;
+ pIoXfer->enmXferDir = PDMBLKCACHEXFERDIR_WRITE;
+ RTSgBufInit(&pIoXfer->SgBuf, &pIoXfer->SgSeg, 1);
+
+ return pdmBlkCacheEnqueue(pBlkCache, pEntry->Core.Key, pEntry->cbData, pIoXfer);
+}
+
+/**
+ * Passthrough a part of a request directly to the I/O manager handling the
+ * endpoint.
+ *
+ * @returns VBox status code.
+ * @param pBlkCache The endpoint cache.
+ * @param pReq The request.
+ * @param pSgBuf The scatter/gather buffer.
+ * @param offStart Offset to start transfer from.
+ * @param cbData Amount of data to transfer.
+ * @param enmXferDir The transfer type (read/write)
+ */
+static int pdmBlkCacheRequestPassthrough(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEREQ pReq,
+ PRTSGBUF pSgBuf, uint64_t offStart, size_t cbData,
+ PDMBLKCACHEXFERDIR enmXferDir)
+{
+ PPDMBLKCACHEIOXFER pIoXfer = (PPDMBLKCACHEIOXFER)RTMemAllocZ(sizeof(PDMBLKCACHEIOXFER));
+ if (RT_UNLIKELY(!pIoXfer))
+ return VERR_NO_MEMORY;
+
+ ASMAtomicIncU32(&pReq->cXfersPending);
+ pIoXfer->fIoCache = false;
+ pIoXfer->pReq = pReq;
+ pIoXfer->enmXferDir = enmXferDir;
+ if (pSgBuf)
+ {
+ RTSgBufClone(&pIoXfer->SgBuf, pSgBuf);
+ RTSgBufAdvance(pSgBuf, cbData);
+ }
+
+ return pdmBlkCacheEnqueue(pBlkCache, offStart, cbData, pIoXfer);
+}
+
+/**
+ * Commit a single dirty entry to the endpoint
+ *
+ * @param pEntry The entry to commit.
+ */
+static void pdmBlkCacheEntryCommit(PPDMBLKCACHEENTRY pEntry)
+{
+ AssertMsg( (pEntry->fFlags & PDMBLKCACHE_ENTRY_IS_DIRTY)
+ && !(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS),
+ ("Invalid flags set for entry %#p\n", pEntry));
+
+ pdmBlkCacheEntryWriteToMedium(pEntry);
+}
+
+/**
+ * Commit all dirty entries for a single endpoint.
+ *
+ * @param pBlkCache The endpoint cache to commit.
+ */
+static void pdmBlkCacheCommit(PPDMBLKCACHE pBlkCache)
+{
+ uint32_t cbCommitted = 0;
+
+ /* Return if the cache was suspended. */
+ if (pBlkCache->fSuspended)
+ return;
+
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+
+ /* The list is moved to a new header to reduce locking overhead. */
+ RTLISTANCHOR ListDirtyNotCommitted;
+
+ RTSpinlockAcquire(pBlkCache->LockList);
+ RTListMove(&ListDirtyNotCommitted, &pBlkCache->ListDirtyNotCommitted);
+ RTSpinlockRelease(pBlkCache->LockList);
+
+ if (!RTListIsEmpty(&ListDirtyNotCommitted))
+ {
+ PPDMBLKCACHEENTRY pEntry = RTListGetFirst(&ListDirtyNotCommitted, PDMBLKCACHEENTRY, NodeNotCommitted);
+
+ while (!RTListNodeIsLast(&ListDirtyNotCommitted, &pEntry->NodeNotCommitted))
+ {
+ PPDMBLKCACHEENTRY pNext = RTListNodeGetNext(&pEntry->NodeNotCommitted, PDMBLKCACHEENTRY,
+ NodeNotCommitted);
+ pdmBlkCacheEntryCommit(pEntry);
+ cbCommitted += pEntry->cbData;
+ RTListNodeRemove(&pEntry->NodeNotCommitted);
+ pEntry = pNext;
+ }
+
+ /* Commit the last entry. */
+ Assert(RTListNodeIsLast(&ListDirtyNotCommitted, &pEntry->NodeNotCommitted));
+ pdmBlkCacheEntryCommit(pEntry);
+ cbCommitted += pEntry->cbData;
+ RTListNodeRemove(&pEntry->NodeNotCommitted);
+ AssertMsg(RTListIsEmpty(&ListDirtyNotCommitted),
+ ("Committed all entries but list is not empty\n"));
+ }
+
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ AssertMsg(pBlkCache->pCache->cbDirty >= cbCommitted,
+ ("Number of committed bytes exceeds number of dirty bytes\n"));
+ uint32_t cbDirtyOld = ASMAtomicSubU32(&pBlkCache->pCache->cbDirty, cbCommitted);
+
+ /* Reset the commit timer if we don't have any dirty bits. */
+ if ( !(cbDirtyOld - cbCommitted)
+ && pBlkCache->pCache->u32CommitTimeoutMs != 0)
+ TMTimerStop(pBlkCache->pCache->pVM, pBlkCache->pCache->hTimerCommit);
+}
+
+/**
+ * Commit all dirty entries in the cache.
+ *
+ * @param pCache The global cache instance.
+ */
+static void pdmBlkCacheCommitDirtyEntries(PPDMBLKCACHEGLOBAL pCache)
+{
+ bool fCommitInProgress = ASMAtomicXchgBool(&pCache->fCommitInProgress, true);
+
+ if (!fCommitInProgress)
+ {
+ pdmBlkCacheLockEnter(pCache);
+ Assert(!RTListIsEmpty(&pCache->ListUsers));
+
+ PPDMBLKCACHE pBlkCache = RTListGetFirst(&pCache->ListUsers, PDMBLKCACHE, NodeCacheUser);
+ AssertPtr(pBlkCache);
+
+ while (!RTListNodeIsLast(&pCache->ListUsers, &pBlkCache->NodeCacheUser))
+ {
+ pdmBlkCacheCommit(pBlkCache);
+
+ pBlkCache = RTListNodeGetNext(&pBlkCache->NodeCacheUser, PDMBLKCACHE,
+ NodeCacheUser);
+ }
+
+ /* Commit the last endpoint */
+ Assert(RTListNodeIsLast(&pCache->ListUsers, &pBlkCache->NodeCacheUser));
+ pdmBlkCacheCommit(pBlkCache);
+
+ pdmBlkCacheLockLeave(pCache);
+ ASMAtomicWriteBool(&pCache->fCommitInProgress, false);
+ }
+}
+
+/**
+ * Adds the given entry as dirty to the cache.
+ *
+ * @returns Flag whether the amount of dirty bytes in the cache exceeds the threshold.
+ * @param pBlkCache The endpoint cache the entry belongs to.
+ * @param pEntry The entry to add.
+ */
+static bool pdmBlkCacheAddDirtyEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry)
+{
+ bool fDirtyBytesExceeded = false;
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+
+ /* If the commit timer is disabled we commit right away. */
+ if (pCache->u32CommitTimeoutMs == 0)
+ {
+ pEntry->fFlags |= PDMBLKCACHE_ENTRY_IS_DIRTY;
+ pdmBlkCacheEntryCommit(pEntry);
+ }
+ else if (!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IS_DIRTY))
+ {
+ pEntry->fFlags |= PDMBLKCACHE_ENTRY_IS_DIRTY;
+
+ RTSpinlockAcquire(pBlkCache->LockList);
+ RTListAppend(&pBlkCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted);
+ RTSpinlockRelease(pBlkCache->LockList);
+
+ uint32_t cbDirty = ASMAtomicAddU32(&pCache->cbDirty, pEntry->cbData);
+
+ /* Prevent committing if the VM was suspended. */
+ if (RT_LIKELY(!ASMAtomicReadBool(&pCache->fIoErrorVmSuspended)))
+ fDirtyBytesExceeded = (cbDirty + pEntry->cbData >= pCache->cbCommitDirtyThreshold);
+ else if (!cbDirty && pCache->u32CommitTimeoutMs > 0)
+ {
+ /* Arm the commit timer. */
+ TMTimerSetMillies(pCache->pVM, pCache->hTimerCommit, pCache->u32CommitTimeoutMs);
+ }
+ }
+
+ return fDirtyBytesExceeded;
+}
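+
+/*
+ * Write-back behaviour sketch with the defaults from pdmR3BlkCacheInit()
+ * below (cbCommitDirtyThreshold = cbMax / 2, u32CommitTimeoutMs = 10000):
+ * for a 5 MB cache a write burst triggers a bulk commit once about 2.5 MB
+ * of entries are dirty, while smaller amounts are flushed by the 10 second
+ * commit timer. Setting CacheCommitIntervalMs to 0 disables the timer and
+ * commits every entry right when it becomes dirty.
+ */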
+
+static PPDMBLKCACHE pdmR3BlkCacheFindById(PPDMBLKCACHEGLOBAL pBlkCacheGlobal, const char *pcszId)
+{
+ bool fFound = false;
+
+ PPDMBLKCACHE pBlkCache;
+ RTListForEach(&pBlkCacheGlobal->ListUsers, pBlkCache, PDMBLKCACHE, NodeCacheUser)
+ {
+ if (!RTStrCmp(pBlkCache->pszId, pcszId))
+ {
+ fFound = true;
+ break;
+ }
+ }
+
+ return fFound ? pBlkCache : NULL;
+}
+
+/**
+ * @callback_method_impl{FNTMTIMERINT, Commit timer callback.}
+ */
+static DECLCALLBACK(void) pdmBlkCacheCommitTimerCallback(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ PPDMBLKCACHEGLOBAL pCache = (PPDMBLKCACHEGLOBAL)pvUser;
+ RT_NOREF(pVM, hTimer);
+
+ LogFlowFunc(("Commit interval expired, commiting dirty entries\n"));
+
+ if ( ASMAtomicReadU32(&pCache->cbDirty) > 0
+ && !ASMAtomicReadBool(&pCache->fIoErrorVmSuspended))
+ pdmBlkCacheCommitDirtyEntries(pCache);
+
+ LogFlowFunc(("Entries committed, going to sleep\n"));
+}
+
+static DECLCALLBACK(int) pdmR3BlkCacheSaveExec(PVM pVM, PSSMHANDLE pSSM)
+{
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+
+ AssertPtr(pBlkCacheGlobal);
+
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ SSMR3PutU32(pSSM, pBlkCacheGlobal->cRefs);
+
+ /* Go through the list and save all dirty entries. */
+ PPDMBLKCACHE pBlkCache;
+ RTListForEach(&pBlkCacheGlobal->ListUsers, pBlkCache, PDMBLKCACHE, NodeCacheUser)
+ {
+ uint32_t cEntries = 0;
+ PPDMBLKCACHEENTRY pEntry;
+
+ RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ SSMR3PutU32(pSSM, (uint32_t)strlen(pBlkCache->pszId));
+ SSMR3PutStrZ(pSSM, pBlkCache->pszId);
+
+ /* Count the number of entries to save. */
+ RTListForEach(&pBlkCache->ListDirtyNotCommitted, pEntry, PDMBLKCACHEENTRY, NodeNotCommitted)
+ {
+ cEntries++;
+ }
+
+ SSMR3PutU32(pSSM, cEntries);
+
+ /* Walk the list of all dirty entries and save them. */
+ RTListForEach(&pBlkCache->ListDirtyNotCommitted, pEntry, PDMBLKCACHEENTRY, NodeNotCommitted)
+ {
+ /* A few sanity checks. */
+ AssertMsg(!pEntry->cRefs, ("The entry is still referenced\n"));
+ AssertMsg(pEntry->fFlags & PDMBLKCACHE_ENTRY_IS_DIRTY, ("Entry is not dirty\n"));
+ AssertMsg(!(pEntry->fFlags & ~PDMBLKCACHE_ENTRY_IS_DIRTY), ("Invalid flags set\n"));
+ AssertMsg(!pEntry->pWaitingHead && !pEntry->pWaitingTail, ("There are waiting requests\n"));
+ AssertMsg( pEntry->pList == &pBlkCacheGlobal->LruRecentlyUsedIn
+ || pEntry->pList == &pBlkCacheGlobal->LruFrequentlyUsed,
+ ("Invalid list\n"));
+ AssertMsg(pEntry->cbData == pEntry->Core.KeyLast - pEntry->Core.Key + 1,
+ ("Size and range do not match\n"));
+
+ /* Save */
+ SSMR3PutU64(pSSM, pEntry->Core.Key);
+ SSMR3PutU32(pSSM, pEntry->cbData);
+ SSMR3PutMem(pSSM, pEntry->pbData, pEntry->cbData);
+ }
+
+ RTSemRWReleaseRead(pBlkCache->SemRWEntries);
+ }
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+
+ /* Terminator */
+ return SSMR3PutU32(pSSM, UINT32_MAX);
+}
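+
+/*
+ * Saved state layout produced above and consumed by pdmR3BlkCacheLoadExec()
+ * below, reconstructed from the SSMR3Put/Get calls:
+ *
+ * @verbatim
+ * uint32_t cRefs;                   -- number of registered cache users
+ * cRefs times:
+ *     uint32_t cchId;               -- length of the user id (w/o terminator)
+ *     char     szId[cchId + 1];     -- zero terminated user id
+ *     uint32_t cEntries;            -- number of dirty entries for this user
+ *     cEntries times:
+ *         uint64_t offKey;          -- start offset of the entry
+ *         uint32_t cbData;          -- size of the entry data
+ *         uint8_t  abData[cbData];  -- the dirty data
+ * uint32_t u32Terminator;           -- UINT32_MAX
+ * @endverbatim
+ */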
+
+static DECLCALLBACK(int) pdmR3BlkCacheLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+ uint32_t cRefs;
+
+ NOREF(uPass);
+ AssertPtr(pBlkCacheGlobal);
+
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ if (uVersion != PDM_BLK_CACHE_SAVED_STATE_VERSION)
+ {
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ SSMR3GetU32(pSSM, &cRefs);
+
+ /*
+ * The saved state may contain fewer users than the current VM: the extra
+ * users are new ones without any saved data that could get lost.
+ * The reverse - more users in the saved state than registered in the VM -
+ * is only tolerated for users which have no data saved in the cache.
+ */
+ int rc = VINF_SUCCESS;
+ char *pszId = NULL;
+
+ while ( cRefs > 0
+ && RT_SUCCESS(rc))
+ {
+ PPDMBLKCACHE pBlkCache = NULL;
+ uint32_t cbId = 0;
+
+ SSMR3GetU32(pSSM, &cbId);
+ Assert(cbId > 0);
+
+ cbId++; /* Include terminator */
+ pszId = (char *)RTMemAllocZ(cbId * sizeof(char));
+ if (!pszId)
+ {
+ rc = VERR_NO_MEMORY;
+ break;
+ }
+
+ rc = SSMR3GetStrZ(pSSM, pszId, cbId);
+ AssertRC(rc);
+
+ /* Search for the block cache with the provided id. */
+ pBlkCache = pdmR3BlkCacheFindById(pBlkCacheGlobal, pszId);
+
+ /* Get the entries */
+ uint32_t cEntries;
+ SSMR3GetU32(pSSM, &cEntries);
+
+ if (!pBlkCache && (cEntries > 0))
+ {
+ rc = SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ N_("The VM is missing a block device and there is data in the cache. Please make sure the source and target VMs have compatible storage configurations"));
+ break;
+ }
+
+ RTMemFree(pszId);
+ pszId = NULL;
+
+ while (cEntries > 0)
+ {
+ PPDMBLKCACHEENTRY pEntry;
+ uint64_t off;
+ uint32_t cbEntry;
+
+ SSMR3GetU64(pSSM, &off);
+ SSMR3GetU32(pSSM, &cbEntry);
+
+ pEntry = pdmBlkCacheEntryAlloc(pBlkCache, off, cbEntry, NULL);
+ if (!pEntry)
+ {
+ rc = VERR_NO_MEMORY;
+ break;
+ }
+
+ rc = SSMR3GetMem(pSSM, pEntry->pbData, cbEntry);
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pEntry->pbData);
+ RTMemFree(pEntry);
+ break;
+ }
+
+ /* Insert into the tree. */
+ bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
+ Assert(fInserted); NOREF(fInserted);
+
+ /* Add to the dirty list. */
+ pdmBlkCacheAddDirtyEntry(pBlkCache, pEntry);
+ pdmBlkCacheEntryAddToList(&pBlkCacheGlobal->LruRecentlyUsedIn, pEntry);
+ pdmBlkCacheAdd(pBlkCacheGlobal, cbEntry);
+ pdmBlkCacheEntryRelease(pEntry);
+ cEntries--;
+ }
+
+ cRefs--;
+ }
+
+ if (pszId)
+ RTMemFree(pszId);
+
+ if (cRefs && RT_SUCCESS(rc))
+ rc = SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ N_("Unexpected error while restoring state. Please make sure the source and target VMs have compatible storage configurations"));
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t u32 = 0;
+ rc = SSMR3GetU32(pSSM, &u32);
+ if (RT_SUCCESS(rc))
+ AssertMsgReturn(u32 == UINT32_MAX, ("%#x\n", u32), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ }
+
+ return rc;
+}
+
+int pdmR3BlkCacheInit(PVM pVM)
+{
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = pVM->pUVM;
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal;
+
+ LogFlowFunc((": pVM=%p\n", pVM));
+
+ VM_ASSERT_EMT(pVM);
+
+ PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
+ PCFGMNODE pCfgBlkCache = CFGMR3GetChild(CFGMR3GetChild(pCfgRoot, "PDM"), "BlkCache");
+
+ pBlkCacheGlobal = (PPDMBLKCACHEGLOBAL)RTMemAllocZ(sizeof(PDMBLKCACHEGLOBAL));
+ if (!pBlkCacheGlobal)
+ return VERR_NO_MEMORY;
+
+ RTListInit(&pBlkCacheGlobal->ListUsers);
+ pBlkCacheGlobal->pVM = pVM;
+ pBlkCacheGlobal->cRefs = 0;
+ pBlkCacheGlobal->cbCached = 0;
+ pBlkCacheGlobal->fCommitInProgress = false;
+
+ /* Initialize members */
+ pBlkCacheGlobal->LruRecentlyUsedIn.pHead = NULL;
+ pBlkCacheGlobal->LruRecentlyUsedIn.pTail = NULL;
+ pBlkCacheGlobal->LruRecentlyUsedIn.cbCached = 0;
+
+ pBlkCacheGlobal->LruRecentlyUsedOut.pHead = NULL;
+ pBlkCacheGlobal->LruRecentlyUsedOut.pTail = NULL;
+ pBlkCacheGlobal->LruRecentlyUsedOut.cbCached = 0;
+
+ pBlkCacheGlobal->LruFrequentlyUsed.pHead = NULL;
+ pBlkCacheGlobal->LruFrequentlyUsed.pTail = NULL;
+ pBlkCacheGlobal->LruFrequentlyUsed.cbCached = 0;
+
+ do
+ {
+ rc = CFGMR3QueryU32Def(pCfgBlkCache, "CacheSize", &pBlkCacheGlobal->cbMax, 5 * _1M);
+ AssertLogRelRCBreak(rc);
+ LogFlowFunc(("Maximum number of bytes cached %u\n", pBlkCacheGlobal->cbMax));
+
+ pBlkCacheGlobal->cbRecentlyUsedInMax = (pBlkCacheGlobal->cbMax / 100) * 25; /* 25% of the buffer size */
+ pBlkCacheGlobal->cbRecentlyUsedOutMax = (pBlkCacheGlobal->cbMax / 100) * 50; /* 50% of the buffer size */
+ LogFlowFunc(("cbRecentlyUsedInMax=%u cbRecentlyUsedOutMax=%u\n",
+ pBlkCacheGlobal->cbRecentlyUsedInMax, pBlkCacheGlobal->cbRecentlyUsedOutMax));
+
+ /** @todo r=aeichner: Experiment to find optimal default values */
+ rc = CFGMR3QueryU32Def(pCfgBlkCache, "CacheCommitIntervalMs", &pBlkCacheGlobal->u32CommitTimeoutMs, 10000 /* 10sec */);
+ AssertLogRelRCBreak(rc);
+ rc = CFGMR3QueryU32Def(pCfgBlkCache, "CacheCommitThreshold", &pBlkCacheGlobal->cbCommitDirtyThreshold, pBlkCacheGlobal->cbMax / 2);
+ AssertLogRelRCBreak(rc);
+ } while (0);
+
+ if (RT_SUCCESS(rc))
+ {
+ STAMR3Register(pVM, &pBlkCacheGlobal->cbMax,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/cbMax",
+ STAMUNIT_BYTES,
+ "Maximum cache size");
+ STAMR3Register(pVM, &pBlkCacheGlobal->cbCached,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/cbCached",
+ STAMUNIT_BYTES,
+ "Currently used cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->LruRecentlyUsedIn.cbCached,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/cbCachedMruIn",
+ STAMUNIT_BYTES,
+ "Number of bytes cached in MRU list");
+ STAMR3Register(pVM, &pBlkCacheGlobal->LruRecentlyUsedOut.cbCached,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/cbCachedMruOut",
+ STAMUNIT_BYTES,
+ "Number of bytes cached in FRU list");
+ STAMR3Register(pVM, &pBlkCacheGlobal->LruFrequentlyUsed.cbCached,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/cbCachedFru",
+ STAMUNIT_BYTES,
+ "Number of bytes cached in FRU ghost list");
+
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3Register(pVM, &pBlkCacheGlobal->cHits,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheHits",
+ STAMUNIT_COUNT, "Number of hits in the cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->cPartialHits,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CachePartialHits",
+ STAMUNIT_COUNT, "Number of partial hits in the cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->cMisses,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheMisses",
+ STAMUNIT_COUNT, "Number of misses when accessing the cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatRead,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheRead",
+ STAMUNIT_BYTES, "Number of bytes read from the cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatWritten,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheWritten",
+ STAMUNIT_BYTES, "Number of bytes written to the cache");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatTreeGet,
+ STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheTreeGet",
+ STAMUNIT_TICKS_PER_CALL, "Time taken to access an entry in the tree");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatTreeInsert,
+ STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheTreeInsert",
+ STAMUNIT_TICKS_PER_CALL, "Time taken to insert an entry in the tree");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatTreeRemove,
+ STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheTreeRemove",
+ STAMUNIT_TICKS_PER_CALL, "Time taken to remove an entry an the tree");
+ STAMR3Register(pVM, &pBlkCacheGlobal->StatBuffersReused,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ "/PDM/BlkCache/CacheBuffersReused",
+ STAMUNIT_COUNT, "Number of times a buffer could be reused");
+#endif
+
+ /* Initialize the critical section */
+ rc = RTCritSectInit(&pBlkCacheGlobal->CritSect);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Create the commit timer */
+ if (pBlkCacheGlobal->u32CommitTimeoutMs > 0)
+ rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, pdmBlkCacheCommitTimerCallback, pBlkCacheGlobal,
+ TMTIMER_FLAGS_NO_RING0, "BlkCache-Commit", &pBlkCacheGlobal->hTimerCommit);
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Register saved state handler. */
+ rc = SSMR3RegisterInternal(pVM, "pdmblkcache", 0, PDM_BLK_CACHE_SAVED_STATE_VERSION, pBlkCacheGlobal->cbMax,
+ NULL, NULL, NULL,
+ NULL, pdmR3BlkCacheSaveExec, NULL,
+ NULL, pdmR3BlkCacheLoadExec, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("BlkCache: Cache successfully initialized. Cache size is %u bytes\n", pBlkCacheGlobal->cbMax));
+ LogRel(("BlkCache: Cache commit interval is %u ms\n", pBlkCacheGlobal->u32CommitTimeoutMs));
+ LogRel(("BlkCache: Cache commit threshold is %u bytes\n", pBlkCacheGlobal->cbCommitDirtyThreshold));
+ pUVM->pdm.s.pBlkCacheGlobal = pBlkCacheGlobal;
+ return VINF_SUCCESS;
+ }
+ }
+
+ RTCritSectDelete(&pBlkCacheGlobal->CritSect);
+ }
+
+ if (pBlkCacheGlobal)
+ RTMemFree(pBlkCacheGlobal);
+
+ LogFlowFunc((": returns rc=%Rrc\n", rc));
+ return rc;
+}
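+
+/*
+ * Configuration sketch: the three CFGM keys queried above live under
+ * PDM/BlkCache. Assuming the usual VBoxInternal extradata mapping onto the
+ * CFGM tree, a 10 MB cache with a 5 second commit interval and a 5 MB
+ * commit threshold could be requested like this (illustrative only):
+ *
+ * @code
+ * VBoxManage setextradata "MyVM" "VBoxInternal/PDM/BlkCache/CacheSize"             10485760
+ * VBoxManage setextradata "MyVM" "VBoxInternal/PDM/BlkCache/CacheCommitIntervalMs" 5000
+ * VBoxManage setextradata "MyVM" "VBoxInternal/PDM/BlkCache/CacheCommitThreshold"  5242880
+ * @endcode
+ */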
+
+void pdmR3BlkCacheTerm(PVM pVM)
+{
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+
+ if (pBlkCacheGlobal)
+ {
+ /* Make sure no one else uses the cache now */
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ /* Cleanup deleting all cache entries waiting for in progress entries to finish. */
+ pdmBlkCacheDestroyList(&pBlkCacheGlobal->LruRecentlyUsedIn);
+ pdmBlkCacheDestroyList(&pBlkCacheGlobal->LruRecentlyUsedOut);
+ pdmBlkCacheDestroyList(&pBlkCacheGlobal->LruFrequentlyUsed);
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+
+ RTCritSectDelete(&pBlkCacheGlobal->CritSect);
+ RTMemFree(pBlkCacheGlobal);
+ pVM->pUVM->pdm.s.pBlkCacheGlobal = NULL;
+ }
+}
+
+int pdmR3BlkCacheResume(PVM pVM)
+{
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+
+ LogFlowFunc(("pVM=%#p\n", pVM));
+
+ if ( pBlkCacheGlobal
+ && ASMAtomicXchgBool(&pBlkCacheGlobal->fIoErrorVmSuspended, false))
+ {
+ /* The VM was suspended because of an I/O error, commit all dirty entries. */
+ pdmBlkCacheCommitDirtyEntries(pBlkCacheGlobal);
+ }
+
+ return VINF_SUCCESS;
+}
+
+static int pdmR3BlkCacheRetain(PVM pVM, PPPDMBLKCACHE ppBlkCache, const char *pcszId)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHE pBlkCache = NULL;
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+
+ if (!pBlkCacheGlobal)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Check first that no other cache user has the same id.
+ * Unique ids are necessary in case the state is saved.
+ */
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ pBlkCache = pdmR3BlkCacheFindById(pBlkCacheGlobal, pcszId);
+
+ if (!pBlkCache)
+ {
+ pBlkCache = (PPDMBLKCACHE)RTMemAllocZ(sizeof(PDMBLKCACHE));
+
+ if (pBlkCache)
+ pBlkCache->pszId = RTStrDup(pcszId);
+
+ if ( pBlkCache
+ && pBlkCache->pszId)
+ {
+ pBlkCache->fSuspended = false;
+ pBlkCache->cIoXfersActive = 0;
+ pBlkCache->pCache = pBlkCacheGlobal;
+ RTListInit(&pBlkCache->ListDirtyNotCommitted);
+
+ rc = RTSpinlockCreate(&pBlkCache->LockList, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "pdmR3BlkCacheRetain");
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemRWCreate(&pBlkCache->SemRWEntries);
+ if (RT_SUCCESS(rc))
+ {
+ pBlkCache->pTree = (PAVLRU64TREE)RTMemAllocZ(sizeof(AVLRU64TREE));
+ if (pBlkCache->pTree)
+ {
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pBlkCacheGlobal->pVM, &pBlkCache->StatWriteDeferred,
+ STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_COUNT, "Number of deferred writes",
+ "/PDM/BlkCache/%s/Cache/DeferredWrites", pBlkCache->pszId);
+#endif
+
+ /* Add to the list of users. */
+ pBlkCacheGlobal->cRefs++;
+ RTListAppend(&pBlkCacheGlobal->ListUsers, &pBlkCache->NodeCacheUser);
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+
+ *ppBlkCache = pBlkCache;
+ LogFlowFunc(("returns success\n"));
+ return VINF_SUCCESS;
+ }
+
+ rc = VERR_NO_MEMORY;
+ RTSemRWDestroy(pBlkCache->SemRWEntries);
+ }
+
+ RTSpinlockDestroy(pBlkCache->LockList);
+ }
+
+ RTStrFree(pBlkCache->pszId);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ if (pBlkCache)
+ RTMemFree(pBlkCache);
+ }
+ else
+ rc = VERR_ALREADY_EXISTS;
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+
+ LogFlowFunc(("Leave rc=%Rrc\n", rc));
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheRetainDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMBLKCACHE ppBlkCache,
+ PFNPDMBLKCACHEXFERCOMPLETEDRV pfnXferComplete,
+ PFNPDMBLKCACHEXFERENQUEUEDRV pfnXferEnqueue,
+ PFNPDMBLKCACHEXFERENQUEUEDISCARDDRV pfnXferEnqueueDiscard,
+ const char *pcszId)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHE pBlkCache;
+
+ rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
+ if (RT_SUCCESS(rc))
+ {
+ pBlkCache->enmType = PDMBLKCACHETYPE_DRV;
+ pBlkCache->u.Drv.pfnXferComplete = pfnXferComplete;
+ pBlkCache->u.Drv.pfnXferEnqueue = pfnXferEnqueue;
+ pBlkCache->u.Drv.pfnXferEnqueueDiscard = pfnXferEnqueueDiscard;
+ pBlkCache->u.Drv.pDrvIns = pDrvIns;
+ *ppBlkCache = pBlkCache;
+ }
+
+ LogFlowFunc(("Leave rc=%Rrc\n", rc));
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheRetainDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMBLKCACHE ppBlkCache,
+ PFNPDMBLKCACHEXFERCOMPLETEDEV pfnXferComplete,
+ PFNPDMBLKCACHEXFERENQUEUEDEV pfnXferEnqueue,
+ PFNPDMBLKCACHEXFERENQUEUEDISCARDDEV pfnXferEnqueueDiscard,
+ const char *pcszId)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHE pBlkCache;
+
+ rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
+ if (RT_SUCCESS(rc))
+ {
+ pBlkCache->enmType = PDMBLKCACHETYPE_DEV;
+ pBlkCache->u.Dev.pfnXferComplete = pfnXferComplete;
+ pBlkCache->u.Dev.pfnXferEnqueue = pfnXferEnqueue;
+ pBlkCache->u.Dev.pfnXferEnqueueDiscard = pfnXferEnqueueDiscard;
+ pBlkCache->u.Dev.pDevIns = pDevIns;
+ *ppBlkCache = pBlkCache;
+ }
+
+ LogFlowFunc(("Leave rc=%Rrc\n", rc));
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheRetainUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMBLKCACHE ppBlkCache,
+ PFNPDMBLKCACHEXFERCOMPLETEUSB pfnXferComplete,
+ PFNPDMBLKCACHEXFERENQUEUEUSB pfnXferEnqueue,
+ PFNPDMBLKCACHEXFERENQUEUEDISCARDUSB pfnXferEnqueueDiscard,
+ const char *pcszId)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHE pBlkCache;
+
+ rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
+ if (RT_SUCCESS(rc))
+ {
+ pBlkCache->enmType = PDMBLKCACHETYPE_USB;
+ pBlkCache->u.Usb.pfnXferComplete = pfnXferComplete;
+ pBlkCache->u.Usb.pfnXferEnqueue = pfnXferEnqueue;
+ pBlkCache->u.Usb.pfnXferEnqueueDiscard = pfnXferEnqueueDiscard;
+ pBlkCache->u.Usb.pUsbIns = pUsbIns;
+ *ppBlkCache = pBlkCache;
+ }
+
+ LogFlowFunc(("Leave rc=%Rrc\n", rc));
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheRetainInt(PVM pVM, void *pvUser, PPPDMBLKCACHE ppBlkCache,
+ PFNPDMBLKCACHEXFERCOMPLETEINT pfnXferComplete,
+ PFNPDMBLKCACHEXFERENQUEUEINT pfnXferEnqueue,
+ PFNPDMBLKCACHEXFERENQUEUEDISCARDINT pfnXferEnqueueDiscard,
+ const char *pcszId)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHE pBlkCache;
+
+ rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
+ if (RT_SUCCESS(rc))
+ {
+ pBlkCache->enmType = PDMBLKCACHETYPE_INTERNAL;
+ pBlkCache->u.Int.pfnXferComplete = pfnXferComplete;
+ pBlkCache->u.Int.pfnXferEnqueue = pfnXferEnqueue;
+ pBlkCache->u.Int.pfnXferEnqueueDiscard = pfnXferEnqueueDiscard;
+ pBlkCache->u.Int.pvUser = pvUser;
+ *ppBlkCache = pBlkCache;
+ }
+
+ LogFlowFunc(("Leave rc=%Rrc\n", rc));
+ return rc;
+}
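+
+/*
+ * Registration sketch for an internal cache user. The callback names and
+ * bodies are hypothetical; only the signatures implied by the calls in
+ * pdmBlkCacheEnqueue() and pdmBlkCacheReqComplete() above are assumed.
+ *
+ * @code
+ * static DECLCALLBACK(void) myXferComplete(void *pvUser, void *pvUserXfer, int rcReq)
+ * {
+ *     // complete the request identified by pvUserXfer with rcReq
+ * }
+ *
+ * static DECLCALLBACK(int) myXferEnqueue(void *pvUser, PDMBLKCACHEXFERDIR enmXferDir,
+ *                                        uint64_t off, size_t cbXfer,
+ *                                        PCRTSGBUF pSgBuf, PPDMBLKCACHEIOXFER hIoXfer)
+ * {
+ *     // submit the I/O; report completion with PDMR3BlkCacheIoXferComplete()
+ *     return VINF_SUCCESS;
+ * }
+ *
+ * PPDMBLKCACHE pBlkCache;
+ * int rc = PDMR3BlkCacheRetainInt(pVM, pThis, &pBlkCache,
+ *                                 myXferComplete, myXferEnqueue,
+ *                                 NULL, // no discard handler in this sketch
+ *                                 "MyDisk-0");
+ * @endcode
+ */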
+
+/**
+ * Callback for the AVL destroy routine. Frees a cache entry for this endpoint.
+ *
+ * @returns IPRT status code.
+ * @param pNode The node to destroy.
+ * @param pvUser Opaque user data.
+ */
+static DECLCALLBACK(int) pdmBlkCacheEntryDestroy(PAVLRU64NODECORE pNode, void *pvUser)
+{
+ PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)pNode;
+ PPDMBLKCACHEGLOBAL pCache = (PPDMBLKCACHEGLOBAL)pvUser;
+ PPDMBLKCACHE pBlkCache = pEntry->pBlkCache;
+
+ while (ASMAtomicReadU32(&pEntry->fFlags) & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS)
+ {
+ /* Leave the locks to let the I/O thread make progress but reference the entry to prevent eviction. */
+ pdmBlkCacheEntryRef(pEntry);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ pdmBlkCacheLockLeave(pCache);
+
+ RTThreadSleep(250);
+
+ /* Re-enter all locks */
+ pdmBlkCacheLockEnter(pCache);
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+
+ AssertMsg(!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS),
+ ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
+
+ bool fUpdateCache = pEntry->pList == &pCache->LruFrequentlyUsed
+ || pEntry->pList == &pCache->LruRecentlyUsedIn;
+
+ pdmBlkCacheEntryRemoveFromList(pEntry);
+
+ if (fUpdateCache)
+ pdmBlkCacheSub(pCache, pEntry->cbData);
+
+ RTMemPageFree(pEntry->pbData, pEntry->cbData);
+ RTMemFree(pEntry);
+
+ return VINF_SUCCESS;
+}
+
+VMMR3DECL(void) PDMR3BlkCacheRelease(PPDMBLKCACHE pBlkCache)
+{
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+
+ /*
+ * Commit all dirty entries now (they are waited on for completion during the
+ * destruction of the AVL tree below).
+ * The exception is if the VM was paused because of an I/O error before.
+ */
+ if (!ASMAtomicReadBool(&pCache->fIoErrorVmSuspended))
+ pdmBlkCacheCommit(pBlkCache);
+
+ /* Make sure nobody is accessing the cache while we delete the tree. */
+ pdmBlkCacheLockEnter(pCache);
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ RTAvlrU64Destroy(pBlkCache->pTree, pdmBlkCacheEntryDestroy, pCache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ RTSpinlockDestroy(pBlkCache->LockList);
+
+ pCache->cRefs--;
+ RTListNodeRemove(&pBlkCache->NodeCacheUser);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pBlkCache->pTree);
+ pBlkCache->pTree = NULL;
+ RTSemRWDestroy(pBlkCache->SemRWEntries);
+
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3DeregisterF(pCache->pVM->pUVM, "/PDM/BlkCache/%s/Cache/DeferredWrites", pBlkCache->pszId);
+#endif
+
+ RTStrFree(pBlkCache->pszId);
+ RTMemFree(pBlkCache);
+}
+
+VMMR3DECL(void) PDMR3BlkCacheReleaseDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ LogFlow(("%s: pDevIns=%p\n", __FUNCTION__, pDevIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pDevIns)
+ return;
+ VM_ASSERT_EMT(pVM);
+
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+ PPDMBLKCACHE pBlkCache, pBlkCacheNext;
+
+ /* Return silently if not supported. */
+ if (!pBlkCacheGlobal)
+ return;
+
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ RTListForEachSafe(&pBlkCacheGlobal->ListUsers, pBlkCache, pBlkCacheNext, PDMBLKCACHE, NodeCacheUser)
+ {
+ if ( pBlkCache->enmType == PDMBLKCACHETYPE_DEV
+ && pBlkCache->u.Dev.pDevIns == pDevIns)
+ PDMR3BlkCacheRelease(pBlkCache);
+ }
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+}
+
+VMMR3DECL(void) PDMR3BlkCacheReleaseDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("%s: pDrvIns=%p\n", __FUNCTION__, pDrvIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pDrvIns)
+ return;
+ VM_ASSERT_EMT(pVM);
+
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+ PPDMBLKCACHE pBlkCache, pBlkCacheNext;
+
+ /* Return silently if not supported. */
+ if (!pBlkCacheGlobal)
+ return;
+
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ RTListForEachSafe(&pBlkCacheGlobal->ListUsers, pBlkCache, pBlkCacheNext, PDMBLKCACHE, NodeCacheUser)
+ {
+ if ( pBlkCache->enmType == PDMBLKCACHETYPE_DRV
+ && pBlkCache->u.Drv.pDrvIns == pDrvIns)
+ PDMR3BlkCacheRelease(pBlkCache);
+ }
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+}
+
+VMMR3DECL(void) PDMR3BlkCacheReleaseUsb(PVM pVM, PPDMUSBINS pUsbIns)
+{
+ LogFlow(("%s: pUsbIns=%p\n", __FUNCTION__, pUsbIns));
+
+ /*
+ * Validate input.
+ */
+ if (!pUsbIns)
+ return;
+ VM_ASSERT_EMT(pVM);
+
+ PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
+ PPDMBLKCACHE pBlkCache, pBlkCacheNext;
+
+ /* Return silently if not supported. */
+ if (!pBlkCacheGlobal)
+ return;
+
+ pdmBlkCacheLockEnter(pBlkCacheGlobal);
+
+ RTListForEachSafe(&pBlkCacheGlobal->ListUsers, pBlkCache, pBlkCacheNext, PDMBLKCACHE, NodeCacheUser)
+ {
+ if ( pBlkCache->enmType == PDMBLKCACHETYPE_USB
+ && pBlkCache->u.Usb.pUsbIns == pUsbIns)
+ PDMR3BlkCacheRelease(pBlkCache);
+ }
+
+ pdmBlkCacheLockLeave(pBlkCacheGlobal);
+}
+
+static PPDMBLKCACHEENTRY pdmBlkCacheGetCacheEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off)
+{
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);
+
+ RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
+ if (pEntry)
+ pdmBlkCacheEntryRef(pEntry);
+ RTSemRWReleaseRead(pBlkCache->SemRWEntries);
+
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
+
+ return pEntry;
+}
+
+/**
+ * Returns the best fit cache entry above the given offset.
+ *
+ * @param pBlkCache The endpoint cache.
+ * @param off The offset.
+ * @param ppEntryAbove Where to store the pointer to the best fit entry above
+ * the given offset. NULL if not required.
+ */
+static void pdmBlkCacheGetCacheBestFitEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off, PPDMBLKCACHEENTRY *ppEntryAbove)
+{
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);
+
+ RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ if (ppEntryAbove)
+ {
+ *ppEntryAbove = (PPDMBLKCACHEENTRY)RTAvlrU64GetBestFit(pBlkCache->pTree, off, true /*fAbove*/);
+ if (*ppEntryAbove)
+ pdmBlkCacheEntryRef(*ppEntryAbove);
+ }
+
+ RTSemRWReleaseRead(pBlkCache->SemRWEntries);
+
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
+}
+
+static void pdmBlkCacheInsertEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry)
+{
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeInsert, Cache);
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
+ AssertMsg(fInserted, ("Node was not inserted into tree\n")); NOREF(fInserted);
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeInsert, Cache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+}
+
+/**
+ * Allocates and initializes a new entry for the cache.
+ * The entry has a reference count of 1.
+ *
+ * @returns Pointer to the new cache entry or NULL if out of memory.
+ * @param pBlkCache The cache the entry belongs to.
+ * @param off Start offset.
+ * @param cbData Size of the cache entry.
+ * @param pbBuffer Pointer to the buffer to use.
+ * NULL if a new buffer should be allocated.
+ * The buffer needs to have the same size as the entry.
+ */
+static PPDMBLKCACHEENTRY pdmBlkCacheEntryAlloc(PPDMBLKCACHE pBlkCache, uint64_t off, size_t cbData, uint8_t *pbBuffer)
+{
+ AssertReturn(cbData <= UINT32_MAX, NULL);
+ PPDMBLKCACHEENTRY pEntryNew = (PPDMBLKCACHEENTRY)RTMemAllocZ(sizeof(PDMBLKCACHEENTRY));
+
+ if (RT_UNLIKELY(!pEntryNew))
+ return NULL;
+
+ pEntryNew->Core.Key = off;
+ pEntryNew->Core.KeyLast = off + cbData - 1;
+ pEntryNew->pBlkCache = pBlkCache;
+ pEntryNew->fFlags = 0;
+ pEntryNew->cRefs = 1; /* We are using it now. */
+ pEntryNew->pList = NULL;
+ pEntryNew->cbData = (uint32_t)cbData;
+ pEntryNew->pWaitingHead = NULL;
+ pEntryNew->pWaitingTail = NULL;
+ if (pbBuffer)
+ pEntryNew->pbData = pbBuffer;
+ else
+ pEntryNew->pbData = (uint8_t *)RTMemPageAlloc(cbData);
+
+ if (RT_UNLIKELY(!pEntryNew->pbData))
+ {
+ RTMemFree(pEntryNew);
+ return NULL;
+ }
+
+ return pEntryNew;
+}
+
+/**
+ * Checks that one set of flags is set and another is clear, acquiring the
+ * R/W semaphore in exclusive mode if the check succeeds.
+ *
+ * @returns true if the flag in fSet is set and the one in fClear is clear.
+ * false otherwise.
+ * The R/W semaphore is only held if true is returned.
+ *
+ * @param pBlkCache The endpoint cache instance data.
+ * @param pEntry The entry to check the flags for.
+ * @param fSet The flag which is tested to be set.
+ * @param fClear The flag which is tested to be clear.
+ */
+DECLINLINE(bool) pdmBlkCacheEntryFlagIsSetClearAcquireLock(PPDMBLKCACHE pBlkCache,
+ PPDMBLKCACHEENTRY pEntry,
+ uint32_t fSet, uint32_t fClear)
+{
+ uint32_t fFlags = ASMAtomicReadU32(&pEntry->fFlags);
+ bool fPassed = ((fFlags & fSet) && !(fFlags & fClear));
+
+ if (fPassed)
+ {
+ /* Acquire the lock and check again because the completion callback might have raced us. */
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+
+ fFlags = ASMAtomicReadU32(&pEntry->fFlags);
+ fPassed = ((fFlags & fSet) && !(fFlags & fClear));
+
+ /* Drop the lock if we didn't pass the test. */
+ if (!fPassed)
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ }
+
+ return fPassed;
+}
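+
+/*
+ * Usage pattern (as in PDMR3BlkCacheRead() below): on a true return the
+ * write semaphore is held, so the caller must release it once the entry
+ * has been dealt with.
+ *
+ * @code
+ * if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ *                                               PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
+ *                                               PDMBLKCACHE_ENTRY_IS_DIRTY))
+ * {
+ *     // I/O still running - queue a waiter instead of touching the data
+ *     RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ * }
+ * @endcode
+ */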
+
+/**
+ * Adds a segment to the waiting list for a cache entry
+ * which is currently in progress.
+ *
+ * @param pEntry The cache entry to add the segment to.
+ * @param pWaiter The waiter entry to add.
+ */
+DECLINLINE(void) pdmBlkCacheEntryAddWaiter(PPDMBLKCACHEENTRY pEntry, PPDMBLKCACHEWAITER pWaiter)
+{
+ pWaiter->pNext = NULL;
+
+ if (pEntry->pWaitingHead)
+ {
+ AssertPtr(pEntry->pWaitingTail);
+
+ pEntry->pWaitingTail->pNext = pWaiter;
+ pEntry->pWaitingTail = pWaiter;
+ }
+ else
+ {
+ Assert(!pEntry->pWaitingTail);
+
+ pEntry->pWaitingHead = pWaiter;
+ pEntry->pWaitingTail = pWaiter;
+ }
+}
+
+/**
+ * Add a buffer described by the I/O memory context
+ * to the entry waiting for completion.
+ *
+ * @returns VBox status code.
+ * @param pEntry The entry to add the buffer to.
+ * @param pReq The request.
+ * @param pSgBuf The scatter/gather buffer. Will be advanced by cbData.
+ * @param offDiff Offset from the start of the buffer in the entry.
+ * @param cbData Amount of data to wait for on this entry.
+ * @param fWrite Flag whether the task waits because it wants to write to
+ * the cache entry.
+ */
+static int pdmBlkCacheEntryWaitersAdd(PPDMBLKCACHEENTRY pEntry, PPDMBLKCACHEREQ pReq,
+ PRTSGBUF pSgBuf, uint64_t offDiff, size_t cbData, bool fWrite)
+{
+ PPDMBLKCACHEWAITER pWaiter = (PPDMBLKCACHEWAITER)RTMemAllocZ(sizeof(PDMBLKCACHEWAITER));
+ if (!pWaiter)
+ return VERR_NO_MEMORY;
+
+ ASMAtomicIncU32(&pReq->cXfersPending);
+ pWaiter->pReq = pReq;
+ pWaiter->offCacheEntry = offDiff;
+ pWaiter->cbTransfer = cbData;
+ pWaiter->fWrite = fWrite;
+ RTSgBufClone(&pWaiter->SgBuf, pSgBuf);
+ RTSgBufAdvance(pSgBuf, cbData);
+
+ pdmBlkCacheEntryAddWaiter(pEntry, pWaiter);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Calculates the aligned size for a new cache entry so that it does not
+ * intersect with an already existing entry or cross the end of the file.
+ *
+ * @returns The number of bytes the entry can hold of the requested amount
+ * of bytes.
+ * @param pBlkCache The endpoint cache.
+ * @param off The start offset.
+ * @param cb The number of bytes the entry needs to hold at
+ * least.
+ * @param pcbEntry Where to store the number of bytes the entry can hold.
+ * Can be less than given because of other entries.
+ */
+static uint32_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
+ uint64_t off, uint32_t cb,
+ uint32_t *pcbEntry)
+{
+ /* Get the best fit entries around the offset */
+ PPDMBLKCACHEENTRY pEntryAbove = NULL;
+ pdmBlkCacheGetCacheBestFitEntryByOffset(pBlkCache, off, &pEntryAbove);
+
+ /* Log the info */
+ LogFlow(("%sest fit entry above off=%llu (BestFit=%llu BestFitEnd=%llu BestFitSize=%u)\n",
+ pEntryAbove ? "B" : "No b",
+ off,
+ pEntryAbove ? pEntryAbove->Core.Key : 0,
+ pEntryAbove ? pEntryAbove->Core.KeyLast : 0,
+ pEntryAbove ? pEntryAbove->cbData : 0));
+
+ uint32_t cbNext;
+ uint32_t cbInEntry;
+ if ( pEntryAbove
+ && off + cb > pEntryAbove->Core.Key)
+ {
+ cbInEntry = (uint32_t)(pEntryAbove->Core.Key - off);
+ cbNext = (uint32_t)(pEntryAbove->Core.Key - off);
+ }
+ else
+ {
+ cbInEntry = cb;
+ cbNext = cb;
+ }
+
+ /* A few sanity checks */
+ AssertMsg(!pEntryAbove || off + cbNext <= pEntryAbove->Core.Key,
+ ("Aligned size intersects with another cache entry\n"));
+ Assert(cbInEntry <= cbNext);
+
+ if (pEntryAbove)
+ pdmBlkCacheEntryRelease(pEntryAbove);
+
+ LogFlow(("off=%llu cbNext=%u\n", off, cbNext));
+
+ *pcbEntry = cbNext;
+
+ return cbInEntry;
+}
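+
+/*
+ * Worked example: with an existing entry covering [65536, 98303] and a
+ * request for off = 32768, cb = 49152, the best fit entry above starts at
+ * 65536, so both the entry size (*pcbEntry) and the usable byte count (the
+ * return value) clip to 65536 - 32768 = 32768. With no entry above, both
+ * stay at the requested 49152.
+ */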
+
+/**
+ * Create a new cache entry evicting data from the cache if required.
+ *
+ * @returns Pointer to the new cache entry or NULL
+ * if not enough bytes could be evicted from the cache.
+ * @param pBlkCache The endpoint cache.
+ * @param off The offset.
+ * @param cb Number of bytes the cache entry should have.
+ * @param pcbData Where to store the number of bytes the new
+ * entry can hold. May be lower than actually
+ * requested due to another entry intersecting the
+ * access range.
+ */
+static PPDMBLKCACHEENTRY pdmBlkCacheEntryCreate(PPDMBLKCACHE pBlkCache, uint64_t off, size_t cb, size_t *pcbData)
+{
+ uint32_t cbEntry = 0;
+
+ AssertReturn(cb <= UINT32_MAX, NULL);
+ *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, (uint32_t)cb, &cbEntry);
+
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+ pdmBlkCacheLockEnter(pCache);
+
+ PPDMBLKCACHEENTRY pEntryNew = NULL;
+ uint8_t *pbBuffer = NULL;
+ bool fEnough = pdmBlkCacheReclaim(pCache, cbEntry, true, &pbBuffer);
+ if (fEnough)
+ {
+ LogFlow(("Evicted enough bytes (%u requested). Creating new cache entry\n", cbEntry));
+
+ pEntryNew = pdmBlkCacheEntryAlloc(pBlkCache, off, cbEntry, pbBuffer);
+ if (RT_LIKELY(pEntryNew))
+ {
+ pdmBlkCacheEntryAddToList(&pCache->LruRecentlyUsedIn, pEntryNew);
+ pdmBlkCacheAdd(pCache, cbEntry);
+ pdmBlkCacheLockLeave(pCache);
+
+ pdmBlkCacheInsertEntry(pBlkCache, pEntryNew);
+
+ AssertMsg( (off >= pEntryNew->Core.Key)
+ && (off + *pcbData <= pEntryNew->Core.KeyLast + 1),
+ ("Overflow in calculation off=%llu\n", off));
+ }
+ else
+ pdmBlkCacheLockLeave(pCache);
+ }
+ else
+ pdmBlkCacheLockLeave(pCache);
+
+ return pEntryNew;
+}
+
+static PPDMBLKCACHEREQ pdmBlkCacheReqAlloc(void *pvUser)
+{
+ PPDMBLKCACHEREQ pReq = (PPDMBLKCACHEREQ)RTMemAlloc(sizeof(PDMBLKCACHEREQ));
+
+ if (RT_LIKELY(pReq))
+ {
+ pReq->pvUser = pvUser;
+ pReq->rcReq = VINF_SUCCESS;
+ pReq->cXfersPending = 0;
+ }
+
+ return pReq;
+}
+
+static void pdmBlkCacheReqComplete(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEREQ pReq)
+{
+ switch (pBlkCache->enmType)
+ {
+ case PDMBLKCACHETYPE_DEV:
+ {
+ pBlkCache->u.Dev.pfnXferComplete(pBlkCache->u.Dev.pDevIns,
+ pReq->pvUser, pReq->rcReq);
+ break;
+ }
+ case PDMBLKCACHETYPE_DRV:
+ {
+ pBlkCache->u.Drv.pfnXferComplete(pBlkCache->u.Drv.pDrvIns,
+ pReq->pvUser, pReq->rcReq);
+ break;
+ }
+ case PDMBLKCACHETYPE_USB:
+ {
+ pBlkCache->u.Usb.pfnXferComplete(pBlkCache->u.Usb.pUsbIns,
+ pReq->pvUser, pReq->rcReq);
+ break;
+ }
+ case PDMBLKCACHETYPE_INTERNAL:
+ {
+ pBlkCache->u.Int.pfnXferComplete(pBlkCache->u.Int.pvUser,
+ pReq->pvUser, pReq->rcReq);
+ break;
+ }
+ default:
+ AssertMsgFailed(("Unknown block cache type!\n"));
+ }
+
+ RTMemFree(pReq);
+}
+
+static bool pdmBlkCacheReqUpdate(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEREQ pReq,
+ int rcReq, bool fCallHandler)
+{
+ if (RT_FAILURE(rcReq))
+ ASMAtomicCmpXchgS32(&pReq->rcReq, rcReq, VINF_SUCCESS);
+
+ AssertMsg(pReq->cXfersPending > 0, ("No transfers are pending for this request\n"));
+ uint32_t cXfersPending = ASMAtomicDecU32(&pReq->cXfersPending);
+
+ if (!cXfersPending)
+ {
+ if (fCallHandler)
+ pdmBlkCacheReqComplete(pBlkCache, pReq);
+ return true;
+ }
+
+ LogFlowFunc(("pReq=%#p cXfersPending=%u\n", pReq, cXfersPending));
+ return false;
+}
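+
+/*
+ * Request lifetime sketch: PDMR3BlkCacheRead() and PDMR3BlkCacheWrite()
+ * below take one extra cXfersPending reference up front so the request
+ * cannot complete while it is still being split up; every waiter and
+ * passthrough transfer adds another. The balancing update at the end
+ * passes fCallHandler = false because a synchronous completion is
+ * reported through the return code instead of the callback:
+ *
+ * @code
+ * ASMAtomicIncU32(&pReq->cXfersPending);  // guard reference
+ * // ... pdmBlkCacheEntryWaitersAdd() / pdmBlkCacheRequestPassthrough() ...
+ * if (!pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, false))  // drop the guard
+ *     rc = VINF_AIO_TASK_PENDING;         // transfers still in flight
+ * @endcode
+ */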
+
+VMMR3DECL(int) PDMR3BlkCacheRead(PPDMBLKCACHE pBlkCache, uint64_t off,
+ PCRTSGBUF pSgBuf, size_t cbRead, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+ PPDMBLKCACHEENTRY pEntry;
+ PPDMBLKCACHEREQ pReq;
+
+ LogFlowFunc((": pBlkCache=%#p{%s} off=%llu pSgBuf=%#p cbRead=%u pvUser=%#p\n",
+ pBlkCache, pBlkCache->pszId, off, pSgBuf, cbRead, pvUser));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+ AssertReturn(!pBlkCache->fSuspended, VERR_INVALID_STATE);
+
+ RTSGBUF SgBuf;
+ RTSgBufClone(&SgBuf, pSgBuf);
+
+ /* Allocate new request structure. */
+ pReq = pdmBlkCacheReqAlloc(pvUser);
+ if (RT_UNLIKELY(!pReq))
+ return VERR_NO_MEMORY;
+
+ /* Increment data transfer counter to keep the request valid while we access it. */
+ ASMAtomicIncU32(&pReq->cXfersPending);
+
+ while (cbRead)
+ {
+ size_t cbToRead;
+
+ pEntry = pdmBlkCacheGetCacheEntryByOffset(pBlkCache, off);
+
+ /*
+ * If there is no entry we try to create a new one, evicting unused pages
+ * if the cache is full. If this is not possible we will pass the request
+ * through and skip the caching (all entries may still be in progress so
+ * they can't be evicted).
+ * If we have an entry it can be in one of the LRU lists where the entry
+ * contains data (recently used or frequently used LRU) so we can just read
+ * the data we need and put the entry at the head of the frequently used LRU list.
+ * In case the entry is in one of the ghost lists it doesn't contain any data.
+ * We have to fetch it again, evicting pages from either T1 or T2 to make room.
+ */
+ if (pEntry)
+ {
+ uint64_t offDiff = off - pEntry->Core.Key;
+
+ AssertMsg(off >= pEntry->Core.Key,
+ ("Overflow in calculation off=%llu OffsetAligned=%llu\n",
+ off, pEntry->Core.Key));
+
+ AssertPtr(pEntry->pList);
+
+ cbToRead = RT_MIN(pEntry->cbData - offDiff, cbRead);
+
+ AssertMsg(off + cbToRead <= pEntry->Core.Key + pEntry->Core.KeyLast + 1,
+ ("Buffer of cache entry exceeded off=%llu cbToRead=%d\n",
+ off, cbToRead));
+
+ cbRead -= cbToRead;
+
+ if (!cbRead)
+ STAM_COUNTER_INC(&pCache->cHits);
+ else
+ STAM_COUNTER_INC(&pCache->cPartialHits);
+
+ STAM_COUNTER_ADD(&pCache->StatRead, cbToRead);
+
+ /* Ghost lists contain no data. */
+ if ( (pEntry->pList == &pCache->LruRecentlyUsedIn)
+ || (pEntry->pList == &pCache->LruFrequentlyUsed))
+ {
+ if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
+ PDMBLKCACHE_ENTRY_IS_DIRTY))
+ {
+ /* Entry hasn't completed yet. Append to the waiting list. */
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToRead,
+ false /* fWrite */);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ }
+ else
+ {
+ /* Read as much as we can from the entry. */
+ RTSgBufCopyFromBuf(&SgBuf, pEntry->pbData + offDiff, cbToRead);
+ }
+
+ /* Move this entry to the top position */
+ if (pEntry->pList == &pCache->LruFrequentlyUsed)
+ {
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+ pdmBlkCacheLockLeave(pCache);
+ }
+ /* Release the entry */
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else
+ {
+ uint8_t *pbBuffer = NULL;
+
+ LogFlow(("Fetching data for ghost entry %#p from file\n", pEntry));
+
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryRemoveFromList(pEntry); /* Remove it before we remove data, otherwise it may get freed when evicting data. */
+ bool fEnough = pdmBlkCacheReclaim(pCache, pEntry->cbData, true, &pbBuffer);
+
+ /* Move the entry to Am and fetch it to the cache. */
+ if (fEnough)
+ {
+ pdmBlkCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+ pdmBlkCacheAdd(pCache, pEntry->cbData);
+ pdmBlkCacheLockLeave(pCache);
+
+ if (pbBuffer)
+ pEntry->pbData = pbBuffer;
+ else
+ pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
+ AssertPtr(pEntry->pbData);
+
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToRead,
+ false /* fWrite */);
+ pdmBlkCacheEntryReadFromMedium(pEntry);
+ /* Release the entry */
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else
+ {
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pEntry);
+
+ pdmBlkCacheRequestPassthrough(pBlkCache, pReq,
+ &SgBuf, off, cbToRead,
+ PDMBLKCACHEXFERDIR_READ);
+ }
+ }
+ }
+ else
+ {
+#ifdef VBOX_WITH_IO_READ_CACHE
+ /* No entry found for this offset. Create a new entry and fetch the data to the cache. */
+ PPDMBLKCACHEENTRY pEntryNew = pdmBlkCacheEntryCreate(pBlkCache,
+ off, cbRead,
+ &cbToRead);
+
+ cbRead -= cbToRead;
+
+ if (pEntryNew)
+ {
+ if (!cbRead)
+ STAM_COUNTER_INC(&pCache->cMisses);
+ else
+ STAM_COUNTER_INC(&pCache->cPartialHits);
+
+ pdmBlkCacheEntryWaitersAdd(pEntryNew, pReq,
+ &SgBuf,
+ off - pEntryNew->Core.Key,
+ cbToRead,
+ false /* fWrite */);
+ pdmBlkCacheEntryReadFromMedium(pEntryNew);
+ pdmBlkCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
+ }
+ else
+ {
+ /*
+ * There is not enough free space in the cache.
+ * Pass the request directly to the I/O manager.
+ */
+ LogFlow(("Couldn't evict %u bytes from the cache. Remaining request will be passed through\n", cbToRead));
+
+ pdmBlkCacheRequestPassthrough(pBlkCache, pReq,
+ &SgBuf, off, cbToRead,
+ PDMBLKCACHEXFERDIR_READ);
+ }
+#else
+ /* Clip read size if necessary. */
+ PPDMBLKCACHEENTRY pEntryAbove;
+ pdmBlkCacheGetCacheBestFitEntryByOffset(pBlkCache, off, &pEntryAbove);
+
+ if (pEntryAbove)
+ {
+ if (off + cbRead > pEntryAbove->Core.Key)
+ cbToRead = pEntryAbove->Core.Key - off;
+ else
+ cbToRead = cbRead;
+
+ pdmBlkCacheEntryRelease(pEntryAbove);
+ }
+ else
+ cbToRead = cbRead;
+
+ cbRead -= cbToRead;
+ pdmBlkCacheRequestPassthrough(pBlkCache, pReq,
+ &SgBuf, off, cbToRead,
+ PDMBLKCACHEXFERDIR_READ);
+#endif
+ }
+ off += cbToRead;
+ }
+
+ if (!pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, false))
+ rc = VINF_AIO_TASK_PENDING;
+ else
+ {
+ rc = pReq->rcReq;
+ RTMemFree(pReq);
+ }
+
+ LogFlowFunc((": Leave rc=%Rrc\n", rc));
+
+ return rc;
+}
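+
+/*
+ * Illustrative usage sketch (not part of the original sources; pvBuf, cbBuf,
+ * offDisk and pvCallerReq are hypothetical): a caller must treat
+ * VINF_AIO_TASK_PENDING as "the request completes later through the
+ * completion callback given at cache creation", while any other success
+ * status means the request was served synchronously from the cache.
+ *
+ * @code
+ *     RTSGSEG Seg = { pvBuf, cbBuf };
+ *     RTSGBUF SgBuf;
+ *     RTSgBufInit(&SgBuf, &Seg, 1);
+ *     int rc = PDMR3BlkCacheRead(pBlkCache, offDisk, &SgBuf, cbBuf, pvCallerReq);
+ *     if (rc == VINF_AIO_TASK_PENDING)
+ *     {   // The completion callback will be invoked with pvCallerReq later.
+ *     }
+ *     else if (RT_SUCCESS(rc))
+ *     {   // Everything came from the cache; completed synchronously.
+ *     }
+ * @endcode
+ */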
+
+VMMR3DECL(int) PDMR3BlkCacheWrite(PPDMBLKCACHE pBlkCache, uint64_t off, PCRTSGBUF pSgBuf, size_t cbWrite, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+ PPDMBLKCACHEENTRY pEntry;
+ PPDMBLKCACHEREQ pReq;
+
+ LogFlowFunc((": pBlkCache=%#p{%s} off=%llu pSgBuf=%#p cbWrite=%u pvUser=%#p\n",
+ pBlkCache, pBlkCache->pszId, off, pSgBuf, cbWrite, pvUser));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+ AssertReturn(!pBlkCache->fSuspended, VERR_INVALID_STATE);
+
+ RTSGBUF SgBuf;
+ RTSgBufClone(&SgBuf, pSgBuf);
+
+ /* Allocate new request structure. */
+ pReq = pdmBlkCacheReqAlloc(pvUser);
+ if (RT_UNLIKELY(!pReq))
+ return VERR_NO_MEMORY;
+
+ /* Increment data transfer counter to keep the request valid while we access it. */
+ ASMAtomicIncU32(&pReq->cXfersPending);
+
+ while (cbWrite)
+ {
+ size_t cbToWrite;
+
+ pEntry = pdmBlkCacheGetCacheEntryByOffset(pBlkCache, off);
+ if (pEntry)
+ {
+ /* Write the data into the entry and mark it as dirty */
+ AssertPtr(pEntry->pList);
+
+ uint64_t offDiff = off - pEntry->Core.Key;
+ AssertMsg(off >= pEntry->Core.Key, ("Overflow in calculation off=%llu OffsetAligned=%llu\n", off, pEntry->Core.Key));
+
+ cbToWrite = RT_MIN(pEntry->cbData - offDiff, cbWrite);
+ cbWrite -= cbToWrite;
+
+ if (!cbWrite)
+ STAM_COUNTER_INC(&pCache->cHits);
+ else
+ STAM_COUNTER_INC(&pCache->cPartialHits);
+
+ STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
+
+ /* Ghost lists contain no data. */
+ if ( (pEntry->pList == &pCache->LruRecentlyUsedIn)
+ || (pEntry->pList == &pCache->LruFrequentlyUsed))
+ {
+ /* Check if the entry is dirty. */
+ if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ PDMBLKCACHE_ENTRY_IS_DIRTY,
+ 0))
+ {
+ /* If it is already dirty but not in progress, just update the data in place. */
+ if (!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS))
+ RTSgBufCopyToBuf(&SgBuf, pEntry->pbData + offDiff, cbToWrite);
+ else
+ {
+ /* The data hasn't been written to the file yet. */
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+ }
+
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ }
+ else /* Dirty bit not set */
+ {
+ /*
+ * Check if a read is in progress for this entry.
+ * We have to defer processing in that case.
+ */
+ if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
+ 0))
+ {
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ }
+ else /* I/O in progress flag not set */
+ {
+ /* Write as much as we can into the entry and update the file. */
+ RTSgBufCopyToBuf(&SgBuf, pEntry->pbData + offDiff, cbToWrite);
+
+ bool fCommit = pdmBlkCacheAddDirtyEntry(pBlkCache, pEntry);
+ if (fCommit)
+ pdmBlkCacheCommitDirtyEntries(pCache);
+ }
+ } /* Dirty bit not set */
+
+ /* Move this entry to the top position */
+ if (pEntry->pList == &pCache->LruFrequentlyUsed)
+ {
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+ pdmBlkCacheLockLeave(pCache);
+ }
+
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else /* Entry is on the ghost list */
+ {
+ uint8_t *pbBuffer = NULL;
+
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryRemoveFromList(pEntry); /* Remove it from the list first, otherwise it may get freed while data is being evicted. */
+ bool fEnough = pdmBlkCacheReclaim(pCache, pEntry->cbData, true, &pbBuffer);
+
+ if (fEnough)
+ {
+ /* Move the entry to Am (the frequently-used LRU list) and fetch its data into the cache. */
+ pdmBlkCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+ pdmBlkCacheAdd(pCache, pEntry->cbData);
+ pdmBlkCacheLockLeave(pCache);
+
+ if (pbBuffer)
+ pEntry->pbData = pbBuffer;
+ else
+ pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
+ AssertPtr(pEntry->pbData);
+
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+ pdmBlkCacheEntryReadFromMedium(pEntry);
+
+ /* Release the reference. If it is still needed, the I/O-in-progress flag protects it now. */
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else
+ {
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pEntry);
+ pdmBlkCacheRequestPassthrough(pBlkCache, pReq,
+ &SgBuf, off, cbToWrite,
+ PDMBLKCACHEXFERDIR_WRITE);
+ }
+ }
+ }
+ else /* No entry found */
+ {
+ /*
+ * No entry found. Try to create a new cache entry to store the data in,
+ * and if that fails, write directly to the file.
+ */
+ PPDMBLKCACHEENTRY pEntryNew = pdmBlkCacheEntryCreate(pBlkCache,
+ off, cbWrite,
+ &cbToWrite);
+
+ cbWrite -= cbToWrite;
+
+ if (pEntryNew)
+ {
+ uint64_t offDiff = off - pEntryNew->Core.Key;
+
+ STAM_COUNTER_INC(&pCache->cHits);
+
+ /*
+ * Check if it is possible to just write the data without waiting
+ * for it to get fetched first.
+ */
+ if (!offDiff && pEntryNew->cbData == cbToWrite)
+ {
+ RTSgBufCopyToBuf(&SgBuf, pEntryNew->pbData, cbToWrite);
+
+ bool fCommit = pdmBlkCacheAddDirtyEntry(pBlkCache, pEntryNew);
+ if (fCommit)
+ pdmBlkCacheCommitDirtyEntries(pCache);
+ STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
+ }
+ else
+ {
+ /* Defer the write and fetch the data from the endpoint. */
+ pdmBlkCacheEntryWaitersAdd(pEntryNew, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+ pdmBlkCacheEntryReadFromMedium(pEntryNew);
+ }
+
+ pdmBlkCacheEntryRelease(pEntryNew);
+ }
+ else
+ {
+ /*
+ * There is not enough free space in the cache.
+ * Pass the request directly to the I/O manager.
+ */
+ LogFlow(("Couldn't evict %u bytes from the cache. Remaining request will be passed through\n", cbToWrite));
+
+ STAM_COUNTER_INC(&pCache->cMisses);
+
+ pdmBlkCacheRequestPassthrough(pBlkCache, pReq,
+ &SgBuf, off, cbToWrite,
+ PDMBLKCACHEXFERDIR_WRITE);
+ }
+ }
+
+ off += cbToWrite;
+ }
+
+ if (!pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, false))
+ rc = VINF_AIO_TASK_PENDING;
+ else
+ {
+ rc = pReq->rcReq;
+ RTMemFree(pReq);
+ }
+
+ LogFlowFunc((": Leave rc=%Rrc\n", rc));
+
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheFlush(PPDMBLKCACHE pBlkCache, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHEREQ pReq;
+
+ LogFlowFunc((": pBlkCache=%#p{%s}\n", pBlkCache, pBlkCache->pszId));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+ AssertReturn(!pBlkCache->fSuspended, VERR_INVALID_STATE);
+
+ /* Commit dirty entries in the cache. */
+ pdmBlkCacheCommit(pBlkCache);
+
+ /* Allocate new request structure. */
+ pReq = pdmBlkCacheReqAlloc(pvUser);
+ if (RT_UNLIKELY(!pReq))
+ return VERR_NO_MEMORY;
+
+ rc = pdmBlkCacheRequestPassthrough(pBlkCache, pReq, NULL, 0, 0,
+ PDMBLKCACHEXFERDIR_FLUSH);
+ AssertRC(rc);
+
+ LogFlowFunc((": Leave rc=%Rrc\n", rc));
+ return VINF_AIO_TASK_PENDING;
+}
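+
+/*
+ * Illustrative note (an assumption about typical use, not from the original
+ * sources): because writes above are cached write-back, a guest-initiated
+ * flush should be routed through this function so dirty entries reach the
+ * medium before the flush completes. pThis and pvCallerReq are hypothetical.
+ *
+ * @code
+ *     int rc = PDMR3BlkCacheFlush(pThis->pBlkCache, pvCallerReq);
+ *     Assert(rc == VINF_AIO_TASK_PENDING || RT_FAILURE(rc));
+ *     // VINF_AIO_TASK_PENDING: completion is signalled asynchronously.
+ * @endcode
+ */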
+
+VMMR3DECL(int) PDMR3BlkCacheDiscard(PPDMBLKCACHE pBlkCache, PCRTRANGE paRanges,
+ unsigned cRanges, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+ PPDMBLKCACHEENTRY pEntry;
+ PPDMBLKCACHEREQ pReq;
+
+ LogFlowFunc((": pBlkCache=%#p{%s} paRanges=%#p cRanges=%u pvUser=%#p\n",
+ pBlkCache, pBlkCache->pszId, paRanges, cRanges, pvUser));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+ AssertReturn(!pBlkCache->fSuspended, VERR_INVALID_STATE);
+
+ /* Allocate new request structure. */
+ pReq = pdmBlkCacheReqAlloc(pvUser);
+ if (RT_UNLIKELY(!pReq))
+ return VERR_NO_MEMORY;
+
+ /* Increment data transfer counter to keep the request valid while we access it. */
+ ASMAtomicIncU32(&pReq->cXfersPending);
+
+ for (unsigned i = 0; i < cRanges; i++)
+ {
+ uint64_t offCur = paRanges[i].offStart;
+ size_t cbLeft = paRanges[i].cbRange;
+
+ while (cbLeft)
+ {
+ size_t cbThisDiscard = 0;
+
+ pEntry = pdmBlkCacheGetCacheEntryByOffset(pBlkCache, offCur);
+
+ if (pEntry)
+ {
+ /* Found an entry covering this offset; determine how much of it this discard affects. */
+ AssertPtr(pEntry->pList);
+
+ uint64_t offDiff = offCur - pEntry->Core.Key;
+
+ AssertMsg(offCur >= pEntry->Core.Key,
+ ("Overflow in calculation offCur=%llu OffsetAligned=%llu\n",
+ offCur, pEntry->Core.Key));
+
+ cbThisDiscard = RT_MIN(pEntry->cbData - offDiff, cbLeft);
+
+ /* Ghost lists contain no data. */
+ if ( (pEntry->pList == &pCache->LruRecentlyUsedIn)
+ || (pEntry->pList == &pCache->LruFrequentlyUsed))
+ {
+ /* Check if the entry is dirty. */
+ if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ PDMBLKCACHE_ENTRY_IS_DIRTY,
+ 0))
+ {
+ /* If it is dirty but not yet in progress, remove it. */
+ if (!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS))
+ {
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryRemoveFromList(pEntry);
+
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pEntry);
+ }
+ else
+ {
+#if 0
+ /* The data isn't written to the file yet */
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+#endif
+ }
+
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else /* Dirty bit not set */
+ {
+ /*
+ * Check if a read is in progress for this entry.
+ * We have to defer processing in that case.
+ */
+ if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
+ PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
+ 0))
+ {
+#if 0
+ pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
+ &SgBuf, offDiff, cbToWrite,
+ true /* fWrite */);
+#endif
+ STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+ else /* I/O in progress flag not set */
+ {
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryRemoveFromList(pEntry);
+
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pEntry);
+ }
+ } /* Dirty bit not set */
+ }
+ else /* Entry is on a ghost list: just remove the cache entry. */
+ {
+ pdmBlkCacheLockEnter(pCache);
+ pdmBlkCacheEntryRemoveFromList(pEntry);
+
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+ RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
+ STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ pdmBlkCacheLockLeave(pCache);
+
+ RTMemFree(pEntry);
+ }
+ }
+ /* else: no entry found. */
+
+ offCur += cbThisDiscard;
+ cbLeft -= cbThisDiscard;
+ }
+ }
+
+ if (!pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, false))
+ rc = VINF_AIO_TASK_PENDING;
+ else
+ {
+ rc = pReq->rcReq;
+ RTMemFree(pReq);
+ }
+
+ LogFlowFunc((": Leave rc=%Rrc\n", rc));
+
+ return rc;
+}
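+
+/*
+ * Illustrative usage sketch (offsets and sizes are hypothetical): dropping
+ * two ranges from the cache, e.g. after the guest issued TRIM/UNMAP for them.
+ *
+ * @code
+ *     RTRANGE aRanges[2] =
+ *     {
+ *         { 0,        _64K },  // offStart, cbRange
+ *         { 16 * _1M, _1M  }
+ *     };
+ *     int rc = PDMR3BlkCacheDiscard(pBlkCache, aRanges, RT_ELEMENTS(aRanges), pvCallerReq);
+ * @endcode
+ */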
+
+/**
+ * Completes a task segment, freeing all resources, and completes the task
+ * handle if everything was transferred.
+ *
+ * @returns Next task segment handle.
+ * @param pBlkCache The endpoint block cache.
+ * @param pWaiter Task segment to complete.
+ * @param rc Status code to set.
+ */
+static PPDMBLKCACHEWAITER pdmBlkCacheWaiterComplete(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEWAITER pWaiter, int rc)
+{
+ PPDMBLKCACHEWAITER pNext = pWaiter->pNext;
+ PPDMBLKCACHEREQ pReq = pWaiter->pReq;
+
+ pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, true);
+
+ RTMemFree(pWaiter);
+
+ return pNext;
+}
+
+static void pdmBlkCacheIoXferCompleteEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEIOXFER hIoXfer, int rcIoXfer)
+{
+ PPDMBLKCACHEENTRY pEntry = hIoXfer->pEntry;
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+
+ /* Reference the entry now as we are clearing the I/O in progress flag
+ * which protected the entry till now. */
+ pdmBlkCacheEntryRef(pEntry);
+
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ pEntry->fFlags &= ~PDMBLKCACHE_ENTRY_IO_IN_PROGRESS;
+
+ /* Process the waiting segment list. The data in the entry might have changed in the meantime. */
+ bool fDirty = false;
+ PPDMBLKCACHEWAITER pComplete = pEntry->pWaitingHead;
+ PPDMBLKCACHEWAITER pCurr = pComplete;
+
+ AssertMsg((pCurr && pEntry->pWaitingTail) || (!pCurr && !pEntry->pWaitingTail),
+ ("The list tail was not updated correctly\n"));
+ pEntry->pWaitingTail = NULL;
+ pEntry->pWaitingHead = NULL;
+
+ if (hIoXfer->enmXferDir == PDMBLKCACHEXFERDIR_WRITE)
+ {
+ /*
+ * An error here is difficult to handle because the original request has already completed.
+ * For now the error is logged and the VM is suspended.
+ * If the user resumes, the entry is written again in the hope that the
+ * problem has been fixed and the next write succeeds.
+ */
+ if (RT_FAILURE(rcIoXfer))
+ {
+ LogRel(("I/O cache: Error while writing entry at offset %llu (%u bytes) to medium \"%s\" (rc=%Rrc)\n",
+ pEntry->Core.Key, pEntry->cbData, pBlkCache->pszId, rcIoXfer));
+
+ if (!ASMAtomicXchgBool(&pCache->fIoErrorVmSuspended, true))
+ {
+ int rc = VMSetRuntimeError(pCache->pVM, VMSETRTERR_FLAGS_SUSPEND | VMSETRTERR_FLAGS_NO_WAIT, "BLKCACHE_IOERR",
+ N_("The I/O cache encountered an error while updating data in medium \"%s\" (rc=%Rrc). "
+ "Make sure there is enough free space on the disk and that the disk is working properly. "
+ "Operation can be resumed afterwards"),
+ pBlkCache->pszId, rcIoXfer);
+ AssertRC(rc);
+ }
+
+ /* Mark the entry as dirty again to get it added to the list later on. */
+ fDirty = true;
+ }
+
+ pEntry->fFlags &= ~PDMBLKCACHE_ENTRY_IS_DIRTY;
+
+ while (pCurr)
+ {
+ AssertMsg(pCurr->fWrite, ("Completed write entries should never have read tasks attached\n"));
+
+ RTSgBufCopyToBuf(&pCurr->SgBuf, pEntry->pbData + pCurr->offCacheEntry, pCurr->cbTransfer);
+ fDirty = true;
+ pCurr = pCurr->pNext;
+ }
+ }
+ else
+ {
+ AssertMsg(hIoXfer->enmXferDir == PDMBLKCACHEXFERDIR_READ, ("Invalid transfer type\n"));
+ AssertMsg(!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IS_DIRTY),
+ ("Invalid flags set\n"));
+
+ while (pCurr)
+ {
+ if (pCurr->fWrite)
+ {
+ RTSgBufCopyToBuf(&pCurr->SgBuf, pEntry->pbData + pCurr->offCacheEntry, pCurr->cbTransfer);
+ fDirty = true;
+ }
+ else
+ RTSgBufCopyFromBuf(&pCurr->SgBuf, pEntry->pbData + pCurr->offCacheEntry, pCurr->cbTransfer);
+
+ pCurr = pCurr->pNext;
+ }
+ }
+
+ bool fCommit = false;
+ if (fDirty)
+ fCommit = pdmBlkCacheAddDirtyEntry(pBlkCache, pEntry);
+
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ /* Drop the reference; the entry is no longer protected unless another write was issued for it. */
+ pdmBlkCacheEntryRelease(pEntry);
+
+ if (fCommit)
+ pdmBlkCacheCommitDirtyEntries(pCache);
+
+ /* Complete waiters now. */
+ while (pComplete)
+ pComplete = pdmBlkCacheWaiterComplete(pBlkCache, pComplete, rcIoXfer);
+}
+
+VMMR3DECL(void) PDMR3BlkCacheIoXferComplete(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEIOXFER hIoXfer, int rcIoXfer)
+{
+ LogFlowFunc(("pBlkCache=%#p hIoXfer=%#p rcIoXfer=%Rrc\n", pBlkCache, hIoXfer, rcIoXfer));
+
+ if (hIoXfer->fIoCache)
+ pdmBlkCacheIoXferCompleteEntry(pBlkCache, hIoXfer, rcIoXfer);
+ else
+ pdmBlkCacheReqUpdate(pBlkCache, hIoXfer->pReq, rcIoXfer, true);
+
+ ASMAtomicDecU32(&pBlkCache->cIoXfersActive);
+ pdmBlkCacheR3TraceMsgF(pBlkCache, "BlkCache: I/O req %#p (%RTbool) completed (%u now active)",
+ hIoXfer, hIoXfer->fIoCache, pBlkCache->cIoXfersActive);
+ RTMemFree(hIoXfer);
+}
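+
+/*
+ * Illustrative sketch (the driver type and names are hypothetical): the
+ * component owning the cache calls PDMR3BlkCacheIoXferComplete from its own
+ * completion path, handing back the transfer handle it received when the
+ * I/O was enqueued.
+ *
+ * @code
+ *     static void drvExampleXferCompleted(PDRVEXAMPLE pThis, PPDMBLKCACHEIOXFER hIoXfer, int rcXfer)
+ *     {
+ *         PDMR3BlkCacheIoXferComplete(pThis->pBlkCache, hIoXfer, rcXfer);
+ *     }
+ * @endcode
+ */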
+
+/**
+ * Callback for the AVL do-with-all routine. Waits for a cache entry to finish any pending I/O.
+ *
+ * @returns IPRT status code.
+ * @param pNode The cache entry node to quiesce.
+ * @param pvUser Opaque user data.
+ */
+static DECLCALLBACK(int) pdmBlkCacheEntryQuiesce(PAVLRU64NODECORE pNode, void *pvUser)
+{
+ PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)pNode;
+ PPDMBLKCACHE pBlkCache = pEntry->pBlkCache;
+ NOREF(pvUser);
+
+ while (ASMAtomicReadU32(&pEntry->fFlags) & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS)
+ {
+ /* Leave the locks to let the I/O thread make progress but reference the entry to prevent eviction. */
+ pdmBlkCacheEntryRef(pEntry);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ RTThreadSleep(1);
+
+ /* Re-enter all locks and drop the reference. */
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ pdmBlkCacheEntryRelease(pEntry);
+ }
+
+ AssertMsg(!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS),
+ ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
+
+ return VINF_SUCCESS;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheSuspend(PPDMBLKCACHE pBlkCache)
+{
+ int rc = VINF_SUCCESS;
+ LogFlowFunc(("pBlkCache=%#p\n", pBlkCache));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+
+ if (!ASMAtomicReadBool(&pBlkCache->pCache->fIoErrorVmSuspended))
+ pdmBlkCacheCommit(pBlkCache); /* Can issue new I/O requests. */
+ ASMAtomicXchgBool(&pBlkCache->fSuspended, true);
+
+ /* Wait for all I/O to complete. */
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ rc = RTAvlrU64DoWithAll(pBlkCache->pTree, true, pdmBlkCacheEntryQuiesce, NULL);
+ AssertRC(rc);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ return rc;
+}
+
+VMMR3DECL(int) PDMR3BlkCacheResume(PPDMBLKCACHE pBlkCache)
+{
+ LogFlowFunc(("pBlkCache=%#p\n", pBlkCache));
+
+ AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
+
+ ASMAtomicXchgBool(&pBlkCache->fSuspended, false);
+
+ return VINF_SUCCESS;
+}
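+
+/*
+ * Illustrative sketch (assuming the usual PDM driver callback wiring;
+ * PDRVEXAMPLE is hypothetical): PDMR3BlkCacheSuspend/PDMR3BlkCacheResume are
+ * meant to bracket VM suspend and resume processing.
+ *
+ * @code
+ *     static DECLCALLBACK(void) drvExampleSuspend(PPDMDRVINS pDrvIns)
+ *     {
+ *         PDRVEXAMPLE pThis = PDMINS_2_DATA(pDrvIns, PDRVEXAMPLE);
+ *         PDMR3BlkCacheSuspend(pThis->pBlkCache);
+ *     }
+ * @endcode
+ */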
+
+VMMR3DECL(int) PDMR3BlkCacheClear(PPDMBLKCACHE pBlkCache)
+{
+ PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+
+ /*
+ * Commit all dirty entries now (they are waited on for completion during the
+ * destruction of the AVL tree below).
+ * The exception is if the VM was paused because of an I/O error before.
+ */
+ if (!ASMAtomicReadBool(&pCache->fIoErrorVmSuspended))
+ pdmBlkCacheCommit(pBlkCache);
+
+ /* Make sure nobody is accessing the cache while we delete the tree. */
+ pdmBlkCacheLockEnter(pCache);
+ RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
+ RTAvlrU64Destroy(pBlkCache->pTree, pdmBlkCacheEntryDestroy, pCache);
+ RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
+
+ pdmBlkCacheLockLeave(pCache);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMCritSect.cpp b/src/VBox/VMM/VMMR3/PDMCritSect.cpp
new file mode 100644
index 00000000..4210e8ec
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMCritSect.cpp
@@ -0,0 +1,1342 @@
+/* $Id: PDMCritSect.cpp $ */
+/** @file
+ * PDM - Critical Sections, Ring-3.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
+#include "PDMInternal.h"
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/sup.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/getopt.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal);
+static int pdmR3CritSectRwDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTRWINT pCritSect, PPDMCRITSECTRWINT pPrev, bool fFinal);
+static FNDBGFINFOARGVINT pdmR3CritSectInfo;
+static FNDBGFINFOARGVINT pdmR3CritSectRwInfo;
+
+
+
+/**
+ * Register statistics and info items related to the critical sections.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3CritSectBothInitStatsAndInfo(PVM pVM)
+{
+ /*
+ * Statistics.
+ */
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatQueuedCritSectLeaves, STAMTYPE_COUNTER, "/PDM/CritSects/00-QueuedLeaves", STAMUNIT_OCCURENCES,
+ "Number of times a critical section leave request needed to be queued for ring-3 execution.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatAbortedCritSectEnters, STAMTYPE_COUNTER, "/PDM/CritSects/00-AbortedEnters", STAMUNIT_OCCURENCES,
+ "Number of times we've successfully aborted a wait in ring-0.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectEntersWhileAborting, STAMTYPE_COUNTER, "/PDM/CritSects/00-EntersWhileAborting", STAMUNIT_OCCURENCES,
+ "Number of times we've got the critical section ownership while trying to abort a wait due to VERR_INTERRUPTED.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectVerrInterrupted, STAMTYPE_COUNTER, "/PDM/CritSects/00-VERR_INTERRUPTED", STAMUNIT_OCCURENCES,
+ "Number of VERR_INTERRUPTED returns.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectVerrTimeout, STAMTYPE_COUNTER, "/PDM/CritSects/00-VERR_TIMEOUT", STAMUNIT_OCCURENCES,
+ "Number of VERR_TIMEOUT returns.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectNonInterruptibleWaits, STAMTYPE_COUNTER, "/PDM/CritSects/00-Non-interruptible-Waits-VINF_SUCCESS",
+ STAMUNIT_OCCURENCES, "Number of non-interruptible waits for rcBusy=VINF_SUCCESS");
+
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwExclVerrInterrupted, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Excl-VERR_INTERRUPTED", STAMUNIT_OCCURENCES,
+ "Number of VERR_INTERRUPTED returns in exclusive mode.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwExclVerrTimeout, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Excl-VERR_TIMEOUT", STAMUNIT_OCCURENCES,
+ "Number of VERR_TIMEOUT returns in exclusive mode.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Excl-Non-interruptible-Waits-VINF_SUCCESS",
+ STAMUNIT_OCCURENCES, "Number of non-interruptible waits for rcBusy=VINF_SUCCESS in exclusive mode");
+
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwEnterSharedWhileAborting, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-EnterSharedWhileAborting", STAMUNIT_OCCURENCES,
+ "Number of times we've got the critical section ownership in shared mode while trying to abort a wait due to VERR_INTERRUPTED or VERR_TIMEOUT.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedVerrInterrupted, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-VERR_INTERRUPTED", STAMUNIT_OCCURENCES,
+ "Number of VERR_INTERRUPTED returns in exclusive mode.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedVerrTimeout, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-VERR_TIMEOUT", STAMUNIT_OCCURENCES,
+ "Number of VERR_TIMEOUT returns in exclusive mode.");
+ STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-Non-interruptible-Waits-VINF_SUCCESS",
+ STAMUNIT_OCCURENCES, "Number of non-interruptible waits for rcBusy=VINF_SUCCESS in exclusive mode");
+
+ /*
+ * Info items.
+ */
+ DBGFR3InfoRegisterInternalArgv(pVM, "critsect", "Show critical section: critsect [-v] [pattern[...]]", pdmR3CritSectInfo, 0);
+ DBGFR3InfoRegisterInternalArgv(pVM, "critsectrw", "Show read/write critical section: critsectrw [-v] [pattern[...]]",
+ pdmR3CritSectRwInfo, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deletes all remaining critical sections.
+ *
+ * This is called at the very end of the termination process. It is also called
+ * at the end of vmR3CreateU failure cleanup, which may cause it to be called
+ * twice depending on where vmR3CreateU actually failed. We have to do the
+ * latter call because other components expect the critical sections to be
+ * automatically deleted.
+ *
+ * @returns VBox status code.
+ * First error code, rest is lost.
+ * @param pVM The cross context VM structure.
+ * @remark Don't confuse this with PDMR3CritSectDelete.
+ */
+VMMR3_INT_DECL(int) PDMR3CritSectBothTerm(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ while (pUVM->pdm.s.pCritSects)
+ {
+ int rc2 = pdmR3CritSectDeleteOne(pVM, pUVM, pUVM->pdm.s.pCritSects, NULL, true /* final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ while (pUVM->pdm.s.pRwCritSects)
+ {
+ int rc2 = pdmR3CritSectRwDeleteOne(pVM, pUVM, pUVM->pdm.s.pRwCritSects, NULL, true /* final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Initializes a critical section and inserts it into the list.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect The critical section.
+ * @param pvKey The owner key.
+ * @param SRC_POS The source position.
+ * @param fUniqueClass Whether to create a unique lock validator class for
+ * it or not.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+static int pdmR3CritSectInitOne(PVM pVM, PPDMCRITSECTINT pCritSect, void *pvKey, RT_SRC_POS_DECL, bool fUniqueClass,
+ const char *pszNameFmt, va_list va)
+{
+ VM_ASSERT_EMT(pVM);
+ Assert(pCritSect->Core.u32Magic != RTCRITSECT_MAGIC);
+
+ /*
+ * Allocate the semaphore.
+ */
+ AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.EventSem));
+ int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.EventSem);
+ if (RT_SUCCESS(rc))
+ {
+ /* Only format the name once. */
+ char *pszName = RTStrAPrintf2V(pszNameFmt, va); /** @todo plug the "leak"... */
+ if (pszName)
+ {
+ RT_SRC_POS_NOREF(); RT_NOREF(fUniqueClass);
+#ifndef PDMCRITSECT_STRICT
+ pCritSect->Core.pValidatorRec = NULL;
+#else
+ rc = RTLockValidatorRecExclCreate(&pCritSect->Core.pValidatorRec,
+# ifdef RT_LOCK_STRICT_ORDER
+ fUniqueClass
+ ? RTLockValidatorClassCreateUnique(RT_SRC_POS_ARGS, "%s", pszName)
+ : RTLockValidatorClassForSrcPos(RT_SRC_POS_ARGS, "%s", pszName),
+# else
+ NIL_RTLOCKVALCLASS,
+# endif
+ RTLOCKVAL_SUB_CLASS_NONE,
+ pCritSect, true, "%s", pszName);
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the structure (first bit is c&p from RTCritSectInitEx).
+ */
+ pCritSect->Core.u32Magic = RTCRITSECT_MAGIC;
+ pCritSect->Core.fFlags = 0;
+ pCritSect->Core.cNestings = 0;
+ pCritSect->Core.cLockers = -1;
+ pCritSect->Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
+ pCritSect->pvKey = pvKey;
+ pCritSect->fAutomaticDefaultCritsect = false;
+ pCritSect->fUsedByTimerOrSimilar = false;
+ pCritSect->hEventToSignal = NIL_SUPSEMEVENT;
+ pCritSect->pszName = pszName;
+ pCritSect->pSelfR3 = (PPDMCRITSECT)pCritSect;
+
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLockBusy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLockBusy", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZWait, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/ContentionRZWait", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3Wait, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/ContentionR3Wait", pCritSect->pszName);
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
+#endif
+
+ /*
+ * Prepend to the list.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pCritSect->pNext = pUVM->pdm.s.pCritSects;
+ pUVM->pdm.s.pCritSects = pCritSect;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ Log(("pdmR3CritSectInitOne: %p %s\n", pCritSect, pszName));
+
+ return VINF_SUCCESS;
+ }
+
+ RTStrFree(pszName);
+ }
+ else
+ rc = VERR_NO_STR_MEMORY;
+ SUPSemEventClose(pVM->pSession, (SUPSEMEVENT)pCritSect->Core.EventSem);
+ }
+ return rc;
+}
+
+
+/**
+ * Initializes a read/write critical section and inserts it into the list.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect The read/write critical section.
+ * @param pvKey The owner key.
+ * @param SRC_POS The source position.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+static int pdmR3CritSectRwInitOne(PVM pVM, PPDMCRITSECTRWINT pCritSect, void *pvKey, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ VM_ASSERT_EMT(pVM);
+ Assert(pCritSect->Core.u32Magic != RTCRITSECTRW_MAGIC);
+ AssertMsgReturn(((uintptr_t)&pCritSect->Core & 63) == 0, ("&Core=%p, must be 64-byte aligned!\n", &pCritSect->Core),
+ VERR_PDM_CRITSECTRW_MISALIGNED);
+ AssertMsgReturn(((uintptr_t)&pCritSect->Core.u & (sizeof(pCritSect->Core.u.u128) - 1)) == 0 /* paranoia */,
+ ("&Core.u=%p, must be 16-byte aligned!\n", &pCritSect->Core.u),
+ VERR_PDM_CRITSECTRW_MISALIGNED);
+
+ /*
+ * Allocate the semaphores.
+ */
+ AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.hEvtWrite));
+ int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.hEvtWrite);
+ if (RT_SUCCESS(rc))
+ {
+ AssertCompile(sizeof(SUPSEMEVENTMULTI) == sizeof(pCritSect->Core.hEvtRead));
+ rc = SUPSemEventMultiCreate(pVM->pSession, (PSUPSEMEVENTMULTI)&pCritSect->Core.hEvtRead);
+ if (RT_SUCCESS(rc))
+ {
+ /* Only format the name once. */
+ char *pszName = RTStrAPrintf2V(pszNameFmt, va); /** @todo plug the "leak"... */
+ if (pszName)
+ {
+ pCritSect->Core.pValidatorRead = NULL;
+ pCritSect->Core.pValidatorWrite = NULL;
+ RT_SRC_POS_NOREF();
+#ifdef PDMCRITSECTRW_STRICT
+# ifdef RT_LOCK_STRICT_ORDER
+ RTLOCKVALCLASS hClass = RTLockValidatorClassForSrcPos(RT_SRC_POS_ARGS, "%s", pszName);
+# else
+ RTLOCKVALCLASS hClass = NIL_RTLOCKVALCLASS;
+# endif
+ rc = RTLockValidatorRecExclCreate(&pCritSect->Core.pValidatorWrite, hClass, RTLOCKVAL_SUB_CLASS_NONE,
+ pCritSect, true, "%s", pszName);
+ if (RT_SUCCESS(rc))
+ rc = RTLockValidatorRecSharedCreate(&pCritSect->Core.pValidatorRead, hClass, RTLOCKVAL_SUB_CLASS_NONE,
+ pCritSect, false /*fSignaller*/, true, "%s", pszName);
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the structure (first bit is c&p from RTCritSectRwInitEx).
+ */
+ pCritSect->Core.u32Magic = RTCRITSECTRW_MAGIC;
+ pCritSect->Core.fNeedReset = false;
+ pCritSect->Core.afPadding[0] = false;
+ pCritSect->Core.fFlags = 0;
+ pCritSect->Core.u.u128.s.Lo = 0;
+ pCritSect->Core.u.u128.s.Hi = 0;
+ pCritSect->Core.u.s.hNativeWriter = NIL_RTNATIVETHREAD;
+ pCritSect->Core.cWriterReads = 0;
+ pCritSect->Core.cWriteRecursions = 0;
+ pCritSect->pvKey = pvKey;
+ pCritSect->pszName = pszName;
+ pCritSect->pSelfR3 = (PPDMCRITSECTRW)pCritSect;
+
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZEnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZEnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLeaveExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZLeaveExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZEnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZEnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLeaveShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZLeaveShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3EnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionR3EnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3LeaveExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionR3LeaveExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3EnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionR3EnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatRZEnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/RZEnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatRZEnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/RZEnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatR3EnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/R3EnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatR3EnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/R3EnterShared", pCritSect->pszName);
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pVM, &pCritSect->StatWriteLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSectsRw/%s/WriteLocked", pCritSect->pszName);
+#endif
+
+ /*
+ * Prepend to the list.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pCritSect->pNext = pUVM->pdm.s.pRwCritSects;
+ pUVM->pdm.s.pRwCritSects = pCritSect;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ LogIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_PDM_CRITSECTRW, ("pdmR3CritSectRwInitOne: %p %s\n", pCritSect, pszName));
+
+ return VINF_SUCCESS;
+ }
+
+ RTStrFree(pszName);
+ }
+ else
+ rc = VERR_NO_STR_MEMORY;
+ SUPSemEventMultiClose(pVM->pSession, (SUPSEMEVENTMULTI)pCritSect->Core.hEvtRead);
+ }
+ SUPSemEventClose(pVM->pSession, (SUPSEMEVENT)pCritSect->Core.hEvtWrite);
+ }
+ return rc;
+}
+
+
+/**
+ * Initializes a PDM critical section for internal use.
+ *
+ * The PDM critical sections are derived from the IPRT critical sections, but
+ * works in ring-0 and raw-mode context as well.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect Pointer to the critical section.
+ * @param SRC_POS Use RT_SRC_POS.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
+{
+#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
+ AssertCompile(sizeof(pCritSect->padding) >= sizeof(pCritSect->s));
+#endif
+ Assert(RT_ALIGN_P(pCritSect, sizeof(uintptr_t)) == pCritSect);
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectInitOne(pVM, &pCritSect->s, pCritSect, RT_SRC_POS_ARGS, false /*fUniqueClass*/, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
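+
+/*
+ * Illustrative usage sketch (the device structure, member and name format
+ * are hypothetical):
+ *
+ * @code
+ *     int rc = PDMR3CritSectInit(pVM, &pThis->TimerLock, RT_SRC_POS,
+ *                                "ExampleDev#%u", pDevIns->iInstance);
+ *     AssertRCReturn(rc, rc);
+ *     // ... use PDMCritSectEnter/PDMCritSectLeave on &pThis->TimerLock ...
+ *     PDMR3CritSectDelete(pVM, &pThis->TimerLock);    // on destruction
+ * @endcode
+ */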
+
+
+/**
+ * Initializes a PDM read/write critical section for internal use.
+ *
+ * The PDM read/write critical sections are derived from the IPRT read/write
+ * critical sections, but works in ring-0 and raw-mode context as well.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param SRC_POS Use RT_SRC_POS.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3CritSectRwInit(PVM pVM, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
+{
+#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
+ AssertCompile(sizeof(pCritSect->padding) >= sizeof(pCritSect->s));
+#endif
+ Assert(RT_ALIGN_P(pCritSect, sizeof(uintptr_t)) == pCritSect);
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pCritSect, RT_SRC_POS_ARGS, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Initializes a PDM critical section for a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pCritSect Pointer to the critical section.
+ * @param SRC_POS The source position. Optional.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+int pdmR3CritSectInitDevice(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ return pdmR3CritSectInitOne(pVM, &pCritSect->s, pDevIns, RT_SRC_POS_ARGS, false /*fUniqueClass*/, pszNameFmt, va);
+}
+
+
+/**
+ * Initializes a PDM read/write critical section for a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param SRC_POS The source position. Optional.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+int pdmR3CritSectRwInitDevice(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ return pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pDevIns, RT_SRC_POS_ARGS, pszNameFmt, va);
+}
+
+
+/**
+ * Initializes the automatic default PDM critical section for a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pCritSect Pointer to the critical section.
+ * @param SRC_POS The source position. Optional.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ */
+int pdmR3CritSectInitDeviceAuto(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...)
+{
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectInitOne(pVM, &pCritSect->s, pDevIns, RT_SRC_POS_ARGS, true /*fUniqueClass*/, pszNameFmt, va);
+ if (RT_SUCCESS(rc))
+ pCritSect->s.fAutomaticDefaultCritsect = true;
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Initializes a PDM critical section for a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param pCritSect Pointer to the critical section.
+ * @param SRC_POS The source position. Optional.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ */
+int pdmR3CritSectInitDriver(PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...)
+{
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectInitOne(pVM, &pCritSect->s, pDrvIns, RT_SRC_POS_ARGS, false /*fUniqueClass*/, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Initializes a PDM read/write critical section for a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param SRC_POS The source position. Optional.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ */
+int pdmR3CritSectRwInitDriver(PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...)
+{
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pDrvIns, RT_SRC_POS_ARGS, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Deletes one critical section.
+ *
+ * @returns Return code from RTCritSectDelete.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUVM The user mode VM handle.
+ * @param pCritSect The critical section.
+ * @param pPrev The previous critical section in the list.
+ * @param fFinal Set if this is the final call and statistics shouldn't be deregistered.
+ *
+ * @remarks Caller must have entered the ListCritSect.
+ */
+static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal)
+{
+ /*
+ * Assert free waiters and so on (c&p from RTCritSectDelete).
+ */
+ Assert(pCritSect->Core.u32Magic == RTCRITSECT_MAGIC);
+ //Assert(pCritSect->Core.cNestings == 0); - we no longer reset this when leaving.
+ Assert(pCritSect->Core.cLockers == -1);
+ Assert(pCritSect->Core.NativeThreadOwner == NIL_RTNATIVETHREAD);
+ Assert(RTCritSectIsOwner(&pUVM->pdm.s.ListCritSect));
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ pPrev->pNext = pCritSect->pNext;
+ else
+ pUVM->pdm.s.pCritSects = pCritSect->pNext;
+
+ /*
+ * Delete it (parts taken from RTCritSectDelete).
+ * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
+ */
+ ASMAtomicWriteU32(&pCritSect->Core.u32Magic, 0);
+ SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->Core.EventSem;
+ pCritSect->Core.EventSem = NIL_RTSEMEVENT;
+ while (pCritSect->Core.cLockers-- >= 0)
+ SUPSemEventSignal(pVM->pSession, hEvent);
+ ASMAtomicWriteS32(&pCritSect->Core.cLockers, -1);
+ int rc = SUPSemEventClose(pVM->pSession, hEvent);
+ AssertRC(rc);
+ RTLockValidatorRecExclDestroy(&pCritSect->Core.pValidatorRec);
+ pCritSect->pNext = NULL;
+ pCritSect->pvKey = NULL;
+ if (!fFinal)
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/CritSects/%s/*", pCritSect->pszName);
+ RTStrFree((char *)pCritSect->pszName);
+ pCritSect->pszName = NULL;
+ return rc;
+}
+
+
+/**
+ * Deletes one read/write critical section.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUVM The user mode VM handle.
+ * @param pCritSect The read/write critical section.
+ * @param pPrev The previous critical section in the list.
+ * @param fFinal Set if this is the final call and statistics shouldn't be deregistered.
+ *
+ * @remarks Caller must have entered the ListCritSect.
+ */
+static int pdmR3CritSectRwDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTRWINT pCritSect, PPDMCRITSECTRWINT pPrev, bool fFinal)
+{
+ /*
+ * Assert free waiters and so on (c&p from RTCritSectRwDelete).
+ */
+ Assert(pCritSect->Core.u32Magic == RTCRITSECTRW_MAGIC);
+ //Assert(pCritSect->Core.cNestings == 0);
+ //Assert(pCritSect->Core.cLockers == -1);
+ Assert(pCritSect->Core.u.s.hNativeWriter == NIL_RTNATIVETHREAD);
+
+ /*
+ * Invalidate the structure and free the semaphores.
+ */
+ if (!ASMAtomicCmpXchgU32(&pCritSect->Core.u32Magic, RTCRITSECTRW_MAGIC_DEAD, RTCRITSECTRW_MAGIC))
+ AssertFailed();
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ pPrev->pNext = pCritSect->pNext;
+ else
+ pUVM->pdm.s.pRwCritSects = pCritSect->pNext;
+
+ /*
+ * Delete it (parts taken from RTCritSectRwDelete).
+ */
+ pCritSect->Core.fFlags = 0;
+ pCritSect->Core.u.s.u64State = 0;
+
+ SUPSEMEVENT hEvtWrite = (SUPSEMEVENT)pCritSect->Core.hEvtWrite;
+ pCritSect->Core.hEvtWrite = NIL_RTSEMEVENT;
+ AssertCompile(sizeof(hEvtWrite) == sizeof(pCritSect->Core.hEvtWrite));
+
+ SUPSEMEVENTMULTI hEvtRead = (SUPSEMEVENTMULTI)pCritSect->Core.hEvtRead;
+ pCritSect->Core.hEvtRead = NIL_RTSEMEVENTMULTI;
+ AssertCompile(sizeof(hEvtRead) == sizeof(pCritSect->Core.hEvtRead));
+
+ int rc1 = SUPSemEventClose(pVM->pSession, hEvtWrite); AssertRC(rc1);
+ int rc2 = SUPSemEventMultiClose(pVM->pSession, hEvtRead); AssertRC(rc2);
+
+ RTLockValidatorRecSharedDestroy(&pCritSect->Core.pValidatorRead);
+ RTLockValidatorRecExclDestroy(&pCritSect->Core.pValidatorWrite);
+
+ pCritSect->pNext = NULL;
+ pCritSect->pvKey = NULL;
+ if (!fFinal)
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/CritSectsRw/%s/*", pCritSect->pszName);
+ RTStrFree((char *)pCritSect->pszName);
+ pCritSect->pszName = NULL;
+
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
+}
+
+
+/**
+ * Deletes all critical sections with a given initializer key.
+ *
+ * @returns VBox status code.
+ * The entire list is processed on failure, so we'll only
+ * return the first error code. This shouldn't be a problem
+ * since errors really shouldn't happen here.
+ * @param pVM The cross context VM structure.
+ * @param pvKey The initializer key.
+ */
+static int pdmR3CritSectDeleteByKey(PVM pVM, void *pvKey)
+{
+ /*
+ * Iterate the list and match key.
+ */
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ PPDMCRITSECTINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTINT pCur = pUVM->pdm.s.pCritSects;
+ while (pCur)
+ {
+ if (pCur->pvKey == pvKey)
+ {
+ int rc2 = pdmR3CritSectDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Deletes all read/write critical sections with a given initializer key.
+ *
+ * @returns VBox status code.
+ * The entire list is processed on failure, so we'll only
+ * return the first error code. This shouldn't be a problem
+ * since errors really shouldn't happen here.
+ * @param pVM The cross context VM structure.
+ * @param pvKey The initializer key.
+ */
+static int pdmR3CritSectRwDeleteByKey(PVM pVM, void *pvKey)
+{
+ /*
+ * Iterate the list and match key.
+ */
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ PPDMCRITSECTRWINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTRWINT pCur = pUVM->pdm.s.pRwCritSects;
+ while (pCur)
+ {
+ if (pCur->pvKey == pvKey)
+ {
+ int rc2 = pdmR3CritSectRwDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Deletes all undeleted critical sections (both types) initialized by a given
+ * device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device handle.
+ */
+int pdmR3CritSectBothDeleteDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ int rc1 = pdmR3CritSectDeleteByKey(pVM, pDevIns);
+ int rc2 = pdmR3CritSectRwDeleteByKey(pVM, pDevIns);
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
+}
+
+
+/**
+ * Deletes all undeleted critical sections (both types) initialized by a given
+ * driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver handle.
+ */
+int pdmR3CritSectBothDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ int rc1 = pdmR3CritSectDeleteByKey(pVM, pDrvIns);
+ int rc2 = pdmR3CritSectRwDeleteByKey(pVM, pDrvIns);
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
+}
+
+
+/**
+ * Deletes the critical section.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect The PDM critical section to destroy.
+ */
+VMMR3DECL(int) PDMR3CritSectDelete(PVM pVM, PPDMCRITSECT pCritSect)
+{
+ if (!RTCritSectIsInitialized(&pCritSect->s.Core))
+ return VINF_SUCCESS;
+
+ /*
+ * Find and unlink it.
+ */
+ PUVM pUVM = pVM->pUVM;
+ AssertReleaseReturn(pVM, VERR_PDM_CRITSECT_IPE);
+ PPDMCRITSECTINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTINT pCur = pUVM->pdm.s.pCritSects;
+ while (pCur)
+ {
+ if (pCur == &pCritSect->s)
+ {
+ int rc = pdmR3CritSectDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
+ return VERR_PDM_CRITSECT_NOT_FOUND;
+}
+
+
+/**
+ * Deletes the read/write critical section.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect The PDM read/write critical section to destroy.
+ */
+VMMR3DECL(int) PDMR3CritSectRwDelete(PVM pVM, PPDMCRITSECTRW pCritSect)
+{
+ if (!PDMCritSectRwIsInitialized(pCritSect))
+ return VINF_SUCCESS;
+
+ /*
+ * Find and unlink it.
+ */
+ PUVM pUVM = pVM->pUVM;
+ AssertReleaseReturn(pVM, VERR_PDM_CRITSECT_IPE);
+ PPDMCRITSECTRWINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTRWINT pCur = pUVM->pdm.s.pRwCritSects;
+ while (pCur)
+ {
+ if (pCur == &pCritSect->s)
+ {
+ int rc = pdmR3CritSectRwDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
+ return VERR_PDM_CRITSECT_NOT_FOUND;
+}
+
+
+/**
+ * Gets the name of the critical section.
+ *
+ * @returns Pointer to the critical section name (read only) on success,
+ * NULL on failure (invalid critical section).
+ * @param pCritSect The critical section.
+ */
+VMMR3DECL(const char *) PDMR3CritSectName(PCPDMCRITSECT pCritSect)
+{
+ AssertPtrReturn(pCritSect, NULL);
+ AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, NULL);
+ return pCritSect->s.pszName;
+}
+
+
+/**
+ * Gets the name of the read/write critical section.
+ *
+ * @returns Pointer to the critical section name (read only) on success,
+ * NULL on failure (invalid critical section).
+ * @param pCritSect The read/write critical section.
+ */
+VMMR3DECL(const char *) PDMR3CritSectRwName(PCPDMCRITSECTRW pCritSect)
+{
+ AssertPtrReturn(pCritSect, NULL);
+ AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECTRW_MAGIC, NULL);
+ return pCritSect->s.pszName;
+}
+
+
+/**
+ * Yield the critical section if someone is waiting on it.
+ *
+ * When yielding, we'll leave the critical section and try to make sure the
+ * other waiting threads get a chance of entering before we reclaim it.
+ *
+ * @retval true if yielded.
+ * @retval false if not yielded.
+ * @param pVM The cross context VM structure.
+ * @param pCritSect The critical section.
+ */
+VMMR3DECL(bool) PDMR3CritSectYield(PVM pVM, PPDMCRITSECT pCritSect)
+{
+ AssertPtrReturn(pCritSect, false);
+ AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
+ Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
+ Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
+ RT_NOREF(pVM);
+
+ /* No recursion allowed here. */
+ int32_t const cNestings = pCritSect->s.Core.cNestings;
+ AssertReturn(cNestings == 1, false);
+
+ int32_t const cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
+ if (cLockers < cNestings)
+ return false;
+
+#ifdef PDMCRITSECT_STRICT
+ RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
+#endif
+ PDMCritSectLeave(pVM, pCritSect);
+
+ /*
+ * If we're lucky, then one of the waiters has entered the lock already.
+ * We spin a little bit in hope for this to happen so we can avoid the
+ * yield detour.
+ */
+ if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
+ {
+ int cLoops = 20;
+ while ( cLoops > 0
+ && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
+ && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) >= 0)
+ {
+ ASMNopPause();
+ cLoops--;
+ }
+ if (cLoops == 0)
+ RTThreadYield();
+ }
+
+#ifdef PDMCRITSECT_STRICT
+ int rc = PDMCritSectEnterDebug(pVM, pCritSect, VERR_IGNORED,
+ SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
+#else
+ int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+#endif
+ PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, pCritSect, rc);
+ return true;
+}
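+
+/*
+ * Illustrative sketch (loop bounds and the worker are hypothetical): a long
+ * operation done while owning a critical section can yield it periodically
+ * so that contending threads make progress.
+ *
+ * @code
+ *     for (uint32_t i = 0; i < cChunks; i++)
+ *     {
+ *         exampleProcessChunk(pThis, i);              // hypothetical helper
+ *         if ((i & 0xff) == 0xff)                     // every 256 chunks
+ *             PDMR3CritSectYield(pVM, &pThis->CritSect);
+ *     }
+ * @endcode
+ */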
+
+
+/**
+ * PDMR3CritSectBothCountOwned worker.
+ *
+ * @param pszName The critical section name.
+ * @param ppszNames Pointer to the pszNames variable.
+ * @param pcchLeft Pointer to the cchLeft variable.
+ * @param fFirst Whether this is the first name or not.
+ */
+static void pdmR3CritSectAppendNameToList(char const *pszName, char **ppszNames, size_t *pcchLeft, bool fFirst)
+{
+ size_t cchLeft = *pcchLeft;
+ if (cchLeft)
+ {
+ char *pszNames = *ppszNames;
+
+ /* Add a ", " separator before every name but the first. */
+ if (!fFirst)
+ {
+ *pszNames++ = ',';
+ if (--cchLeft)
+ {
+ *pszNames++ = ' ';
+ cchLeft--;
+ }
+ }
+
+ /* try copy the name. */
+ if (cchLeft)
+ {
+ size_t const cchName = strlen(pszName);
+ if (cchName < cchLeft)
+ {
+ memcpy(pszNames, pszName, cchName);
+ pszNames += cchName;
+ cchLeft -= cchName;
+ }
+ else
+ {
+ if (cchLeft > 2)
+ {
+ memcpy(pszNames, pszName, cchLeft - 2);
+ pszNames += cchLeft - 2;
+ cchLeft = 2;
+ }
+ while (cchLeft-- > 0)
+ *pszNames++ = '+';
+ }
+ }
+ *pszNames = '\0';
+
+ *pcchLeft = cchLeft;
+ *ppszNames = pszNames;
+ }
+}
+
+
+/**
+ * Counts the critical sections (both type) owned by the calling thread,
+ * optionally returning a comma separated list naming them.
+ *
+ * Read ownerships are not included in non-strict builds.
+ *
+ * This is for diagnostic purposes only.
+ *
+ * @returns Lock count.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszNames Where to return the critical section names.
+ * @param cbNames The size of the buffer.
+ */
+VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNames)
+{
+ /*
+ * Init the name buffer.
+ */
+ size_t cchLeft = cbNames;
+ if (cchLeft)
+ {
+ cchLeft--;
+ pszNames[0] = pszNames[cchLeft] = '\0';
+ }
+
+ /*
+ * Iterate the critical sections.
+ */
+ uint32_t cCritSects = 0;
+ RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
+ /* This is unsafe, but wtf. */
+ for (PPDMCRITSECTINT pCur = pVM->pUVM->pdm.s.pCritSects;
+ pCur;
+ pCur = pCur->pNext)
+ {
+ /* Same as RTCritSectIsOwner(). */
+ if (pCur->Core.NativeThreadOwner == hNativeThread)
+ {
+ cCritSects++;
+ pdmR3CritSectAppendNameToList(pCur->pszName, &pszNames, &cchLeft, cCritSects == 1);
+ }
+ }
+
+ /* This is unsafe, but wtf. */
+ for (PPDMCRITSECTRWINT pCur = pVM->pUVM->pdm.s.pRwCritSects;
+ pCur;
+ pCur = pCur->pNext)
+ {
+ if ( pCur->Core.u.s.hNativeWriter == hNativeThread
+ || PDMCritSectRwIsReadOwner(pVM, (PPDMCRITSECTRW)pCur, false /*fWannaHear*/) )
+ {
+ cCritSects++;
+ pdmR3CritSectAppendNameToList(pCur->pszName, &pszNames, &cchLeft, cCritSects == 1);
+ }
+ }
+
+ return cCritSects;
+}
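+
+/*
+ * Illustrative sketch (the buffer size is an arbitrary choice): typically
+ * used when composing a diagnostic message.
+ *
+ * @code
+ *     char szNames[256];
+ *     uint32_t cOwned = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ *     if (cOwned)
+ *         LogRel(("Thread owns %u critical section(s): %s\n", cOwned, szNames));
+ * @endcode
+ */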
+
+
+/**
+ * Leave all critical sections the calling thread owns.
+ *
+ * This is only used when entering guru meditation in order to prevent other
+ * EMTs and I/O threads from deadlocking.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) PDMR3CritSectLeaveAll(PVM pVM)
+{
+ RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
+ PUVM pUVM = pVM->pUVM;
+
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMCRITSECTINT pCur = pUVM->pdm.s.pCritSects;
+ pCur;
+ pCur = pCur->pNext)
+ {
+ while ( pCur->Core.NativeThreadOwner == hNativeSelf
+ && pCur->Core.cNestings > 0)
+ PDMCritSectLeave(pVM, (PPDMCRITSECT)pCur);
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Gets the address of the NOP critical section.
+ *
+ * The NOP critical section does not perform any thread serialization; it
+ * lets all threads enter immediately and concurrently.
+ *
+ * @returns The address of the NOP critical section.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(PPDMCRITSECT) PDMR3CritSectGetNop(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
+ return &pVM->pdm.s.NopCritSect;
+}
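+
+/*
+ * Usage sketch (an assumption, not taken from this file): a device that needs
+ * no serialization would typically route its device critical section to the
+ * NOP one via the device helpers:
+ * rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+ * AssertRCReturn(rc, rc);
+ */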
+
+
+/**
+ * Display matching critical sections.
+ */
+static void pdmR3CritSectInfoWorker(PUVM pUVM, const char *pszPatterns, PCDBGFINFOHLP pHlp, unsigned cVerbosity)
+{
+ size_t const cchPatterns = pszPatterns ? strlen(pszPatterns) : 0;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ for (PPDMCRITSECTINT pCritSect = pUVM->pdm.s.pCritSects; pCritSect; pCritSect = pCritSect->pNext)
+ if ( !pszPatterns
+ || RTStrSimplePatternMultiMatch(pszPatterns, cchPatterns, pCritSect->pszName, RTSTR_MAX, NULL))
+ {
+ uint32_t fFlags = pCritSect->Core.fFlags;
+ pHlp->pfnPrintf(pHlp, "%p: '%s'%s%s%s%s%s\n", pCritSect, pCritSect->pszName,
+ pCritSect->fAutomaticDefaultCritsect ? " default" : "",
+ pCritSect->fUsedByTimerOrSimilar ? " used-by-timer-or-similar" : "",
+ fFlags & RTCRITSECT_FLAGS_NO_NESTING ? " no-nesting" : "",
+ fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL ? " no-lock-val" : "",
+ fFlags & RTCRITSECT_FLAGS_NOP ? " nop" : "");
+
+ /*
+ * Get the volatile data:
+ */
+ RTNATIVETHREAD hOwner;
+ int32_t cLockers;
+ int32_t cNestings;
+ uint32_t uMagic;
+ for (uint32_t iTry = 0; iTry < 16; iTry++)
+ {
+ hOwner = pCritSect->Core.NativeThreadOwner;
+ cLockers = pCritSect->Core.cLockers;
+ cNestings = pCritSect->Core.cNestings;
+ fFlags = pCritSect->Core.fFlags;
+ uMagic = pCritSect->Core.u32Magic;
+ if ( hOwner == pCritSect->Core.NativeThreadOwner
+ && cLockers == pCritSect->Core.cLockers
+ && cNestings == pCritSect->Core.cNestings
+ && fFlags == pCritSect->Core.fFlags
+ && uMagic == pCritSect->Core.u32Magic)
+ break;
+ }
+
+ /*
+ * Check and resolve the magic to a string, print if not RTCRITSECT_MAGIC.
+ */
+ const char *pszMagic;
+ switch (uMagic)
+ {
+ case RTCRITSECT_MAGIC: pszMagic = NULL; break;
+ case ~RTCRITSECT_MAGIC: pszMagic = " deleted"; break;
+ case PDMCRITSECT_MAGIC_CORRUPTED: pszMagic = " PDMCRITSECT_MAGIC_CORRUPTED!"; break;
+ case PDMCRITSECT_MAGIC_FAILED_ABORT: pszMagic = " PDMCRITSECT_MAGIC_FAILED_ABORT!"; break;
+ default: pszMagic = " !unknown!"; break;
+ }
+ if (pszMagic || cVerbosity > 1)
+ pHlp->pfnPrintf(pHlp, " uMagic=%#x%s\n", uMagic, pszMagic ? pszMagic : "");
+
+ /*
+ * If locked, print details
+ */
+ if (cLockers != -1 || cNestings > 1 || cNestings < 0 || hOwner != NIL_RTNATIVETHREAD || cVerbosity > 1)
+ {
+ /* Translate the owner to a name if we have one and can. */
+ const char *pszOwner = NULL;
+ if (hOwner != NIL_RTNATIVETHREAD)
+ {
+ RTTHREAD hOwnerThread = RTThreadFromNative(hOwner); /* Note! Does not return a reference (crazy). */
+ if (hOwnerThread != NIL_RTTHREAD)
+ pszOwner = RTThreadGetName(hOwnerThread);
+ }
+ else
+ pszOwner = "<no-owner>";
+
+ pHlp->pfnPrintf(pHlp, " cLockers=%d cNestings=%d hOwner=%p %s%s\n", cLockers, cNestings, hOwner,
+ pszOwner ? pszOwner : "???", fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK ? " pending-unlock" : "");
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Display matching read/write critical sections.
+ */
+static void pdmR3CritSectInfoRwWorker(PUVM pUVM, const char *pszPatterns, PCDBGFINFOHLP pHlp, unsigned cVerbosity)
+{
+ size_t const cchPatterns = pszPatterns ? strlen(pszPatterns) : 0;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ for (PPDMCRITSECTRWINT pCritSect = pUVM->pdm.s.pRwCritSects; pCritSect; pCritSect = pCritSect->pNext)
+ if ( !pszPatterns
+ || RTStrSimplePatternMultiMatch(pszPatterns, cchPatterns, pCritSect->pszName, RTSTR_MAX, NULL))
+ {
+ uint16_t const fFlags = pCritSect->Core.fFlags;
+ pHlp->pfnPrintf(pHlp, "%p: '%s'%s%s%s\n", pCritSect, pCritSect->pszName,
+ fFlags & RTCRITSECT_FLAGS_NO_NESTING ? " no-nesting" : "",
+ fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL ? " no-lock-val" : "",
+ fFlags & RTCRITSECT_FLAGS_NOP ? " nop" : "");
+
+ /*
+ * Get the volatile data:
+ */
+ RTNATIVETHREAD hOwner;
+ uint64_t u64State;
+ uint32_t cWriterReads;
+ uint32_t cWriteRecursions;
+ bool fNeedReset;
+ uint32_t uMagic;
+ unsigned cTries = 16;
+ do
+ {
+ u64State = pCritSect->Core.u.s.u64State;
+ hOwner = pCritSect->Core.u.s.hNativeWriter;
+ cWriterReads = pCritSect->Core.cWriterReads;
+ cWriteRecursions = pCritSect->Core.cWriteRecursions;
+ fNeedReset = pCritSect->Core.fNeedReset;
+ uMagic = pCritSect->Core.u32Magic;
+ } while ( cTries-- > 0
+ && ( u64State != pCritSect->Core.u.s.u64State
+ || hOwner != pCritSect->Core.u.s.hNativeWriter
+ || cWriterReads != pCritSect->Core.cWriterReads
+ || cWriteRecursions != pCritSect->Core.cWriteRecursions
+ || fNeedReset != pCritSect->Core.fNeedReset
+ || uMagic != pCritSect->Core.u32Magic));
+
+ /*
+ * Check and resolve the magic to a string, print if not RTCRITSECT_MAGIC.
+ */
+ const char *pszMagic;
+ switch (uMagic)
+ {
+ case RTCRITSECTRW_MAGIC: pszMagic = NULL; break;
+ case ~RTCRITSECTRW_MAGIC: pszMagic = " deleted"; break;
+ case PDMCRITSECTRW_MAGIC_CORRUPT: pszMagic = " PDMCRITSECTRW_MAGIC_CORRUPT!"; break;
+ default: pszMagic = " !unknown!"; break;
+ }
+ if (pszMagic || cVerbosity > 1)
+ pHlp->pfnPrintf(pHlp, " uMagic=%#x%s\n", uMagic, pszMagic ? pszMagic : "");
+
+ /*
+ * If locked, print details
+ */
+ if ((u64State & ~RTCSRW_DIR_MASK) || hOwner != NIL_RTNATIVETHREAD || cVerbosity > 1)
+ {
+ /* Translate the owner to a name if we have one and can. */
+ const char *pszOwner = NULL;
+ if (hOwner != NIL_RTNATIVETHREAD)
+ {
+ RTTHREAD hOwnerThread = RTThreadFromNative(hOwner); /* Note! Does not return a reference (crazy). */
+ if (hOwnerThread != NIL_RTTHREAD)
+ pszOwner = RTThreadGetName(hOwnerThread);
+ }
+ else
+ pszOwner = "<no-owner>";
+
+ pHlp->pfnPrintf(pHlp, " u64State=%#RX64 %s cReads=%u cWrites=%u cWaitingReads=%u\n",
+ u64State, (u64State & RTCSRW_DIR_MASK) == RTCSRW_DIR_WRITE ? "writing" : "reading",
+ (unsigned)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
+ (unsigned)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT),
+ (unsigned)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT));
+ if (hOwner != NIL_RTNATIVETHREAD || cVerbosity > 2)
+ pHlp->pfnPrintf(pHlp, " cNestings=%u cReadNestings=%u hWriter=%p %s\n",
+ cWriteRecursions, cWriterReads, hOwner, pszOwner ? pszOwner : "???");
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Common worker for critsect and critsectrw info items.
+ */
+static void pdmR3CritSectInfoCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fReadWrite)
+{
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Process arguments.
+ */
+ static const RTGETOPTDEF s_aOptions[] =
+ {
+ { "--verbose", 'v', RTGETOPT_REQ_NOTHING },
+ };
+ RTGETOPTSTATE State;
+ int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0, RTGETOPTINIT_FLAGS_NO_STD_OPTS);
+ AssertRC(rc);
+
+ unsigned cVerbosity = 1;
+ unsigned cProcessed = 0;
+
+ RTGETOPTUNION ValueUnion;
+ while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
+ {
+ switch (rc)
+ {
+ case 'v':
+ cVerbosity++;
+ break;
+
+ case VINF_GETOPT_NOT_OPTION:
+ if (!fReadWrite)
+ pdmR3CritSectInfoWorker(pUVM, ValueUnion.psz, pHlp, cVerbosity);
+ else
+ pdmR3CritSectInfoRwWorker(pUVM, ValueUnion.psz, pHlp, cVerbosity);
+ cProcessed++;
+ break;
+
+ default:
+ pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
+ return;
+ }
+ }
+
+ /*
+ * If we did nothing above, dump all.
+ */
+ if (!cProcessed)
+ {
+ if (!fReadWrite)
+ pdmR3CritSectInfoWorker(pUVM, NULL, pHlp, cVerbosity);
+ else
+ pdmR3CritSectInfoRwWorker(pUVM, NULL, pHlp, cVerbosity);
+ }
+}
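+
+/*
+ * Usage sketch (the debugger syntax is an assumption, shown for illustration
+ * only): these workers back the 'critsect' and 'critsectrw' info items, e.g.:
+ * info critsect -- dump all regular critical sections
+ * info critsect -v "PDM*" -- verbose dump of sections matching a pattern
+ */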
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT, critsect}
+ */
+static DECLCALLBACK(void) pdmR3CritSectInfo(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ return pdmR3CritSectInfoCommon(pVM, pHlp, cArgs, papszArgs, false);
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT, critsectrw}
+ */
+static DECLCALLBACK(void) pdmR3CritSectRwInfo(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ return pdmR3CritSectInfoCommon(pVM, pHlp, cArgs, papszArgs, true);
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMDevHlp.cpp b/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
new file mode 100644
index 00000000..8d6430ba
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
@@ -0,0 +1,6397 @@
+/* $Id: PDMDevHlp.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, Device Helpers.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DEVICE
+#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/version.h>
+#include <VBox/log.h>
+#include <VBox/pci.h>
+#include <VBox/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/mem.h>
+
+#include "dtrace/VBoxVMM.h"
+#include "PDMInline.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @def PDM_DEVHLP_DEADLOCK_DETECTION
+ * Define this to enable the deadlock detection when accessing physical memory.
+ */
+#if /*defined(DEBUG_bird) ||*/ defined(DOXYGEN_RUNNING)
+# define PDM_DEVHLP_DEADLOCK_DETECTION /**< @todo enable DevHlp deadlock detection! */
+#endif
+
+
+
+/** @name R3 DevHlp
+ * @{
+ */
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortCreateEx} */
+static DECLCALLBACK(int) pdmR3DevHlp_IoPortCreateEx(PPDMDEVINS pDevIns, RTIOPORT cPorts, uint32_t fFlags, PPDMPCIDEV pPciDev,
+ uint32_t iPciRegion, PFNIOMIOPORTNEWOUT pfnOut, PFNIOMIOPORTNEWIN pfnIn,
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr, PFNIOMIOPORTNEWINSTRING pfnInStr, RTR3PTR pvUser,
+ const char *pszDesc, PCIOMIOPORTDESC paExtDescs, PIOMIOPORTHANDLE phIoPorts)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortCreateEx: caller='%s'/%d: cPorts=%#x fFlags=%#x pPciDev=%p iPciRegion=%#x pfnOut=%p pfnIn=%p pfnOutStr=%p pfnInStr=%p pvUser=%p pszDesc=%p:{%s} paExtDescs=%p phIoPorts=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cPorts, fFlags, pPciDev, iPciRegion, pfnOut, pfnIn, pfnOutStr, pfnInStr,
+ pvUser, pszDesc, pszDesc, paExtDescs, phIoPorts));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ int rc = IOMR3IoPortCreate(pVM, pDevIns, cPorts, fFlags, pPciDev, iPciRegion,
+ pfnOut, pfnIn, pfnOutStr, pfnInStr, pvUser, pszDesc, paExtDescs, phIoPorts);
+
+ LogFlow(("pdmR3DevHlp_IoPortCreateEx: caller='%s'/%d: returns %Rrc (*phIoPorts=%#x)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *phIoPorts));
+ return rc;
+}
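+
+/*
+ * Call sketch (illustrative: the callbacks, description and port count are
+ * made up, and a real device would normally use the pdmdev.h wrappers):
+ * IOMIOPORTHANDLE hIoPorts;
+ * rc = pDevIns->pHlpR3->pfnIoPortCreateEx(pDevIns, 4 /*cPorts*/, 0 /*fFlags*/,
+ * NULL /*pPciDev*/, UINT32_MAX /*iPciRegion*/,
+ * myPortOut, myPortIn, NULL, NULL, NULL /*pvUser*/,
+ * "MyDev ports", NULL /*paExtDescs*/, &hIoPorts);
+ * ...
+ * rc = pDevIns->pHlpR3->pfnIoPortMap(pDevIns, hIoPorts, 0x1234 /*Port*/);
+ */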
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortMap} */
+static DECLCALLBACK(int) pdmR3DevHlp_IoPortMap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts, RTIOPORT Port)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: hIoPorts=%#x Port=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts, Port));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3IoPortMap(pVM, pDevIns, hIoPorts, Port);
+
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortUnmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_IoPortUnmap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: hIoPorts=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3IoPortUnmap(pVM, pDevIns, hIoPorts);
+
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortGetMappingAddress} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_IoPortGetMappingAddress(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortGetMappingAddress: caller='%s'/%d: hIoPorts=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts));
+
+ uint32_t uAddress = IOMR3IoPortGetMappingAddress(pDevIns->Internal.s.pVMR3, pDevIns, hIoPorts);
+
+ LogFlow(("pdmR3DevHlp_IoPortGetMappingAddress: caller='%s'/%d: returns %#RX32\n", pDevIns->pReg->szName, pDevIns->iInstance, uAddress));
+ return uAddress;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortWrite} */
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlp_IoPortWrite(PPDMDEVINS pDevIns, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortWrite: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertPtrReturn(pVCpu, VERR_ACCESS_DENIED);
+
+ VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, Port, u32Value, cbValue);
+
+ LogFlow(("pdmR3DevHlp_IoPortWrite: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioCreateEx} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioCreateEx(PPDMDEVINS pDevIns, RTGCPHYS cbRegion,
+ uint32_t fFlags, PPDMPCIDEV pPciDev, uint32_t iPciRegion,
+ PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill,
+ void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioCreateEx: caller='%s'/%d: cbRegion=%#RGp fFlags=%#x pPciDev=%p iPciRegion=%#x pfnWrite=%p pfnRead=%p pfnFill=%p pvUser=%p pszDesc=%p:{%s} phRegion=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbRegion, fFlags, pPciDev, iPciRegion, pfnWrite, pfnRead, pfnFill, pvUser, pszDesc, pszDesc, phRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ if (pDevIns->iInstance > 0)
+ {
+ pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PDM_DEVICE_DESC, "%s [%u]", pszDesc, pDevIns->iInstance);
+ AssertReturn(pszDesc, VERR_NO_STR_MEMORY);
+ }
+
+ /* HACK ALERT! Round the size up to page size. The PCI bus should do something similar before mapping it. */
+ /** @todo It's possible we need to do dummy MMIO fill-in for the PCI bus, or
+ * the guest may add more alignment to a region. */
+ cbRegion = RT_ALIGN_T(cbRegion, GUEST_PAGE_SIZE, RTGCPHYS);
+
+ int rc = IOMR3MmioCreate(pVM, pDevIns, cbRegion, fFlags, pPciDev, iPciRegion,
+ pfnWrite, pfnRead, pfnFill, pvUser, pszDesc, phRegion);
+
+ LogFlow(("pdmR3DevHlp_MmioCreateEx: caller='%s'/%d: returns %Rrc (*phRegion=%#x)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *phRegion));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioMap} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioMap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioMap: caller='%s'/%d: hRegion=%#x GCPhys=%#RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, GCPhys));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3MmioMap(pVM, pDevIns, hRegion, GCPhys);
+
+ LogFlow(("pdmR3DevHlp_MmioMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioUnmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioUnmap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioUnmap: caller='%s'/%d: hRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3MmioUnmap(pVM, pDevIns, hRegion);
+
+ LogFlow(("pdmR3DevHlp_MmioUnmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioReduce} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioReduce(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioReduce: caller='%s'/%d: hRegion=%#x cbRegion=%#RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, cbRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE);
+
+ int rc = IOMR3MmioReduce(pVM, pDevIns, hRegion, cbRegion);
+
+ LogFlow(("pdmR3DevHlp_MmioReduce: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioGetMappingAddress} */
+static DECLCALLBACK(RTGCPHYS) pdmR3DevHlp_MmioGetMappingAddress(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioGetMappingAddress: caller='%s'/%d: hRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+
+ RTGCPHYS GCPhys = IOMR3MmioGetMappingAddress(pDevIns->Internal.s.pVMR3, pDevIns, hRegion);
+
+ LogFlow(("pdmR3DevHlp_MmioGetMappingAddress: caller='%s'/%d: returns %RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+ return GCPhys;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2Create} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2Create(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iPciRegion, RTGCPHYS cbRegion,
+ uint32_t fFlags, const char *pszDesc, void **ppvMapping, PPGMMMIO2HANDLE phRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_Mmio2Create: caller='%s'/%d: pPciDev=%p (%#x) iPciRegion=%#x cbRegion=%#RGp fFlags=%RX32 pszDesc=%p:{%s} ppvMapping=%p phRegion=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev ? pPciDev->uDevFn : UINT32_MAX, iPciRegion, cbRegion,
+ fFlags, pszDesc, pszDesc, ppvMapping, phRegion));
+ *ppvMapping = NULL;
+ *phRegion = NIL_PGMMMIO2HANDLE;
+ AssertReturn(!pPciDev || pPciDev->Int.s.pDevInsR3 == pDevIns, VERR_INVALID_PARAMETER);
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertMsgReturn( pVM->enmVMState == VMSTATE_CREATING
+ || pVM->enmVMState == VMSTATE_LOADING,
+ ("state %s, expected CREATING or LOADING\n", VMGetStateName(pVM->enmVMState)), VERR_VM_INVALID_VM_STATE);
+
+ AssertReturn(!(iPciRegion & UINT16_MAX), VERR_INVALID_PARAMETER); /* not implemented. */
+
+ /** @todo PGMR3PhysMmio2Register mangles the description, move it here and
+ * use a real string cache. */
+ int rc = PGMR3PhysMmio2Register(pVM, pDevIns, pPciDev ? pPciDev->Int.s.idxDevCfg : 254, iPciRegion >> 16,
+ cbRegion, fFlags, pszDesc, ppvMapping, phRegion);
+
+ LogFlow(("pdmR3DevHlp_Mmio2Create: caller='%s'/%d: returns %Rrc *ppvMapping=%p phRegion=%#RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *ppvMapping, *phRegion));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2Destroy} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2Destroy(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_Mmio2Destroy: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertMsgReturn( pVM->enmVMState == VMSTATE_DESTROYING
+ || pVM->enmVMState == VMSTATE_LOADING,
+ ("state %s, expected DESTROYING or LOADING\n", VMGetStateName(pVM->enmVMState)), VERR_VM_INVALID_VM_STATE);
+
+ int rc = PGMR3PhysMmio2Deregister(pDevIns->Internal.s.pVMR3, pDevIns, hRegion);
+
+ LogFlow(("pdmR3DevHlp_Mmio2Destroy: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2Map} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2Map(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_Mmio2Map: caller='%s'/%d: hRegion=%#RX64 GCPhys=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, GCPhys));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = PGMR3PhysMmio2Map(pDevIns->Internal.s.pVMR3, pDevIns, hRegion, GCPhys);
+
+ LogFlow(("pdmR3DevHlp_Mmio2Map: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2Unmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2Unmap(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_Mmio2Unmap: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = PGMR3PhysMmio2Unmap(pDevIns->Internal.s.pVMR3, pDevIns, hRegion, NIL_RTGCPHYS);
+
+ LogFlow(("pdmR3DevHlp_Mmio2Unmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2Reduce} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2Reduce(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, RTGCPHYS cbRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_Mmio2Reduce: caller='%s'/%d: hRegion=%#RX64 cbRegion=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, cbRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE);
+
+ int rc = PGMR3PhysMmio2Reduce(pDevIns->Internal.s.pVMR3, pDevIns, hRegion, cbRegion);
+
+ LogFlow(("pdmR3DevHlp_Mmio2Reduce: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2GetMappingAddress} */
+static DECLCALLBACK(RTGCPHYS) pdmR3DevHlp_Mmio2GetMappingAddress(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: hRegion=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+ VM_ASSERT_EMT0_RETURN(pVM, NIL_RTGCPHYS);
+
+ RTGCPHYS GCPhys = PGMR3PhysMmio2GetMappingAddress(pVM, pDevIns, hRegion);
+
+ LogFlow(("pdmR3DevHlp_Mmio2GetMappingAddress: caller='%s'/%d: returns %RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+ return GCPhys;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2QueryAndResetDirtyBitmap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion,
+ void *pvBitmap, size_t cbBitmap)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: hRegion=%#RX64 pvBitmap=%p cbBitmap=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hRegion, pvBitmap, cbBitmap));
+
+ int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hRegion, pvBitmap, cbBitmap);
+
+ LogFlow(("pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmio2ControlDirtyPageTracking} */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2ControlDirtyPageTracking(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, bool fEnabled)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: hRegion=%#RX64 fEnabled=%RTbool\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hRegion, fEnabled));
+
+ int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hRegion, fEnabled);
+
+ LogFlow(("pdmR3DevHlp_Mmio2ControlDirtyPageTracking: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/**
+ * @copydoc PDMDEVHLPR3::pfnMmio2ChangeRegionNo
+ */
+static DECLCALLBACK(int) pdmR3DevHlp_Mmio2ChangeRegionNo(PPDMDEVINS pDevIns, PGMMMIO2HANDLE hRegion, uint32_t iNewRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: hRegion=%#RX64 iNewRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, iNewRegion));
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = PGMR3PhysMmio2ChangeRegionNo(pVM, pDevIns, hRegion, iNewRegion);
+
+ LogFlow(("pdmR3DevHlp_Mmio2ChangeRegionNo: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioMapMmio2Page} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioMapMmio2Page(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS offRegion,
+ uint64_t hMmio2, RTGCPHYS offMmio2, uint64_t fPageFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioMapMmio2Page: caller='%s'/%d: hRegion=%RX64 offRegion=%RGp hMmio2=%RX64 offMmio2=%RGp fPageFlags=%RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hRegion, offRegion, hMmio2, offMmio2, fPageFlags));
+
+ int rc = IOMMmioMapMmio2Page(pDevIns->Internal.s.pVMR3, pDevIns, hRegion, offRegion, hMmio2, offMmio2, fPageFlags);
+
+ Log(("pdmR3DevHlp_MmioMapMmio2Page: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioResetRegion} */
+static DECLCALLBACK(int) pdmR3DevHlp_MmioResetRegion(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioResetRegion: caller='%s'/%d: hRegion=%RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+
+ int rc = IOMMmioResetRegion(pDevIns->Internal.s.pVMR3, pDevIns, hRegion);
+
+ Log(("pdmR3DevHlp_MmioResetRegion: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnROMRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_ROMRegister(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t cbRange,
+ const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_ROMRegister: caller='%s'/%d: GCPhysStart=%RGp cbRange=%#x pvBinary=%p cbBinary=%#x fFlags=%#RX32 pszDesc=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhysStart, cbRange, pvBinary, cbBinary, fFlags, pszDesc, pszDesc));
+
+/** @todo can we mangle pszDesc? */
+ int rc = PGMR3PhysRomRegister(pDevIns->Internal.s.pVMR3, pDevIns, GCPhysStart, cbRange, pvBinary, cbBinary, fFlags, pszDesc);
+
+ LogFlow(("pdmR3DevHlp_ROMRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnROMProtectShadow} */
+static DECLCALLBACK(int) pdmR3DevHlp_ROMProtectShadow(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t cbRange, PGMROMPROT enmProt)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_ROMProtectShadow: caller='%s'/%d: GCPhysStart=%RGp cbRange=%#x enmProt=%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhysStart, cbRange, enmProt));
+
+ int rc = PGMR3PhysRomProtect(pDevIns->Internal.s.pVMR3, GCPhysStart, cbRange, enmProt);
+
+ LogFlow(("pdmR3DevHlp_ROMProtectShadow: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSSMRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_SSMRegister(PPDMDEVINS pDevIns, uint32_t uVersion, size_t cbGuess, const char *pszBefore,
+ PFNSSMDEVLIVEPREP pfnLivePrep, PFNSSMDEVLIVEEXEC pfnLiveExec, PFNSSMDEVLIVEVOTE pfnLiveVote,
+ PFNSSMDEVSAVEPREP pfnSavePrep, PFNSSMDEVSAVEEXEC pfnSaveExec, PFNSSMDEVSAVEDONE pfnSaveDone,
+ PFNSSMDEVLOADPREP pfnLoadPrep, PFNSSMDEVLOADEXEC pfnLoadExec, PFNSSMDEVLOADDONE pfnLoadDone)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_SSMRegister: caller='%s'/%d: uVersion=%#x cbGuess=%#x pszBefore=%p:{%s}\n"
+ " pfnLivePrep=%p pfnLiveExec=%p pfnLiveVote=%p pfnSavePrep=%p pfnSaveExec=%p pfnSaveDone=%p pfnLoadPrep=%p pfnLoadExec=%p pfnLoadDone=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uVersion, cbGuess, pszBefore, pszBefore,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone));
+
+ int rc = SSMR3RegisterDevice(pDevIns->Internal.s.pVMR3, pDevIns, pDevIns->pReg->szName, pDevIns->iInstance,
+ uVersion, cbGuess, pszBefore,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone);
+
+ LogFlow(("pdmR3DevHlp_SSMRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSSMRegisterLegacy} */
+static DECLCALLBACK(int) pdmR3DevHlp_SSMRegisterLegacy(PPDMDEVINS pDevIns, const char *pszOldName, PFNSSMDEVLOADPREP pfnLoadPrep,
+ PFNSSMDEVLOADEXEC pfnLoadExec, PFNSSMDEVLOADDONE pfnLoadDone)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_SSMRegisterLegacy: caller='%s'/%d: pszOldName=%p:{%s} pfnLoadPrep=%p pfnLoadExec=%p pfnLoadDone=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszOldName, pszOldName, pfnLoadPrep, pfnLoadExec, pfnLoadDone));
+
+ int rc = SSMR3RegisterDevice(pDevIns->Internal.s.pVMR3, pDevIns, pszOldName, pDevIns->iInstance,
+ 0 /*uVersion*/, 0 /*cbGuess*/, NULL /*pszBefore*/,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone);
+
+ LogFlow(("pdmR3DevHlp_SSMRegisterLegacy: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerCreate(PPDMDEVINS pDevIns, TMCLOCK enmClock, PFNTMTIMERDEV pfnCallback,
+ void *pvUser, uint32_t fFlags, const char *pszDesc, PTMTIMERHANDLE phTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_TimerCreate: caller='%s'/%d: enmClock=%d pfnCallback=%p pvUser=%p fFlags=%#x pszDesc=%p:{%s} phTimer=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmClock, pfnCallback, pvUser, fFlags, pszDesc, pszDesc, phTimer));
+
+ /* Mangle the timer name if there is more than one instance of this device. */
+ char szName[32];
+ AssertReturn(strlen(pszDesc) < sizeof(szName) - 3, VERR_INVALID_NAME);
+ if (pDevIns->iInstance > 0)
+ {
+ RTStrPrintf(szName, sizeof(szName), "%s[%u]", pszDesc, pDevIns->iInstance);
+ pszDesc = szName;
+ }
+
+ /* Clear the ring-0 flag if the device isn't configured for ring-0. */
+ if (fFlags & TMTIMER_FLAGS_RING0)
+ {
+ Assert(pDevIns->Internal.s.pDevR3->pReg->fFlags & PDM_DEVREG_FLAGS_R0);
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED))
+ fFlags &= ~TMTIMER_FLAGS_RING0;
+ }
+ else
+ Assert(fFlags & TMTIMER_FLAGS_NO_RING0 /* just to make sure all devices have been considered */);
+
+ int rc = TMR3TimerCreateDevice(pVM, pDevIns, enmClock, pfnCallback, pvUser, fFlags, pszDesc, phTimer);
+
+ LogFlow(("pdmR3DevHlp_TimerCreate: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
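+
+/*
+ * Call sketch (illustrative; the callback and description are made up):
+ * TMTIMERHANDLE hTimer;
+ * rc = pDevIns->pHlpR3->pfnTimerCreate(pDevIns, TMCLOCK_VIRTUAL_SYNC, myTimerCb,
+ * NULL /*pvUser*/, TMTIMER_FLAGS_NO_RING0,
+ * "MyDev refresh", &hTimer);
+ */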
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerFromMicro} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerFromMicro(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerFromMicro(pDevIns->Internal.s.pVMR3, hTimer, cMicroSecs);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerFromMilli} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerFromMilli(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerFromMilli(pDevIns->Internal.s.pVMR3, hTimer, cMilliSecs);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerFromNano} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerFromNano(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerFromNano(pDevIns->Internal.s.pVMR3, hTimer, cNanoSecs);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerGet} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerGet(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerGet(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerGetFreq} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerGetFreq(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerGetFreq(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerGetNano} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TimerGetNano(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerGetNano(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerIsActive} */
+static DECLCALLBACK(bool) pdmR3DevHlp_TimerIsActive(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerIsActive(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerIsLockOwner} */
+static DECLCALLBACK(bool) pdmR3DevHlp_TimerIsLockOwner(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerIsLockOwner(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerLockClock} */
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlp_TimerLockClock(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, int rcBusy)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerLock(pDevIns->Internal.s.pVMR3, hTimer, rcBusy);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerLockClock2} */
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlp_TimerLockClock2(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer,
+ PPDMCRITSECT pCritSect, int rcBusy)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM const pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rc = TMTimerLock(pVM, hTimer, rcBusy);
+ if (rc == VINF_SUCCESS)
+ {
+ rc = PDMCritSectEnter(pVM, pCritSect, rcBusy);
+ if (rc == VINF_SUCCESS)
+ return rc;
+ AssertRC(VBOXSTRICTRC_VAL(rc));
+ TMTimerUnlock(pVM, hTimer);
+ }
+ else
+ AssertRC(VBOXSTRICTRC_VAL(rc));
+ return rc;
+}
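+
+/*
+ * Pairing sketch (illustrative): lock and unlock are meant to bracket timer
+ * accesses that also need the device critical section:
+ * VBOXSTRICTRC rcLock = pDevIns->pHlpR3->pfnTimerLockClock2(pDevIns, hTimer, pCritSect, VERR_IGNORED);
+ * if (rcLock == VINF_SUCCESS)
+ * {
+ * ... access the timer and shared device state ...
+ * pDevIns->pHlpR3->pfnTimerUnlockClock2(pDevIns, hTimer, pCritSect);
+ * }
+ */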
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSet} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSet(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t uExpire)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSet(pDevIns->Internal.s.pVMR3, hTimer, uExpire);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetFrequencyHint} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetFrequencyHint(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint32_t uHz)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSetFrequencyHint(pDevIns->Internal.s.pVMR3, hTimer, uHz);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetMicro} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetMicro(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSetMicro(pDevIns->Internal.s.pVMR3, hTimer, cMicrosToNext);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetMillies} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetMillies(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cMilliesToNext)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSetMillies(pDevIns->Internal.s.pVMR3, hTimer, cMilliesToNext);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetNano} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetNano(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSetNano(pDevIns->Internal.s.pVMR3, hTimer, cNanosToNext);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetRelative} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetRelative(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerSetRelative(pDevIns->Internal.s.pVMR3, hTimer, cTicksToNext, pu64Now);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerStop} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerStop(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMTimerStop(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerUnlockClock} */
+static DECLCALLBACK(void) pdmR3DevHlp_TimerUnlockClock(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ TMTimerUnlock(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerUnlockClock2} */
+static DECLCALLBACK(void) pdmR3DevHlp_TimerUnlockClock2(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM const pVM = pDevIns->Internal.s.pVMR3;
+ TMTimerUnlock(pVM, hTimer);
+ int rc = PDMCritSectLeave(pVM, pCritSect);
+ AssertRC(rc);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSetCritSect} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSetCritSect(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMR3TimerSetCritSect(pDevIns->Internal.s.pVMR3, hTimer, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerSave} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerSave(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMR3TimerSave(pDevIns->Internal.s.pVMR3, hTimer, pSSM);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerLoad} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerLoad(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMR3TimerLoad(pDevIns->Internal.s.pVMR3, hTimer, pSSM);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTimerDestroy} */
+static DECLCALLBACK(int) pdmR3DevHlp_TimerDestroy(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return TMR3TimerDestroy(pDevIns->Internal.s.pVMR3, hTimer);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTMUtcNow} */
+static DECLCALLBACK(PRTTIMESPEC) pdmR3DevHlp_TMUtcNow(PPDMDEVINS pDevIns, PRTTIMESPEC pTime)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TMUtcNow: caller='%s'/%d: pTime=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pTime));
+
+ pTime = TMR3UtcNow(pDevIns->Internal.s.pVMR3, pTime);
+
+ LogFlow(("pdmR3DevHlp_TMUtcNow: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, RTTimeSpecGetNano(pTime)));
+ return pTime;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTMTimeVirtGet} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMTimeVirtGet(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGet: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t u64Time = TMVirtualSyncGet(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGet: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64Time));
+ return u64Time;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTMTimeVirtGetFreq} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMTimeVirtGetFreq(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGetFreq: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t u64Freq = TMVirtualGetFreq(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGetFreq: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64Freq));
+ return u64Freq;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTMTimeVirtGetNano} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMTimeVirtGetNano(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGetNano: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t u64Time = TMVirtualGet(pDevIns->Internal.s.pVMR3);
+ uint64_t u64Nano = TMVirtualToNano(pDevIns->Internal.s.pVMR3, u64Time);
+
+ LogFlow(("pdmR3DevHlp_TMTimeVirtGetNano: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64Nano));
+ return u64Nano;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTMCpuTicksPerSecond} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMCpuTicksPerSecond(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TMCpuTicksPerSecond: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t u64CpuTicksPerSec = TMCpuTicksPerSecond(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_TMCpuTicksPerSecond: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64CpuTicksPerSec));
+ return u64CpuTicksPerSec;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetSupDrvSession} */
+static DECLCALLBACK(PSUPDRVSESSION) pdmR3DevHlp_GetSupDrvSession(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_GetSupDrvSession: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ PSUPDRVSESSION pSession = pDevIns->Internal.s.pVMR3->pSession;
+
+ LogFlow(("pdmR3DevHlp_GetSupDrvSession: caller='%s'/%d: returns %#p\n", pDevIns->pReg->szName, pDevIns->iInstance, pSession));
+ return pSession;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueryGenericUserObject} */
+static DECLCALLBACK(void *) pdmR3DevHlp_QueryGenericUserObject(PPDMDEVINS pDevIns, PCRTUUID pUuid)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_QueryGenericUserObject: caller='%s'/%d: pUuid=%p:%RTuuid\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pUuid, pUuid));
+
+#if defined(DEBUG_bird) || defined(DEBUG_ramshankar) || defined(DEBUG_sunlover) || defined(DEBUG_michael) || defined(DEBUG_andy)
+ AssertMsgFailed(("'%s' wants %RTuuid - external only interface!\n", pDevIns->pReg->szName, pUuid));
+#endif
+
+ void *pvRet;
+ PUVM pUVM = pDevIns->Internal.s.pVMR3->pUVM;
+ if (pUVM->pVmm2UserMethods->pfnQueryGenericObject)
+ pvRet = pUVM->pVmm2UserMethods->pfnQueryGenericObject(pUVM->pVmm2UserMethods, pUVM, pUuid);
+ else
+ pvRet = NULL;
+
+ LogRel(("pdmR3DevHlp_QueryGenericUserObject: caller='%s'/%d: returns %#p for %RTuuid\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pvRet, pUuid));
+ return pvRet;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalTypeRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PGMHandlerPhysicalTypeRegister(PPDMDEVINS pDevIns, PGMPHYSHANDLERKIND enmKind,
+ PFNPGMPHYSHANDLER pfnHandler, const char *pszDesc,
+ PPGMPHYSHANDLERTYPE phType)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PGMHandlerPhysicalTypeRegister: caller='%s'/%d: enmKind=%d pfnHandler=%p pszDesc=%p:{%s} phType=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmKind, pfnHandler, pszDesc, pszDesc, phType));
+
+ int rc = PGMR3HandlerPhysicalTypeRegister(pVM, enmKind,
+ pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED
+ ? PGMPHYSHANDLER_F_R0_DEVINS_IDX : 0,
+ pfnHandler, pszDesc, phType);
+
+ Log(("pdmR3DevHlp_PGMHandlerPhysicalTypeRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PGMHandlerPhysicalRegister(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
+ PGMPHYSHANDLERTYPE hType, R3PTRTYPE(const char *) pszDesc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PGMHandlerPhysicalRegister: caller='%s'/%d: GCPhys=%RGp GCPhysLast=%RGp hType=%u pszDesc=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, GCPhysLast, hType, pszDesc, pszDesc));
+
+ int rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType,
+ pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED
+ ? pDevIns->Internal.s.idxR0Device : (uintptr_t)pDevIns,
+ pszDesc);
+
+ Log(("pdmR3DevHlp_PGMHandlerPhysicalRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalDeregister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PGMHandlerPhysicalDeregister(PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PGMHandlerPhysicalDeregister: caller='%s'/%d: GCPhys=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+
+ int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
+
+ Log(("pdmR3DevHlp_PGMHandlerPhysicalDeregister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalPageTempOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_PGMHandlerPhysicalPageTempOff(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PGMHandlerPhysicalPageTempOff: caller='%s'/%d: GCPhys=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+
+ int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhys, GCPhysPage);
+
+ Log(("pdmR3DevHlp_PGMHandlerPhysicalPageTempOff: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalReset} */
+static DECLCALLBACK(int) pdmR3DevHlp_PGMHandlerPhysicalReset(PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PGMHandlerPhysicalReset: caller='%s'/%d: GCPhys=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+
+ int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
+
+ Log(("pdmR3DevHlp_PGMHandlerPhysicalReset: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysRead} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
+{
+ RT_NOREF(fFlags);
+
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysRead: caller='%s'/%d: GCPhys=%RGp pvBuf=%p cbRead=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, pvBuf, cbRead));
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ VBOXSTRICTRC rcStrict;
+ if (VM_IS_EMT(pVM))
+ rcStrict = PGMPhysRead(pVM, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
+ else
+ rcStrict = PGMR3PhysReadExternal(pVM, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ Log(("pdmR3DevHlp_PhysRead: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysWrite} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags)
+{
+ RT_NOREF(fFlags);
+
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysWrite: caller='%s'/%d: GCPhys=%RGp pvBuf=%p cbWrite=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, pvBuf, cbWrite));
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ VBOXSTRICTRC rcStrict;
+ if (VM_IS_EMT(pVM))
+ rcStrict = PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE);
+ else
+ rcStrict = PGMR3PhysWriteExternal(pVM, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ Log(("pdmR3DevHlp_PhysWrite: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysGCPhys2CCPtr} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysGCPhys2CCPtr(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint32_t fFlags, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysGCPhys2CCPtr: caller='%s'/%d: GCPhys=%RGp fFlags=%#x ppv=%p pLock=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, fFlags, ppv, pLock));
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, ppv, pLock);
+
+ Log(("pdmR3DevHlp_PhysGCPhys2CCPtr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysGCPhys2CCPtrReadOnly} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint32_t fFlags, const void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly: caller='%s'/%d: GCPhys=%RGp fFlags=%#x ppv=%p pLock=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, fFlags, ppv, pLock));
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, ppv, pLock);
+
+ Log(("pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysReleasePageMappingLock} */
+static DECLCALLBACK(void) pdmR3DevHlp_PhysReleasePageMappingLock(PPDMDEVINS pDevIns, PPGMPAGEMAPLOCK pLock)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysReleasePageMappingLock: caller='%s'/%d: pLock=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pLock));
+
+ PGMPhysReleasePageMappingLock(pVM, pLock);
+
+ Log(("pdmR3DevHlp_PhysReleasePageMappingLock: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtr} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtr(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
+ uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,..) fFlags=%#x papvPages=%p paLocks=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
+
+ Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkGCPhys2CCPtrReadOnly} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
+ uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: cPages=%#x paGCPhysPages=%p (%RGp,...) fFlags=%#x papvPages=%p paLocks=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cPages, paGCPhysPages, paGCPhysPages[0], fFlags, papvPages, paLocks));
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ int rc = PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(pVM, cPages, paGCPhysPages, papvPages, paLocks);
+
+ Log(("pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysBulkReleasePageMappingLocks} */
+static DECLCALLBACK(void) pdmR3DevHlp_PhysBulkReleasePageMappingLocks(PPDMDEVINS pDevIns, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: cPages=%#x paLocks=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cPages, paLocks));
+ Assert(cPages > 0);
+
+ PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
+
+ Log(("pdmR3DevHlp_PhysBulkReleasePageMappingLocks: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysIsGCPhysNormal} */
+static DECLCALLBACK(bool) pdmR3DevHlp_PhysIsGCPhysNormal(PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_PhysIsGCPhysNormal: caller='%s'/%d: GCPhys=%RGp\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys));
+
+ bool fNormal = PGMPhysIsGCPhysNormal(pDevIns->Internal.s.pVMR3, GCPhys);
+
+ Log(("pdmR3DevHlp_PhysIsGCPhysNormal: caller='%s'/%d: returns %RTbool\n", pDevIns->pReg->szName, pDevIns->iInstance, fNormal));
+ return fNormal;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysChangeMemBalloon} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysChangeMemBalloon(PPDMDEVINS pDevIns, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_PhysChangeMemBalloon: caller='%s'/%d: fInflate=%RTbool cPages=%u paPhysPage=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, fInflate, cPages, paPhysPage));
+
+ int rc = PGMR3PhysChangeMemBalloon(pDevIns->Internal.s.pVMR3, fInflate, cPages, paPhysPage);
+
+ Log(("pdmR3DevHlp_PhysChangeMemBalloon: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCpuGetGuestMicroarch} */
+static DECLCALLBACK(CPUMMICROARCH) pdmR3DevHlp_CpuGetGuestMicroarch(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_CpuGetGuestMicroarch: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ CPUMMICROARCH enmMicroarch = CPUMGetGuestMicroarch(pVM);
+
+ Log(("pdmR3DevHlp_CpuGetGuestMicroarch: caller='%s'/%d: returns %u\n", pDevIns->pReg->szName, pDevIns->iInstance, enmMicroarch));
+ return enmMicroarch;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCpuGetGuestAddrWidths} */
+static DECLCALLBACK(void) pdmR3DevHlp_CpuGetGuestAddrWidths(PPDMDEVINS pDevIns, uint8_t *pcPhysAddrWidth,
+ uint8_t *pcLinearAddrWidth)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_CpuGetGuestAddrWidths: caller='%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ AssertPtrReturnVoid(pcPhysAddrWidth);
+ AssertPtrReturnVoid(pcLinearAddrWidth);
+
+ CPUMGetGuestAddrWidths(pVM, pcPhysAddrWidth, pcLinearAddrWidth);
+
+ Log(("pdmR3DevHlp_CpuGetGuestAddrWidths: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCpuGetGuestScalableBusFrequency} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_CpuGetGuestScalableBusFrequency(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_CpuGetGuestScalableBusFrequency: caller='%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t u64Fsb = CPUMGetGuestScalableBusFrequency(pDevIns->Internal.s.pVMR3);
+
+ Log(("pdmR3DevHlp_CpuGetGuestScalableBusFrequency: caller='%s'/%d: returns %#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64Fsb));
+ return u64Fsb;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysReadGCVirt} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysReadGCVirt(PPDMDEVINS pDevIns, void *pvDst, RTGCPTR GCVirtSrc, size_t cb)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_PhysReadGCVirt: caller='%s'/%d: pvDst=%p GCVirt=%RGv cb=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pvDst, GCVirtSrc, cb));
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ return VERR_ACCESS_DENIED;
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ /** @todo SMP. */
+#endif
+
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, pvDst, GCVirtSrc, cb);
+
+ LogFlow(("pdmR3DevHlp_PhysReadGCVirt: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysWriteGCVirt} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysWriteGCVirt(PPDMDEVINS pDevIns, RTGCPTR GCVirtDst, const void *pvSrc, size_t cb)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_PhysWriteGCVirt: caller='%s'/%d: GCVirtDst=%RGv pvSrc=%p cb=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCVirtDst, pvSrc, cb));
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ return VERR_ACCESS_DENIED;
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ /** @todo SMP. */
+#endif
+
+ int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCVirtDst, pvSrc, cb);
+
+ LogFlow(("pdmR3DevHlp_PhysWriteGCVirt: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysGCPtr2GCPhys} */
+static DECLCALLBACK(int) pdmR3DevHlp_PhysGCPtr2GCPhys(PPDMDEVINS pDevIns, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_PhysGCPtr2GCPhys: caller='%s'/%d: GCPtr=%RGv pGCPhys=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPtr, pGCPhys));
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ return VERR_ACCESS_DENIED;
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ /** @todo SMP. */
+#endif
+
+ int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, pGCPhys);
+
+ LogFlow(("pdmR3DevHlp_PhysGCPtr2GCPhys: caller='%s'/%d: returns %Rrc *pGCPhys=%RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, rc, *pGCPhys));
+
+ return rc;
+}
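+
+/*
+ * Illustrative sketch (assumption; GCPtrReq and the REQ structure are
+ * hypothetical): the three helpers above are EMT-only, so a device called
+ * on the EMT could translate a guest pointer and copy a request like this:
+ *
+ *     REQ Req;
+ *     RTGCPHYS GCPhysReq;
+ *     int rc = pDevIns->pHlpR3->pfnPhysGCPtr2GCPhys(pDevIns, GCPtrReq, &GCPhysReq);
+ *     if (RT_SUCCESS(rc))
+ *         rc = pDevIns->pHlpR3->pfnPhysReadGCVirt(pDevIns, &Req, GCPtrReq, sizeof(Req));
+ */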
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMHeapAlloc} */
+static DECLCALLBACK(void *) pdmR3DevHlp_MMHeapAlloc(PPDMDEVINS pDevIns, size_t cb)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMHeapAlloc: caller='%s'/%d: cb=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, cb));
+
+ void *pv = MMR3HeapAlloc(pDevIns->Internal.s.pVMR3, MM_TAG_PDM_DEVICE_USER, cb);
+
+ LogFlow(("pdmR3DevHlp_MMHeapAlloc: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pv));
+ return pv;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMHeapAllocZ} */
+static DECLCALLBACK(void *) pdmR3DevHlp_MMHeapAllocZ(PPDMDEVINS pDevIns, size_t cb)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMHeapAllocZ: caller='%s'/%d: cb=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, cb));
+
+ void *pv = MMR3HeapAllocZ(pDevIns->Internal.s.pVMR3, MM_TAG_PDM_DEVICE_USER, cb);
+
+ LogFlow(("pdmR3DevHlp_MMHeapAllocZ: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pv));
+ return pv;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMHeapAPrintfV} */
+static DECLCALLBACK(char *) pdmR3DevHlp_MMHeapAPrintfV(PPDMDEVINS pDevIns, MMTAG enmTag, const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMHeapAPrintfV: caller='%s'/%d: enmTag=%u pszFormat=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmTag, pszFormat, pszFormat));
+
+ char *psz = MMR3HeapAPrintfV(pDevIns->Internal.s.pVMR3, enmTag, pszFormat, va);
+
+ LogFlow(("pdmR3DevHlp_MMHeapAPrintfV: caller='%s'/%d: returns %p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, psz, psz));
+ return psz;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMHeapFree} */
+static DECLCALLBACK(void) pdmR3DevHlp_MMHeapFree(PPDMDEVINS pDevIns, void *pv)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMHeapFree: caller='%s'/%d: pv=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pv));
+
+ MMR3HeapFree(pv);
+
+ LogFlow(("pdmR3DevHlp_MMHeapAlloc: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMPhysGetRamSize} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_MMPhysGetRamSize(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSize: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t cb = MMR3PhysGetRamSize(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSize: caller='%s'/%d: returns %RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cb));
+ return cb;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMPhysGetRamSizeBelow4GB} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_MMPhysGetRamSizeBelow4GB(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSizeBelow4GB: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint32_t cb = MMR3PhysGetRamSizeBelow4GB(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSizeBelow4GB: caller='%s'/%d: returns %RU32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cb));
+ return cb;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMMPhysGetRamSizeAbove4GB} */
+static DECLCALLBACK(uint64_t) pdmR3DevHlp_MMPhysGetRamSizeAbove4GB(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSizeAbove4GB: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint64_t cb = MMR3PhysGetRamSizeAbove4GB(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_MMPhysGetRamSizeAbove4GB: caller='%s'/%d: returns %RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cb));
+ return cb;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMState} */
+static DECLCALLBACK(VMSTATE) pdmR3DevHlp_VMState(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ VMSTATE enmVMState = VMR3GetState(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_VMState: caller='%s'/%d: returns %d (%s)\n", pDevIns->pReg->szName, pDevIns->iInstance,
+ enmVMState, VMR3GetStateName(enmVMState)));
+ return enmVMState;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMTeleportedAndNotFullyResumedYet} */
+static DECLCALLBACK(bool) pdmR3DevHlp_VMTeleportedAndNotFullyResumedYet(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ bool fRc = VMR3TeleportedAndNotFullyResumedYet(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_VMState: caller='%s'/%d: returns %RTbool\n", pDevIns->pReg->szName, pDevIns->iInstance,
+ fRc));
+ return fRc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSetErrorV} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMSetErrorV(PPDMDEVINS pDevIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ int rc2 = VMSetErrorV(pDevIns->Internal.s.pVMR3, rc, RT_SRC_POS_ARGS, pszFormat, va); Assert(rc2 == rc); NOREF(rc2);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSetRuntimeErrorV} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMSetRuntimeErrorV(PPDMDEVINS pDevIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ int rc = VMSetRuntimeErrorV(pDevIns->Internal.s.pVMR3, fFlags, pszErrorId, pszFormat, va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMWaitForDeviceReady} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMWaitForDeviceReady(PPDMDEVINS pDevIns, VMCPUID idCpu)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_VMWaitForDeviceReady: caller='%s'/%d: idCpu=%u\n", pDevIns->pReg->szName, pDevIns->iInstance, idCpu));
+
+ int rc = VMR3WaitForDeviceReady(pDevIns->Internal.s.pVMR3, idCpu);
+
+ LogFlow(("pdmR3DevHlp_VMWaitForDeviceReady: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMNotifyCpuDeviceReady} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMNotifyCpuDeviceReady(PPDMDEVINS pDevIns, VMCPUID idCpu)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_VMNotifyCpuDeviceReady: caller='%s'/%d: idCpu=%u\n", pDevIns->pReg->szName, pDevIns->iInstance, idCpu));
+
+ int rc = VMR3NotifyCpuDeviceReady(pDevIns->Internal.s.pVMR3, idCpu);
+
+ LogFlow(("pdmR3DevHlp_VMNotifyCpuDeviceReady: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMReqCallNoWaitV} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMReqCallNoWaitV(PPDMDEVINS pDevIns, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, va_list Args)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_VMReqCallNoWaitV: caller='%s'/%d: idDstCpu=%u pfnFunction=%p cArgs=%u\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, idDstCpu, pfnFunction, cArgs));
+
+ int rc = VMR3ReqCallVU(pDevIns->Internal.s.pVMR3->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
+ pfnFunction, cArgs, Args);
+
+ LogFlow(("pdmR3DevHlp_VMReqCallNoWaitV: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMReqPriorityCallWaitV} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMReqPriorityCallWaitV(PPDMDEVINS pDevIns, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, va_list Args)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_VMReqCallNoWaitV: caller='%s'/%d: idDstCpu=%u pfnFunction=%p cArgs=%u\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, idDstCpu, pfnFunction, cArgs));
+
+ PVMREQ pReq;
+ int rc = VMR3ReqCallVU(pDevIns->Internal.s.pVMR3->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
+ pfnFunction, cArgs, Args);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+
+ LogFlow(("pdmR3DevHlp_VMReqCallNoWaitV: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFStopV} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFStopV(PPDMDEVINS pDevIns, const char *pszFile, unsigned iLine, const char *pszFunction, const char *pszFormat, va_list args)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+#ifdef LOG_ENABLED
+ va_list va2;
+ va_copy(va2, args);
+ LogFlow(("pdmR3DevHlp_DBGFStopV: caller='%s'/%d: pszFile=%p:{%s} iLine=%d pszFunction=%p:{%s} pszFormat=%p:{%s} (%N)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszFile, pszFile, iLine, pszFunction, pszFunction, pszFormat, pszFormat, pszFormat, &va2));
+ va_end(va2);
+#endif
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3EventSrcV(pVM, DBGFEVENT_DEV_STOP, pszFile, iLine, pszFunction, pszFormat, args);
+ if (rc == VERR_DBGF_NOT_ATTACHED)
+ rc = VINF_SUCCESS;
+
+ LogFlow(("pdmR3DevHlp_DBGFStopV: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFInfoRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFInfoRegister(PPDMDEVINS pDevIns, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDEV pfnHandler)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFInfoRegister: caller='%s'/%d: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszName, pszName, pszDesc, pszDesc, pfnHandler));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3InfoRegisterDevice(pVM, pszName, pszDesc, pfnHandler, pDevIns);
+
+ LogFlow(("pdmR3DevHlp_DBGFInfoRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFInfoRegisterArgv} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFInfoRegisterArgv(PPDMDEVINS pDevIns, const char *pszName, const char *pszDesc, PFNDBGFINFOARGVDEV pfnHandler)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFInfoRegisterArgv: caller='%s'/%d: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszName, pszName, pszDesc, pszDesc, pfnHandler));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3InfoRegisterDeviceArgv(pVM, pszName, pszDesc, pfnHandler, pDevIns);
+
+ LogFlow(("pdmR3DevHlp_DBGFInfoRegisterArgv: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFRegRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFRegRegister(PPDMDEVINS pDevIns, PCDBGFREGDESC paRegisters)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFRegRegister: caller='%s'/%d: paRegisters=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, paRegisters));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3RegRegisterDevice(pVM, paRegisters, pDevIns, pDevIns->pReg->szName, pDevIns->iInstance);
+
+ LogFlow(("pdmR3DevHlp_DBGFRegRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFTraceBuf} */
+static DECLCALLBACK(RTTRACEBUF) pdmR3DevHlp_DBGFTraceBuf(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RTTRACEBUF hTraceBuf = pDevIns->Internal.s.pVMR3->hTraceBufR3;
+ LogFlow(("pdmR3DevHlp_DBGFTraceBuf: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, hTraceBuf));
+ return hTraceBuf;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFReportBugCheck} */
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlp_DBGFReportBugCheck(PPDMDEVINS pDevIns, DBGFEVENTTYPE enmEvent, uint64_t uBugCheck,
+ uint64_t uP1, uint64_t uP2, uint64_t uP3, uint64_t uP4)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFReportBugCheck: caller='%s'/%d: enmEvent=%u uBugCheck=%#x uP1=%#x uP2=%#x uP3=%#x uP4=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmEvent, uBugCheck, uP1, uP2, uP3, uP4));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VBOXSTRICTRC rcStrict = DBGFR3ReportBugCheck(pVM, VMMGetCpu(pVM), enmEvent, uBugCheck, uP1, uP2, uP3, uP4);
+
+ LogFlow(("pdmR3DevHlp_DBGFReportBugCheck: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFCoreWrite} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFCoreWrite(PPDMDEVINS pDevIns, const char *pszFilename, bool fReplaceFile)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFCoreWrite: caller='%s'/%d: pszFilename=%p:{%s} fReplaceFile=%RTbool\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszFilename, pszFilename, fReplaceFile));
+
+ int rc = DBGFR3CoreWrite(pDevIns->Internal.s.pVMR3->pUVM, pszFilename, fReplaceFile);
+
+ LogFlow(("pdmR3DevHlp_DBGFCoreWrite: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFInfoLogHlp} */
+static DECLCALLBACK(PCDBGFINFOHLP) pdmR3DevHlp_DBGFInfoLogHlp(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFInfoLogHlp: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ PCDBGFINFOHLP pHlp = DBGFR3InfoLogHlp();
+
+ LogFlow(("pdmR3DevHlp_DBGFInfoLogHlp: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pHlp));
+ return pHlp;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFRegNmQueryU64} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFRegNmQueryU64(PPDMDEVINS pDevIns, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFRegNmQueryU64: caller='%s'/%d: idDefCpu=%u pszReg=%p:{%s} pu64=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, idDefCpu, pszReg, pszReg, pu64));
+
+ int rc = DBGFR3RegNmQueryU64(pDevIns->Internal.s.pVMR3->pUVM, idDefCpu, pszReg, pu64);
+
+ LogFlow(("pdmR3DevHlp_DBGFRegNmQueryU64: caller='%s'/%d: returns %Rrc *pu64=%#RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *pu64));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFRegPrintfV} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFRegPrintfV(PPDMDEVINS pDevIns, VMCPUID idCpu, char *pszBuf, size_t cbBuf,
+ const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFRegPrintfV: caller='%s'/%d: idCpu=%u pszBuf=%p cbBuf=%u pszFormat=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, idCpu, pszBuf, cbBuf, pszFormat, pszFormat));
+
+ int rc = DBGFR3RegPrintfV(pDevIns->Internal.s.pVMR3->pUVM, idCpu, pszBuf, cbBuf, pszFormat, va);
+
+ LogFlow(("pdmR3DevHlp_DBGFRegPrintfV: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSTAMRegister} */
+static DECLCALLBACK(void) pdmR3DevHlp_STAMRegister(PPDMDEVINS pDevIns, void *pvSample, STAMTYPE enmType, const char *pszName,
+ STAMUNIT enmUnit, const char *pszDesc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc;
+ if (*pszName == '/')
+ rc = STAMR3Register(pVM, pvSample, enmType, STAMVISIBILITY_ALWAYS, pszName, enmUnit, pszDesc);
+ /* Provide default device statistics prefix: */
+ else if (pDevIns->pReg->cMaxInstances == 1)
+ rc = STAMR3RegisterF(pVM, pvSample, enmType, STAMVISIBILITY_ALWAYS, enmUnit, pszDesc,
+ "/Devices/%s/%s", pDevIns->pReg->szName, pszName);
+ else
+ rc = STAMR3RegisterF(pVM, pvSample, enmType, STAMVISIBILITY_ALWAYS, enmUnit, pszDesc,
+ "/Devices/%s#%u/%s", pDevIns->pReg->szName, pDevIns->iInstance, pszName);
+ AssertRC(rc);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSTAMRegisterV} */
+static DECLCALLBACK(void) pdmR3DevHlp_STAMRegisterV(PPDMDEVINS pDevIns, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ STAMUNIT enmUnit, const char *pszDesc, const char *pszName, va_list args)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc;
+ if (*pszName == '/')
+ rc = STAMR3RegisterV(pVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, args);
+ else
+ {
+ /* Provide default device statistics prefix: */
+ va_list vaCopy;
+ va_copy(vaCopy, args);
+ if (pDevIns->pReg->cMaxInstances == 1)
+ rc = STAMR3RegisterF(pVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc,
+ "/Devices/%s/%N", pDevIns->pReg->szName, pszName, &vaCopy);
+ else
+ rc = STAMR3RegisterF(pVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc,
+ "/Devices/%s#%u/%N", pDevIns->pReg->szName, pDevIns->iInstance, pszName, &vaCopy);
+ va_end(vaCopy);
+ }
+ AssertRC(rc);
+}
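+
+/*
+ * Naming sketch (assumption; the device name is hypothetical): for a
+ * single-instance device "mydev", pszName "RxBytes" is registered as
+ * "/Devices/mydev/RxBytes"; if several instances are allowed, instance 1
+ * gets "/Devices/mydev#1/RxBytes".  Names starting with '/' are taken as-is.
+ */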
+
+
+/**
+ * @interface_method_impl{PDMDEVHLPR3,pfnSTAMDeregisterByPrefix}
+ */
+static DECLCALLBACK(int) pdmR3DevHlp_STAMDeregisterByPrefix(PPDMDEVINS pDevIns, const char *pszPrefix)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc;
+ if (*pszPrefix == '/')
+ rc = STAMR3DeregisterByPrefix(pVM->pUVM, pszPrefix);
+ else
+ {
+ char szQualifiedPrefix[1024];
+ ssize_t cch;
+ if (pDevIns->pReg->cMaxInstances == 1)
+ cch = RTStrPrintf2(szQualifiedPrefix, sizeof(szQualifiedPrefix), "/Devices/%s/%s", pDevIns->pReg->szName, pszPrefix);
+ else
+ cch = RTStrPrintf2(szQualifiedPrefix, sizeof(szQualifiedPrefix), "/Devices/%s#%u/%s",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszPrefix);
+ AssertReturn(cch > 0, VERR_OUT_OF_RANGE);
+ rc = STAMR3DeregisterByPrefix(pVM->pUVM, szQualifiedPrefix);
+ }
+ AssertRC(rc);
+ return rc;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVHLPR3,pfnPCIRegister}
+ */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIRegister(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t fFlags,
+ uint8_t uPciDevNo, uint8_t uPciFunNo, const char *pszName)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_PCIRegister: caller='%s'/%d: pPciDev=%p:{.config={%#.256Rhxs} fFlags=%#x uPciDevNo=%#x uPciFunNo=%#x pszName=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev->abConfig, fFlags, uPciDevNo, uPciFunNo, pszName, pszName ? pszName : ""));
+
+ /*
+ * Validate input.
+ */
+ AssertLogRelMsgReturn(pDevIns->pReg->cMaxPciDevices > 0,
+ ("'%s'/%d: cMaxPciDevices is 0\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_WRONG_ORDER);
+ AssertLogRelMsgReturn(RT_VALID_PTR(pPciDev),
+ ("'%s'/%d: Invalid pPciDev value: %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pPciDev),
+ VERR_INVALID_POINTER);
+ AssertLogRelMsgReturn(PDMPciDevGetVendorId(pPciDev),
+ ("'%s'/%d: Vendor ID is not set!\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_INVALID_POINTER);
+ AssertLogRelMsgReturn( uPciDevNo < 32
+ || uPciDevNo == PDMPCIDEVREG_DEV_NO_FIRST_UNUSED
+ || uPciDevNo == PDMPCIDEVREG_DEV_NO_SAME_AS_PREV,
+ ("'%s'/%d: Invalid PCI device number: %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, uPciDevNo),
+ VERR_INVALID_PARAMETER);
+ AssertLogRelMsgReturn( uPciFunNo < 8
+ || uPciFunNo == PDMPCIDEVREG_FUN_NO_FIRST_UNUSED,
+ ("'%s'/%d: Invalid PCI funcion number: %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, uPciFunNo),
+ VERR_INVALID_PARAMETER);
+ AssertLogRelMsgReturn(!(fFlags & ~PDMPCIDEVREG_F_VALID_MASK),
+ ("'%s'/%d: Invalid flags: %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, fFlags),
+ VERR_INVALID_FLAGS);
+ if (!pszName)
+ pszName = pDevIns->pReg->szName;
+ AssertLogRelReturn(RT_VALID_PTR(pszName), VERR_INVALID_POINTER);
+ AssertLogRelReturn(!pPciDev->Int.s.fRegistered, VERR_PDM_NOT_PCI_DEVICE);
+ AssertLogRelReturn(pPciDev == PDMDEV_GET_PPCIDEV(pDevIns, pPciDev->Int.s.idxSubDev), VERR_PDM_NOT_PCI_DEVICE);
+ AssertLogRelReturn(pPciDev == PDMDEV_CALC_PPCIDEV(pDevIns, pPciDev->Int.s.idxSubDev), VERR_PDM_NOT_PCI_DEVICE);
+ AssertMsgReturn(pPciDev->u32Magic == PDMPCIDEV_MAGIC, ("%#x\n", pPciDev->u32Magic), VERR_PDM_NOT_PCI_DEVICE);
+
+ /*
+ * Check the registration order - registrations must follow the PDMDEVINSR3::apPciDevs order.
+ */
+ PPDMPCIDEV const pPrevPciDev = pPciDev->Int.s.idxSubDev == 0 ? NULL
+ : PDMDEV_GET_PPCIDEV(pDevIns, pPciDev->Int.s.idxSubDev - 1);
+ if (pPrevPciDev)
+ {
+ AssertLogRelReturn(pPrevPciDev->u32Magic == PDMPCIDEV_MAGIC, VERR_INVALID_MAGIC);
+ AssertLogRelReturn(pPrevPciDev->Int.s.fRegistered, VERR_WRONG_ORDER);
+ }
+
+ /*
+ * Resolve the PCI configuration node for the device. The default (zeroth)
+ * is the PDM device's own node; the rest are "PciCfg1..255" CFGM sub-nodes.
+ */
+ PCFGMNODE pCfg = pDevIns->Internal.s.pCfgHandle;
+ if (pPciDev->Int.s.idxSubDev > 0)
+ pCfg = CFGMR3GetChildF(pDevIns->Internal.s.pCfgHandle, "PciCfg%u", pPciDev->Int.s.idxSubDev);
+
+ /*
+ * We resolve PDMPCIDEVREG_DEV_NO_SAME_AS_PREV, the PCI bus handles
+ * PDMPCIDEVREG_DEV_NO_FIRST_UNUSED and PDMPCIDEVREG_FUN_NO_FIRST_UNUSED.
+ */
+ uint8_t const uPciDevNoRaw = uPciDevNo;
+ uint32_t uDefPciBusNo = 0;
+ if (uPciDevNo == PDMPCIDEVREG_DEV_NO_SAME_AS_PREV)
+ {
+ if (pPrevPciDev)
+ {
+ uPciDevNo = pPrevPciDev->uDevFn >> 3;
+ uDefPciBusNo = pPrevPciDev->Int.s.idxPdmBus;
+ }
+ else
+ {
+ /* Look for a PCI device registered by an earlier device instance so we can
+ more easily have multiple functions spanning multiple PDM device instances. */
+ PPDMDEVINS pPrevIns = pDevIns->Internal.s.pDevR3->pInstances;
+ for (;;)
+ {
+ AssertLogRelMsgReturn(pPrevIns && pPrevIns != pDevIns,
+ ("'%s'/%d: Can't use PDMPCIDEVREG_DEV_NO_SAME_AS_PREV without a previously registered PCI device by the same or earlier PDM device instance!\n",
+ pDevIns->pReg->szName, pDevIns->iInstance), VERR_WRONG_ORDER);
+ if (pPrevIns->Internal.s.pNextR3 == pDevIns)
+ break;
+ pPrevIns = pPrevIns->Internal.s.pNextR3;
+ }
+
+ PPDMPCIDEV pOtherPciDev = PDMDEV_GET_PPCIDEV(pPrevIns, 0);
+ AssertLogRelMsgReturn(pOtherPciDev && pOtherPciDev->Int.s.fRegistered,
+ ("'%s'/%d: Can't use PDMPCIDEVREG_DEV_NO_SAME_AS_PREV without a previously registered PCI device by the same or earlier PDM device instance!\n",
+ pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_WRONG_ORDER);
+ for (uint32_t iPrevPciDev = 1; iPrevPciDev < pPrevIns->cPciDevs; iPrevPciDev++)
+ {
+ PPDMPCIDEV pCur = PDMDEV_GET_PPCIDEV(pPrevIns, iPrevPciDev);
+ AssertBreak(pCur);
+ if (!pCur->Int.s.fRegistered)
+ break;
+ pOtherPciDev = pCur;
+ }
+
+ uPciDevNo = pOtherPciDev->uDevFn >> 3;
+ uDefPciBusNo = pOtherPciDev->Int.s.idxPdmBus;
+ }
+ }
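+ /* Example (hypothetical numbers): if the previous sub-device sits at uDevFn
+ 0x18, i.e. device 3 function 0, PDMPCIDEVREG_DEV_NO_SAME_AS_PREV resolves
+ uPciDevNo to 0x18 >> 3 = 3 and the device inherits that PDM bus index. */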
+
+ /*
+ * Choose the PCI bus for the device.
+ *
+ * This is simple. If the device was configured for a particular bus, the PCIBusNo
+ * configuration value will be set. If not, the default bus is 0.
+ */
+ /** @cfgm{/Devices/NAME/XX/[PciCfgYY/]PCIBusNo, uint8_t, 0, 7, 0}
+ * Selects the PCI bus number of a device. The default value isn't necessarily
+ * zero if the device is registered using PDMPCIDEVREG_DEV_NO_SAME_AS_PREV; in
+ * that case it also inherits the bus number from the previously registered device.
+ */
+ uint8_t u8Bus;
+ int rc = CFGMR3QueryU8Def(pCfg, "PCIBusNo", &u8Bus, (uint8_t)uDefPciBusNo);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: PCIBusNo query failed with rc=%Rrc (%s/%d)\n",
+ rc, pDevIns->pReg->szName, pDevIns->iInstance), rc);
+ AssertLogRelMsgReturn(u8Bus < RT_ELEMENTS(pVM->pdm.s.aPciBuses),
+ ("Configuration error: PCIBusNo=%d, max is %d. (%s/%d)\n", u8Bus,
+ RT_ELEMENTS(pVM->pdm.s.aPciBuses), pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_PDM_NO_PCI_BUS);
+ pPciDev->Int.s.idxPdmBus = u8Bus;
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[u8Bus];
+ if (pBus->pDevInsR3)
+ {
+ /*
+ * Check the configuration for PCI device and function assignment.
+ */
+ /** @cfgm{/Devices/NAME/XX/[PciCfgYY/]PCIDeviceNo, uint8_t, 0, 31}
+ * Overrides the default PCI device number of a device.
+ */
+ uint8_t uCfgDevice;
+ rc = CFGMR3QueryU8(pCfg, "PCIDeviceNo", &uCfgDevice);
+ if (RT_SUCCESS(rc))
+ {
+ AssertMsgReturn(uCfgDevice <= 31,
+ ("Configuration error: PCIDeviceNo=%d, max is 31. (%s/%d/%d)\n",
+ uCfgDevice, pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->Int.s.idxSubDev),
+ VERR_PDM_BAD_PCI_CONFIG);
+ uPciDevNo = uCfgDevice;
+ }
+ else
+ AssertMsgReturn(rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT,
+ ("Configuration error: PCIDeviceNo query failed with rc=%Rrc (%s/%d/%d)\n",
+ rc, pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->Int.s.idxSubDev),
+ rc);
+
+ /** @cfgm{/Devices/NAME/XX/[PciCfgYY/]PCIFunctionNo, uint8_t, 0, 7}
+ * Overrides the default PCI function number of a device.
+ */
+ uint8_t uCfgFunction;
+ rc = CFGMR3QueryU8(pCfg, "PCIFunctionNo", &uCfgFunction);
+ if (RT_SUCCESS(rc))
+ {
+ AssertMsgReturn(uCfgFunction <= 7,
+ ("Configuration error: PCIFunctionNo=%#x, max is 7. (%s/%d/%d)\n",
+ uCfgFunction, pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->Int.s.idxSubDev),
+ VERR_PDM_BAD_PCI_CONFIG);
+ uPciFunNo = uCfgFunction;
+ }
+ else
+ AssertMsgReturn(rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT,
+ ("Configuration error: PCIFunctionNo query failed with rc=%Rrc (%s/%d/%d)\n",
+ rc, pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->Int.s.idxSubDev),
+ rc);
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ PPDMIOMMUR3 pIommu = &pVM->pdm.s.aIommus[0];
+ PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
+ if (pDevInsIommu)
+ {
+ /*
+ * If the PCI device/function number has been explicitly specified via CFGM,
+ * ensure it's not the BDF reserved for the southbridge I/O APIC expected
+ * by Linux guests when using an AMD IOMMU, see @bugref{9654#c23}.
+ *
+ * In the Intel IOMMU case, we re-use the same I/O APIC address to reserve a
+ * PCI slot so the same check below is sufficient, see @bugref{9967#c13}.
+ */
+ uint16_t const uDevFn = VBOX_PCI_DEVFN_MAKE(uPciDevNo, uPciFunNo);
+ uint16_t const uBusDevFn = PCIBDF_MAKE(u8Bus, uDevFn);
+ if (uBusDevFn == VBOX_PCI_BDF_SB_IOAPIC)
+ {
+ LogRel(("Configuration error: PCI BDF (%u:%u:%u) conflicts with SB I/O APIC (%s/%d/%d)\n", u8Bus,
+ uPciDevNo, uPciFunNo, pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->Int.s.idxSubDev));
+ return VERR_NOT_AVAILABLE;
+ }
+ }
+#endif
+
+ /*
+ * Initialize the internal data. We only do the wipe and the members
+ * owned by PDM, the PCI bus does the rest in the registration call.
+ */
+ uint8_t const idxSubDev = pPciDev->Int.s.idxSubDev; /* preserved across the wipe */
+ RT_ZERO(pPciDev->Int);
+
+ pPciDev->Int.s.idxSubDev = idxSubDev;
+ pPciDev->Int.s.idxDevCfg = idxSubDev;
+ pPciDev->Int.s.fReassignableDevNo = uPciDevNoRaw >= VBOX_PCI_MAX_DEVICES;
+ pPciDev->Int.s.fReassignableFunNo = uPciFunNo >= VBOX_PCI_MAX_FUNCTIONS;
+ pPciDev->Int.s.pDevInsR3 = pDevIns;
+ pPciDev->Int.s.idxPdmBus = u8Bus;
+ pPciDev->Int.s.fRegistered = true;
+
+ /* Set some of the public members too. */
+ pPciDev->pszNameR3 = pszName;
+
+ /*
+ * Call the pci bus device to do the actual registration.
+ */
+ pdmLock(pVM);
+ rc = pBus->pfnRegister(pBus->pDevInsR3, pPciDev, fFlags, uPciDevNo, uPciFunNo, pszName);
+ pdmUnlock(pVM);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Registered device '%s'/%d as PCI device %d on bus %d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev->uDevFn, pBus->iBus));
+ else
+ pPciDev->Int.s.fRegistered = false;
+ }
+ else
+ {
+ AssertLogRelMsgFailed(("Configuration error: No PCI bus available. This could be related to init order too!\n"));
+ rc = VERR_PDM_NO_PCI_BUS;
+ }
+
+ LogFlow(("pdmR3DevHlp_PCIRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
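+
+/*
+ * Configuration sketch (assumption; the device name and numbers are
+ * hypothetical): pinning a device to bus 0, device 3, function 0 via the
+ * CFGM keys documented above:
+ *
+ *     Devices/mydev/0/PCIBusNo      = 0
+ *     Devices/mydev/0/PCIDeviceNo   = 3
+ *     Devices/mydev/0/PCIFunctionNo = 0
+ *
+ * A follow-up function registered with PDMPCIDEVREG_DEV_NO_SAME_AS_PREV
+ * inherits device number 3 and the bus, needing only its function number.
+ */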
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIRegisterMsi} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIRegisterMsi(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, PPDMMSIREG pMsiReg)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ LogFlow(("pdmR3DevHlp_PCIRegisterMsi: caller='%s'/%d: pPciDev=%p:{%#x} pMsgReg=%p:{cMsiVectors=%d, cMsixVectors=%d}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, pMsiReg, pMsiReg->cMsiVectors, pMsiReg->cMsixVectors));
+ PDMPCIDEV_ASSERT_VALID_RET(pDevIns, pPciDev);
+
+ AssertLogRelMsgReturn(pDevIns->pReg->cMaxPciDevices > 0,
+ ("'%s'/%d: cMaxPciDevices is 0\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_WRONG_ORDER);
+ AssertLogRelMsgReturn(pMsiReg->cMsixVectors <= pDevIns->pReg->cMaxMsixVectors,
+ ("'%s'/%d: cMsixVectors=%u cMaxMsixVectors=%u\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pMsiReg->cMsixVectors, pDevIns->pReg->cMaxMsixVectors),
+ VERR_INVALID_FLAGS);
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturn(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), VERR_WRONG_ORDER);
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ pdmLock(pVM);
+ int rc;
+ if (pBus->pfnRegisterMsi)
+ rc = pBus->pfnRegisterMsi(pBus->pDevInsR3, pPciDev, pMsiReg);
+ else
+ rc = VERR_NOT_IMPLEMENTED;
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_PCIRegisterMsi: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIIORegionRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIIORegionRegister(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS cbRegion, PCIADDRESSSPACE enmType, uint32_t fFlags,
+ uint64_t hHandle, PFNPCIIOREGIONMAP pfnMapUnmap)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ LogFlow(("pdmR3DevHlp_PCIIORegionRegister: caller='%s'/%d: pPciDev=%p:{%#x} iRegion=%d cbRegion=%RGp enmType=%d fFlags=%#x, hHandle=%#RX64 pfnMapUnmap=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iRegion, cbRegion, enmType, fFlags, hHandle, pfnMapUnmap));
+ PDMPCIDEV_ASSERT_VALID_RET(pDevIns, pPciDev);
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertLogRelMsgReturn(VMR3GetState(pVM) == VMSTATE_CREATING,
+ ("caller='%s'/%d: %s\n", pDevIns->pReg->szName, pDevIns->iInstance, VMR3GetStateName(VMR3GetState(pVM))),
+ VERR_WRONG_ORDER);
+
+ if (iRegion >= VBOX_PCI_NUM_REGIONS)
+ {
+ Assert(iRegion < VBOX_PCI_NUM_REGIONS);
+ LogFlow(("pdmR3DevHlp_PCIIORegionRegister: caller='%s'/%d: returns %Rrc (iRegion)\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ switch ((int)enmType)
+ {
+ case PCI_ADDRESS_SPACE_IO:
+ /*
+ * Sanity check: don't allow registering more than 32K of the PCI I/O space.
+ */
+ AssertLogRelMsgReturn(cbRegion <= _32K,
+ ("caller='%s'/%d: %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, cbRegion),
+ VERR_INVALID_PARAMETER);
+ break;
+
+ case PCI_ADDRESS_SPACE_MEM:
+ case PCI_ADDRESS_SPACE_MEM_PREFETCH:
+ /*
+ * Sanity check: Don't allow registering more than 2GB of the PCI MMIO space.
+ */
+ AssertLogRelMsgReturn(cbRegion <= MM_MMIO_32_MAX,
+ ("caller='%s'/%d: %RGp (max %RGp)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbRegion, (RTGCPHYS)MM_MMIO_32_MAX),
+ VERR_OUT_OF_RANGE);
+ break;
+
+ case PCI_ADDRESS_SPACE_BAR64 | PCI_ADDRESS_SPACE_MEM:
+ case PCI_ADDRESS_SPACE_BAR64 | PCI_ADDRESS_SPACE_MEM_PREFETCH:
+ /*
+ * Sanity check: Don't allow registering more than 64GB of the 64-bit PCI MMIO space.
+ */
+ AssertLogRelMsgReturn(cbRegion <= MM_MMIO_64_MAX,
+ ("caller='%s'/%d: %RGp (max %RGp)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbRegion, MM_MMIO_64_MAX),
+ VERR_OUT_OF_RANGE);
+ break;
+
+ default:
+ AssertMsgFailed(("enmType=%#x is unknown\n", enmType));
+ LogFlow(("pdmR3DevHlp_PCIIORegionRegister: caller='%s'/%d: returns %Rrc (enmType)\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ AssertMsgReturn( pfnMapUnmap
+ || ( hHandle != UINT64_MAX
+ && (fFlags & PDMPCIDEV_IORGN_F_HANDLE_MASK) != PDMPCIDEV_IORGN_F_NO_HANDLE),
+ ("caller='%s'/%d: fFlags=%#x hHandle=%#RX64\n", pDevIns->pReg->szName, pDevIns->iInstance, fFlags, hHandle),
+ VERR_INVALID_PARAMETER);
+
+ AssertMsgReturn(!(fFlags & ~PDMPCIDEV_IORGN_F_VALID_MASK), ("fFlags=%#x\n", fFlags), VERR_INVALID_FLAGS);
+ int rc;
+ switch (fFlags & PDMPCIDEV_IORGN_F_HANDLE_MASK)
+ {
+ case PDMPCIDEV_IORGN_F_NO_HANDLE:
+ break;
+ case PDMPCIDEV_IORGN_F_IOPORT_HANDLE:
+ AssertReturn(enmType == PCI_ADDRESS_SPACE_IO, VERR_INVALID_FLAGS);
+ rc = IOMR3IoPortValidateHandle(pVM, pDevIns, (IOMIOPORTHANDLE)hHandle);
+ AssertRCReturn(rc, rc);
+ break;
+ case PDMPCIDEV_IORGN_F_MMIO_HANDLE:
+ AssertReturn( (enmType & ~PCI_ADDRESS_SPACE_BAR64) == PCI_ADDRESS_SPACE_MEM
+ || (enmType & ~PCI_ADDRESS_SPACE_BAR64) == PCI_ADDRESS_SPACE_MEM_PREFETCH,
+ VERR_INVALID_FLAGS);
+ rc = IOMR3MmioValidateHandle(pVM, pDevIns, (IOMMMIOHANDLE)hHandle);
+ AssertRCReturn(rc, rc);
+ break;
+ case PDMPCIDEV_IORGN_F_MMIO2_HANDLE:
+ AssertReturn( (enmType & ~PCI_ADDRESS_SPACE_BAR64) == PCI_ADDRESS_SPACE_MEM
+ || (enmType & ~PCI_ADDRESS_SPACE_BAR64) == PCI_ADDRESS_SPACE_MEM_PREFETCH,
+ VERR_INVALID_FLAGS);
+ rc = PGMR3PhysMmio2ValidateHandle(pVM, pDevIns, (PGMMMIO2HANDLE)hHandle);
+ AssertRCReturn(rc, rc);
+ break;
+ default:
+ AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+ break;
+ }
+
+ /* This flag is required now. */
+ AssertLogRelMsgReturn(fFlags & PDMPCIDEV_IORGN_F_NEW_STYLE,
+ ("'%s'/%d: Invalid flags: %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, fFlags),
+ VERR_INVALID_FLAGS);
+
+ /*
+ * We're currently restricted to page aligned MMIO regions.
+ */
+ if ( ((enmType & ~(PCI_ADDRESS_SPACE_BAR64 | PCI_ADDRESS_SPACE_MEM_PREFETCH)) == PCI_ADDRESS_SPACE_MEM)
+ && cbRegion != RT_ALIGN_64(cbRegion, GUEST_PAGE_SIZE))
+ {
+ Log(("pdmR3DevHlp_PCIIORegionRegister: caller='%s'/%d: aligning cbRegion %RGp -> %RGp\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbRegion, RT_ALIGN_64(cbRegion, GUEST_PAGE_SIZE)));
+ cbRegion = RT_ALIGN_64(cbRegion, GUEST_PAGE_SIZE);
+ }
+
+ /*
+ * For registering PCI MMIO memory or PCI I/O memory, the size of the region must be a power of 2!
+ */
+ int iLastSet = ASMBitLastSetU64(cbRegion);
+ Assert(iLastSet > 0);
+ uint64_t cbRegionAligned = RT_BIT_64(iLastSet - 1);
+ if (cbRegion > cbRegionAligned)
+ cbRegion = cbRegionAligned * 2; /* round up */
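+ /* Example: cbRegion = 0x6000 gives iLastSet = 15 and cbRegionAligned =
+ RT_BIT_64(14) = 0x4000, so cbRegion is rounded up to 0x8000. */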
+
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturn(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), VERR_WRONG_ORDER);
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ pdmLock(pVM);
+ rc = pBus->pfnIORegionRegister(pBus->pDevInsR3, pPciDev, iRegion, cbRegion, enmType, fFlags, hHandle, pfnMapUnmap);
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_PCIIORegionRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIInterceptConfigAccesses} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIInterceptConfigAccesses(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
+ PFNPCICONFIGREAD pfnRead, PFNPCICONFIGWRITE pfnWrite)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ LogFlow(("pdmR3DevHlp_PCIInterceptConfigAccesses: caller='%s'/%d: pPciDev=%p pfnRead=%p pfnWrite=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pfnRead, pfnWrite));
+ PDMPCIDEV_ASSERT_VALID_RET(pDevIns, pPciDev);
+
+ /*
+ * Validate input.
+ */
+ AssertPtr(pfnRead);
+ AssertPtr(pfnWrite);
+ AssertPtr(pPciDev);
+
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturn(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), VERR_INTERNAL_ERROR_2);
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+ AssertRelease(VMR3GetState(pVM) != VMSTATE_RUNNING);
+
+ /*
+ * Do the job.
+ */
+ pdmLock(pVM);
+ pBus->pfnInterceptConfigAccesses(pBus->pDevInsR3, pPciDev, pfnRead, pfnWrite);
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_PCIInterceptConfigAccesses: caller='%s'/%d: returns VINF_SUCCESS\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VINF_SUCCESS;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIConfigWrite} */
+static DECLCALLBACK(VBOXSTRICTRC)
+pdmR3DevHlp_PCIConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress, unsigned cb, uint32_t u32Value)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ AssertPtrReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ LogFlow(("pdmR3DevHlp_PCIConfigWrite: caller='%s'/%d: pPciDev=%p uAddress=%#x cd=%d u32Value=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, uAddress, cb, u32Value));
+
+ /*
+ * Resolve the bus.
+ */
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturn(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), VERR_INTERNAL_ERROR_2);
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ /*
+ * Do the job.
+ */
+ VBOXSTRICTRC rcStrict = pBus->pfnConfigWrite(pBus->pDevInsR3, pPciDev, uAddress, cb, u32Value);
+
+ LogFlow(("pdmR3DevHlp_PCIConfigWrite: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIConfigRead} */
+static DECLCALLBACK(VBOXSTRICTRC)
+pdmR3DevHlp_PCIConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ AssertPtrReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ LogFlow(("pdmR3DevHlp_PCIConfigRead: caller='%s'/%d: pPciDev=%p uAddress=%#x cd=%d pu32Value=%p:{%#x}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, uAddress, cb, pu32Value, *pu32Value));
+
+ /*
+ * Resolve the bus.
+ */
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturn(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), VERR_INTERNAL_ERROR_2);
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ /*
+ * Do the job.
+ */
+ VBOXSTRICTRC rcStrict = pBus->pfnConfigRead(pBus->pDevInsR3, pPciDev, uAddress, cb, pu32Value);
+
+ LogFlow(("pdmR3DevHlp_PCIConfigRead: caller='%s'/%d: returns %Rrc (*pu32Value=%#x)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict), *pu32Value));
+ return rcStrict;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysRead} */
+static DECLCALLBACK(int)
+pdmR3DevHlp_PCIPhysRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic read helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ Log(("pdmR3DevHlp_PCIPhysRead: caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbRead=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbRead));
+ memset(pvBuf, 0xff, cbRead);
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmIommuMemAccessRead(pDevIns, pPciDev, GCPhys, pvBuf, cbRead, fFlags);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysRead(pDevIns, GCPhys, pvBuf, cbRead, fFlags);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysWrite} */
+static DECLCALLBACK(int)
+pdmR3DevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic write helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ Log(("pdmR3DevHlp_PCIPhysWrite: caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmIommuMemAccessWrite(pDevIns, pPciDev, GCPhys, pvBuf, cbWrite, fFlags);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite, fFlags);
+}
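+
+/*
+ * DMA sketch (assumption; the descriptor address, buffer size and the zero
+ * flags value are hypothetical): passing NULL selects the default PCI device.
+ *
+ *     uint8_t abBuf[64];
+ *     int rc = pDevIns->pHlpR3->pfnPCIPhysRead(pDevIns, NULL, GCPhysDesc,
+ *                                              abBuf, sizeof(abBuf), 0);
+ *
+ * If the bus master bit is clear the call fails with
+ * VERR_PDM_NOT_PCI_BUS_MASTER and the buffer is filled with 0xff, matching
+ * what reads on a real PCI bus would return.
+ */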
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysGCPhys2CCPtr} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysGCPhys2CCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys,
+ uint32_t fFlags, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ LogFunc(("caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp fFlags=%#RX32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, fFlags));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmR3IommuMemAccessWriteCCPtr(pDevIns, pPciDev, GCPhys, fFlags, ppv, pLock);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtr(pDevIns, GCPhys, fFlags, ppv, pLock);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysGCPhys2CCPtrReadOnly} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys,
+ uint32_t fFlags, void const **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ LogFunc(("caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp fFlags=%#RX32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, fFlags));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmR3IommuMemAccessReadCCPtr(pDevIns, pPciDev, GCPhys, fFlags, ppv, pLock);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhys, fFlags, ppv, pLock);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysBulkGCPhys2CCPtr} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages,
+ PCRTGCPHYS paGCPhysPages, uint32_t fFlags, void **papvPages,
+ PPGMPAGEMAPLOCK paLocks)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ LogFunc(("caller='%s'/%d: returns %Rrc - Not bus master! cPages=%zu fFlags=%#RX32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, cPages, fFlags));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmR3IommuMemAccessBulkWriteCCPtr(pDevIns, pPciDev, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtr(pDevIns, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysBulkGCPhys2CCPtrReadOnly} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtrReadOnly(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages,
+ PCRTGCPHYS paGCPhysPages, uint32_t fFlags,
+ const void **papvPages, PPGMPAGEMAPLOCK paLocks)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ LogFunc(("caller='%s'/%d: returns %Rrc - Not bus master! cPages=%zu fFlags=%#RX32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, cPages, fFlags));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmR3IommuMemAccessBulkReadCCPtr(pDevIns, pPciDev, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, paGCPhysPages, fFlags, papvPages, paLocks);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCISetIrq} */
+static DECLCALLBACK(void) pdmR3DevHlp_PCISetIrq(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturnVoid(pPciDev);
+ LogFlow(("pdmR3DevHlp_PCISetIrq: caller='%s'/%d: pPciDev=%p:{%#x} iIrq=%d iLevel=%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iIrq, iLevel));
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+ /*
+ * Validate input.
+ */
+ Assert(iIrq == 0);
+ Assert((uint32_t)iLevel <= PDM_IRQ_LEVEL_FLIP_FLOP);
+
+ /*
+ * Must have a PCI device registered!
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturnVoid(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses));
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ pBus->pfnSetIrqR3(pBus->pDevInsR3, pPciDev, iIrq, iLevel, uTagSrc);
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_PCISetIrq: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCISetIrqNoWait} */
+static DECLCALLBACK(void) pdmR3DevHlp_PCISetIrqNoWait(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel)
+{
+ pdmR3DevHlp_PCISetIrq(pDevIns, pPciDev, iIrq, iLevel);
+}
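+
+/*
+ * Level semantics sketch (assumption; the interrupt cause is hypothetical):
+ * a level-triggered raise and later clear map to two calls,
+ *
+ *     pDevIns->pHlpR3->pfnPCISetIrq(pDevIns, NULL, 0, PDM_IRQ_LEVEL_HIGH);
+ *     pDevIns->pHlpR3->pfnPCISetIrq(pDevIns, NULL, 0, PDM_IRQ_LEVEL_LOW);
+ *
+ * while PDM_IRQ_LEVEL_FLIP_FLOP requests a raise-and-lower pulse in a single
+ * call.  A tracing tag (uTagSrc) is allocated when the line goes high and
+ * reused when it is lowered.
+ */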
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnISASetIrq} */
+static DECLCALLBACK(void) pdmR3DevHlp_ISASetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_ISASetIrq: caller='%s'/%d: iIrq=%d iLevel=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, iIrq, iLevel));
+
+ /*
+ * Validate input.
+ */
+ Assert(iIrq < 16);
+ Assert((uint32_t)iLevel <= PDM_IRQ_LEVEL_FLIP_FLOP);
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ /*
+ * Do the job.
+ */
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ PDMIsaSetIrq(pVM, iIrq, iLevel, uTagSrc); /* (The API takes the lock recursively.) */
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_ISASetIrq: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnISASetIrqNoWait} */
+static DECLCALLBACK(void) pdmR3DevHlp_ISASetIrqNoWait(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ pdmR3DevHlp_ISASetIrq(pDevIns, iIrq, iLevel);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDriverAttach} */
+static DECLCALLBACK(int) pdmR3DevHlp_DriverAttach(PPDMDEVINS pDevIns, uint32_t iLun, PPDMIBASE pBaseInterface, PPDMIBASE *ppBaseInterface, const char *pszDesc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DriverAttach: caller='%s'/%d: iLun=%d pBaseInterface=%p ppBaseInterface=%p pszDesc=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, iLun, pBaseInterface, ppBaseInterface, pszDesc, pszDesc));
+
+ /*
+ * Lookup the LUN, it might already be registered.
+ */
+ PPDMLUN pLunPrev = NULL;
+ PPDMLUN pLun = pDevIns->Internal.s.pLunsR3;
+ for (; pLun; pLunPrev = pLun, pLun = pLun->pNext)
+ if (pLun->iLun == iLun)
+ break;
+
+ /*
+ * Create the LUN if it wasn't found, else check whether a driver is already attached to it.
+ */
+ if (!pLun)
+ {
+ if ( !pBaseInterface
+ || !pszDesc
+ || !*pszDesc)
+ {
+ Assert(pBaseInterface);
+ Assert(pszDesc && *pszDesc);
+ return VERR_INVALID_PARAMETER;
+ }
+
+ pLun = (PPDMLUN)MMR3HeapAlloc(pVM, MM_TAG_PDM_LUN, sizeof(*pLun));
+ if (!pLun)
+ return VERR_NO_MEMORY;
+
+ pLun->iLun = iLun;
+ pLun->pNext = pLunPrev ? pLunPrev->pNext : NULL;
+ pLun->pTop = NULL;
+ pLun->pBottom = NULL;
+ pLun->pDevIns = pDevIns;
+ pLun->pUsbIns = NULL;
+ pLun->pszDesc = pszDesc;
+ pLun->pBase = pBaseInterface;
+ if (!pLunPrev)
+ pDevIns->Internal.s.pLunsR3 = pLun;
+ else
+ pLunPrev->pNext = pLun;
+ Log(("pdmR3DevHlp_DriverAttach: Registered LUN#%d '%s' with device '%s'/%d.\n",
+ iLun, pszDesc, pDevIns->pReg->szName, pDevIns->iInstance));
+ }
+ else if (pLun->pTop)
+ {
+ AssertMsgFailed(("Already attached! The device should keep track of such things!\n"));
+ LogFlow(("pdmR3DevHlp_DriverAttach: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_DRIVER_ALREADY_ATTACHED));
+ return VERR_PDM_DRIVER_ALREADY_ATTACHED;
+ }
+ Assert(pLun->pBase == pBaseInterface);
+
+
+ /*
+ * Get the attached driver configuration.
+ */
+ int rc;
+ PCFGMNODE pNode = CFGMR3GetChildF(pDevIns->Internal.s.pCfgHandle, "LUN#%u", iLun);
+ if (pNode)
+ rc = pdmR3DrvInstantiate(pVM, pNode, pBaseInterface, NULL /*pDrvAbove*/, pLun, ppBaseInterface);
+ else
+ rc = VERR_PDM_NO_ATTACHED_DRIVER;
+
+ LogFlow(("pdmR3DevHlp_DriverAttach: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
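+
+/* Illustrative sketch (not upstream code): a device attaching whatever driver
+ * chain is configured under "LUN#0" during construction.  The IBase pointer
+ * and the "Primary port" description are hypothetical.
+ */
+#if 0 /* documentation only */
+static int exampleAttachLun0(PPDMDEVINS pDevIns, PPDMIBASE pIBase)
+{
+    PPDMIBASE pDrvBase = NULL;
+    int rc = pdmR3DevHlp_DriverAttach(pDevIns, 0 /*iLun*/, pIBase, &pDrvBase, "Primary port");
+    if (rc == VERR_PDM_NO_ATTACHED_DRIVER)
+        rc = VINF_SUCCESS; /* An unconfigured LUN is typically not fatal. */
+    return rc;
+}
+#endif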
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDriverDetach} */
+static DECLCALLBACK(int) pdmR3DevHlp_DriverDetach(PPDMDEVINS pDevIns, PPDMDRVINS pDrvIns, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ LogFlow(("pdmR3DevHlp_DriverDetach: caller='%s'/%d: pDrvIns=%p fFlags=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDrvIns, fFlags));
+
+#ifdef VBOX_STRICT
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+#endif
+
+ int rc = pdmR3DrvDetach(pDrvIns, fFlags);
+
+ LogFlow(("pdmR3DevHlp_DriverDetach: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDriverReconfigure} */
+static DECLCALLBACK(int) pdmR3DevHlp_DriverReconfigure(PPDMDEVINS pDevIns, uint32_t iLun, uint32_t cDepth,
+ const char * const *papszDrivers, PCFGMNODE *papConfigs, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DriverReconfigure: caller='%s'/%d: iLun=%u cDepth=%u fFlags=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, iLun, cDepth, fFlags));
+
+ /*
+ * Validate input.
+ */
+ AssertReturn(cDepth <= 8, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(papszDrivers, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(papConfigs, VERR_INVALID_POINTER);
+ for (uint32_t i = 0; i < cDepth; i++)
+ {
+ AssertPtrReturn(papszDrivers[i], VERR_INVALID_POINTER);
+ size_t cchDriver = strlen(papszDrivers[i]);
+ AssertReturn(cchDriver > 0 && cchDriver < RT_SIZEOFMEMB(PDMDRVREG, szName), VERR_OUT_OF_RANGE);
+
+ if (papConfigs)
+ AssertPtrNullReturn(papConfigs[i], VERR_INVALID_POINTER);
+ }
+ AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);
+
+ /*
+ * Do we have to detach an existing driver first?
+ */
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ if (pLun->iLun == iLun)
+ {
+ if (pLun->pTop)
+ {
+ int rc = pdmR3DrvDetach(pLun->pTop, 0);
+ AssertRCReturn(rc, rc);
+ }
+ break;
+ }
+
+ /*
+ * Remove the old tree.
+ */
+ PCFGMNODE pCfgDev = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "Devices/%s/%u/", pDevIns->pReg->szName, pDevIns->iInstance);
+ AssertReturn(pCfgDev, VERR_INTERNAL_ERROR_2);
+ PCFGMNODE pCfgLun = CFGMR3GetChildF(pCfgDev, "LUN#%u", iLun);
+ if (pCfgLun)
+ CFGMR3RemoveNode(pCfgLun);
+
+ /*
+ * Construct a new tree.
+ */
+ int rc = CFGMR3InsertNodeF(pCfgDev, &pCfgLun, "LUN#%u", iLun);
+ AssertRCReturn(rc, rc);
+ PCFGMNODE pCfgDrv = pCfgLun;
+ for (uint32_t i = 0; i < cDepth; i++)
+ {
+ rc = CFGMR3InsertString(pCfgDrv, "Driver", papszDrivers[i]);
+ AssertRCReturn(rc, rc);
+ if (papConfigs && papConfigs[i])
+ {
+ rc = CFGMR3InsertSubTree(pCfgDrv, "Config", papConfigs[i], NULL);
+ AssertRCReturn(rc, rc);
+ papConfigs[i] = NULL;
+ }
+ else
+ {
+ rc = CFGMR3InsertNode(pCfgDrv, "Config", NULL);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (i + 1 >= cDepth)
+ break;
+ rc = CFGMR3InsertNode(pCfgDrv, "AttachedDriver", &pCfgDrv);
+ AssertRCReturn(rc, rc);
+ }
+
+ LogFlow(("pdmR3DevHlp_DriverReconfigure: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
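+
+/* Illustrative sketch (not upstream code): replacing whatever is attached to
+ * LUN#0 with a two-driver chain.  The driver names are hypothetical and must
+ * match registered PDMDRVREG::szName values; passing NULL for papConfigs
+ * gives every driver an empty "Config" node.
+ */
+#if 0 /* documentation only */
+static int exampleReconfigureLun0(PPDMDEVINS pDevIns)
+{
+    static const char * const s_apszChain[] = { "TopDriver", "BottomDriver" };
+    return pdmR3DevHlp_DriverReconfigure(pDevIns, 0 /*iLun*/, RT_ELEMENTS(s_apszChain),
+                                         s_apszChain, NULL /*papConfigs*/, 0 /*fFlags*/);
+}
+#endif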
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueueCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_QueueCreate(PPDMDEVINS pDevIns, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEDEV pfnCallback, bool fRZEnabled, const char *pszName,
+ PDMQUEUEHANDLE *phQueue)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_QueueCreate: caller='%s'/%d: cbItem=%#x cItems=%#x cMilliesInterval=%u pfnCallback=%p fRZEnabled=%RTbool pszName=%p:{%s} phQueue=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbItem, cItems, cMilliesInterval, pfnCallback, fRZEnabled, pszName, pszName, phQueue));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ if (pDevIns->iInstance > 0)
+ {
+ pszName = MMR3HeapAPrintf(pVM, MM_TAG_PDM_DEVICE_DESC, "%s_%u", pszName, pDevIns->iInstance);
+ AssertLogRelReturn(pszName, VERR_NO_MEMORY);
+ }
+
+ int rc = PDMR3QueueCreateDevice(pVM, pDevIns, cbItem, cItems, cMilliesInterval, pfnCallback, fRZEnabled, pszName, phQueue);
+
+ LogFlow(("pdmR3DevHlp_QueueCreate: caller='%s'/%d: returns %Rrc *phQueue=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *phQueue));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueueAlloc} */
+static DECLCALLBACK(PPDMQUEUEITEMCORE) pdmR3DevHlp_QueueAlloc(PPDMDEVINS pDevIns, PDMQUEUEHANDLE hQueue)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMQueueAlloc(pDevIns->Internal.s.pVMR3, hQueue, pDevIns);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueueInsert} */
+static DECLCALLBACK(int) pdmR3DevHlp_QueueInsert(PPDMDEVINS pDevIns, PDMQUEUEHANDLE hQueue, PPDMQUEUEITEMCORE pItem)
+{
+ return PDMQueueInsert(pDevIns->Internal.s.pVMR3, hQueue, pDevIns, pItem);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueueFlushIfNecessary} */
+static DECLCALLBACK(bool) pdmR3DevHlp_QueueFlushIfNecessary(PPDMDEVINS pDevIns, PDMQUEUEHANDLE hQueue)
+{
+ return PDMQueueFlushIfNecessary(pDevIns->Internal.s.pVMR3, hQueue, pDevIns) == VINF_SUCCESS;
+}
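+
+/* Illustrative sketch (not upstream code): the alloc/fill/insert pattern for
+ * the queue helpers above.  EXAMPLEITEM and the consumer callback are
+ * hypothetical; the item type must embed PDMQUEUEITEMCORE as its first member.
+ */
+#if 0 /* documentation only */
+typedef struct EXAMPLEITEM { PDMQUEUEITEMCORE Core; uint32_t uPayload; } EXAMPLEITEM;
+
+static int exampleQueueSetup(PPDMDEVINS pDevIns, PFNPDMQUEUEDEV pfnConsumer, PDMQUEUEHANDLE *phQueue)
+{
+    return pdmR3DevHlp_QueueCreate(pDevIns, sizeof(EXAMPLEITEM), 16 /*cItems*/, 0 /*cMilliesInterval*/,
+                                   pfnConsumer, false /*fRZEnabled*/, "ExampleQueue", phQueue);
+}
+
+static void exampleQueuePost(PPDMDEVINS pDevIns, PDMQUEUEHANDLE hQueue, uint32_t uPayload)
+{
+    EXAMPLEITEM *pItem = (EXAMPLEITEM *)pdmR3DevHlp_QueueAlloc(pDevIns, hQueue);
+    if (pItem)
+    {
+        pItem->uPayload = uPayload;
+        pdmR3DevHlp_QueueInsert(pDevIns, hQueue, &pItem->Core);
+    }
+}
+#endif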
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTaskCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_TaskCreate(PPDMDEVINS pDevIns, uint32_t fFlags, const char *pszName,
+ PFNPDMTASKDEV pfnCallback, void *pvUser, PDMTASKHANDLE *phTask)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TaskCreate: caller='%s'/%d: pfnCallback=%p fFlags=%#x pszName=%p:{%s} phTask=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pfnCallback, fFlags, pszName, pszName, phTask));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc = PDMR3TaskCreate(pVM, fFlags, pszName, PDMTASKTYPE_DEV, pDevIns, (PFNRT)pfnCallback, pvUser, phTask);
+
+ LogFlow(("pdmR3DevHlp_TaskCreate: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnTaskTrigger} */
+static DECLCALLBACK(int) pdmR3DevHlp_TaskTrigger(PPDMDEVINS pDevIns, PDMTASKHANDLE hTask)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_TaskTrigger: caller='%s'/%d: hTask=%RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, hTask));
+
+ int rc = PDMTaskTrigger(pDevIns->Internal.s.pVMR3, PDMTASKTYPE_DEV, pDevIns, hTask);
+
+ LogFlow(("pdmR3DevHlp_TaskTrigger: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
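+
+/* Illustrative sketch (not upstream code): creating a task at construction
+ * time and triggering it from a hot path; the callback then runs on a PDM
+ * task worker thread.  The flags value and worker callback are hypothetical.
+ */
+#if 0 /* documentation only */
+static int exampleTaskSetup(PPDMDEVINS pDevIns, PFNPDMTASKDEV pfnWorker, void *pvUser, PDMTASKHANDLE *phTask)
+{
+    return pdmR3DevHlp_TaskCreate(pDevIns, 0 /*fFlags*/, "ExampleTask", pfnWorker, pvUser, phTask);
+}
+
+static void exampleTaskKick(PPDMDEVINS pDevIns, PDMTASKHANDLE hTask)
+{
+    int rc = pdmR3DevHlp_TaskTrigger(pDevIns, hTask);
+    AssertRC(rc);
+}
+#endif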
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventCreate(PPDMDEVINS pDevIns, PSUPSEMEVENT phEvent)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventCreate: caller='%s'/%d: phEvent=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, phEvent));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc = SUPSemEventCreate(pVM->pSession, phEvent);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventCreate: caller='%s'/%d: returns %Rrc *phEvent=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, rc, *phEvent));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventClose} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventClose(PPDMDEVINS pDevIns, SUPSEMEVENT hEvent)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventClose: caller='%s'/%d: hEvent=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, hEvent));
+
+ int rc = SUPSemEventClose(pDevIns->Internal.s.pVMR3->pSession, hEvent);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventClose: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventSignal} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventSignal(PPDMDEVINS pDevIns, SUPSEMEVENT hEvent)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventSignal: caller='%s'/%d: hEvent=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, hEvent));
+
+ int rc = SUPSemEventSignal(pDevIns->Internal.s.pVMR3->pSession, hEvent);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventSignal: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventWaitNoResume} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventWaitNoResume(PPDMDEVINS pDevIns, SUPSEMEVENT hEvent, uint32_t cMillies)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNoResume: caller='%s'/%d: hEvent=%p cMillies=%RU32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEvent, cMillies));
+
+ int rc = SUPSemEventWaitNoResume(pDevIns->Internal.s.pVMR3->pSession, hEvent, cMillies);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNoResume: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventWaitNsAbsIntr} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventWaitNsAbsIntr(PPDMDEVINS pDevIns, SUPSEMEVENT hEvent, uint64_t uNsTimeout)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNsAbsIntr: caller='%s'/%d: hEvent=%p uNsTimeout=%RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEvent, uNsTimeout));
+
+ int rc = SUPSemEventWaitNsAbsIntr(pDevIns->Internal.s.pVMR3->pSession, hEvent, uNsTimeout);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNsAbsIntr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventWaitNsRelIntr} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventWaitNsRelIntr(PPDMDEVINS pDevIns, SUPSEMEVENT hEvent, uint64_t cNsTimeout)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNsRelIntr: caller='%s'/%d: hEvent=%p cNsTimeout=%RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEvent, cNsTimeout));
+
+ int rc = SUPSemEventWaitNsRelIntr(pDevIns->Internal.s.pVMR3->pSession, hEvent, cNsTimeout);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventWaitNsRelIntr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventGetResolution} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_SUPSemEventGetResolution(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventGetResolution: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint32_t cNsResolution = SUPSemEventGetResolution(pDevIns->Internal.s.pVMR3->pSession);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventGetResolution: caller='%s'/%d: returns %u\n", pDevIns->pReg->szName, pDevIns->iInstance, cNsResolution));
+ return cNsResolution;
+}
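+
+/* Illustrative sketch (not upstream code): the typical lifecycle of a
+ * single-release event semaphore created through the helpers above.  The
+ * "NoResume" wait does not resume interrupted waits, so callers retry on
+ * VERR_INTERRUPTED.
+ */
+#if 0 /* documentation only */
+static void exampleSemEventCycle(PPDMDEVINS pDevIns)
+{
+    SUPSEMEVENT hEvent = NIL_SUPSEMEVENT;
+    int rc = pdmR3DevHlp_SUPSemEventCreate(pDevIns, &hEvent);
+    if (RT_SUCCESS(rc))
+    {
+        pdmR3DevHlp_SUPSemEventSignal(pDevIns, hEvent);   /* producer side */
+        do  /* consumer side */
+            rc = pdmR3DevHlp_SUPSemEventWaitNoResume(pDevIns, hEvent, 100 /*cMillies*/);
+        while (rc == VERR_INTERRUPTED);
+        pdmR3DevHlp_SUPSemEventClose(pDevIns, hEvent);
+    }
+}
+#endif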
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiCreate(PPDMDEVINS pDevIns, PSUPSEMEVENTMULTI phEventMulti)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiCreate: caller='%s'/%d: phEventMulti=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, phEventMulti));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc = SUPSemEventMultiCreate(pVM->pSession, phEventMulti);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiCreate: caller='%s'/%d: returns %Rrc *phEventMulti=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, rc, *phEventMulti));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiClose} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiClose(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiClose: caller='%s'/%d: hEventMulti=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti));
+
+ int rc = SUPSemEventMultiClose(pDevIns->Internal.s.pVMR3->pSession, hEventMulti);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiClose: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiSignal} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiSignal(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiSignal: caller='%s'/%d: hEventMulti=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti));
+
+ int rc = SUPSemEventMultiSignal(pDevIns->Internal.s.pVMR3->pSession, hEventMulti);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiSignal: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiReset} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiReset(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiReset: caller='%s'/%d: hEventMulti=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti));
+
+ int rc = SUPSemEventMultiReset(pDevIns->Internal.s.pVMR3->pSession, hEventMulti);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiReset: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiWaitNoResume} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiWaitNoResume(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti,
+ uint32_t cMillies)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNoResume: caller='%s'/%d: hEventMulti=%p cMillies=%RU32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti, cMillies));
+
+ int rc = SUPSemEventMultiWaitNoResume(pDevIns->Internal.s.pVMR3->pSession, hEventMulti, cMillies);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNoResume: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiWaitNsAbsIntr} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti,
+ uint64_t uNsTimeout)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr: caller='%s'/%d: hEventMulti=%p uNsTimeout=%RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti, uNsTimeout));
+
+ int rc = SUPSemEventMultiWaitNsAbsIntr(pDevIns->Internal.s.pVMR3->pSession, hEventMulti, uNsTimeout);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiWaitNsRelIntr} */
+static DECLCALLBACK(int) pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr(PPDMDEVINS pDevIns, SUPSEMEVENTMULTI hEventMulti,
+ uint64_t cNsTimeout)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr: caller='%s'/%d: hEventMulti=%p cNsTimeout=%RU64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, hEventMulti, cNsTimeout));
+
+ int rc = SUPSemEventMultiWaitNsRelIntr(pDevIns->Internal.s.pVMR3->pSession, hEventMulti, cNsTimeout);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSUPSemEventMultiGetResolution} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_SUPSemEventMultiGetResolution(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiGetResolution: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ uint32_t cNsResolution = SUPSemEventMultiGetResolution(pDevIns->Internal.s.pVMR3->pSession);
+
+ LogFlow(("pdmR3DevHlp_SUPSemEventMultiGetResolution: caller='%s'/%d: returns %u\n", pDevIns->pReg->szName, pDevIns->iInstance, cNsResolution));
+ return cNsResolution;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectInit} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectInit(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_CritSectInit: caller='%s'/%d: pCritSect=%p pszNameFmt=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pCritSect, pszNameFmt, pszNameFmt));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = pdmR3CritSectInitDevice(pVM, pDevIns, pCritSect, RT_SRC_POS_ARGS, pszNameFmt, va);
+
+ LogFlow(("pdmR3DevHlp_CritSectInit: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectGetNop} */
+static DECLCALLBACK(PPDMCRITSECT) pdmR3DevHlp_CritSectGetNop(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ PPDMCRITSECT pCritSect = PDMR3CritSectGetNop(pVM);
+ LogFlow(("pdmR3DevHlp_CritSectGetNop: caller='%s'/%d: return %p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pCritSect));
+ return pCritSect;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSetDeviceCritSect} */
+static DECLCALLBACK(int) pdmR3DevHlp_SetDeviceCritSect(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+ /*
+ * Validate input.
+ *
+ * Note! We only allow the automatically created default critical section
+ * to be replaced by this API.
+ */
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertPtrReturn(pCritSect, VERR_INVALID_POINTER);
+ LogFlow(("pdmR3DevHlp_SetDeviceCritSect: caller='%s'/%d: pCritSect=%p (%s)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pCritSect, pCritSect->s.pszName));
+ AssertReturn(PDMCritSectIsInitialized(pCritSect), VERR_INVALID_PARAMETER);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ VM_ASSERT_EMT(pVM);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+
+ AssertReturn(pDevIns->pCritSectRoR3, VERR_PDM_DEV_IPE_1);
+ AssertReturn(pDevIns->pCritSectRoR3->s.fAutomaticDefaultCritsect, VERR_WRONG_ORDER);
+ AssertReturn(!pDevIns->pCritSectRoR3->s.fUsedByTimerOrSimilar, VERR_WRONG_ORDER);
+ AssertReturn(pDevIns->pCritSectRoR3 != pCritSect, VERR_INVALID_PARAMETER);
+
+ /*
+ * Replace the critical section and destroy the automatic default section.
+ */
+ PPDMCRITSECT pOldCritSect = pDevIns->pCritSectRoR3;
+ pDevIns->pCritSectRoR3 = pCritSect;
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_CHANGED_CRITSECT;
+
+ Assert(RT_BOOL(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED) == pDevIns->fR0Enabled);
+ if ( (pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED)
+ && !(pDevIns->Internal.s.pDevR3->pReg->fFlags & PDM_DEVREG_FLAGS_NEW_STYLE))
+ {
+ PDMDEVICECOMPATSETCRITSECTREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.idxR0Device = pDevIns->Internal.s.idxR0Device;
+ Req.pDevInsR3 = pDevIns;
+ Req.pCritSectR3 = pCritSect;
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT, 0, &Req.Hdr);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ PDMR3CritSectDelete(pVM, pOldCritSect);
+ Assert((uintptr_t)pOldCritSect - (uintptr_t)pDevIns < pDevIns->cbRing3);
+
+ LogFlow(("pdmR3DevHlp_SetDeviceCritSect: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
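+
+/* Illustrative sketch (not upstream code): two cooperating devices sharing a
+ * lock by replacing the second device's automatically created default
+ * section.  Per the asserts above this must happen on the EMT while the VM
+ * is still in VMSTATE_CREATING.
+ */
+#if 0 /* documentation only */
+static int exampleShareCritSect(PPDMDEVINS pDevIns, PPDMCRITSECT pSharedCritSect)
+{
+    return pdmR3DevHlp_SetDeviceCritSect(pDevIns, pSharedCritSect);
+}
+#endif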
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectYield} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectYield(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMR3CritSectYield(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectEnter} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectEnter(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, int rcBusy)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectEnter(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectEnterDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectEnterDebug(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectEnterDebug(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectTryEnter} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectTryEnter(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectTryEnter(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectTryEnterDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectTryEnterDebug(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectTryEnterDebug(pDevIns->Internal.s.pVMR3, pCritSect, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectLeave} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectLeave(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectLeave(pDevIns->Internal.s.pVMR3, pCritSect);
+}
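+
+/* Illustrative sketch (not upstream code): the canonical enter/leave pairing.
+ * In ring-3 the enter call blocks until the section is free, so the rcBusy
+ * status (VERR_SEM_BUSY here) only comes into play for ring-0/raw-mode
+ * callers sharing the same section.
+ */
+#if 0 /* documentation only */
+static void exampleCritSectUsage(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+    int rc = pdmR3DevHlp_CritSectEnter(pDevIns, pCritSect, VERR_SEM_BUSY);
+    AssertRC(rc);
+    /* ... access the state guarded by pCritSect ... */
+    pdmR3DevHlp_CritSectLeave(pDevIns, pCritSect);
+}
+#endif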
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectIsOwner} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectIsOwner(PPDMDEVINS pDevIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectIsOwner(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectIsInitialized} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectIsInitialized(PPDMDEVINS pDevIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectIsInitialized(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectHasWaiters} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectHasWaiters(PPDMDEVINS pDevIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectHasWaiters(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectGetRecursion} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_CritSectGetRecursion(PPDMDEVINS pDevIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectGetRecursion(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectScheduleExitEvent} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectScheduleExitEvent(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect,
+ SUPSEMEVENT hEventToSignal)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMHCCritSectScheduleExitEvent(pCritSect, hEventToSignal);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectDelete} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectDelete(PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMR3CritSectDelete(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwInit} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwInit(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_CritSectRwInit: caller='%s'/%d: pCritSect=%p pszNameFmt=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pCritSect, pszNameFmt, pszNameFmt));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = pdmR3CritSectRwInitDevice(pVM, pDevIns, pCritSect, RT_SRC_POS_ARGS, pszNameFmt, va);
+
+ LogFlow(("pdmR3DevHlp_CritSectRwInit: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwDelete} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwDelete(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMR3CritSectRwDelete(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwEnterShared} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwEnterShared(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, int rcBusy)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwEnterShared(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwEnterSharedDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwEnterSharedDebug(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, int rcBusy,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwEnterSharedDebug(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwTryEnterShared} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwTryEnterShared(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwTryEnterShared(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwTryEnterSharedDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwTryEnterSharedDebug(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwTryEnterSharedDebug(pDevIns->Internal.s.pVMR3, pCritSect, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwLeaveShared} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwLeaveShared(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwLeaveShared(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwEnterExcl} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwEnterExcl(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, int rcBusy)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwEnterExcl(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwEnterExclDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwEnterExclDebug(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, int rcBusy,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwEnterExclDebug(pDevIns->Internal.s.pVMR3, pCritSect, rcBusy, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwTryEnterExcl} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwTryEnterExcl(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwTryEnterExcl(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwTryEnterExclDebug} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwTryEnterExclDebug(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwTryEnterExclDebug(pDevIns->Internal.s.pVMR3, pCritSect, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwLeaveExcl} */
+static DECLCALLBACK(int) pdmR3DevHlp_CritSectRwLeaveExcl(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwLeaveExcl(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwIsWriteOwner} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectRwIsWriteOwner(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwIsWriteOwner(pDevIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwIsReadOwner} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectRwIsReadOwner(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, bool fWannaHear)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return PDMCritSectRwIsReadOwner(pDevIns->Internal.s.pVMR3, pCritSect, fWannaHear);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwGetWriteRecursion} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_CritSectRwGetWriteRecursion(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectRwGetWriteRecursion(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwGetWriterReadRecursion} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_CritSectRwGetWriterReadRecursion(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectRwGetWriterReadRecursion(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwGetReadCount} */
+static DECLCALLBACK(uint32_t) pdmR3DevHlp_CritSectRwGetReadCount(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectRwGetReadCount(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCritSectRwIsInitialized} */
+static DECLCALLBACK(bool) pdmR3DevHlp_CritSectRwIsInitialized(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDevIns);
+ return PDMCritSectRwIsInitialized(pCritSect);
+}
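+
+/* Illustrative sketch (not upstream code): read-write section usage.  Any
+ * number of readers may hold the section in shared mode concurrently, while
+ * exclusive (write) mode excludes everyone else.
+ */
+#if 0 /* documentation only */
+static void exampleCritSectRwUsage(PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect)
+{
+    /* Reader path. */
+    if (RT_SUCCESS(pdmR3DevHlp_CritSectRwEnterShared(pDevIns, pCritSect, VERR_SEM_BUSY)))
+    {
+        /* ... read shared state ... */
+        pdmR3DevHlp_CritSectRwLeaveShared(pDevIns, pCritSect);
+    }
+
+    /* Writer path. */
+    if (RT_SUCCESS(pdmR3DevHlp_CritSectRwEnterExcl(pDevIns, pCritSect, VERR_SEM_BUSY)))
+    {
+        /* ... modify shared state ... */
+        pdmR3DevHlp_CritSectRwLeaveExcl(pDevIns, pCritSect);
+    }
+}
+#endif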
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnThreadCreate} */
+static DECLCALLBACK(int) pdmR3DevHlp_ThreadCreate(PPDMDEVINS pDevIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDEV pfnThread,
+ PFNPDMTHREADWAKEUPDEV pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_ThreadCreate: caller='%s'/%d: ppThread=%p pvUser=%p pfnThread=%p pfnWakeup=%p cbStack=%#zx enmType=%d pszName=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName, pszName));
+
+ int rc = pdmR3ThreadCreateDevice(pDevIns->Internal.s.pVMR3, pDevIns, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName);
+
+ LogFlow(("pdmR3DevHlp_ThreadCreate: caller='%s'/%d: returns %Rrc *ppThread=%RTthrd\n", pDevIns->pReg->szName, pDevIns->iInstance,
+ rc, *ppThread));
+ return rc;
+}
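+
+/* Illustrative sketch (not upstream code): the usual device thread shape.
+ * The callback names are hypothetical; looping while the thread state is
+ * PDMTHREADSTATE_RUNNING is the standard contract for PDM threads.
+ */
+#if 0 /* documentation only */
+static DECLCALLBACK(int) exampleThreadFn(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
+{
+    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
+        return VINF_SUCCESS;
+    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
+    {
+        /* ... block on an event, then do the work ... */
+    }
+    return VINF_SUCCESS;
+}
+
+static int exampleThreadSetup(PPDMDEVINS pDevIns, PPPDMTHREAD ppThread, PFNPDMTHREADWAKEUPDEV pfnWakeup)
+{
+    return pdmR3DevHlp_ThreadCreate(pDevIns, ppThread, NULL /*pvUser*/, exampleThreadFn,
+                                    pfnWakeup, 0 /*cbStack*/, RTTHREADTYPE_IO, "ExampleThr");
+}
+#endif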
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSetAsyncNotification} */
+static DECLCALLBACK(int) pdmR3DevHlp_SetAsyncNotification(PPDMDEVINS pDevIns, PFNPDMDEVASYNCNOTIFY pfnAsyncNotify)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT0(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_SetAsyncNotification: caller='%s'/%d: pfnAsyncNotify=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pfnAsyncNotify));
+
+ int rc = VINF_SUCCESS;
+ AssertStmt(pfnAsyncNotify, rc = VERR_INVALID_PARAMETER);
+ AssertStmt(!pDevIns->Internal.s.pfnAsyncNotify, rc = VERR_WRONG_ORDER);
+ AssertStmt(pDevIns->Internal.s.fIntFlags & (PDMDEVINSINT_FLAGS_SUSPENDED | PDMDEVINSINT_FLAGS_RESET), rc = VERR_WRONG_ORDER);
+ VMSTATE enmVMState = VMR3GetState(pDevIns->Internal.s.pVMR3);
+ AssertStmt( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS,
+ rc = VERR_INVALID_STATE);
+
+ if (RT_SUCCESS(rc))
+ pDevIns->Internal.s.pfnAsyncNotify = pfnAsyncNotify;
+
+ LogFlow(("pdmR3DevHlp_SetAsyncNotification: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnAsyncNotificationCompleted} */
+static DECLCALLBACK(void) pdmR3DevHlp_AsyncNotificationCompleted(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ if ( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS)
+ {
+ LogFlow(("pdmR3DevHlp_AsyncNotificationCompleted: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ VMR3AsyncPdmNotificationWakeupU(pVM->pUVM);
+ }
+ else
+ LogFlow(("pdmR3DevHlp_AsyncNotificationCompleted: caller='%s'/%d: enmVMState=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, enmVMState));
+}
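+
+/* Illustrative sketch (not upstream code): how a device with outstanding I/O
+ * would use the pair above from its suspend notification.  The pending-I/O
+ * query is hypothetical; the async-notify callback returns true once the
+ * device has quiesced.
+ */
+#if 0 /* documentation only */
+static DECLCALLBACK(bool) exampleAsyncDone(PPDMDEVINS pDevIns)
+{
+    return !exampleHasPendingIo(pDevIns); /* hypothetical query */
+}
+
+static DECLCALLBACK(void) exampleSuspend(PPDMDEVINS pDevIns)
+{
+    if (exampleHasPendingIo(pDevIns))
+        pdmR3DevHlp_SetAsyncNotification(pDevIns, exampleAsyncDone);
+    /* When the last request completes (possibly on another thread):
+     *     pdmR3DevHlp_AsyncNotificationCompleted(pDevIns); */
+}
+#endif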
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnRTCRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_RTCRegister(PPDMDEVINS pDevIns, PCPDMRTCREG pRtcReg, PCPDMRTCHLP *ppRtcHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: pRtcReg=%p:{.u32Version=%#x, .pfnWrite=%p, .pfnRead=%p} ppRtcHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pRtcReg, pRtcReg->u32Version, pRtcReg->pfnWrite,
+ pRtcReg->pfnRead, ppRtcHlp));
+
+ /*
+ * Validate input.
+ */
+ if (pRtcReg->u32Version != PDM_RTCREG_VERSION)
+ {
+ AssertMsgFailed(("u32Version=%#x expected %#x\n", pRtcReg->u32Version,
+ PDM_RTCREG_VERSION));
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: returns %Rrc (version)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+ if ( !pRtcReg->pfnWrite
+ || !pRtcReg->pfnRead)
+ {
+ Assert(pRtcReg->pfnWrite);
+ Assert(pRtcReg->pfnRead);
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: returns %Rrc (callbacks)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (!ppRtcHlp)
+ {
+ Assert(ppRtcHlp);
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: returns %Rrc (ppRtcHlp)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Only one RTC device.
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ if (pVM->pdm.s.pRtc)
+ {
+ AssertMsgFailed(("Only one RTC device is supported!\n"));
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Allocate and initialize the RTC structure.
+ */
+ int rc = VINF_SUCCESS;
+ PPDMRTC pRtc = (PPDMRTC)MMR3HeapAlloc(pDevIns->Internal.s.pVMR3, MM_TAG_PDM_DEVICE, sizeof(*pRtc));
+ if (pRtc)
+ {
+ pRtc->pDevIns = pDevIns;
+ pRtc->Reg = *pRtcReg;
+ pVM->pdm.s.pRtc = pRtc;
+
+ /* set the helper pointer. */
+ *ppRtcHlp = &g_pdmR3DevRtcHlp;
+ Log(("PDM: Registered RTC device '%s'/%d pDevIns=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ LogFlow(("pdmR3DevHlp_RTCRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
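+
+/* Illustrative sketch (not upstream code): the provider side of the RTC
+ * contract.  The two callbacks are hypothetical; their shapes follow the
+ * pfnWrite/pfnRead invocations made by the CMOS helpers further down.
+ */
+#if 0 /* documentation only */
+static int exampleRtcRegister(PPDMDEVINS pDevIns, PCPDMRTCHLP *ppRtcHlp)
+{
+    PDMRTCREG RtcReg;
+    RT_ZERO(RtcReg);
+    RtcReg.u32Version = PDM_RTCREG_VERSION;
+    RtcReg.pfnWrite   = exampleRtcWrite;  /* int (pDevIns, iReg, u8Value)  */
+    RtcReg.pfnRead    = exampleRtcRead;   /* int (pDevIns, iReg, pu8Value) */
+    return pdmR3DevHlp_RTCRegister(pDevIns, &RtcReg, ppRtcHlp);
+}
+#endif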
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMARegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_DMARegister(PPDMDEVINS pDevIns, unsigned uChannel, PFNDMATRANSFERHANDLER pfnTransferHandler, void *pvUser)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMARegister: caller='%s'/%d: uChannel=%d pfnTransferHandler=%p pvUser=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uChannel, pfnTransferHandler, pvUser));
+ int rc = VINF_SUCCESS;
+ if (pVM->pdm.s.pDmac)
+ pVM->pdm.s.pDmac->Reg.pfnRegister(pVM->pdm.s.pDmac->pDevIns, uChannel, pDevIns, pfnTransferHandler, pvUser);
+ else
+ {
+ AssertMsgFailed(("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ rc = VERR_PDM_NO_DMAC_INSTANCE;
+ }
+ LogFlow(("pdmR3DevHlp_DMARegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMAReadMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_DMAReadMemory(PPDMDEVINS pDevIns, unsigned uChannel, void *pvBuffer, uint32_t off, uint32_t cbBlock, uint32_t *pcbRead)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMAReadMemory: caller='%s'/%d: uChannel=%d pvBuffer=%p off=%#x cbBlock=%#x pcbRead=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uChannel, pvBuffer, off, cbBlock, pcbRead));
+ int rc = VINF_SUCCESS;
+ if (pVM->pdm.s.pDmac)
+ {
+ uint32_t cb = pVM->pdm.s.pDmac->Reg.pfnReadMemory(pVM->pdm.s.pDmac->pDevIns, uChannel, pvBuffer, off, cbBlock);
+ if (pcbRead)
+ *pcbRead = cb;
+ }
+ else
+ {
+ AssertMsgFailed(("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ rc = VERR_PDM_NO_DMAC_INSTANCE;
+ }
+ LogFlow(("pdmR3DevHlp_DMAReadMemory: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMAWriteMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_DMAWriteMemory(PPDMDEVINS pDevIns, unsigned uChannel, const void *pvBuffer, uint32_t off, uint32_t cbBlock, uint32_t *pcbWritten)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMAWriteMemory: caller='%s'/%d: uChannel=%d pvBuffer=%p off=%#x cbBlock=%#x pcbWritten=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uChannel, pvBuffer, off, cbBlock, pcbWritten));
+ int rc = VINF_SUCCESS;
+ if (pVM->pdm.s.pDmac)
+ {
+ uint32_t cb = pVM->pdm.s.pDmac->Reg.pfnWriteMemory(pVM->pdm.s.pDmac->pDevIns, uChannel, pvBuffer, off, cbBlock);
+ if (pcbWritten)
+ *pcbWritten = cb;
+ }
+ else
+ {
+ AssertMsgFailed(("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ rc = VERR_PDM_NO_DMAC_INSTANCE;
+ }
+ LogFlow(("pdmR3DevHlp_DMAWriteMemory: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMASetDREQ} */
+static DECLCALLBACK(int) pdmR3DevHlp_DMASetDREQ(PPDMDEVINS pDevIns, unsigned uChannel, unsigned uLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMASetDREQ: caller='%s'/%d: uChannel=%d uLevel=%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uChannel, uLevel));
+ int rc = VINF_SUCCESS;
+ if (pVM->pdm.s.pDmac)
+ pVM->pdm.s.pDmac->Reg.pfnSetDREQ(pVM->pdm.s.pDmac->pDevIns, uChannel, uLevel);
+ else
+ {
+ AssertMsgFailed(("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ rc = VERR_PDM_NO_DMAC_INSTANCE;
+ }
+ LogFlow(("pdmR3DevHlp_DMASetDREQ: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMAGetChannelMode} */
+static DECLCALLBACK(uint8_t) pdmR3DevHlp_DMAGetChannelMode(PPDMDEVINS pDevIns, unsigned uChannel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMAGetChannelMode: caller='%s'/%d: uChannel=%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uChannel));
+ uint8_t u8Mode;
+ if (pVM->pdm.s.pDmac)
+ u8Mode = pVM->pdm.s.pDmac->Reg.pfnGetChannelMode(pVM->pdm.s.pDmac->pDevIns, uChannel);
+ else
+ {
+ AssertMsgFailed(("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ u8Mode = 3 << 2 /* illegal mode type */;
+ }
+ LogFlow(("pdmR3DevHlp_DMAGetChannelMode: caller='%s'/%d: returns %#04x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, u8Mode));
+ return u8Mode;
+}
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMASchedule} */
+static DECLCALLBACK(void) pdmR3DevHlp_DMASchedule(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_DMASchedule: caller='%s'/%d: VM_FF_PDM_DMA %d -> 1\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
+
+ AssertMsg(pVM->pdm.s.pDmac, ("Configuration error: No DMA controller available. This could also be an init order problem!\n"));
+ VM_FF_SET(pVM, VM_FF_PDM_DMA);
+ VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+}
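+
+/* Illustrative sketch (not upstream code): a device registering a transfer
+ * handler on ISA DMA channel 2 (the classic floppy channel), then raising
+ * DREQ and kicking the scheduler so the transfer runs.  The handler itself
+ * is hypothetical.
+ */
+#if 0 /* documentation only */
+static int exampleDmaSetup(PPDMDEVINS pDevIns, PFNDMATRANSFERHANDLER pfnXfer, void *pvUser)
+{
+    int rc = pdmR3DevHlp_DMARegister(pDevIns, 2 /*uChannel*/, pfnXfer, pvUser);
+    if (RT_SUCCESS(rc))
+    {
+        pdmR3DevHlp_DMASetDREQ(pDevIns, 2 /*uChannel*/, 1 /*uLevel*/);
+        pdmR3DevHlp_DMASchedule(pDevIns); /* raises VM_FF_PDM_DMA */
+    }
+    return rc;
+}
+#endif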
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCMOSWrite} */
+static DECLCALLBACK(int) pdmR3DevHlp_CMOSWrite(PPDMDEVINS pDevIns, unsigned iReg, uint8_t u8Value)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ LogFlow(("pdmR3DevHlp_CMOSWrite: caller='%s'/%d: iReg=%#04x u8Value=%#04x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, iReg, u8Value));
+ int rc;
+ if (pVM->pdm.s.pRtc)
+ {
+ PPDMDEVINS pDevInsRtc = pVM->pdm.s.pRtc->pDevIns;
+ rc = PDMCritSectEnter(pVM, pDevInsRtc->pCritSectRoR3, VERR_IGNORED);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pVM->pdm.s.pRtc->Reg.pfnWrite(pDevInsRtc, iReg, u8Value);
+ PDMCritSectLeave(pVM, pDevInsRtc->pCritSectRoR3);
+ }
+ }
+ else
+ rc = VERR_PDM_NO_RTC_INSTANCE;
+
+ LogFlow(("pdmR3DevHlp_CMOSWrite: caller='%s'/%d: return %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCMOSRead} */
+static DECLCALLBACK(int) pdmR3DevHlp_CMOSRead(PPDMDEVINS pDevIns, unsigned iReg, uint8_t *pu8Value)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ LogFlow(("pdmR3DevHlp_CMOSRead: caller='%s'/%d: iReg=%#04x pu8Value=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, iReg, pu8Value));
+ int rc;
+ if (pVM->pdm.s.pRtc)
+ {
+ PPDMDEVINS pDevInsRtc = pVM->pdm.s.pRtc->pDevIns;
+ rc = PDMCritSectEnter(pVM, pDevInsRtc->pCritSectRoR3, VERR_IGNORED);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pVM->pdm.s.pRtc->Reg.pfnRead(pDevInsRtc, iReg, pu8Value);
+ PDMCritSectLeave(pVM, pDevInsRtc->pCritSectRoR3);
+ }
+ }
+ else
+ rc = VERR_PDM_NO_RTC_INSTANCE;
+
+ LogFlow(("pdmR3DevHlp_CMOSRead: caller='%s'/%d: return %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
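+
+/* Illustrative sketch (not upstream code): writing a byte into the CMOS/RTC
+ * NVRAM and reading it back through the helpers above.  Register 0x15 is
+ * just an example offset (conventional-memory size, low byte, in the
+ * traditional PC layout).
+ */
+#if 0 /* documentation only */
+static void exampleCmosRoundTrip(PPDMDEVINS pDevIns)
+{
+    uint8_t u8 = 0;
+    int rc = pdmR3DevHlp_CMOSWrite(pDevIns, 0x15 /*iReg*/, 0x80 /*u8Value*/);
+    if (RT_SUCCESS(rc))
+        rc = pdmR3DevHlp_CMOSRead(pDevIns, 0x15 /*iReg*/, &u8);
+    AssertRC(rc);
+}
+#endif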
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnAssertEMT} */
+static DECLCALLBACK(bool) pdmR3DevHlp_AssertEMT(PPDMDEVINS pDevIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (VM_IS_EMT(pDevIns->Internal.s.pVMR3))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertEMT '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnAssertOther} */
+static DECLCALLBACK(bool) pdmR3DevHlp_AssertOther(PPDMDEVINS pDevIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!VM_IS_EMT(pDevIns->Internal.s.pVMR3))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertOther '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnLdrGetRCInterfaceSymbols} */
+static DECLCALLBACK(int) pdmR3DevHlp_LdrGetRCInterfaceSymbols(PPDMDEVINS pDevIns, void *pvInterface, size_t cbInterface,
+ const char *pszSymPrefix, const char *pszSymList)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_PDMLdrGetRCInterfaceSymbols: caller='%s'/%d: pvInterface=%p cbInterface=%zu pszSymPrefix=%p:{%s} pszSymList=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pvInterface, cbInterface, pszSymPrefix, pszSymPrefix, pszSymList, pszSymList));
+
+ int rc;
+ if ( strncmp(pszSymPrefix, "dev", 3) == 0
+ && RTStrIStr(pszSymPrefix + 3, pDevIns->pReg->szName) != NULL)
+ {
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ rc = PDMR3LdrGetInterfaceSymbols(pDevIns->Internal.s.pVMR3,
+ pvInterface, cbInterface,
+ pDevIns->pReg->pszRCMod, pDevIns->Internal.s.pDevR3->pszRCSearchPath,
+ pszSymPrefix, pszSymList,
+ false /*fRing0OrRC*/);
+ else
+ {
+ AssertMsgFailed(("Not a raw-mode enabled device\n"));
+ rc = VERR_PERMISSION_DENIED;
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("Invalid prefix '%s' for '%s'; must start with 'dev' and contain the device name!\n",
+ pszSymPrefix, pDevIns->pReg->szName));
+ rc = VERR_INVALID_NAME;
+ }
+
+ LogFlow(("pdmR3DevHlp_PDMLdrGetRCInterfaceSymbols: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName,
+ pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnLdrGetR0InterfaceSymbols} */
+static DECLCALLBACK(int) pdmR3DevHlp_LdrGetR0InterfaceSymbols(PPDMDEVINS pDevIns, void *pvInterface, size_t cbInterface,
+ const char *pszSymPrefix, const char *pszSymList)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_PDMLdrGetR0InterfaceSymbols: caller='%s'/%d: pvInterface=%p cbInterface=%zu pszSymPrefix=%p:{%s} pszSymList=%p:{%s}\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pvInterface, cbInterface, pszSymPrefix, pszSymPrefix, pszSymList, pszSymList));
+
+ int rc;
+ if ( strncmp(pszSymPrefix, "dev", 3) == 0
+ && RTStrIStr(pszSymPrefix + 3, pDevIns->pReg->szName) != NULL)
+ {
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_R0)
+ rc = PDMR3LdrGetInterfaceSymbols(pDevIns->Internal.s.pVMR3,
+ pvInterface, cbInterface,
+ pDevIns->pReg->pszR0Mod, pDevIns->Internal.s.pDevR3->pszR0SearchPath,
+ pszSymPrefix, pszSymList,
+ true /*fRing0OrRC*/);
+ else
+ {
+ AssertMsgFailed(("Not a ring-0 enabled device\n"));
+ rc = VERR_PERMISSION_DENIED;
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("Invalid prefix '%s' for '%s'; must start with 'dev' and contain the device name!\n",
+ pszSymPrefix, pDevIns->pReg->szName));
+ rc = VERR_INVALID_NAME;
+ }
+
+ LogFlow(("pdmR3DevHlp_PDMLdrGetR0InterfaceSymbols: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName,
+ pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnCallR0} */
+static DECLCALLBACK(int) pdmR3DevHlp_CallR0(PPDMDEVINS pDevIns, uint32_t uOperation, uint64_t u64Arg)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+ LogFlow(("pdmR3DevHlp_CallR0: caller='%s'/%d: uOperation=%#x u64Arg=%#RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, uOperation, u64Arg));
+
+ /*
+ * Resolve the ring-0 entry point. There is no need to remember this like
+ * we do for drivers, since this is mainly for construction-time hacks and
+ * other things that aren't performance critical.
+ */
+ int rc;
+ if (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_R0)
+ {
+ /*
+ * Make the ring-0 call.
+ */
+ PDMDEVICEGENCALLREQ Req;
+ RT_ZERO(Req.Params);
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pDevInsR3 = pDevIns;
+ Req.idxR0Device = pDevIns->Internal.s.idxR0Device;
+ Req.enmCall = PDMDEVICEGENCALL_REQUEST;
+ Req.Params.Req.uReq = uOperation;
+ Req.Params.Req.uArg = u64Arg;
+ rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PDM_DEVICE_GEN_CALL, 0, &Req.Hdr);
+ }
+ else
+ rc = VERR_ACCESS_DENIED;
+ LogFlow(("pdmR3DevHlp_CallR0: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName,
+ pDevIns->iInstance, rc));
+ return rc;
+}
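+
+/* Illustrative sketch (not upstream code): a ring-0 capable device forwarding
+ * a private operation to its ring-0 part.  The operation code is hypothetical
+ * and is interpreted by the device's own ring-0 request handler.
+ */
+#if 0 /* documentation only */
+#define EXAMPLE_DEV_REQ_RESET_STATS UINT32_C(0x00000001) /* hypothetical */
+
+static int exampleResetStatsInR0(PPDMDEVINS pDevIns)
+{
+    return pdmR3DevHlp_CallR0(pDevIns, EXAMPLE_DEV_REQ_RESET_STATS, 0 /*u64Arg*/);
+}
+#endif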
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3DevHlp_VMGetSuspendReason(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3DevHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3DevHlp_VMGetResumeReason(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3DevHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetUVM} */
+static DECLCALLBACK(PUVM) pdmR3DevHlp_GetUVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_GetUVM: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns->Internal.s.pVMR3->pUVM));
+ return pDevIns->Internal.s.pVMR3->pUVM;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetVM} */
+static DECLCALLBACK(PVM) pdmR3DevHlp_GetVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_GetVM: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns->Internal.s.pVMR3));
+ return pDevIns->Internal.s.pVMR3;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetVMCPU} */
+static DECLCALLBACK(PVMCPU) pdmR3DevHlp_GetVMCPU(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_GetVMCPU: caller='%s'/%d for CPU %u\n", pDevIns->pReg->szName, pDevIns->iInstance, VMMGetCpuId(pDevIns->Internal.s.pVMR3)));
+ return VMMGetCpu(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetCurrentCpuId} */
+static DECLCALLBACK(VMCPUID) pdmR3DevHlp_GetCurrentCpuId(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VMCPUID idCpu = VMMGetCpuId(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_GetCurrentCpuId: caller='%s'/%d for CPU %u\n", pDevIns->pReg->szName, pDevIns->iInstance, idCpu));
+ return idCpu;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIBusRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIBusRegister(PPDMDEVINS pDevIns, PPDMPCIBUSREGR3 pPciBusReg,
+ PCPDMPCIHLPR3 *ppPciHlp, uint32_t *piBus)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_PCIBusRegister: caller='%s'/%d: pPciBusReg=%p:{.u32Version=%#x, .pfnRegisterR3=%p, .pfnIORegionRegisterR3=%p, "
+ ".pfnInterceptConfigAccesses=%p, pfnConfigRead=%p, pfnConfigWrite=%p, .pfnSetIrqR3=%p, .u32EndVersion=%#x} ppPciHlpR3=%p piBus=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciBusReg, pPciBusReg->u32Version, pPciBusReg->pfnRegisterR3,
+ pPciBusReg->pfnIORegionRegisterR3, pPciBusReg->pfnInterceptConfigAccesses, pPciBusReg->pfnConfigRead,
+ pPciBusReg->pfnConfigWrite, pPciBusReg->pfnSetIrqR3, pPciBusReg->u32EndVersion, ppPciHlp, piBus));
+
+ /*
+ * Validate the structure and output parameters.
+ */
+ AssertLogRelMsgReturn(pPciBusReg->u32Version == PDM_PCIBUSREGR3_VERSION,
+ ("u32Version=%#x expected %#x\n", pPciBusReg->u32Version, PDM_PCIBUSREGR3_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pPciBusReg->pfnRegisterR3, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(pPciBusReg->pfnRegisterMsiR3, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPciBusReg->pfnIORegionRegisterR3, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPciBusReg->pfnInterceptConfigAccesses, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPciBusReg->pfnConfigWrite, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPciBusReg->pfnConfigRead, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPciBusReg->pfnSetIrqR3, VERR_INVALID_POINTER);
+ AssertLogRelMsgReturn(pPciBusReg->u32EndVersion == PDM_PCIBUSREGR3_VERSION,
+ ("u32EndVersion=%#x expected %#x\n", pPciBusReg->u32EndVersion, PDM_PCIBUSREGR3_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppPciHlp, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(piBus, VERR_INVALID_POINTER);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+
+ /*
+ * Find free PCI bus entry.
+ */
+ unsigned iBus;
+ for (iBus = 0; iBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses); iBus++)
+ if (!pVM->pdm.s.aPciBuses[iBus].pDevInsR3)
+ break;
+ AssertLogRelMsgReturn(iBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses),
+ ("Too many PCI buses. Max=%u\n", RT_ELEMENTS(pVM->pdm.s.aPciBuses)),
+ VERR_OUT_OF_RESOURCES);
+ PPDMPCIBUS pPciBus = &pVM->pdm.s.aPciBuses[iBus];
+
+ /*
+ * Init the R3 bits.
+ */
+ pPciBus->iBus = iBus;
+ pPciBus->pDevInsR3 = pDevIns;
+ pPciBus->pfnRegister = pPciBusReg->pfnRegisterR3;
+ pPciBus->pfnRegisterMsi = pPciBusReg->pfnRegisterMsiR3;
+ pPciBus->pfnIORegionRegister = pPciBusReg->pfnIORegionRegisterR3;
+ pPciBus->pfnInterceptConfigAccesses = pPciBusReg->pfnInterceptConfigAccesses;
+ pPciBus->pfnConfigRead = pPciBusReg->pfnConfigRead;
+ pPciBus->pfnConfigWrite = pPciBusReg->pfnConfigWrite;
+ pPciBus->pfnSetIrqR3 = pPciBusReg->pfnSetIrqR3;
+
+ Log(("PDM: Registered PCI bus device '%s'/%d pDevIns=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+
+ /* set the helper pointer and return. */
+ *ppPciHlp = &g_pdmR3DevPciHlp;
+ if (piBus)
+ *piBus = iBus;
+ LogFlow(("pdmR3DevHlp_PCIBusRegister: caller='%s'/%d: returns %Rrc *piBus=%u\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS, iBus));
+ return VINF_SUCCESS;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIommuRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_IommuRegister(PPDMDEVINS pDevIns, PPDMIOMMUREGR3 pIommuReg, PCPDMIOMMUHLPR3 *ppIommuHlp,
+ uint32_t *pidxIommu)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_IommuRegister: caller='%s'/%d: pIommuReg=%p:{.u32Version=%#x, .u32TheEnd=%#x } ppIommuHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pIommuReg, pIommuReg->u32Version, pIommuReg->u32TheEnd, ppIommuHlp));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pIommuReg->u32Version == PDM_IOMMUREGR3_VERSION,
+ ("%s/%d: u32Version=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pIommuReg->u32Version, PDM_IOMMUREGR3_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pIommuReg->pfnMemAccess, VERR_INVALID_POINTER);
+ AssertPtrReturn(pIommuReg->pfnMemBulkAccess, VERR_INVALID_POINTER);
+ AssertPtrReturn(pIommuReg->pfnMsiRemap, VERR_INVALID_POINTER);
+ AssertMsgReturn(pIommuReg->u32TheEnd == PDM_IOMMUREGR3_VERSION,
+ ("%s/%d: u32TheEnd=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pIommuReg->u32TheEnd, PDM_IOMMUREGR3_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppIommuHlp, VERR_INVALID_POINTER);
+
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Find free IOMMU slot.
+ * The IOMMU at the root complex is the one at index 0.
+ */
+ unsigned idxIommu = 0;
+#if 0
+ for (idxIommu = 0; idxIommu < RT_ELEMENTS(pVM->pdm.s.aIommus); idxIommu++)
+ if (!pVM->pdm.s.aIommus[idxIommu].pDevInsR3)
+ break;
+ AssertLogRelMsgReturn(idxIommu < RT_ELEMENTS(pVM->pdm.s.aIommus),
+ ("Too many IOMMUs. Max=%u\n", RT_ELEMENTS(pVM->pdm.s.aIommus)),
+ VERR_OUT_OF_RESOURCES);
+#else
+ /* Currently we support only a single IOMMU. */
+ AssertMsgReturn(!pVM->pdm.s.aIommus[0].pDevInsR3,
+ ("%s/%u: Only one IOMMU device is supported!\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_ALREADY_EXISTS);
+#endif
+ PPDMIOMMUR3 pIommu = &pVM->pdm.s.aIommus[idxIommu];
+
+ /*
+ * Init the R3 bits.
+ */
+ pIommu->idxIommu = idxIommu;
+ pIommu->pDevInsR3 = pDevIns;
+ pIommu->pfnMemAccess = pIommuReg->pfnMemAccess;
+ pIommu->pfnMemBulkAccess = pIommuReg->pfnMemBulkAccess;
+ pIommu->pfnMsiRemap = pIommuReg->pfnMsiRemap;
+ Log(("PDM: Registered IOMMU device '%s'/%d pDevIns=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+
+ /* Set the helper pointer and return. */
+ *ppIommuHlp = &g_pdmR3DevIommuHlp;
+ if (pidxIommu)
+ *pidxIommu = idxIommu;
+ LogFlow(("pdmR3DevHlp_IommuRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
+
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPICRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PICRegister(PPDMDEVINS pDevIns, PPDMPICREG pPicReg, PCPDMPICHLP *ppPicHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_PICRegister: caller='%s'/%d: pPicReg=%p:{.u32Version=%#x, .pfnSetIrq=%p, .pfnGetInterrupt=%p, .u32TheEnd=%#x } ppPicHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPicReg, pPicReg->u32Version, pPicReg->pfnSetIrq, pPicReg->pfnGetInterrupt, pPicReg->u32TheEnd, ppPicHlp));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pPicReg->u32Version == PDM_PICREG_VERSION,
+ ("%s/%d: u32Version=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pPicReg->u32Version, PDM_PICREG_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pPicReg->pfnSetIrq, VERR_INVALID_POINTER);
+ AssertPtrReturn(pPicReg->pfnGetInterrupt, VERR_INVALID_POINTER);
+ AssertMsgReturn(pPicReg->u32TheEnd == PDM_PICREG_VERSION,
+ ("%s/%d: u32TheEnd=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pPicReg->u32TheEnd, PDM_PICREG_VERSION),
+ VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppPicHlp, VERR_INVALID_POINTER);
+
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Only one PIC device.
+ */
+ AssertMsgReturn(pVM->pdm.s.Pic.pDevInsR3 == NULL, ("%s/%d: Only one PIC!\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_ALREADY_EXISTS);
+
+ /*
+ * Take down the callbacks and instance.
+ */
+ pVM->pdm.s.Pic.pDevInsR3 = pDevIns;
+ pVM->pdm.s.Pic.pfnSetIrqR3 = pPicReg->pfnSetIrq;
+ pVM->pdm.s.Pic.pfnGetInterruptR3 = pPicReg->pfnGetInterrupt;
+ Log(("PDM: Registered PIC device '%s'/%d pDevIns=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+
+ /* set the helper pointer and return. */
+ *ppPicHlp = &g_pdmR3DevPicHlp;
+ LogFlow(("pdmR3DevHlp_PICRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
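+
+/*
+ * Usage sketch (hypothetical PIC device constructor; the field set matches
+ * the validation above, callback names are illustrative):
+ *
+ *     PDMPICREG PicReg;
+ *     PicReg.u32Version      = PDM_PICREG_VERSION;
+ *     PicReg.pfnSetIrq       = picSetIrq;       // raise/lower an IRQ line
+ *     PicReg.pfnGetInterrupt = picGetInterrupt; // fetch the pending vector
+ *     PicReg.u32TheEnd       = PDM_PICREG_VERSION;
+ *     rc = pDevIns->pHlpR3->pfnPICRegister(pDevIns, &PicReg, &pThis->pPicHlpR3);
+ */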
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnApicRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_ApicRegister(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ /*
+ * Validate caller context.
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+     * Only one APIC device. On SMP we have a single logical device covering all LAPICs,
+     * as they need to communicate and share state easily.
+ */
+ AssertMsgReturn(pVM->pdm.s.Apic.pDevInsR3 == NULL,
+ ("%s/%u: Only one APIC device is supported!\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_ALREADY_EXISTS);
+
+ /*
+ * Set the ring-3 and raw-mode bits, leave the ring-0 to ring-0 setup.
+ */
+ pVM->pdm.s.Apic.pDevInsR3 = pDevIns;
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+ pVM->pdm.s.Apic.pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
+ Assert(pVM->pdm.s.Apic.pDevInsRC || !VM_IS_RAW_MODE_ENABLED(pVM));
+#endif
+
+ LogFlow(("pdmR3DevHlp_ApicRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
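+
+/*
+ * Usage sketch (hypothetical APIC device constructor): no registration
+ * structure is needed since ring-0 does its own setup; the call merely
+ * claims the single APIC slot during VM creation:
+ *
+ *     int rc = pDevIns->pHlpR3->pfnApicRegister(pDevIns);
+ *     AssertRCReturn(rc, rc);
+ */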
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoApicRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_IoApicRegister(PPDMDEVINS pDevIns, PPDMIOAPICREG pIoApicReg, PCPDMIOAPICHLP *ppIoApicHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoApicRegister: caller='%s'/%d: pIoApicReg=%p:{.u32Version=%#x, .pfnSetIrq=%p, .pfnSendMsi=%p, .pfnSetEoi=%p, .u32TheEnd=%#x } ppIoApicHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pIoApicReg, pIoApicReg->u32Version, pIoApicReg->pfnSetIrq, pIoApicReg->pfnSendMsi, pIoApicReg->pfnSetEoi, pIoApicReg->u32TheEnd, ppIoApicHlp));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pIoApicReg->u32Version == PDM_IOAPICREG_VERSION,
+ ("%s/%d: u32Version=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pIoApicReg->u32Version, PDM_IOAPICREG_VERSION),
+ VERR_VERSION_MISMATCH);
+ AssertPtrReturn(pIoApicReg->pfnSetIrq, VERR_INVALID_POINTER);
+ AssertPtrReturn(pIoApicReg->pfnSendMsi, VERR_INVALID_POINTER);
+ AssertPtrReturn(pIoApicReg->pfnSetEoi, VERR_INVALID_POINTER);
+ AssertMsgReturn(pIoApicReg->u32TheEnd == PDM_IOAPICREG_VERSION,
+ ("%s/%d: u32TheEnd=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pIoApicReg->u32TheEnd, PDM_IOAPICREG_VERSION),
+ VERR_VERSION_MISMATCH);
+ AssertPtrReturn(ppIoApicHlp, VERR_INVALID_POINTER);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * The I/O APIC requires the APIC to be present (hacks++).
+     * If the I/O APIC does GC stuff, so must the APIC.
+ */
+ AssertMsgReturn(pVM->pdm.s.Apic.pDevInsR3 != NULL, ("Configuration error / Init order error! No APIC!\n"), VERR_WRONG_ORDER);
+
+ /*
+ * Only one I/O APIC device.
+ */
+ AssertMsgReturn(pVM->pdm.s.IoApic.pDevInsR3 == NULL,
+ ("Only one IOAPIC device is supported! (caller %s/%d)\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_ALREADY_EXISTS);
+
+ /*
+ * Initialize the R3 bits.
+ */
+ pVM->pdm.s.IoApic.pDevInsR3 = pDevIns;
+ pVM->pdm.s.IoApic.pfnSetIrqR3 = pIoApicReg->pfnSetIrq;
+ pVM->pdm.s.IoApic.pfnSendMsiR3 = pIoApicReg->pfnSendMsi;
+ pVM->pdm.s.IoApic.pfnSetEoiR3 = pIoApicReg->pfnSetEoi;
+ Log(("PDM: Registered I/O APIC device '%s'/%d pDevIns=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+
+ /* set the helper pointer and return. */
+ *ppIoApicHlp = &g_pdmR3DevIoApicHlp;
+ LogFlow(("pdmR3DevHlp_IoApicRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
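+
+/*
+ * Usage sketch (hypothetical I/O APIC device constructor; fields as
+ * validated above, callback names illustrative). The APIC must already be
+ * registered, otherwise this fails with VERR_WRONG_ORDER:
+ *
+ *     PDMIOAPICREG IoApicReg;
+ *     IoApicReg.u32Version = PDM_IOAPICREG_VERSION;
+ *     IoApicReg.pfnSetIrq  = ioapicSetIrq;  // level/edge IRQ input
+ *     IoApicReg.pfnSendMsi = ioapicSendMsi; // MSI delivery
+ *     IoApicReg.pfnSetEoi  = ioapicSetEoi;  // EOI broadcast
+ *     IoApicReg.u32TheEnd  = PDM_IOAPICREG_VERSION;
+ *     rc = pDevIns->pHlpR3->pfnIoApicRegister(pDevIns, &IoApicReg, &pThis->pIoApicHlpR3);
+ */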
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnHpetRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_HpetRegister(PPDMDEVINS pDevIns, PPDMHPETREG pHpetReg, PCPDMHPETHLPR3 *ppHpetHlpR3)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_HpetRegister: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pHpetReg->u32Version == PDM_HPETREG_VERSION,
+ ("%s/%u: u32Version=%#x expected %#x\n", pDevIns->pReg->szName, pDevIns->iInstance, pHpetReg->u32Version, PDM_HPETREG_VERSION),
+ VERR_VERSION_MISMATCH);
+ AssertPtrReturn(ppHpetHlpR3, VERR_INVALID_POINTER);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Only one HPET device.
+ */
+ AssertMsgReturn(pVM->pdm.s.pHpet == NULL,
+ ("Only one HPET device is supported! (caller %s/%d)\n", pDevIns->pReg->szName, pDevIns->iInstance),
+ VERR_ALREADY_EXISTS);
+
+ /*
+ * Do the job (what there is of it).
+ */
+ pVM->pdm.s.pHpet = pDevIns;
+ *ppHpetHlpR3 = &g_pdmR3DevHpetHlp;
+
+ LogFlow(("pdmR3DevHlp_HpetRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
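+
+/*
+ * Usage sketch (hypothetical HPET device constructor; only the version
+ * field is validated above, so registration is this simple):
+ *
+ *     PDMHPETREG HpetReg;
+ *     HpetReg.u32Version = PDM_HPETREG_VERSION;
+ *     rc = pDevIns->pHlpR3->pfnHpetRegister(pDevIns, &HpetReg, &pThis->pHpetHlpR3);
+ */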
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPciRawRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_PciRawRegister(PPDMDEVINS pDevIns, PPDMPCIRAWREG pPciRawReg, PCPDMPCIRAWHLPR3 *ppPciRawHlpR3)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); RT_NOREF_PV(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_PciRawRegister: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+ /*
+ * Validate input.
+ */
+ if (pPciRawReg->u32Version != PDM_PCIRAWREG_VERSION)
+ {
+ AssertMsgFailed(("u32Version=%#x expected %#x\n", pPciRawReg->u32Version, PDM_PCIRAWREG_VERSION));
+ LogFlow(("pdmR3DevHlp_PciRawRegister: caller='%s'/%d: returns %Rrc (version)\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (!ppPciRawHlpR3)
+ {
+ Assert(ppPciRawHlpR3);
+ LogFlow(("pdmR3DevHlp_PciRawRegister: caller='%s'/%d: returns %Rrc (ppPciRawHlpR3)\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /* set the helper pointer and return. */
+ *ppPciRawHlpR3 = &g_pdmR3DevPciRawHlp;
+ LogFlow(("pdmR3DevHlp_PciRawRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnDMACRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_DMACRegister(PPDMDEVINS pDevIns, PPDMDMACREG pDmacReg, PCPDMDMACHLP *ppDmacHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: pDmacReg=%p:{.u32Version=%#x, .pfnRun=%p, .pfnRegister=%p, .pfnReadMemory=%p, .pfnWriteMemory=%p, .pfnSetDREQ=%p, .pfnGetChannelMode=%p} ppDmacHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDmacReg, pDmacReg->u32Version, pDmacReg->pfnRun, pDmacReg->pfnRegister,
+ pDmacReg->pfnReadMemory, pDmacReg->pfnWriteMemory, pDmacReg->pfnSetDREQ, pDmacReg->pfnGetChannelMode, ppDmacHlp));
+
+ /*
+ * Validate input.
+ */
+ if (pDmacReg->u32Version != PDM_DMACREG_VERSION)
+ {
+ AssertMsgFailed(("u32Version=%#x expected %#x\n", pDmacReg->u32Version,
+ PDM_DMACREG_VERSION));
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: returns %Rrc (version)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+ if ( !pDmacReg->pfnRun
+ || !pDmacReg->pfnRegister
+ || !pDmacReg->pfnReadMemory
+ || !pDmacReg->pfnWriteMemory
+ || !pDmacReg->pfnSetDREQ
+ || !pDmacReg->pfnGetChannelMode)
+ {
+ Assert(pDmacReg->pfnRun);
+ Assert(pDmacReg->pfnRegister);
+ Assert(pDmacReg->pfnReadMemory);
+ Assert(pDmacReg->pfnWriteMemory);
+ Assert(pDmacReg->pfnSetDREQ);
+ Assert(pDmacReg->pfnGetChannelMode);
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: returns %Rrc (callbacks)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (!ppDmacHlp)
+ {
+ Assert(ppDmacHlp);
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: returns %Rrc (ppDmacHlp)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Only one DMA device.
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ if (pVM->pdm.s.pDmac)
+ {
+ AssertMsgFailed(("Only one DMA device is supported!\n"));
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+     * Allocate and initialize the DMAC structure.
+ */
+ int rc = VINF_SUCCESS;
+ PPDMDMAC pDmac = (PPDMDMAC)MMR3HeapAlloc(pDevIns->Internal.s.pVMR3, MM_TAG_PDM_DEVICE, sizeof(*pDmac));
+ if (pDmac)
+ {
+ pDmac->pDevIns = pDevIns;
+ pDmac->Reg = *pDmacReg;
+ pVM->pdm.s.pDmac = pDmac;
+
+ /* set the helper pointer. */
+ *ppDmacHlp = &g_pdmR3DevDmacHlp;
+ Log(("PDM: Registered DMAC device '%s'/%d pDevIns=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ LogFlow(("pdmR3DevHlp_DMACRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
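+
+/*
+ * Usage sketch (hypothetical 8237-style DMA controller constructor; the six
+ * callbacks are exactly the ones checked above, names illustrative):
+ *
+ *     PDMDMACREG DmacReg;
+ *     DmacReg.u32Version        = PDM_DMACREG_VERSION;
+ *     DmacReg.pfnRun            = dmaRun;      // run pending transfers
+ *     DmacReg.pfnRegister       = dmaRegister; // attach a channel handler
+ *     DmacReg.pfnReadMemory     = dmaReadMemory;
+ *     DmacReg.pfnWriteMemory    = dmaWriteMemory;
+ *     DmacReg.pfnSetDREQ        = dmaSetDREQ;  // assert/deassert a DREQ line
+ *     DmacReg.pfnGetChannelMode = dmaGetChannelMode;
+ *     rc = pDevIns->pHlpR3->pfnDMACRegister(pDevIns, &DmacReg, &pThis->pDmacHlp);
+ */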
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnRegisterVMMDevHeap} */
+static DECLCALLBACK(int) pdmR3DevHlp_RegisterVMMDevHeap(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTR3PTR pvHeap, unsigned cbHeap)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_RegisterVMMDevHeap: caller='%s'/%d: GCPhys=%RGp pvHeap=%p cbHeap=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, pvHeap, cbHeap));
+
+ if (pVM->pdm.s.pvVMMDevHeap == NULL)
+ {
+ pVM->pdm.s.pvVMMDevHeap = pvHeap;
+ pVM->pdm.s.GCPhysVMMDevHeap = GCPhys;
+ pVM->pdm.s.cbVMMDevHeap = cbHeap;
+ pVM->pdm.s.cbVMMDevHeapLeft = cbHeap;
+ }
+ else
+ {
+ Assert(pVM->pdm.s.pvVMMDevHeap == pvHeap);
+ Assert(pVM->pdm.s.cbVMMDevHeap == cbHeap);
+ Assert(pVM->pdm.s.GCPhysVMMDevHeap != GCPhys || GCPhys == NIL_RTGCPHYS);
+ if (pVM->pdm.s.GCPhysVMMDevHeap != GCPhys)
+ {
+ pVM->pdm.s.GCPhysVMMDevHeap = GCPhys;
+ if (pVM->pdm.s.pfnVMMDevHeapNotify)
+ pVM->pdm.s.pfnVMMDevHeapNotify(pVM, pvHeap, GCPhys);
+ }
+ }
+
+ LogFlow(("pdmR3DevHlp_RegisterVMMDevHeap: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
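+
+/*
+ * Usage sketch (hypothetical VMMDev code): the first call registers the
+ * heap; a later call with the same pvHeap/cbHeap but another GCPhys
+ * (NIL_RTGCPHYS included) just moves the guest mapping and fires the
+ * pfnVMMDevHeapNotify callback, as implemented above:
+ *
+ *     rc = pDevIns->pHlpR3->pfnRegisterVMMDevHeap(pDevIns, GCPhysHeapBase,
+ *                                                 pThis->pvHeap, pThis->cbHeap);
+ */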
+
+
+/**
+ * @interface_method_impl{PDMDEVHLPR3,pfnFirmwareRegister}
+ */
+static DECLCALLBACK(int) pdmR3DevHlp_FirmwareRegister(PPDMDEVINS pDevIns, PCPDMFWREG pFwReg, PCPDMFWHLPR3 *ppFwHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: pFWReg=%p:{.u32Version=%#x, .pfnIsHardReset=%p, .u32TheEnd=%#x} ppFwHlp=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pFwReg, pFwReg->u32Version, pFwReg->pfnIsHardReset, pFwReg->u32TheEnd, ppFwHlp));
+
+ /*
+ * Validate input.
+ */
+ if (pFwReg->u32Version != PDM_FWREG_VERSION)
+ {
+ AssertMsgFailed(("u32Version=%#x expected %#x\n", pFwReg->u32Version, PDM_FWREG_VERSION));
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: returns %Rrc (version)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+ if (!pFwReg->pfnIsHardReset)
+ {
+ Assert(pFwReg->pfnIsHardReset);
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: returns %Rrc (callbacks)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (!ppFwHlp)
+ {
+ Assert(ppFwHlp);
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: returns %Rrc (ppFwHlp)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+     * Only one firmware device.
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ if (pVM->pdm.s.pFirmware)
+ {
+ AssertMsgFailed(("Only one firmware device is supported!\n"));
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+     * Allocate and initialize the firmware structure.
+ */
+ int rc = VINF_SUCCESS;
+ PPDMFW pFirmware = (PPDMFW)MMR3HeapAlloc(pDevIns->Internal.s.pVMR3, MM_TAG_PDM_DEVICE, sizeof(*pFirmware));
+ if (pFirmware)
+ {
+ pFirmware->pDevIns = pDevIns;
+ pFirmware->Reg = *pFwReg;
+ pVM->pdm.s.pFirmware = pFirmware;
+
+ /* set the helper pointer. */
+ *ppFwHlp = &g_pdmR3DevFirmwareHlp;
+ Log(("PDM: Registered firmware device '%s'/%d pDevIns=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ LogFlow(("pdmR3DevHlp_FirmwareRegister: caller='%s'/%d: returns %Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
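+
+/*
+ * Usage sketch (hypothetical BIOS/EFI device constructor; pfnIsHardReset is
+ * the only mandatory callback per the checks above):
+ *
+ *     PDMFWREG FwReg;
+ *     FwReg.u32Version     = PDM_FWREG_VERSION;
+ *     FwReg.pfnIsHardReset = fwIsHardReset; // tells hard from soft resets
+ *     FwReg.u32TheEnd      = PDM_FWREG_VERSION;
+ *     rc = pDevIns->pHlpR3->pfnFirmwareRegister(pDevIns, &FwReg, &pThis->pFwHlpR3);
+ */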
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMReset} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMReset(PPDMDEVINS pDevIns, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_VMReset: caller='%s'/%d: fFlags=%#x VM_FF_RESET %d -> 1\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, fFlags, VM_FF_IS_SET(pVM, VM_FF_RESET)));
+
+ /*
+     * We postpone this operation because we're likely to be inside an I/O instruction
+ * and the EIP will be updated when we return.
+ * We still return VINF_EM_RESET to break out of any execution loops and force FF evaluation.
+ */
+ bool fHaltOnReset;
+ int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
+ if (RT_SUCCESS(rc) && fHaltOnReset)
+ {
+ Log(("pdmR3DevHlp_VMReset: Halt On Reset!\n"));
+ rc = VINF_EM_HALT;
+ }
+ else
+ {
+ pVM->pdm.s.fResetFlags = fFlags;
+ VM_FF_SET(pVM, VM_FF_RESET);
+ rc = VINF_EM_RESET;
+ }
+
+ LogFlow(("pdmR3DevHlp_VMReset: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
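+
+/*
+ * Usage sketch (hypothetical reset-triggering port write handler;
+ * fResetFlags stands in for a PDMVMRESET_F_* source flag): the returned
+ * status (VINF_EM_RESET, or VINF_EM_HALT with PDM/HaltOnReset set) must be
+ * propagated so EM breaks out of its loop and services VM_FF_RESET:
+ *
+ *     if (fResetRequested)
+ *         return pDevIns->pHlpR3->pfnVMReset(pDevIns, fResetFlags);
+ */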
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSuspend} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMSuspend(PPDMDEVINS pDevIns)
+{
+ int rc;
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_VMSuspend: caller='%s'/%d:\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ /** @todo Always take the SMP path - fewer code paths. */
+ if (pVM->cCpus > 1)
+ {
+ /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3Suspend, 2, pVM->pUVM, VMSUSPENDREASON_VM);
+ AssertRC(rc);
+ rc = VINF_EM_SUSPEND;
+ }
+ else
+ rc = VMR3Suspend(pVM->pUVM, VMSUSPENDREASON_VM);
+
+ LogFlow(("pdmR3DevHlp_VMSuspend: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/**
+ * Worker for pdmR3DevHlp_VMSuspendSaveAndPowerOff that is invoked via a queued
+ * EMT request to avoid deadlocks.
+ *
+ * @returns VBox status code fit for scheduling.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device that triggered this action.
+ */
+static DECLCALLBACK(int) pdmR3DevHlp_VMSuspendSaveAndPowerOffWorker(PVM pVM, PPDMDEVINS pDevIns)
+{
+ /*
+     * Suspend the VM first, then do the saving.
+ */
+ int rc = VMR3Suspend(pVM->pUVM, VMSUSPENDREASON_VM);
+ if (RT_SUCCESS(rc))
+ {
+ PUVM pUVM = pVM->pUVM;
+        rc = pUVM->pVmm2UserMethods->pfnSaveState(pUVM->pVmm2UserMethods, pUVM);
+
+ /*
+         * On success, power off the VM; on failure we'll leave it suspended.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ rc = VMR3PowerOff(pVM->pUVM);
+ if (RT_FAILURE(rc))
+ LogRel(("%s/SSP: VMR3PowerOff failed: %Rrc\n", pDevIns->pReg->szName, rc));
+ }
+ else
+ LogRel(("%s/SSP: pfnSaveState failed: %Rrc\n", pDevIns->pReg->szName, rc));
+ }
+ else
+ LogRel(("%s/SSP: Suspend failed: %Rrc\n", pDevIns->pReg->szName, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSuspendSaveAndPowerOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMSuspendSaveAndPowerOff(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_VMSuspendSaveAndPowerOff: caller='%s'/%d:\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ int rc;
+ if ( pVM->pUVM->pVmm2UserMethods
+ && pVM->pUVM->pVmm2UserMethods->pfnSaveState)
+ {
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pdmR3DevHlp_VMSuspendSaveAndPowerOffWorker, 2, pVM, pDevIns);
+ if (RT_SUCCESS(rc))
+ {
+ LogRel(("%s: Suspending, Saving and Powering Off the VM\n", pDevIns->pReg->szName));
+ rc = VINF_EM_SUSPEND;
+ }
+ }
+ else
+ rc = VERR_NOT_SUPPORTED;
+
+ LogFlow(("pdmR3DevHlp_VMSuspendSaveAndPowerOff: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
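+
+/*
+ * Usage sketch (hypothetical ACPI-style shutdown handler): the actual
+ * suspend/save/poweroff sequence runs from the queued EMT request, so the
+ * caller just forwards VINF_EM_SUSPEND; without a pfnSaveState callback the
+ * helper returns VERR_NOT_SUPPORTED and a plain suspend is a sane fallback:
+ *
+ *     int rc = pDevIns->pHlpR3->pfnVMSuspendSaveAndPowerOff(pDevIns);
+ *     if (rc == VERR_NOT_SUPPORTED)
+ *         rc = pDevIns->pHlpR3->pfnVMSuspend(pDevIns);
+ *     return rc;
+ */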
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMPowerOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMPowerOff(PPDMDEVINS pDevIns)
+{
+ int rc;
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DevHlp_VMPowerOff: caller='%s'/%d:\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ /** @todo Always take the SMP path - fewer code paths. */
+ if (pVM->cCpus > 1)
+ {
+ /* We might be holding locks here and could cause a deadlock since
+           VMR3PowerOff rendezvouses with the other CPUs. */
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3PowerOff, 1, pVM->pUVM);
+ AssertRC(rc);
+ /* Set the VCPU state to stopped here as well to make sure no
+ inconsistency with the EM state occurs. */
+ VMCPU_SET_STATE(VMMGetCpu(pVM), VMCPUSTATE_STOPPED);
+ rc = VINF_EM_OFF;
+ }
+ else
+ rc = VMR3PowerOff(pVM->pUVM);
+
+ LogFlow(("pdmR3DevHlp_VMPowerOff: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnA20IsEnabled} */
+static DECLCALLBACK(bool) pdmR3DevHlp_A20IsEnabled(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+
+ bool fRc = PGMPhysIsA20Enabled(VMMGetCpu(pDevIns->Internal.s.pVMR3));
+
+ LogFlow(("pdmR3DevHlp_A20IsEnabled: caller='%s'/%d: returns %d\n", pDevIns->pReg->szName, pDevIns->iInstance, fRc));
+ return fRc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnA20Set} */
+static DECLCALLBACK(void) pdmR3DevHlp_A20Set(PPDMDEVINS pDevIns, bool fEnable)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_A20Set: caller='%s'/%d: fEnable=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, fEnable));
+ PGMR3PhysSetA20(VMMGetCpu(pDevIns->Internal.s.pVMR3), fEnable);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetCpuId} */
+static DECLCALLBACK(void) pdmR3DevHlp_GetCpuId(PPDMDEVINS pDevIns, uint32_t iLeaf,
+ uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DevHlp_GetCpuId: caller='%s'/%d: iLeaf=%d pEax=%p pEbx=%p pEcx=%p pEdx=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, iLeaf, pEax, pEbx, pEcx, pEdx));
+ AssertPtr(pEax); AssertPtr(pEbx); AssertPtr(pEcx); AssertPtr(pEdx);
+
+ CPUMGetGuestCpuId(VMMGetCpu(pDevIns->Internal.s.pVMR3), iLeaf, 0 /*iSubLeaf*/, -1 /*f64BitMode*/, pEax, pEbx, pEcx, pEdx);
+
+ LogFlow(("pdmR3DevHlp_GetCpuId: caller='%s'/%d: returns void - *pEax=%#x *pEbx=%#x *pEcx=%#x *pEdx=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, *pEax, *pEbx, *pEcx, *pEdx));
+}
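+
+/*
+ * Usage sketch (hypothetical firmware device reading the guest's CPUID
+ * vendor leaf; note the EBX/EDX/ECX ordering of the vendor string):
+ *
+ *     uint32_t uEax, uEbx, uEcx, uEdx;
+ *     pDevIns->pHlpR3->pfnGetCpuId(pDevIns, 0 /*iLeaf*/, &uEax, &uEbx, &uEcx, &uEdx);
+ *     // uEbx:uEdx:uEcx now spell e.g. "GenuineIntel".
+ */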
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetMainExecutionEngine} */
+static DECLCALLBACK(uint8_t) pdmR3DevHlp_GetMainExecutionEngine(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DevHlp_GetMainExecutionEngine: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return pDevIns->Internal.s.pVMR3->bMainExecutionEngine;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMMRegisterPatchMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMMRegisterPatchMemory(PPDMDEVINS pDevIns, RTGCPTR GCPtrPatchMem, uint32_t cbPatchMem)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_VMMRegisterPatchMemory: caller='%s'/%d: GCPtrPatchMem=%RGv cbPatchMem=%RU32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPtrPatchMem, cbPatchMem));
+
+ int rc = VMMR3RegisterPatchMemory(pDevIns->Internal.s.pVMR3, GCPtrPatchMem, cbPatchMem);
+
+ LogFlow(("pdmR3DevHlp_VMMRegisterPatchMemory: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMMDeregisterPatchMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_VMMDeregisterPatchMemory(PPDMDEVINS pDevIns, RTGCPTR GCPtrPatchMem, uint32_t cbPatchMem)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_VMMDeregisterPatchMemory: caller='%s'/%d: GCPtrPatchMem=%RGv cbPatchMem=%RU32\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPtrPatchMem, cbPatchMem));
+
+ int rc = VMMR3DeregisterPatchMemory(pDevIns->Internal.s.pVMR3, GCPtrPatchMem, cbPatchMem);
+
+ LogFlow(("pdmR3DevHlp_VMMDeregisterPatchMemory: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_SharedModuleRegister(PPDMDEVINS pDevIns, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
+ RTGCPTR GCBaseAddr, uint32_t cbModule,
+ uint32_t cRegions, VMMDEVSHAREDREGIONDESC const *paRegions)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_SharedModuleRegister: caller='%s'/%d: enmGuestOS=%u pszModuleName=%p:{%s} pszVersion=%p:{%s} GCBaseAddr=%RGv cbModule=%#x cRegions=%u paRegions=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmGuestOS, pszModuleName, pszModuleName, pszVersion, pszVersion, GCBaseAddr, cbModule, cRegions, paRegions));
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ int rc = PGMR3SharedModuleRegister(pDevIns->Internal.s.pVMR3, enmGuestOS, pszModuleName, pszVersion,
+ GCBaseAddr, cbModule, cRegions, paRegions);
+#else
+ RT_NOREF(pDevIns, enmGuestOS, pszModuleName, pszVersion, GCBaseAddr, cbModule, cRegions, paRegions);
+ int rc = VERR_NOT_SUPPORTED;
+#endif
+
+ LogFlow(("pdmR3DevHlp_SharedModuleRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleUnregister} */
+static DECLCALLBACK(int) pdmR3DevHlp_SharedModuleUnregister(PPDMDEVINS pDevIns, char *pszModuleName, char *pszVersion,
+ RTGCPTR GCBaseAddr, uint32_t cbModule)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_SharedModuleUnregister: caller='%s'/%d: pszModuleName=%p:{%s} pszVersion=%p:{%s} GCBaseAddr=%RGv cbModule=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszModuleName, pszModuleName, pszVersion, pszVersion, GCBaseAddr, cbModule));
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ int rc = PGMR3SharedModuleUnregister(pDevIns->Internal.s.pVMR3, pszModuleName, pszVersion, GCBaseAddr, cbModule);
+#else
+ RT_NOREF(pDevIns, pszModuleName, pszVersion, GCBaseAddr, cbModule);
+ int rc = VERR_NOT_SUPPORTED;
+#endif
+
+ LogFlow(("pdmR3DevHlp_SharedModuleUnregister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleGetPageState} */
+static DECLCALLBACK(int) pdmR3DevHlp_SharedModuleGetPageState(PPDMDEVINS pDevIns, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_SharedModuleGetPageState: caller='%s'/%d: GCPtrPage=%RGv pfShared=%p pfPageFlags=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPtrPage, pfShared, pfPageFlags));
+
+#if defined(VBOX_WITH_PAGE_SHARING) && defined(DEBUG)
+ int rc = PGMR3SharedModuleGetPageState(pDevIns->Internal.s.pVMR3, GCPtrPage, pfShared, pfPageFlags);
+#else
+ RT_NOREF(pDevIns, GCPtrPage, pfShared, pfPageFlags);
+ int rc = VERR_NOT_IMPLEMENTED;
+#endif
+
+ LogFlow(("pdmR3DevHlp_SharedModuleGetPageState: caller='%s'/%d: returns %Rrc *pfShared=%RTbool *pfPageFlags=%#RX64\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *pfShared, *pfPageFlags));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleCheckAll} */
+static DECLCALLBACK(int) pdmR3DevHlp_SharedModuleCheckAll(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_SharedModuleCheckAll: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ int rc = PGMR3SharedModuleCheckAll(pDevIns->Internal.s.pVMR3);
+#else
+ RT_NOREF(pDevIns);
+ int rc = VERR_NOT_SUPPORTED;
+#endif
+
+ LogFlow(("pdmR3DevHlp_SharedModuleCheckAll: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueryLun} */
+static DECLCALLBACK(int) pdmR3DevHlp_QueryLun(PPDMDEVINS pDevIns, const char *pszDevice,
+ unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_QueryLun: caller='%s'/%d: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pszDevice, pszDevice, iInstance, iLun, ppBase));
+
+ int rc = PDMR3QueryLun(pDevIns->Internal.s.pVMR3->pUVM, pszDevice, iInstance, iLun, ppBase);
+
+ LogFlow(("pdmR3DevHlp_QueryLun: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
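+
+/*
+ * Usage sketch (hypothetical lookup of another device's LUN; the device
+ * name, instance and queried interface are illustrative):
+ *
+ *     PPDMIBASE pBase;
+ *     rc = pDevIns->pHlpR3->pfnQueryLun(pDevIns, "ahci", 0 /*iInstance*/, 0 /*iLun*/, &pBase);
+ *     if (RT_SUCCESS(rc))
+ *         pThis->pDrvMedia = PDMIBASE_QUERY_INTERFACE(pBase, PDMIMEDIA);
+ */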
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMDeviceRegister} */
+static DECLCALLBACK(void) pdmR3DevHlp_GIMDeviceRegister(PPDMDEVINS pDevIns, PGIMDEBUG pDbg)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_GIMDeviceRegister: caller='%s'/%d: pDbg=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDbg));
+
+ GIMR3GimDeviceRegister(pDevIns->Internal.s.pVMR3, pDevIns, pDbg);
+
+ LogFlow(("pdmR3DevHlp_GIMDeviceRegister: caller='%s'/%d: returns\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMGetDebugSetup} */
+static DECLCALLBACK(int) pdmR3DevHlp_GIMGetDebugSetup(PPDMDEVINS pDevIns, PGIMDEBUGSETUP pDbgSetup)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_GIMGetDebugSetup: caller='%s'/%d: pDbgSetup=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pDbgSetup));
+
+ int rc = GIMR3GetDebugSetup(pDevIns->Internal.s.pVMR3, pDbgSetup);
+
+ LogFlow(("pdmR3DevHlp_GIMGetDebugSetup: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMGetMmio2Regions} */
+static DECLCALLBACK(PGIMMMIO2REGION) pdmR3DevHlp_GIMGetMmio2Regions(PPDMDEVINS pDevIns, uint32_t *pcRegions)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ LogFlow(("pdmR3DevHlp_GIMGetMmio2Regions: caller='%s'/%d: pcRegions=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pcRegions));
+
+ PGIMMMIO2REGION pRegion = GIMGetMmio2Regions(pDevIns->Internal.s.pVMR3, pcRegions);
+
+ LogFlow(("pdmR3DevHlp_GIMGetMmio2Regions: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pRegion));
+ return pRegion;
+}
+
+
+/**
+ * The device helper structure for trusted devices.
+ */
+const PDMDEVHLPR3 g_pdmR3DevHlpTrusted =
+{
+ PDM_DEVHLPR3_VERSION,
+ pdmR3DevHlp_IoPortCreateEx,
+ pdmR3DevHlp_IoPortMap,
+ pdmR3DevHlp_IoPortUnmap,
+ pdmR3DevHlp_IoPortGetMappingAddress,
+ pdmR3DevHlp_IoPortWrite,
+ pdmR3DevHlp_MmioCreateEx,
+ pdmR3DevHlp_MmioMap,
+ pdmR3DevHlp_MmioUnmap,
+ pdmR3DevHlp_MmioReduce,
+ pdmR3DevHlp_MmioGetMappingAddress,
+ pdmR3DevHlp_Mmio2Create,
+ pdmR3DevHlp_Mmio2Destroy,
+ pdmR3DevHlp_Mmio2Map,
+ pdmR3DevHlp_Mmio2Unmap,
+ pdmR3DevHlp_Mmio2Reduce,
+ pdmR3DevHlp_Mmio2GetMappingAddress,
+ pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+ pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
+ pdmR3DevHlp_Mmio2ChangeRegionNo,
+ pdmR3DevHlp_MmioMapMmio2Page,
+ pdmR3DevHlp_MmioResetRegion,
+ pdmR3DevHlp_ROMRegister,
+ pdmR3DevHlp_ROMProtectShadow,
+ pdmR3DevHlp_SSMRegister,
+ pdmR3DevHlp_SSMRegisterLegacy,
+ SSMR3PutStruct,
+ SSMR3PutStructEx,
+ SSMR3PutBool,
+ SSMR3PutU8,
+ SSMR3PutS8,
+ SSMR3PutU16,
+ SSMR3PutS16,
+ SSMR3PutU32,
+ SSMR3PutS32,
+ SSMR3PutU64,
+ SSMR3PutS64,
+ SSMR3PutU128,
+ SSMR3PutS128,
+ SSMR3PutUInt,
+ SSMR3PutSInt,
+ SSMR3PutGCUInt,
+ SSMR3PutGCUIntReg,
+ SSMR3PutGCPhys32,
+ SSMR3PutGCPhys64,
+ SSMR3PutGCPhys,
+ SSMR3PutGCPtr,
+ SSMR3PutGCUIntPtr,
+ SSMR3PutRCPtr,
+ SSMR3PutIOPort,
+ SSMR3PutSel,
+ SSMR3PutMem,
+ SSMR3PutStrZ,
+ SSMR3GetStruct,
+ SSMR3GetStructEx,
+ SSMR3GetBool,
+ SSMR3GetBoolV,
+ SSMR3GetU8,
+ SSMR3GetU8V,
+ SSMR3GetS8,
+ SSMR3GetS8V,
+ SSMR3GetU16,
+ SSMR3GetU16V,
+ SSMR3GetS16,
+ SSMR3GetS16V,
+ SSMR3GetU32,
+ SSMR3GetU32V,
+ SSMR3GetS32,
+ SSMR3GetS32V,
+ SSMR3GetU64,
+ SSMR3GetU64V,
+ SSMR3GetS64,
+ SSMR3GetS64V,
+ SSMR3GetU128,
+ SSMR3GetU128V,
+ SSMR3GetS128,
+ SSMR3GetS128V,
+ SSMR3GetGCPhys32,
+ SSMR3GetGCPhys32V,
+ SSMR3GetGCPhys64,
+ SSMR3GetGCPhys64V,
+ SSMR3GetGCPhys,
+ SSMR3GetGCPhysV,
+ SSMR3GetUInt,
+ SSMR3GetSInt,
+ SSMR3GetGCUInt,
+ SSMR3GetGCUIntReg,
+ SSMR3GetGCPtr,
+ SSMR3GetGCUIntPtr,
+ SSMR3GetRCPtr,
+ SSMR3GetIOPort,
+ SSMR3GetSel,
+ SSMR3GetMem,
+ SSMR3GetStrZ,
+ SSMR3GetStrZEx,
+ SSMR3Skip,
+ SSMR3SkipToEndOfUnit,
+ SSMR3SetLoadError,
+ SSMR3SetLoadErrorV,
+ SSMR3SetCfgError,
+ SSMR3SetCfgErrorV,
+ SSMR3HandleGetStatus,
+ SSMR3HandleGetAfter,
+ SSMR3HandleIsLiveSave,
+ SSMR3HandleMaxDowntime,
+ SSMR3HandleHostBits,
+ SSMR3HandleRevision,
+ SSMR3HandleVersion,
+ SSMR3HandleHostOSAndArch,
+ pdmR3DevHlp_TimerCreate,
+ pdmR3DevHlp_TimerFromMicro,
+ pdmR3DevHlp_TimerFromMilli,
+ pdmR3DevHlp_TimerFromNano,
+ pdmR3DevHlp_TimerGet,
+ pdmR3DevHlp_TimerGetFreq,
+ pdmR3DevHlp_TimerGetNano,
+ pdmR3DevHlp_TimerIsActive,
+ pdmR3DevHlp_TimerIsLockOwner,
+ pdmR3DevHlp_TimerLockClock,
+ pdmR3DevHlp_TimerLockClock2,
+ pdmR3DevHlp_TimerSet,
+ pdmR3DevHlp_TimerSetFrequencyHint,
+ pdmR3DevHlp_TimerSetMicro,
+ pdmR3DevHlp_TimerSetMillies,
+ pdmR3DevHlp_TimerSetNano,
+ pdmR3DevHlp_TimerSetRelative,
+ pdmR3DevHlp_TimerStop,
+ pdmR3DevHlp_TimerUnlockClock,
+ pdmR3DevHlp_TimerUnlockClock2,
+ pdmR3DevHlp_TimerSetCritSect,
+ pdmR3DevHlp_TimerSave,
+ pdmR3DevHlp_TimerLoad,
+ pdmR3DevHlp_TimerDestroy,
+ TMR3TimerSkip,
+ pdmR3DevHlp_TMUtcNow,
+ CFGMR3Exists,
+ CFGMR3QueryType,
+ CFGMR3QuerySize,
+ CFGMR3QueryInteger,
+ CFGMR3QueryIntegerDef,
+ CFGMR3QueryString,
+ CFGMR3QueryStringDef,
+ CFGMR3QueryPassword,
+ CFGMR3QueryPasswordDef,
+ CFGMR3QueryBytes,
+ CFGMR3QueryU64,
+ CFGMR3QueryU64Def,
+ CFGMR3QueryS64,
+ CFGMR3QueryS64Def,
+ CFGMR3QueryU32,
+ CFGMR3QueryU32Def,
+ CFGMR3QueryS32,
+ CFGMR3QueryS32Def,
+ CFGMR3QueryU16,
+ CFGMR3QueryU16Def,
+ CFGMR3QueryS16,
+ CFGMR3QueryS16Def,
+ CFGMR3QueryU8,
+ CFGMR3QueryU8Def,
+ CFGMR3QueryS8,
+ CFGMR3QueryS8Def,
+ CFGMR3QueryBool,
+ CFGMR3QueryBoolDef,
+ CFGMR3QueryPort,
+ CFGMR3QueryPortDef,
+ CFGMR3QueryUInt,
+ CFGMR3QueryUIntDef,
+ CFGMR3QuerySInt,
+ CFGMR3QuerySIntDef,
+ CFGMR3QueryGCPtr,
+ CFGMR3QueryGCPtrDef,
+ CFGMR3QueryGCPtrU,
+ CFGMR3QueryGCPtrUDef,
+ CFGMR3QueryGCPtrS,
+ CFGMR3QueryGCPtrSDef,
+ CFGMR3QueryStringAlloc,
+ CFGMR3QueryStringAllocDef,
+ CFGMR3GetParent,
+ CFGMR3GetChild,
+ CFGMR3GetChildF,
+ CFGMR3GetChildFV,
+ CFGMR3GetFirstChild,
+ CFGMR3GetNextChild,
+ CFGMR3GetName,
+ CFGMR3GetNameLen,
+ CFGMR3AreChildrenValid,
+ CFGMR3GetFirstValue,
+ CFGMR3GetNextValue,
+ CFGMR3GetValueName,
+ CFGMR3GetValueNameLen,
+ CFGMR3GetValueType,
+ CFGMR3AreValuesValid,
+ CFGMR3ValidateConfig,
+ pdmR3DevHlp_PhysRead,
+ pdmR3DevHlp_PhysWrite,
+ pdmR3DevHlp_PhysGCPhys2CCPtr,
+ pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysReleasePageMappingLock,
+ pdmR3DevHlp_PhysReadGCVirt,
+ pdmR3DevHlp_PhysWriteGCVirt,
+ pdmR3DevHlp_PhysGCPtr2GCPhys,
+ pdmR3DevHlp_PhysIsGCPhysNormal,
+ pdmR3DevHlp_PhysChangeMemBalloon,
+ pdmR3DevHlp_MMHeapAlloc,
+ pdmR3DevHlp_MMHeapAllocZ,
+ pdmR3DevHlp_MMHeapAPrintfV,
+ pdmR3DevHlp_MMHeapFree,
+ pdmR3DevHlp_MMPhysGetRamSize,
+ pdmR3DevHlp_MMPhysGetRamSizeBelow4GB,
+ pdmR3DevHlp_MMPhysGetRamSizeAbove4GB,
+ pdmR3DevHlp_VMState,
+ pdmR3DevHlp_VMTeleportedAndNotFullyResumedYet,
+ pdmR3DevHlp_VMSetErrorV,
+ pdmR3DevHlp_VMSetRuntimeErrorV,
+ pdmR3DevHlp_VMWaitForDeviceReady,
+ pdmR3DevHlp_VMNotifyCpuDeviceReady,
+ pdmR3DevHlp_VMReqCallNoWaitV,
+ pdmR3DevHlp_VMReqPriorityCallWaitV,
+ pdmR3DevHlp_DBGFStopV,
+ pdmR3DevHlp_DBGFInfoRegister,
+ pdmR3DevHlp_DBGFInfoRegisterArgv,
+ pdmR3DevHlp_DBGFRegRegister,
+ pdmR3DevHlp_DBGFTraceBuf,
+ pdmR3DevHlp_DBGFReportBugCheck,
+ pdmR3DevHlp_DBGFCoreWrite,
+ pdmR3DevHlp_DBGFInfoLogHlp,
+ pdmR3DevHlp_DBGFRegNmQueryU64,
+ pdmR3DevHlp_DBGFRegPrintfV,
+ pdmR3DevHlp_STAMRegister,
+ pdmR3DevHlp_STAMRegisterV,
+ pdmR3DevHlp_PCIRegister,
+ pdmR3DevHlp_PCIRegisterMsi,
+ pdmR3DevHlp_PCIIORegionRegister,
+ pdmR3DevHlp_PCIInterceptConfigAccesses,
+ pdmR3DevHlp_PCIConfigWrite,
+ pdmR3DevHlp_PCIConfigRead,
+ pdmR3DevHlp_PCIPhysRead,
+ pdmR3DevHlp_PCIPhysWrite,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PCISetIrq,
+ pdmR3DevHlp_PCISetIrqNoWait,
+ pdmR3DevHlp_ISASetIrq,
+ pdmR3DevHlp_ISASetIrqNoWait,
+ pdmR3DevHlp_DriverAttach,
+ pdmR3DevHlp_DriverDetach,
+ pdmR3DevHlp_DriverReconfigure,
+ pdmR3DevHlp_QueueCreate,
+ pdmR3DevHlp_QueueAlloc,
+ pdmR3DevHlp_QueueInsert,
+ pdmR3DevHlp_QueueFlushIfNecessary,
+ pdmR3DevHlp_TaskCreate,
+ pdmR3DevHlp_TaskTrigger,
+ pdmR3DevHlp_SUPSemEventCreate,
+ pdmR3DevHlp_SUPSemEventClose,
+ pdmR3DevHlp_SUPSemEventSignal,
+ pdmR3DevHlp_SUPSemEventWaitNoResume,
+ pdmR3DevHlp_SUPSemEventWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventGetResolution,
+ pdmR3DevHlp_SUPSemEventMultiCreate,
+ pdmR3DevHlp_SUPSemEventMultiClose,
+ pdmR3DevHlp_SUPSemEventMultiSignal,
+ pdmR3DevHlp_SUPSemEventMultiReset,
+ pdmR3DevHlp_SUPSemEventMultiWaitNoResume,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventMultiGetResolution,
+ pdmR3DevHlp_CritSectInit,
+ pdmR3DevHlp_CritSectGetNop,
+ pdmR3DevHlp_SetDeviceCritSect,
+ pdmR3DevHlp_CritSectYield,
+ pdmR3DevHlp_CritSectEnter,
+ pdmR3DevHlp_CritSectEnterDebug,
+ pdmR3DevHlp_CritSectTryEnter,
+ pdmR3DevHlp_CritSectTryEnterDebug,
+ pdmR3DevHlp_CritSectLeave,
+ pdmR3DevHlp_CritSectIsOwner,
+ pdmR3DevHlp_CritSectIsInitialized,
+ pdmR3DevHlp_CritSectHasWaiters,
+ pdmR3DevHlp_CritSectGetRecursion,
+ pdmR3DevHlp_CritSectScheduleExitEvent,
+ pdmR3DevHlp_CritSectDelete,
+ pdmR3DevHlp_CritSectRwInit,
+ pdmR3DevHlp_CritSectRwDelete,
+ pdmR3DevHlp_CritSectRwEnterShared,
+ pdmR3DevHlp_CritSectRwEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwTryEnterShared,
+ pdmR3DevHlp_CritSectRwTryEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwLeaveShared,
+ pdmR3DevHlp_CritSectRwEnterExcl,
+ pdmR3DevHlp_CritSectRwEnterExclDebug,
+ pdmR3DevHlp_CritSectRwTryEnterExcl,
+ pdmR3DevHlp_CritSectRwTryEnterExclDebug,
+ pdmR3DevHlp_CritSectRwLeaveExcl,
+ pdmR3DevHlp_CritSectRwIsWriteOwner,
+ pdmR3DevHlp_CritSectRwIsReadOwner,
+ pdmR3DevHlp_CritSectRwGetWriteRecursion,
+ pdmR3DevHlp_CritSectRwGetWriterReadRecursion,
+ pdmR3DevHlp_CritSectRwGetReadCount,
+ pdmR3DevHlp_CritSectRwIsInitialized,
+ pdmR3DevHlp_ThreadCreate,
+ PDMR3ThreadDestroy,
+ PDMR3ThreadIAmSuspending,
+ PDMR3ThreadIAmRunning,
+ PDMR3ThreadSleep,
+ PDMR3ThreadSuspend,
+ PDMR3ThreadResume,
+ pdmR3DevHlp_SetAsyncNotification,
+ pdmR3DevHlp_AsyncNotificationCompleted,
+ pdmR3DevHlp_RTCRegister,
+ pdmR3DevHlp_PCIBusRegister,
+ pdmR3DevHlp_IommuRegister,
+ pdmR3DevHlp_PICRegister,
+ pdmR3DevHlp_ApicRegister,
+ pdmR3DevHlp_IoApicRegister,
+ pdmR3DevHlp_HpetRegister,
+ pdmR3DevHlp_PciRawRegister,
+ pdmR3DevHlp_DMACRegister,
+ pdmR3DevHlp_DMARegister,
+ pdmR3DevHlp_DMAReadMemory,
+ pdmR3DevHlp_DMAWriteMemory,
+ pdmR3DevHlp_DMASetDREQ,
+ pdmR3DevHlp_DMAGetChannelMode,
+ pdmR3DevHlp_DMASchedule,
+ pdmR3DevHlp_CMOSWrite,
+ pdmR3DevHlp_CMOSRead,
+ pdmR3DevHlp_AssertEMT,
+ pdmR3DevHlp_AssertOther,
+ pdmR3DevHlp_LdrGetRCInterfaceSymbols,
+ pdmR3DevHlp_LdrGetR0InterfaceSymbols,
+ pdmR3DevHlp_CallR0,
+ pdmR3DevHlp_VMGetSuspendReason,
+ pdmR3DevHlp_VMGetResumeReason,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
+ pdmR3DevHlp_CpuGetGuestMicroarch,
+ pdmR3DevHlp_CpuGetGuestAddrWidths,
+ pdmR3DevHlp_CpuGetGuestScalableBusFrequency,
+ pdmR3DevHlp_STAMDeregisterByPrefix,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ pdmR3DevHlp_GetUVM,
+ pdmR3DevHlp_GetVM,
+ pdmR3DevHlp_GetVMCPU,
+ pdmR3DevHlp_GetCurrentCpuId,
+ pdmR3DevHlp_RegisterVMMDevHeap,
+ pdmR3DevHlp_FirmwareRegister,
+ pdmR3DevHlp_VMReset,
+ pdmR3DevHlp_VMSuspend,
+ pdmR3DevHlp_VMSuspendSaveAndPowerOff,
+ pdmR3DevHlp_VMPowerOff,
+ pdmR3DevHlp_A20IsEnabled,
+ pdmR3DevHlp_A20Set,
+ pdmR3DevHlp_GetCpuId,
+ pdmR3DevHlp_GetMainExecutionEngine,
+ pdmR3DevHlp_TMTimeVirtGet,
+ pdmR3DevHlp_TMTimeVirtGetFreq,
+ pdmR3DevHlp_TMTimeVirtGetNano,
+ pdmR3DevHlp_TMCpuTicksPerSecond,
+ pdmR3DevHlp_GetSupDrvSession,
+ pdmR3DevHlp_QueryGenericUserObject,
+ pdmR3DevHlp_PGMHandlerPhysicalTypeRegister,
+ pdmR3DevHlp_PGMHandlerPhysicalRegister,
+ pdmR3DevHlp_PGMHandlerPhysicalDeregister,
+ pdmR3DevHlp_PGMHandlerPhysicalPageTempOff,
+ pdmR3DevHlp_PGMHandlerPhysicalReset,
+ pdmR3DevHlp_VMMRegisterPatchMemory,
+ pdmR3DevHlp_VMMDeregisterPatchMemory,
+ pdmR3DevHlp_SharedModuleRegister,
+ pdmR3DevHlp_SharedModuleUnregister,
+ pdmR3DevHlp_SharedModuleGetPageState,
+ pdmR3DevHlp_SharedModuleCheckAll,
+ pdmR3DevHlp_QueryLun,
+ pdmR3DevHlp_GIMDeviceRegister,
+ pdmR3DevHlp_GIMGetDebugSetup,
+ pdmR3DevHlp_GIMGetMmio2Regions,
+ PDM_DEVHLPR3_VERSION /* the end */
+};
+
+
+#ifdef VBOX_WITH_DBGF_TRACING
+/**
+ * The device helper structure for trusted devices - tracing variant.
+ */
+const PDMDEVHLPR3 g_pdmR3DevHlpTracing =
+{
+ PDM_DEVHLPR3_VERSION,
+ pdmR3DevHlpTracing_IoPortCreateEx,
+ pdmR3DevHlpTracing_IoPortMap,
+ pdmR3DevHlpTracing_IoPortUnmap,
+ pdmR3DevHlp_IoPortGetMappingAddress,
+ pdmR3DevHlp_IoPortWrite,
+ pdmR3DevHlpTracing_MmioCreateEx,
+ pdmR3DevHlpTracing_MmioMap,
+ pdmR3DevHlpTracing_MmioUnmap,
+ pdmR3DevHlp_MmioReduce,
+ pdmR3DevHlp_MmioGetMappingAddress,
+ pdmR3DevHlp_Mmio2Create,
+ pdmR3DevHlp_Mmio2Destroy,
+ pdmR3DevHlp_Mmio2Map,
+ pdmR3DevHlp_Mmio2Unmap,
+ pdmR3DevHlp_Mmio2Reduce,
+ pdmR3DevHlp_Mmio2GetMappingAddress,
+ pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+ pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
+ pdmR3DevHlp_Mmio2ChangeRegionNo,
+ pdmR3DevHlp_MmioMapMmio2Page,
+ pdmR3DevHlp_MmioResetRegion,
+ pdmR3DevHlp_ROMRegister,
+ pdmR3DevHlp_ROMProtectShadow,
+ pdmR3DevHlp_SSMRegister,
+ pdmR3DevHlp_SSMRegisterLegacy,
+ SSMR3PutStruct,
+ SSMR3PutStructEx,
+ SSMR3PutBool,
+ SSMR3PutU8,
+ SSMR3PutS8,
+ SSMR3PutU16,
+ SSMR3PutS16,
+ SSMR3PutU32,
+ SSMR3PutS32,
+ SSMR3PutU64,
+ SSMR3PutS64,
+ SSMR3PutU128,
+ SSMR3PutS128,
+ SSMR3PutUInt,
+ SSMR3PutSInt,
+ SSMR3PutGCUInt,
+ SSMR3PutGCUIntReg,
+ SSMR3PutGCPhys32,
+ SSMR3PutGCPhys64,
+ SSMR3PutGCPhys,
+ SSMR3PutGCPtr,
+ SSMR3PutGCUIntPtr,
+ SSMR3PutRCPtr,
+ SSMR3PutIOPort,
+ SSMR3PutSel,
+ SSMR3PutMem,
+ SSMR3PutStrZ,
+ SSMR3GetStruct,
+ SSMR3GetStructEx,
+ SSMR3GetBool,
+ SSMR3GetBoolV,
+ SSMR3GetU8,
+ SSMR3GetU8V,
+ SSMR3GetS8,
+ SSMR3GetS8V,
+ SSMR3GetU16,
+ SSMR3GetU16V,
+ SSMR3GetS16,
+ SSMR3GetS16V,
+ SSMR3GetU32,
+ SSMR3GetU32V,
+ SSMR3GetS32,
+ SSMR3GetS32V,
+ SSMR3GetU64,
+ SSMR3GetU64V,
+ SSMR3GetS64,
+ SSMR3GetS64V,
+ SSMR3GetU128,
+ SSMR3GetU128V,
+ SSMR3GetS128,
+ SSMR3GetS128V,
+ SSMR3GetGCPhys32,
+ SSMR3GetGCPhys32V,
+ SSMR3GetGCPhys64,
+ SSMR3GetGCPhys64V,
+ SSMR3GetGCPhys,
+ SSMR3GetGCPhysV,
+ SSMR3GetUInt,
+ SSMR3GetSInt,
+ SSMR3GetGCUInt,
+ SSMR3GetGCUIntReg,
+ SSMR3GetGCPtr,
+ SSMR3GetGCUIntPtr,
+ SSMR3GetRCPtr,
+ SSMR3GetIOPort,
+ SSMR3GetSel,
+ SSMR3GetMem,
+ SSMR3GetStrZ,
+ SSMR3GetStrZEx,
+ SSMR3Skip,
+ SSMR3SkipToEndOfUnit,
+ SSMR3SetLoadError,
+ SSMR3SetLoadErrorV,
+ SSMR3SetCfgError,
+ SSMR3SetCfgErrorV,
+ SSMR3HandleGetStatus,
+ SSMR3HandleGetAfter,
+ SSMR3HandleIsLiveSave,
+ SSMR3HandleMaxDowntime,
+ SSMR3HandleHostBits,
+ SSMR3HandleRevision,
+ SSMR3HandleVersion,
+ SSMR3HandleHostOSAndArch,
+ pdmR3DevHlp_TimerCreate,
+ pdmR3DevHlp_TimerFromMicro,
+ pdmR3DevHlp_TimerFromMilli,
+ pdmR3DevHlp_TimerFromNano,
+ pdmR3DevHlp_TimerGet,
+ pdmR3DevHlp_TimerGetFreq,
+ pdmR3DevHlp_TimerGetNano,
+ pdmR3DevHlp_TimerIsActive,
+ pdmR3DevHlp_TimerIsLockOwner,
+ pdmR3DevHlp_TimerLockClock,
+ pdmR3DevHlp_TimerLockClock2,
+ pdmR3DevHlp_TimerSet,
+ pdmR3DevHlp_TimerSetFrequencyHint,
+ pdmR3DevHlp_TimerSetMicro,
+ pdmR3DevHlp_TimerSetMillies,
+ pdmR3DevHlp_TimerSetNano,
+ pdmR3DevHlp_TimerSetRelative,
+ pdmR3DevHlp_TimerStop,
+ pdmR3DevHlp_TimerUnlockClock,
+ pdmR3DevHlp_TimerUnlockClock2,
+ pdmR3DevHlp_TimerSetCritSect,
+ pdmR3DevHlp_TimerSave,
+ pdmR3DevHlp_TimerLoad,
+ pdmR3DevHlp_TimerDestroy,
+ TMR3TimerSkip,
+ pdmR3DevHlp_TMUtcNow,
+ CFGMR3Exists,
+ CFGMR3QueryType,
+ CFGMR3QuerySize,
+ CFGMR3QueryInteger,
+ CFGMR3QueryIntegerDef,
+ CFGMR3QueryString,
+ CFGMR3QueryStringDef,
+ CFGMR3QueryPassword,
+ CFGMR3QueryPasswordDef,
+ CFGMR3QueryBytes,
+ CFGMR3QueryU64,
+ CFGMR3QueryU64Def,
+ CFGMR3QueryS64,
+ CFGMR3QueryS64Def,
+ CFGMR3QueryU32,
+ CFGMR3QueryU32Def,
+ CFGMR3QueryS32,
+ CFGMR3QueryS32Def,
+ CFGMR3QueryU16,
+ CFGMR3QueryU16Def,
+ CFGMR3QueryS16,
+ CFGMR3QueryS16Def,
+ CFGMR3QueryU8,
+ CFGMR3QueryU8Def,
+ CFGMR3QueryS8,
+ CFGMR3QueryS8Def,
+ CFGMR3QueryBool,
+ CFGMR3QueryBoolDef,
+ CFGMR3QueryPort,
+ CFGMR3QueryPortDef,
+ CFGMR3QueryUInt,
+ CFGMR3QueryUIntDef,
+ CFGMR3QuerySInt,
+ CFGMR3QuerySIntDef,
+ CFGMR3QueryGCPtr,
+ CFGMR3QueryGCPtrDef,
+ CFGMR3QueryGCPtrU,
+ CFGMR3QueryGCPtrUDef,
+ CFGMR3QueryGCPtrS,
+ CFGMR3QueryGCPtrSDef,
+ CFGMR3QueryStringAlloc,
+ CFGMR3QueryStringAllocDef,
+ CFGMR3GetParent,
+ CFGMR3GetChild,
+ CFGMR3GetChildF,
+ CFGMR3GetChildFV,
+ CFGMR3GetFirstChild,
+ CFGMR3GetNextChild,
+ CFGMR3GetName,
+ CFGMR3GetNameLen,
+ CFGMR3AreChildrenValid,
+ CFGMR3GetFirstValue,
+ CFGMR3GetNextValue,
+ CFGMR3GetValueName,
+ CFGMR3GetValueNameLen,
+ CFGMR3GetValueType,
+ CFGMR3AreValuesValid,
+ CFGMR3ValidateConfig,
+ pdmR3DevHlpTracing_PhysRead,
+ pdmR3DevHlpTracing_PhysWrite,
+ pdmR3DevHlp_PhysGCPhys2CCPtr,
+ pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysReleasePageMappingLock,
+ pdmR3DevHlp_PhysReadGCVirt,
+ pdmR3DevHlp_PhysWriteGCVirt,
+ pdmR3DevHlp_PhysGCPtr2GCPhys,
+ pdmR3DevHlp_PhysIsGCPhysNormal,
+ pdmR3DevHlp_PhysChangeMemBalloon,
+ pdmR3DevHlp_MMHeapAlloc,
+ pdmR3DevHlp_MMHeapAllocZ,
+ pdmR3DevHlp_MMHeapAPrintfV,
+ pdmR3DevHlp_MMHeapFree,
+ pdmR3DevHlp_MMPhysGetRamSize,
+ pdmR3DevHlp_MMPhysGetRamSizeBelow4GB,
+ pdmR3DevHlp_MMPhysGetRamSizeAbove4GB,
+ pdmR3DevHlp_VMState,
+ pdmR3DevHlp_VMTeleportedAndNotFullyResumedYet,
+ pdmR3DevHlp_VMSetErrorV,
+ pdmR3DevHlp_VMSetRuntimeErrorV,
+ pdmR3DevHlp_VMWaitForDeviceReady,
+ pdmR3DevHlp_VMNotifyCpuDeviceReady,
+ pdmR3DevHlp_VMReqCallNoWaitV,
+ pdmR3DevHlp_VMReqPriorityCallWaitV,
+ pdmR3DevHlp_DBGFStopV,
+ pdmR3DevHlp_DBGFInfoRegister,
+ pdmR3DevHlp_DBGFInfoRegisterArgv,
+ pdmR3DevHlp_DBGFRegRegister,
+ pdmR3DevHlp_DBGFTraceBuf,
+ pdmR3DevHlp_DBGFReportBugCheck,
+ pdmR3DevHlp_DBGFCoreWrite,
+ pdmR3DevHlp_DBGFInfoLogHlp,
+ pdmR3DevHlp_DBGFRegNmQueryU64,
+ pdmR3DevHlp_DBGFRegPrintfV,
+ pdmR3DevHlp_STAMRegister,
+ pdmR3DevHlp_STAMRegisterV,
+ pdmR3DevHlp_PCIRegister,
+ pdmR3DevHlp_PCIRegisterMsi,
+ pdmR3DevHlp_PCIIORegionRegister,
+ pdmR3DevHlp_PCIInterceptConfigAccesses,
+ pdmR3DevHlp_PCIConfigWrite,
+ pdmR3DevHlp_PCIConfigRead,
+ pdmR3DevHlpTracing_PCIPhysRead,
+ pdmR3DevHlpTracing_PCIPhysWrite,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlpTracing_PCISetIrq,
+ pdmR3DevHlpTracing_PCISetIrqNoWait,
+ pdmR3DevHlpTracing_ISASetIrq,
+ pdmR3DevHlpTracing_ISASetIrqNoWait,
+ pdmR3DevHlp_DriverAttach,
+ pdmR3DevHlp_DriverDetach,
+ pdmR3DevHlp_DriverReconfigure,
+ pdmR3DevHlp_QueueCreate,
+ pdmR3DevHlp_QueueAlloc,
+ pdmR3DevHlp_QueueInsert,
+ pdmR3DevHlp_QueueFlushIfNecessary,
+ pdmR3DevHlp_TaskCreate,
+ pdmR3DevHlp_TaskTrigger,
+ pdmR3DevHlp_SUPSemEventCreate,
+ pdmR3DevHlp_SUPSemEventClose,
+ pdmR3DevHlp_SUPSemEventSignal,
+ pdmR3DevHlp_SUPSemEventWaitNoResume,
+ pdmR3DevHlp_SUPSemEventWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventGetResolution,
+ pdmR3DevHlp_SUPSemEventMultiCreate,
+ pdmR3DevHlp_SUPSemEventMultiClose,
+ pdmR3DevHlp_SUPSemEventMultiSignal,
+ pdmR3DevHlp_SUPSemEventMultiReset,
+ pdmR3DevHlp_SUPSemEventMultiWaitNoResume,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventMultiGetResolution,
+ pdmR3DevHlp_CritSectInit,
+ pdmR3DevHlp_CritSectGetNop,
+ pdmR3DevHlp_SetDeviceCritSect,
+ pdmR3DevHlp_CritSectYield,
+ pdmR3DevHlp_CritSectEnter,
+ pdmR3DevHlp_CritSectEnterDebug,
+ pdmR3DevHlp_CritSectTryEnter,
+ pdmR3DevHlp_CritSectTryEnterDebug,
+ pdmR3DevHlp_CritSectLeave,
+ pdmR3DevHlp_CritSectIsOwner,
+ pdmR3DevHlp_CritSectIsInitialized,
+ pdmR3DevHlp_CritSectHasWaiters,
+ pdmR3DevHlp_CritSectGetRecursion,
+ pdmR3DevHlp_CritSectScheduleExitEvent,
+ pdmR3DevHlp_CritSectDelete,
+ pdmR3DevHlp_CritSectRwInit,
+ pdmR3DevHlp_CritSectRwDelete,
+ pdmR3DevHlp_CritSectRwEnterShared,
+ pdmR3DevHlp_CritSectRwEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwTryEnterShared,
+ pdmR3DevHlp_CritSectRwTryEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwLeaveShared,
+ pdmR3DevHlp_CritSectRwEnterExcl,
+ pdmR3DevHlp_CritSectRwEnterExclDebug,
+ pdmR3DevHlp_CritSectRwTryEnterExcl,
+ pdmR3DevHlp_CritSectRwTryEnterExclDebug,
+ pdmR3DevHlp_CritSectRwLeaveExcl,
+ pdmR3DevHlp_CritSectRwIsWriteOwner,
+ pdmR3DevHlp_CritSectRwIsReadOwner,
+ pdmR3DevHlp_CritSectRwGetWriteRecursion,
+ pdmR3DevHlp_CritSectRwGetWriterReadRecursion,
+ pdmR3DevHlp_CritSectRwGetReadCount,
+ pdmR3DevHlp_CritSectRwIsInitialized,
+ pdmR3DevHlp_ThreadCreate,
+ PDMR3ThreadDestroy,
+ PDMR3ThreadIAmSuspending,
+ PDMR3ThreadIAmRunning,
+ PDMR3ThreadSleep,
+ PDMR3ThreadSuspend,
+ PDMR3ThreadResume,
+ pdmR3DevHlp_SetAsyncNotification,
+ pdmR3DevHlp_AsyncNotificationCompleted,
+ pdmR3DevHlp_RTCRegister,
+ pdmR3DevHlp_PCIBusRegister,
+ pdmR3DevHlp_IommuRegister,
+ pdmR3DevHlp_PICRegister,
+ pdmR3DevHlp_ApicRegister,
+ pdmR3DevHlp_IoApicRegister,
+ pdmR3DevHlp_HpetRegister,
+ pdmR3DevHlp_PciRawRegister,
+ pdmR3DevHlp_DMACRegister,
+ pdmR3DevHlp_DMARegister,
+ pdmR3DevHlp_DMAReadMemory,
+ pdmR3DevHlp_DMAWriteMemory,
+ pdmR3DevHlp_DMASetDREQ,
+ pdmR3DevHlp_DMAGetChannelMode,
+ pdmR3DevHlp_DMASchedule,
+ pdmR3DevHlp_CMOSWrite,
+ pdmR3DevHlp_CMOSRead,
+ pdmR3DevHlp_AssertEMT,
+ pdmR3DevHlp_AssertOther,
+ pdmR3DevHlp_LdrGetRCInterfaceSymbols,
+ pdmR3DevHlp_LdrGetR0InterfaceSymbols,
+ pdmR3DevHlp_CallR0,
+ pdmR3DevHlp_VMGetSuspendReason,
+ pdmR3DevHlp_VMGetResumeReason,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
+ pdmR3DevHlp_CpuGetGuestMicroarch,
+ pdmR3DevHlp_CpuGetGuestAddrWidths,
+ pdmR3DevHlp_CpuGetGuestScalableBusFrequency,
+ pdmR3DevHlp_STAMDeregisterByPrefix,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ pdmR3DevHlp_GetUVM,
+ pdmR3DevHlp_GetVM,
+ pdmR3DevHlp_GetVMCPU,
+ pdmR3DevHlp_GetCurrentCpuId,
+ pdmR3DevHlp_RegisterVMMDevHeap,
+ pdmR3DevHlp_FirmwareRegister,
+ pdmR3DevHlp_VMReset,
+ pdmR3DevHlp_VMSuspend,
+ pdmR3DevHlp_VMSuspendSaveAndPowerOff,
+ pdmR3DevHlp_VMPowerOff,
+ pdmR3DevHlp_A20IsEnabled,
+ pdmR3DevHlp_A20Set,
+ pdmR3DevHlp_GetCpuId,
+ pdmR3DevHlp_GetMainExecutionEngine,
+ pdmR3DevHlp_TMTimeVirtGet,
+ pdmR3DevHlp_TMTimeVirtGetFreq,
+ pdmR3DevHlp_TMTimeVirtGetNano,
+ pdmR3DevHlp_TMCpuTicksPerSecond,
+ pdmR3DevHlp_GetSupDrvSession,
+ pdmR3DevHlp_QueryGenericUserObject,
+ pdmR3DevHlp_PGMHandlerPhysicalTypeRegister,
+ pdmR3DevHlp_PGMHandlerPhysicalRegister,
+ pdmR3DevHlp_PGMHandlerPhysicalDeregister,
+ pdmR3DevHlp_PGMHandlerPhysicalPageTempOff,
+ pdmR3DevHlp_PGMHandlerPhysicalReset,
+ pdmR3DevHlp_VMMRegisterPatchMemory,
+ pdmR3DevHlp_VMMDeregisterPatchMemory,
+ pdmR3DevHlp_SharedModuleRegister,
+ pdmR3DevHlp_SharedModuleUnregister,
+ pdmR3DevHlp_SharedModuleGetPageState,
+ pdmR3DevHlp_SharedModuleCheckAll,
+ pdmR3DevHlp_QueryLun,
+ pdmR3DevHlp_GIMDeviceRegister,
+ pdmR3DevHlp_GIMGetDebugSetup,
+ pdmR3DevHlp_GIMGetMmio2Regions,
+ PDM_DEVHLPR3_VERSION /* the end */
+};
+#endif /* VBOX_WITH_DBGF_TRACING */
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetUVM} */
+static DECLCALLBACK(PUVM) pdmR3DevHlp_Untrusted_GetUVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return NULL;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetVM} */
+static DECLCALLBACK(PVM) pdmR3DevHlp_Untrusted_GetVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return NULL;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetVMCPU} */
+static DECLCALLBACK(PVMCPU) pdmR3DevHlp_Untrusted_GetVMCPU(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return NULL;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetCurrentCpuId} */
+static DECLCALLBACK(VMCPUID) pdmR3DevHlp_Untrusted_GetCurrentCpuId(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return NIL_VMCPUID;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnRegisterVMMDevHeap} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_RegisterVMMDevHeap(PPDMDEVINS pDevIns, RTGCPHYS GCPhys,
+ RTR3PTR pvHeap, unsigned cbHeap)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ NOREF(GCPhys); NOREF(pvHeap); NOREF(cbHeap);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnFirmwareRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_FirmwareRegister(PPDMDEVINS pDevIns, PCPDMFWREG pFwReg, PCPDMFWHLPR3 *ppFwHlp)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ NOREF(pFwReg); NOREF(ppFwHlp);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMReset} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMReset(PPDMDEVINS pDevIns, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns); NOREF(fFlags);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSuspend} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMSuspend(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMSuspendSaveAndPowerOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMSuspendSaveAndPowerOff(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMPowerOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMPowerOff(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnA20IsEnabled} */
+static DECLCALLBACK(bool) pdmR3DevHlp_Untrusted_A20IsEnabled(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnA20Set} */
+static DECLCALLBACK(void) pdmR3DevHlp_Untrusted_A20Set(PPDMDEVINS pDevIns, bool fEnable)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ NOREF(fEnable);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetCpuId} */
+static DECLCALLBACK(void) pdmR3DevHlp_Untrusted_GetCpuId(PPDMDEVINS pDevIns, uint32_t iLeaf,
+ uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ NOREF(iLeaf); NOREF(pEax); NOREF(pEbx); NOREF(pEcx); NOREF(pEdx);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetMainExecutionEngine} */
+static DECLCALLBACK(uint8_t) pdmR3DevHlp_Untrusted_GetMainExecutionEngine(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VM_EXEC_ENGINE_NOT_SET;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetSupDrvSession} */
+static DECLCALLBACK(PSUPDRVSESSION) pdmR3DevHlp_Untrusted_GetSupDrvSession(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return (PSUPDRVSESSION)0;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueryGenericUserObject} */
+static DECLCALLBACK(void *) pdmR3DevHlp_Untrusted_QueryGenericUserObject(PPDMDEVINS pDevIns, PCRTUUID pUuid)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d %RTuuid\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pUuid));
+ return NULL;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalTypeRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_PGMHandlerPhysicalTypeRegister(PPDMDEVINS pDevIns, PGMPHYSHANDLERKIND enmKind,
+ PFNPGMPHYSHANDLER pfnHandler,
+ const char *pszDesc, PPGMPHYSHANDLERTYPE phType)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+    RT_NOREF(enmKind, pfnHandler, pszDesc);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ *phType = NIL_PGMPHYSHANDLERTYPE;
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_PGMHandlerPhysicalRegister(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
+ PGMPHYSHANDLERTYPE hType, R3PTRTYPE(const char *) pszDesc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPhys, GCPhysLast, hType, pszDesc);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalDeregister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_PGMHandlerPhysicalDeregister(PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPhys);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalPageTempOff} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_PGMHandlerPhysicalPageTempOff(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPhys, GCPhysPage);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPGMHandlerPhysicalReset} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_PGMHandlerPhysicalReset(PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPhys);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMMRegisterPatchMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMMRegisterPatchMemory(PPDMDEVINS pDevIns, RTGCPTR GCPtrPatchMem, uint32_t cbPatchMem)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPtrPatchMem, cbPatchMem);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnVMMDeregisterPatchMemory} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_VMMDeregisterPatchMemory(PPDMDEVINS pDevIns, RTGCPTR GCPtrPatchMem, uint32_t cbPatchMem)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPtrPatchMem, cbPatchMem);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_SharedModuleRegister(PPDMDEVINS pDevIns, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
+ RTGCPTR GCBaseAddr, uint32_t cbModule,
+ uint32_t cRegions, VMMDEVSHAREDREGIONDESC const *paRegions)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(enmGuestOS, pszModuleName, pszVersion, GCBaseAddr, cbModule, cRegions, paRegions);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleUnregister} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_SharedModuleUnregister(PPDMDEVINS pDevIns, char *pszModuleName, char *pszVersion,
+ RTGCPTR GCBaseAddr, uint32_t cbModule)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pszModuleName, pszVersion, GCBaseAddr, cbModule);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleGetPageState} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_SharedModuleGetPageState(PPDMDEVINS pDevIns, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(GCPtrPage, pfShared, pfPageFlags);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnSharedModuleCheckAll} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_SharedModuleCheckAll(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnQueryLun} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_QueryLun(PPDMDEVINS pDevIns, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pszDevice, iInstance, iLun, ppBase);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMDeviceRegister} */
+static DECLCALLBACK(void) pdmR3DevHlp_Untrusted_GIMDeviceRegister(PPDMDEVINS pDevIns, PGIMDEBUG pDbg)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDbg);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMGetDebugSetup} */
+static DECLCALLBACK(int) pdmR3DevHlp_Untrusted_GIMGetDebugSetup(PPDMDEVINS pDevIns, PGIMDEBUGSETUP pDbgSetup)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pDbgSetup);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return VERR_ACCESS_DENIED;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGIMGetMmio2Regions} */
+static DECLCALLBACK(PGIMMMIO2REGION) pdmR3DevHlp_Untrusted_GIMGetMmio2Regions(PPDMDEVINS pDevIns, uint32_t *pcRegions)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RT_NOREF(pcRegions);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+ return NULL;
+}
+
+
+/**
+ * The device helper structure for non-trusted devices. Helpers exposing
+ * privileged VM functionality are routed to the pdmR3DevHlp_Untrusted_* stubs
+ * above, which raise a release assertion and (where applicable) fail the call
+ * with VERR_ACCESS_DENIED, while harmless helpers use the regular trusted
+ * implementations.
+ */
+const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted =
+{
+ PDM_DEVHLPR3_VERSION,
+ pdmR3DevHlp_IoPortCreateEx,
+ pdmR3DevHlp_IoPortMap,
+ pdmR3DevHlp_IoPortUnmap,
+ pdmR3DevHlp_IoPortGetMappingAddress,
+ pdmR3DevHlp_IoPortWrite,
+ pdmR3DevHlp_MmioCreateEx,
+ pdmR3DevHlp_MmioMap,
+ pdmR3DevHlp_MmioUnmap,
+ pdmR3DevHlp_MmioReduce,
+ pdmR3DevHlp_MmioGetMappingAddress,
+ pdmR3DevHlp_Mmio2Create,
+ pdmR3DevHlp_Mmio2Destroy,
+ pdmR3DevHlp_Mmio2Map,
+ pdmR3DevHlp_Mmio2Unmap,
+ pdmR3DevHlp_Mmio2Reduce,
+ pdmR3DevHlp_Mmio2GetMappingAddress,
+ pdmR3DevHlp_Mmio2QueryAndResetDirtyBitmap,
+ pdmR3DevHlp_Mmio2ControlDirtyPageTracking,
+ pdmR3DevHlp_Mmio2ChangeRegionNo,
+ pdmR3DevHlp_MmioMapMmio2Page,
+ pdmR3DevHlp_MmioResetRegion,
+ pdmR3DevHlp_ROMRegister,
+ pdmR3DevHlp_ROMProtectShadow,
+ pdmR3DevHlp_SSMRegister,
+ pdmR3DevHlp_SSMRegisterLegacy,
+ SSMR3PutStruct,
+ SSMR3PutStructEx,
+ SSMR3PutBool,
+ SSMR3PutU8,
+ SSMR3PutS8,
+ SSMR3PutU16,
+ SSMR3PutS16,
+ SSMR3PutU32,
+ SSMR3PutS32,
+ SSMR3PutU64,
+ SSMR3PutS64,
+ SSMR3PutU128,
+ SSMR3PutS128,
+ SSMR3PutUInt,
+ SSMR3PutSInt,
+ SSMR3PutGCUInt,
+ SSMR3PutGCUIntReg,
+ SSMR3PutGCPhys32,
+ SSMR3PutGCPhys64,
+ SSMR3PutGCPhys,
+ SSMR3PutGCPtr,
+ SSMR3PutGCUIntPtr,
+ SSMR3PutRCPtr,
+ SSMR3PutIOPort,
+ SSMR3PutSel,
+ SSMR3PutMem,
+ SSMR3PutStrZ,
+ SSMR3GetStruct,
+ SSMR3GetStructEx,
+ SSMR3GetBool,
+ SSMR3GetBoolV,
+ SSMR3GetU8,
+ SSMR3GetU8V,
+ SSMR3GetS8,
+ SSMR3GetS8V,
+ SSMR3GetU16,
+ SSMR3GetU16V,
+ SSMR3GetS16,
+ SSMR3GetS16V,
+ SSMR3GetU32,
+ SSMR3GetU32V,
+ SSMR3GetS32,
+ SSMR3GetS32V,
+ SSMR3GetU64,
+ SSMR3GetU64V,
+ SSMR3GetS64,
+ SSMR3GetS64V,
+ SSMR3GetU128,
+ SSMR3GetU128V,
+ SSMR3GetS128,
+ SSMR3GetS128V,
+ SSMR3GetGCPhys32,
+ SSMR3GetGCPhys32V,
+ SSMR3GetGCPhys64,
+ SSMR3GetGCPhys64V,
+ SSMR3GetGCPhys,
+ SSMR3GetGCPhysV,
+ SSMR3GetUInt,
+ SSMR3GetSInt,
+ SSMR3GetGCUInt,
+ SSMR3GetGCUIntReg,
+ SSMR3GetGCPtr,
+ SSMR3GetGCUIntPtr,
+ SSMR3GetRCPtr,
+ SSMR3GetIOPort,
+ SSMR3GetSel,
+ SSMR3GetMem,
+ SSMR3GetStrZ,
+ SSMR3GetStrZEx,
+ SSMR3Skip,
+ SSMR3SkipToEndOfUnit,
+ SSMR3SetLoadError,
+ SSMR3SetLoadErrorV,
+ SSMR3SetCfgError,
+ SSMR3SetCfgErrorV,
+ SSMR3HandleGetStatus,
+ SSMR3HandleGetAfter,
+ SSMR3HandleIsLiveSave,
+ SSMR3HandleMaxDowntime,
+ SSMR3HandleHostBits,
+ SSMR3HandleRevision,
+ SSMR3HandleVersion,
+ SSMR3HandleHostOSAndArch,
+ pdmR3DevHlp_TimerCreate,
+ pdmR3DevHlp_TimerFromMicro,
+ pdmR3DevHlp_TimerFromMilli,
+ pdmR3DevHlp_TimerFromNano,
+ pdmR3DevHlp_TimerGet,
+ pdmR3DevHlp_TimerGetFreq,
+ pdmR3DevHlp_TimerGetNano,
+ pdmR3DevHlp_TimerIsActive,
+ pdmR3DevHlp_TimerIsLockOwner,
+ pdmR3DevHlp_TimerLockClock,
+ pdmR3DevHlp_TimerLockClock2,
+ pdmR3DevHlp_TimerSet,
+ pdmR3DevHlp_TimerSetFrequencyHint,
+ pdmR3DevHlp_TimerSetMicro,
+ pdmR3DevHlp_TimerSetMillies,
+ pdmR3DevHlp_TimerSetNano,
+ pdmR3DevHlp_TimerSetRelative,
+ pdmR3DevHlp_TimerStop,
+ pdmR3DevHlp_TimerUnlockClock,
+ pdmR3DevHlp_TimerUnlockClock2,
+ pdmR3DevHlp_TimerSetCritSect,
+ pdmR3DevHlp_TimerSave,
+ pdmR3DevHlp_TimerLoad,
+ pdmR3DevHlp_TimerDestroy,
+ TMR3TimerSkip,
+ pdmR3DevHlp_TMUtcNow,
+ CFGMR3Exists,
+ CFGMR3QueryType,
+ CFGMR3QuerySize,
+ CFGMR3QueryInteger,
+ CFGMR3QueryIntegerDef,
+ CFGMR3QueryString,
+ CFGMR3QueryStringDef,
+ CFGMR3QueryPassword,
+ CFGMR3QueryPasswordDef,
+ CFGMR3QueryBytes,
+ CFGMR3QueryU64,
+ CFGMR3QueryU64Def,
+ CFGMR3QueryS64,
+ CFGMR3QueryS64Def,
+ CFGMR3QueryU32,
+ CFGMR3QueryU32Def,
+ CFGMR3QueryS32,
+ CFGMR3QueryS32Def,
+ CFGMR3QueryU16,
+ CFGMR3QueryU16Def,
+ CFGMR3QueryS16,
+ CFGMR3QueryS16Def,
+ CFGMR3QueryU8,
+ CFGMR3QueryU8Def,
+ CFGMR3QueryS8,
+ CFGMR3QueryS8Def,
+ CFGMR3QueryBool,
+ CFGMR3QueryBoolDef,
+ CFGMR3QueryPort,
+ CFGMR3QueryPortDef,
+ CFGMR3QueryUInt,
+ CFGMR3QueryUIntDef,
+ CFGMR3QuerySInt,
+ CFGMR3QuerySIntDef,
+ CFGMR3QueryGCPtr,
+ CFGMR3QueryGCPtrDef,
+ CFGMR3QueryGCPtrU,
+ CFGMR3QueryGCPtrUDef,
+ CFGMR3QueryGCPtrS,
+ CFGMR3QueryGCPtrSDef,
+ CFGMR3QueryStringAlloc,
+ CFGMR3QueryStringAllocDef,
+ CFGMR3GetParent,
+ CFGMR3GetChild,
+ CFGMR3GetChildF,
+ CFGMR3GetChildFV,
+ CFGMR3GetFirstChild,
+ CFGMR3GetNextChild,
+ CFGMR3GetName,
+ CFGMR3GetNameLen,
+ CFGMR3AreChildrenValid,
+ CFGMR3GetFirstValue,
+ CFGMR3GetNextValue,
+ CFGMR3GetValueName,
+ CFGMR3GetValueNameLen,
+ CFGMR3GetValueType,
+ CFGMR3AreValuesValid,
+ CFGMR3ValidateConfig,
+ pdmR3DevHlp_PhysRead,
+ pdmR3DevHlp_PhysWrite,
+ pdmR3DevHlp_PhysGCPhys2CCPtr,
+ pdmR3DevHlp_PhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysReleasePageMappingLock,
+ pdmR3DevHlp_PhysReadGCVirt,
+ pdmR3DevHlp_PhysWriteGCVirt,
+ pdmR3DevHlp_PhysGCPtr2GCPhys,
+ pdmR3DevHlp_PhysIsGCPhysNormal,
+ pdmR3DevHlp_PhysChangeMemBalloon,
+ pdmR3DevHlp_MMHeapAlloc,
+ pdmR3DevHlp_MMHeapAllocZ,
+ pdmR3DevHlp_MMHeapAPrintfV,
+ pdmR3DevHlp_MMHeapFree,
+ pdmR3DevHlp_MMPhysGetRamSize,
+ pdmR3DevHlp_MMPhysGetRamSizeBelow4GB,
+ pdmR3DevHlp_MMPhysGetRamSizeAbove4GB,
+ pdmR3DevHlp_VMState,
+ pdmR3DevHlp_VMTeleportedAndNotFullyResumedYet,
+ pdmR3DevHlp_VMSetErrorV,
+ pdmR3DevHlp_VMSetRuntimeErrorV,
+ pdmR3DevHlp_VMWaitForDeviceReady,
+ pdmR3DevHlp_VMNotifyCpuDeviceReady,
+ pdmR3DevHlp_VMReqCallNoWaitV,
+ pdmR3DevHlp_VMReqPriorityCallWaitV,
+ pdmR3DevHlp_DBGFStopV,
+ pdmR3DevHlp_DBGFInfoRegister,
+ pdmR3DevHlp_DBGFInfoRegisterArgv,
+ pdmR3DevHlp_DBGFRegRegister,
+ pdmR3DevHlp_DBGFTraceBuf,
+ pdmR3DevHlp_DBGFReportBugCheck,
+ pdmR3DevHlp_DBGFCoreWrite,
+ pdmR3DevHlp_DBGFInfoLogHlp,
+ pdmR3DevHlp_DBGFRegNmQueryU64,
+ pdmR3DevHlp_DBGFRegPrintfV,
+ pdmR3DevHlp_STAMRegister,
+ pdmR3DevHlp_STAMRegisterV,
+ pdmR3DevHlp_PCIRegister,
+ pdmR3DevHlp_PCIRegisterMsi,
+ pdmR3DevHlp_PCIIORegionRegister,
+ pdmR3DevHlp_PCIInterceptConfigAccesses,
+ pdmR3DevHlp_PCIConfigWrite,
+ pdmR3DevHlp_PCIConfigRead,
+ pdmR3DevHlp_PCIPhysRead,
+ pdmR3DevHlp_PCIPhysWrite,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PCIPhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PCISetIrq,
+ pdmR3DevHlp_PCISetIrqNoWait,
+ pdmR3DevHlp_ISASetIrq,
+ pdmR3DevHlp_ISASetIrqNoWait,
+ pdmR3DevHlp_DriverAttach,
+ pdmR3DevHlp_DriverDetach,
+ pdmR3DevHlp_DriverReconfigure,
+ pdmR3DevHlp_QueueCreate,
+ pdmR3DevHlp_QueueAlloc,
+ pdmR3DevHlp_QueueInsert,
+ pdmR3DevHlp_QueueFlushIfNecessary,
+ pdmR3DevHlp_TaskCreate,
+ pdmR3DevHlp_TaskTrigger,
+ pdmR3DevHlp_SUPSemEventCreate,
+ pdmR3DevHlp_SUPSemEventClose,
+ pdmR3DevHlp_SUPSemEventSignal,
+ pdmR3DevHlp_SUPSemEventWaitNoResume,
+ pdmR3DevHlp_SUPSemEventWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventGetResolution,
+ pdmR3DevHlp_SUPSemEventMultiCreate,
+ pdmR3DevHlp_SUPSemEventMultiClose,
+ pdmR3DevHlp_SUPSemEventMultiSignal,
+ pdmR3DevHlp_SUPSemEventMultiReset,
+ pdmR3DevHlp_SUPSemEventMultiWaitNoResume,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsAbsIntr,
+ pdmR3DevHlp_SUPSemEventMultiWaitNsRelIntr,
+ pdmR3DevHlp_SUPSemEventMultiGetResolution,
+ pdmR3DevHlp_CritSectInit,
+ pdmR3DevHlp_CritSectGetNop,
+ pdmR3DevHlp_SetDeviceCritSect,
+ pdmR3DevHlp_CritSectYield,
+ pdmR3DevHlp_CritSectEnter,
+ pdmR3DevHlp_CritSectEnterDebug,
+ pdmR3DevHlp_CritSectTryEnter,
+ pdmR3DevHlp_CritSectTryEnterDebug,
+ pdmR3DevHlp_CritSectLeave,
+ pdmR3DevHlp_CritSectIsOwner,
+ pdmR3DevHlp_CritSectIsInitialized,
+ pdmR3DevHlp_CritSectHasWaiters,
+ pdmR3DevHlp_CritSectGetRecursion,
+ pdmR3DevHlp_CritSectScheduleExitEvent,
+ pdmR3DevHlp_CritSectDelete,
+ pdmR3DevHlp_CritSectRwInit,
+ pdmR3DevHlp_CritSectRwDelete,
+ pdmR3DevHlp_CritSectRwEnterShared,
+ pdmR3DevHlp_CritSectRwEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwTryEnterShared,
+ pdmR3DevHlp_CritSectRwTryEnterSharedDebug,
+ pdmR3DevHlp_CritSectRwLeaveShared,
+ pdmR3DevHlp_CritSectRwEnterExcl,
+ pdmR3DevHlp_CritSectRwEnterExclDebug,
+ pdmR3DevHlp_CritSectRwTryEnterExcl,
+ pdmR3DevHlp_CritSectRwTryEnterExclDebug,
+ pdmR3DevHlp_CritSectRwLeaveExcl,
+ pdmR3DevHlp_CritSectRwIsWriteOwner,
+ pdmR3DevHlp_CritSectRwIsReadOwner,
+ pdmR3DevHlp_CritSectRwGetWriteRecursion,
+ pdmR3DevHlp_CritSectRwGetWriterReadRecursion,
+ pdmR3DevHlp_CritSectRwGetReadCount,
+ pdmR3DevHlp_CritSectRwIsInitialized,
+ pdmR3DevHlp_ThreadCreate,
+ PDMR3ThreadDestroy,
+ PDMR3ThreadIAmSuspending,
+ PDMR3ThreadIAmRunning,
+ PDMR3ThreadSleep,
+ PDMR3ThreadSuspend,
+ PDMR3ThreadResume,
+ pdmR3DevHlp_SetAsyncNotification,
+ pdmR3DevHlp_AsyncNotificationCompleted,
+ pdmR3DevHlp_RTCRegister,
+ pdmR3DevHlp_PCIBusRegister,
+ pdmR3DevHlp_IommuRegister,
+ pdmR3DevHlp_PICRegister,
+ pdmR3DevHlp_ApicRegister,
+ pdmR3DevHlp_IoApicRegister,
+ pdmR3DevHlp_HpetRegister,
+ pdmR3DevHlp_PciRawRegister,
+ pdmR3DevHlp_DMACRegister,
+ pdmR3DevHlp_DMARegister,
+ pdmR3DevHlp_DMAReadMemory,
+ pdmR3DevHlp_DMAWriteMemory,
+ pdmR3DevHlp_DMASetDREQ,
+ pdmR3DevHlp_DMAGetChannelMode,
+ pdmR3DevHlp_DMASchedule,
+ pdmR3DevHlp_CMOSWrite,
+ pdmR3DevHlp_CMOSRead,
+ pdmR3DevHlp_AssertEMT,
+ pdmR3DevHlp_AssertOther,
+ pdmR3DevHlp_LdrGetRCInterfaceSymbols,
+ pdmR3DevHlp_LdrGetR0InterfaceSymbols,
+ pdmR3DevHlp_CallR0,
+ pdmR3DevHlp_VMGetSuspendReason,
+ pdmR3DevHlp_VMGetResumeReason,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtr,
+ pdmR3DevHlp_PhysBulkGCPhys2CCPtrReadOnly,
+ pdmR3DevHlp_PhysBulkReleasePageMappingLocks,
+ pdmR3DevHlp_CpuGetGuestMicroarch,
+ pdmR3DevHlp_CpuGetGuestAddrWidths,
+ pdmR3DevHlp_CpuGetGuestScalableBusFrequency,
+ pdmR3DevHlp_STAMDeregisterByPrefix,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ pdmR3DevHlp_Untrusted_GetUVM,
+ pdmR3DevHlp_Untrusted_GetVM,
+ pdmR3DevHlp_Untrusted_GetVMCPU,
+ pdmR3DevHlp_Untrusted_GetCurrentCpuId,
+ pdmR3DevHlp_Untrusted_RegisterVMMDevHeap,
+ pdmR3DevHlp_Untrusted_FirmwareRegister,
+ pdmR3DevHlp_Untrusted_VMReset,
+ pdmR3DevHlp_Untrusted_VMSuspend,
+ pdmR3DevHlp_Untrusted_VMSuspendSaveAndPowerOff,
+ pdmR3DevHlp_Untrusted_VMPowerOff,
+ pdmR3DevHlp_Untrusted_A20IsEnabled,
+ pdmR3DevHlp_Untrusted_A20Set,
+ pdmR3DevHlp_Untrusted_GetCpuId,
+ pdmR3DevHlp_Untrusted_GetMainExecutionEngine,
+ pdmR3DevHlp_TMTimeVirtGet,
+ pdmR3DevHlp_TMTimeVirtGetFreq,
+ pdmR3DevHlp_TMTimeVirtGetNano,
+ pdmR3DevHlp_TMCpuTicksPerSecond,
+ pdmR3DevHlp_Untrusted_GetSupDrvSession,
+ pdmR3DevHlp_Untrusted_QueryGenericUserObject,
+ pdmR3DevHlp_Untrusted_PGMHandlerPhysicalTypeRegister,
+ pdmR3DevHlp_Untrusted_PGMHandlerPhysicalRegister,
+ pdmR3DevHlp_Untrusted_PGMHandlerPhysicalDeregister,
+ pdmR3DevHlp_Untrusted_PGMHandlerPhysicalPageTempOff,
+ pdmR3DevHlp_Untrusted_PGMHandlerPhysicalReset,
+ pdmR3DevHlp_Untrusted_VMMRegisterPatchMemory,
+ pdmR3DevHlp_Untrusted_VMMDeregisterPatchMemory,
+ pdmR3DevHlp_Untrusted_SharedModuleRegister,
+ pdmR3DevHlp_Untrusted_SharedModuleUnregister,
+ pdmR3DevHlp_Untrusted_SharedModuleGetPageState,
+ pdmR3DevHlp_Untrusted_SharedModuleCheckAll,
+ pdmR3DevHlp_Untrusted_QueryLun,
+ pdmR3DevHlp_Untrusted_GIMDeviceRegister,
+ pdmR3DevHlp_Untrusted_GIMGetDebugSetup,
+ pdmR3DevHlp_Untrusted_GIMGetMmio2Regions,
+ PDM_DEVHLPR3_VERSION /* the end */
+};
+
+
+
+/**
+ * Queue consumer callback for internal component.
+ *
+ * @returns Success indicator.
+ * If false the item will not be removed and the flushing will stop.
+ * @param pVM The cross context VM structure.
+ * @param pItem The item to consume. Upon return this item will be freed.
+ */
+DECLCALLBACK(bool) pdmR3DevHlpQueueConsumer(PVM pVM, PPDMQUEUEITEMCORE pItem)
+{
+ PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)pItem;
+ LogFlow(("pdmR3DevHlpQueueConsumer: enmOp=%d pDevIns=%p\n", pTask->enmOp, pTask->pDevInsR3));
+ switch (pTask->enmOp)
+ {
+ case PDMDEVHLPTASKOP_ISA_SET_IRQ:
+ PDMIsaSetIrq(pVM, pTask->u.IsaSetIrq.iIrq, pTask->u.IsaSetIrq.iLevel, pTask->u.IsaSetIrq.uTagSrc);
+ break;
+
+ case PDMDEVHLPTASKOP_PCI_SET_IRQ:
+ {
+ /* Same as pdmR3DevHlp_PCISetIrq, except we've got a tag already. */
+ PPDMDEVINSR3 pDevIns = pTask->pDevInsR3;
+ PPDMPCIDEV pPciDev = pTask->u.PciSetIrq.idxPciDev < RT_ELEMENTS(pDevIns->apPciDevs)
+ ? pDevIns->apPciDevs[pTask->u.PciSetIrq.idxPciDev] : NULL;
+ if (pPciDev)
+ {
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertBreak(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses));
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ pdmLock(pVM);
+ pBus->pfnSetIrqR3(pBus->pDevInsR3, pPciDev, pTask->u.PciSetIrq.iIrq,
+ pTask->u.PciSetIrq.iLevel, pTask->u.PciSetIrq.uTagSrc);
+ pdmUnlock(pVM);
+ }
+ else
+ AssertReleaseMsgFailed(("No PCI device given! (%#x)\n", pPciDev->Int.s.idxSubDev));
+ break;
+ }
+
+ case PDMDEVHLPTASKOP_IOAPIC_SET_IRQ:
+ {
+ PDMIoApicSetIrq(pVM, pTask->u.IoApicSetIrq.uBusDevFn, pTask->u.IoApicSetIrq.iIrq, pTask->u.IoApicSetIrq.iLevel,
+ pTask->u.IoApicSetIrq.uTagSrc);
+ break;
+ }
+
+ case PDMDEVHLPTASKOP_IOAPIC_SEND_MSI:
+ {
+ PDMIoApicSendMsi(pVM, pTask->u.IoApicSendMsi.uBusDevFn, &pTask->u.IoApicSendMsi.Msi, pTask->u.IoApicSendMsi.uTagSrc);
+ break;
+ }
+
+ case PDMDEVHLPTASKOP_IOAPIC_SET_EOI:
+ {
+ PDMIoApicBroadcastEoi(pVM, pTask->u.IoApicSetEoi.uVector);
+ break;
+ }
+
+ default:
+ AssertReleaseMsgFailed(("Invalid operation %d\n", pTask->enmOp));
+ break;
+ }
+ return true;
+}
+
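+/*
+ * For reference, a minimal sketch of the producer side that feeds this
+ * consumer. The allocation helper and the queue handle name are assumptions
+ * here (the real enqueueing is done by the ring-0/raw-mode device helpers),
+ * but the payload is the PDMDEVHLPTASK consumed above:
+ *
+ * @code
+ *    PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM, pVM->pdm.s.hDevHlpQueue, pVM); // assumed API/handle
+ *    if (pTask)
+ *    {
+ *        pTask->enmOp               = PDMDEVHLPTASKOP_ISA_SET_IRQ;
+ *        pTask->pDevInsR3           = pDevIns;
+ *        pTask->u.IsaSetIrq.iIrq    = iIrq;
+ *        pTask->u.IsaSetIrq.iLevel  = iLevel;
+ *        pTask->u.IsaSetIrq.uTagSrc = uTagSrc;
+ *        PDMQueueInsert(pVM, pVM->pdm.s.hDevHlpQueue, pVM, &pTask->Core);
+ *    }
+ * @endcode
+ */
+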
+/** @} */
+
diff --git a/src/VBox/VMM/VMMR3/PDMDevHlpTracing.cpp b/src/VBox/VMM/VMMR3/PDMDevHlpTracing.cpp
new file mode 100644
index 00000000..aa7ef0c0
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMDevHlpTracing.cpp
@@ -0,0 +1,587 @@
+/* $Id: PDMDevHlpTracing.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, Device Helper variants when tracing is enabled.
+ */
+
+/*
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DEVICE
+#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/version.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#include "dtrace/VBoxVMM.h"
+#include "PDMInline.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name R3 DevHlp
+ * @{
+ */
+
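+/*
+ * How these tracing variants work: each I/O port / MMIO registration stashes
+ * the device's original callbacks and user argument in a PDMDEVINSDBGFTRACK
+ * entry and registers the pdmR3DevHlpTracing_* shims below with that entry as
+ * the user argument. The shims forward to the original callback and, on
+ * success, emit a DBGF tracer event describing the access.
+ */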
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_IoPortNewIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(!pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rcStrict = pTrack->u.IoPort.pfnIn(pDevIns, pTrack->pvUser, offPort, pu32, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtIoPortRead(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.IoPort.hIoPorts, offPort, pu32, cb);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_IoPortNewInStr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint8_t *pbDst,
+ uint32_t *pcTransfers, unsigned cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(!pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ uint32_t cTransfersReq = *pcTransfers;
+ VBOXSTRICTRC rcStrict = pTrack->u.IoPort.pfnInStr(pDevIns, pTrack->pvUser, offPort, pbDst, pcTransfers, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtIoPortReadStr(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.IoPort.hIoPorts, offPort, pbDst, cb,
+ cTransfersReq, cTransfersReq - *pcTransfers);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_IoPortNewOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(!pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rcStrict = pTrack->u.IoPort.pfnOut(pDevIns, pTrack->pvUser, offPort, u32, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtIoPortWrite(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.IoPort.hIoPorts, offPort, &u32, cb);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_IoPortNewOutStr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, const uint8_t *pbSrc,
+ uint32_t *pcTransfers, unsigned cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(!pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ uint32_t cTransfersReq = *pcTransfers;
+ VBOXSTRICTRC rcStrict = pTrack->u.IoPort.pfnOutStr(pDevIns, pTrack->pvUser, offPort, pbSrc, pcTransfers, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtIoPortWriteStr(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.IoPort.hIoPorts, offPort, pbSrc, cb,
+ cTransfersReq, cTransfersReq - *pcTransfers);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rcStrict = pTrack->u.Mmio.pfnRead(pDevIns, pTrack->pvUser, off, pv, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtMmioRead(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.Mmio.hMmioRegion, off, pv, cb);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rcStrict = pTrack->u.Mmio.pfnWrite(pDevIns, pTrack->pvUser, off, pv, cb);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtMmioWrite(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.Mmio.hMmioRegion, off, pv, cb);
+
+ return rcStrict;
+}
+
+
+static DECLCALLBACK(VBOXSTRICTRC) pdmR3DevHlpTracing_MmioFill(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off,
+ uint32_t u32Item, uint32_t cbItem, uint32_t cItems)
+{
+ PCPDMDEVINSDBGFTRACK pTrack = (PCPDMDEVINSDBGFTRACK)pvUser;
+
+ Assert(pTrack->fMmio);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VBOXSTRICTRC rcStrict = pTrack->u.Mmio.pfnFill(pDevIns, pTrack->pvUser, off, u32Item, cbItem, cItems);
+ if (RT_SUCCESS(rcStrict))
+ DBGFTracerEvtMmioFill(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, pTrack->u.Mmio.hMmioRegion, off,
+ u32Item, cbItem, cItems);
+
+ return rcStrict;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortCreateEx} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_IoPortCreateEx(PPDMDEVINS pDevIns, RTIOPORT cPorts, uint32_t fFlags, PPDMPCIDEV pPciDev,
+ uint32_t iPciRegion, PFNIOMIOPORTNEWOUT pfnOut, PFNIOMIOPORTNEWIN pfnIn,
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr, PFNIOMIOPORTNEWINSTRING pfnInStr, RTR3PTR pvUser,
+ const char *pszDesc, PCIOMIOPORTDESC paExtDescs, PIOMIOPORTHANDLE phIoPorts)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlpTracing_IoPortCreateEx: caller='%s'/%d: cPorts=%#x fFlags=%#x pPciDev=%p iPciRegion=%#x pfnOut=%p pfnIn=%p pfnOutStr=%p pfnInStr=%p pvUser=%p pszDesc=%p:{%s} paExtDescs=%p phIoPorts=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cPorts, fFlags, pPciDev, iPciRegion, pfnOut, pfnIn, pfnOutStr, pfnInStr,
+ pvUser, pszDesc, pszDesc, paExtDescs, phIoPorts));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ int rc = VINF_SUCCESS;
+ if (pDevIns->Internal.s.idxDbgfTraceTrackNext < pDevIns->Internal.s.cDbgfTraceTrackMax)
+ {
+ PPDMDEVINSDBGFTRACK pTrack = &pDevIns->Internal.s.paDbgfTraceTrack[pDevIns->Internal.s.idxDbgfTraceTrackNext];
+
+ rc = IOMR3IoPortCreate(pVM, pDevIns, cPorts, fFlags, pPciDev, iPciRegion,
+ pfnOut ? pdmR3DevHlpTracing_IoPortNewOut : NULL,
+ pfnIn ? pdmR3DevHlpTracing_IoPortNewIn : NULL,
+ pfnOutStr ? pdmR3DevHlpTracing_IoPortNewOutStr : NULL,
+ pfnInStr ? pdmR3DevHlpTracing_IoPortNewInStr : NULL,
+ pTrack, pszDesc, paExtDescs, phIoPorts);
+ if (RT_SUCCESS(rc))
+ {
+ pTrack->fMmio = false;
+ pTrack->pvUser = pvUser;
+ pTrack->u.IoPort.hIoPorts = *phIoPorts;
+ pTrack->u.IoPort.pfnOut = pfnOut;
+ pTrack->u.IoPort.pfnIn = pfnIn;
+ pTrack->u.IoPort.pfnOutStr = pfnOutStr;
+ pTrack->u.IoPort.pfnInStr = pfnInStr;
+ pDevIns->Internal.s.idxDbgfTraceTrackNext++;
+ DBGFR3TracerEvtIoPortCreate(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, *phIoPorts, cPorts, fFlags, iPciRegion);
+ }
+ }
+ else
+ rc = VERR_OUT_OF_RESOURCES;
+
+ LogFlow(("pdmR3DevHlpTracing_IoPortCreateEx: caller='%s'/%d: returns %Rrc (*phIoPorts=%#x)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *phIoPorts));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortMap} */
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_IoPortMap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts, RTIOPORT Port)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: hIoPorts=%#x Port=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts, Port));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3IoPortMap(pVM, pDevIns, hIoPorts, Port);
+ DBGFTracerEvtIoPortMap(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, hIoPorts, Port);
+
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnIoPortUnmap} */
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_IoPortUnmap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: hIoPorts=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3IoPortUnmap(pVM, pDevIns, hIoPorts);
+ DBGFTracerEvtIoPortUnmap(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, hIoPorts);
+
+ LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioCreateEx} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_MmioCreateEx(PPDMDEVINS pDevIns, RTGCPHYS cbRegion,
+ uint32_t fFlags, PPDMPCIDEV pPciDev, uint32_t iPciRegion,
+ PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill,
+ void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioCreateEx: caller='%s'/%d: cbRegion=%#RGp fFlags=%#x pPciDev=%p iPciRegion=%#x pfnWrite=%p pfnRead=%p pfnFill=%p pvUser=%p pszDesc=%p:{%s} phRegion=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, cbRegion, fFlags, pPciDev, iPciRegion, pfnWrite, pfnRead, pfnFill, pvUser, pszDesc, pszDesc, phRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ /* HACK ALERT! Round the size up to page size. The PCI bus should do something similar before mapping it. */
+ /** @todo It's possible we need to do dummy MMIO fill-in for the PCI bus, or
+ * the guest may add more alignment to a region. */
+ cbRegion = RT_ALIGN_T(cbRegion, GUEST_PAGE_SIZE, RTGCPHYS);
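+ /* Example: assuming a 4 KiB GUEST_PAGE_SIZE, a request for a 0x1800 byte
+ region is grown to 0x2000 bytes here: RT_ALIGN_T(0x1800, 0x1000, RTGCPHYS) == 0x2000. */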
+
+ int rc = VINF_SUCCESS;
+ if (pDevIns->Internal.s.idxDbgfTraceTrackNext < pDevIns->Internal.s.cDbgfTraceTrackMax)
+ {
+ PPDMDEVINSDBGFTRACK pTrack = &pDevIns->Internal.s.paDbgfTraceTrack[pDevIns->Internal.s.idxDbgfTraceTrackNext];
+
+ rc = IOMR3MmioCreate(pVM, pDevIns, cbRegion, fFlags, pPciDev, iPciRegion,
+ pfnWrite ? pdmR3DevHlpTracing_MmioWrite : NULL,
+ pfnRead ? pdmR3DevHlpTracing_MmioRead : NULL,
+ pfnFill ? pdmR3DevHlpTracing_MmioFill : NULL,
+ pTrack, pszDesc, phRegion);
+ if (RT_SUCCESS(rc))
+ {
+ pTrack->fMmio = true;
+ pTrack->pvUser = pvUser;
+ pTrack->u.Mmio.hMmioRegion = *phRegion;
+ pTrack->u.Mmio.pfnWrite = pfnWrite;
+ pTrack->u.Mmio.pfnRead = pfnRead;
+ pTrack->u.Mmio.pfnFill = pfnFill;
+ pDevIns->Internal.s.idxDbgfTraceTrackNext++;
+ DBGFR3TracerEvtMmioCreate(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, *phRegion, cbRegion, fFlags, iPciRegion);
+ }
+ }
+ else
+ rc = VERR_OUT_OF_RESOURCES;
+
+ LogFlow(("pdmR3DevHlp_MmioCreateEx: caller='%s'/%d: returns %Rrc (*phRegion=%#x)\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc, *phRegion));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioMap} */
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_MmioMap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioMap: caller='%s'/%d: hRegion=%#x GCPhys=%#RGp\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion, GCPhys));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3MmioMap(pVM, pDevIns, hRegion, GCPhys);
+ DBGFTracerEvtMmioMap(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, hRegion, GCPhys);
+
+ LogFlow(("pdmR3DevHlp_MmioMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnMmioUnmap} */
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_MmioUnmap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_MmioUnmap: caller='%s'/%d: hRegion=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hRegion));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = IOMR3MmioUnmap(pVM, pDevIns, hRegion);
+ DBGFTracerEvtMmioUnmap(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, hRegion);
+
+ LogFlow(("pdmR3DevHlp_MmioUnmap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysRead} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
+{
+ RT_NOREF(fFlags);
+
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysRead: caller='%s'/%d: GCPhys=%RGp pvBuf=%p cbRead=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, pvBuf, cbRead));
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ VBOXSTRICTRC rcStrict;
+ if (VM_IS_EMT(pVM))
+ rcStrict = PGMPhysRead(pVM, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
+ else
+ rcStrict = PGMR3PhysReadExternal(pVM, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ if (!(fFlags & PDM_DEVHLP_PHYS_RW_F_DATA_USER))
+ DBGFTracerEvtGCPhysRead(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, GCPhys, pvBuf, cbRead);
+
+ Log(("pdmR3DevHlp_PhysRead: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPhysWrite} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags)
+{
+ RT_NOREF(fFlags);
+
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3DevHlp_PhysWrite: caller='%s'/%d: GCPhys=%RGp pvBuf=%p cbWrite=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, GCPhys, pvBuf, cbWrite));
+
+#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
+ if (!VM_IS_EMT(pVM))
+ {
+ char szNames[128];
+ uint32_t cLocks = PDMR3CritSectCountOwned(pVM, szNames, sizeof(szNames));
+ AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
+ }
+#endif
+
+ VBOXSTRICTRC rcStrict;
+ if (VM_IS_EMT(pVM))
+ rcStrict = PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE);
+ else
+ rcStrict = PGMR3PhysWriteExternal(pVM, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ if (!(fFlags & PDM_DEVHLP_PHYS_RW_F_DATA_USER))
+ DBGFTracerEvtGCPhysWrite(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, GCPhys, pvBuf, cbWrite);
+
+ Log(("pdmR3DevHlp_PhysWrite: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysRead} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_PCIPhysRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic read helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ LogFunc(("caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbRead=%#zx\n", pDevIns->pReg->szName,
+ pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbRead));
+ memset(pvBuf, 0xff, cbRead);
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmIommuMemAccessRead(pDevIns, pPciDev, GCPhys, pvBuf, cbRead, fFlags);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysRead(pDevIns, GCPhys, pvBuf, cbRead, fFlags);
+}
+
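+/*
+ * Typical usage, sketched on the assumption that the device goes through the
+ * usual PDMDevHlpPCIPhysRead wrapper (which passes NULL to select the default
+ * PCI device and thus lands in the helper above):
+ *
+ * @code
+ *    uint8_t abDesc[16];
+ *    int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysDesc, &abDesc[0], sizeof(abDesc));
+ *    // On a device with bus mastering disabled this returns
+ *    // VERR_PDM_NOT_PCI_BUS_MASTER and abDesc is filled with 0xff.
+ * @endcode
+ */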
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysWrite} */
+DECL_HIDDEN_CALLBACK(int)
+pdmR3DevHlpTracing_PCIPhysWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic write helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ Log(("pdmR3DevHlp_PCIPhysWrite: caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ int rc = pdmIommuMemAccessWrite(pDevIns, pPciDev, GCPhys, pvBuf, cbWrite, fFlags);
+ if ( rc == VERR_IOMMU_NOT_PRESENT
+ || rc == VERR_IOMMU_CANNOT_CALL_SELF)
+ { /* likely - ASSUMING most VMs won't be configured with an IOMMU. */ }
+ else
+ return rc;
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite, fFlags);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCISetIrq} */
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_PCISetIrq(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->apPciDevs[0];
+ AssertReturnVoid(pPciDev);
+ LogFlow(("pdmR3DevHlp_PCISetIrq: caller='%s'/%d: pPciDev=%p:{%#x} iIrq=%d iLevel=%d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iIrq, iLevel));
+ PDMPCIDEV_ASSERT_VALID_AND_REGISTERED(pDevIns, pPciDev);
+
+ /*
+ * Validate input.
+ */
+ Assert(iIrq == 0);
+ Assert((uint32_t)iLevel <= PDM_IRQ_LEVEL_FLIP_FLOP);
+
+ /*
+ * Must have a PCI device registered!
+ */
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ size_t const idxBus = pPciDev->Int.s.idxPdmBus;
+ AssertReturnVoid(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses));
+ PPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
+
+ DBGFTracerEvtIrq(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, iIrq, iLevel);
+
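+ /*
+ * A fresh IRQ tag is allocated on the rising edge and reused on the falling
+ * edge so the raise/lower pair of a delivery can be correlated in the trace.
+ */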
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ pBus->pfnSetIrqR3(pBus->pDevInsR3, pPciDev, iIrq, iLevel, uTagSrc);
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_PCISetIrq: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCISetIrqNoWait} */
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_PCISetIrqNoWait(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel)
+{
+ pdmR3DevHlpTracing_PCISetIrq(pDevIns, pPciDev, iIrq, iLevel);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnISASetIrq} */
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_ISASetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_ISASetIrq: caller='%s'/%d: iIrq=%d iLevel=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, iIrq, iLevel));
+
+ /*
+ * Validate input.
+ */
+ Assert(iIrq < 16);
+ Assert((uint32_t)iLevel <= PDM_IRQ_LEVEL_FLIP_FLOP);
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ DBGFTracerEvtIrq(pVM, pDevIns->Internal.s.hDbgfTraceEvtSrc, iIrq, iLevel);
+
+ /*
+ * Do the job.
+ */
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ PDMIsaSetIrq(pVM, iIrq, iLevel, uTagSrc); /* (The API takes the lock recursively.) */
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+
+ LogFlow(("pdmR3DevHlp_ISASetIrq: caller='%s'/%d: returns void\n", pDevIns->pReg->szName, pDevIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnISASetIrqNoWait} */
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_ISASetIrqNoWait(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ pdmR3DevHlpTracing_ISASetIrq(pDevIns, iIrq, iLevel);
+}
+
+
+/** @} */
+
diff --git a/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp b/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
new file mode 100644
index 00000000..f11f0b21
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
@@ -0,0 +1,509 @@
+/* $Id: PDMDevMiscHlp.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, Misc. Device Helpers.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DEVICE
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <VBox/msi.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+
+
+#include "PDMInline.h"
+#include "dtrace/VBoxVMM.h"
+
+
+
+/** @name Ring-3 PIC Helpers
+ * @{
+ */
+
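+/*
+ * The PIC's single output line is delivered through the local APIC's LINT0
+ * pin (u8Pin = 0) on VCPU 0; with LINT0 typically programmed as ExtINT by the
+ * guest, this behaves like a classic INTR assertion.
+ */
+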
+/** @interface_method_impl{PDMPICHLP,pfnSetInterruptFF} */
+static DECLCALLBACK(void) pdmR3PicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */
+
+ /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
+ Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded);
+
+ APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+}
+
+
+/** @interface_method_impl{PDMPICHLP,pfnClearInterruptFF} */
+static DECLCALLBACK(void) pdmR3PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */
+
+ /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
+ Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded);
+
+ APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+}
+
+
+/** @interface_method_impl{PDMPICHLP,pfnLock} */
+static DECLCALLBACK(int) pdmR3PicHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return pdmLockEx(pDevIns->Internal.s.pVMR3, rc);
+}
+
+
+/** @interface_method_impl{PDMPICHLP,pfnUnlock} */
+static DECLCALLBACK(void) pdmR3PicHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ pdmUnlock(pDevIns->Internal.s.pVMR3);
+}
+
+
+/**
+ * PIC Device Helpers.
+ */
+const PDMPICHLP g_pdmR3DevPicHlp =
+{
+ PDM_PICHLP_VERSION,
+ pdmR3PicHlp_SetInterruptFF,
+ pdmR3PicHlp_ClearInterruptFF,
+ pdmR3PicHlp_Lock,
+ pdmR3PicHlp_Unlock,
+ PDM_PICHLP_VERSION /* the end */
+};
+
+/** @} */
+
+
+/** @name Ring-3 I/O APIC Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMIOAPICHLP,pfnApicBusDeliver} */
+static DECLCALLBACK(int) pdmR3IoApicHlp_ApicBusDeliver(PPDMDEVINS pDevIns, uint8_t u8Dest, uint8_t u8DestMode,
+ uint8_t u8DeliveryMode, uint8_t uVector, uint8_t u8Polarity,
+ uint8_t u8TriggerMode, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ LogFlow(("pdmR3IoApicHlp_ApicBusDeliver: caller='%s'/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc));
+ return APICBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLP,pfnLock} */
+static DECLCALLBACK(int) pdmR3IoApicHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3IoApicHlp_Lock: caller='%s'/%d: rc=%Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return pdmLockEx(pDevIns->Internal.s.pVMR3, rc);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLP,pfnUnlock} */
+static DECLCALLBACK(void) pdmR3IoApicHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3IoApicHlp_Unlock: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pdmUnlock(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLP,pfnLockIsOwner} */
+static DECLCALLBACK(bool) pdmR3IoApicHlp_LockIsOwner(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3IoApicHlp_LockIsOwner: caller='%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return pdmLockIsOwner(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLP,pfnIommuMsiRemap} */
+static DECLCALLBACK(int) pdmR3IoApicHlp_IommuMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3IoApicHlp_IommuRemapMsi: caller='%s'/%d: pMsiIn=(%#RX64, %#RU32)\n", pDevIns->pReg->szName,
+ pDevIns->iInstance, pMsiIn->Addr.u64, pMsiIn->Data.u32));
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+ if (pdmIommuIsPresent(pDevIns))
+ return pdmIommuMsiRemap(pDevIns, idDevice, pMsiIn, pMsiOut);
+#else
+ RT_NOREF(pDevIns, idDevice);
+#endif
+ return VERR_IOMMU_NOT_PRESENT;
+}
+
+
+/**
+ * I/O APIC Device Helpers.
+ */
+const PDMIOAPICHLP g_pdmR3DevIoApicHlp =
+{
+ PDM_IOAPICHLP_VERSION,
+ pdmR3IoApicHlp_ApicBusDeliver,
+ pdmR3IoApicHlp_Lock,
+ pdmR3IoApicHlp_Unlock,
+ pdmR3IoApicHlp_LockIsOwner,
+ pdmR3IoApicHlp_IommuMsiRemap,
+ PDM_IOAPICHLP_VERSION /* the end */
+};
+
+/** @} */
+
+
+
+
+/** @name Ring-3 PCI Bus Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnIsaSetIrq} */
+static DECLCALLBACK(void) pdmR3PciHlp_IsaSetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Log4(("pdmR3PciHlp_IsaSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
+ PDMIsaSetIrq(pDevIns->Internal.s.pVMR3, iIrq, iLevel, uTagSrc);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnIoApicSetIrq} */
+static DECLCALLBACK(void) pdmR3PciHlp_IoApicSetIrq(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, int iIrq, int iLevel, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Log4(("pdmR3PciHlp_IoApicSetIrq: uBusDevFn=%#x iIrq=%d iLevel=%d uTagSrc=%#x\n", uBusDevFn, iIrq, iLevel, uTagSrc));
+ PDMIoApicSetIrq(pDevIns->Internal.s.pVMR3, uBusDevFn, iIrq, iLevel, uTagSrc);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnIoApicSendMsi} */
+static DECLCALLBACK(void) pdmR3PciHlp_IoApicSendMsi(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, PCMSIMSG pMsi, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Assert(PCIBDF_IS_VALID(uBusDevFn));
+ Log4(("pdmR3PciHlp_IoApicSendMsi: uBusDevFn=%#x Msi (Addr=%#RX64 Data=%#x) uTagSrc=%#x\n", uBusDevFn,
+ pMsi->Addr.u64, pMsi->Data.u32, uTagSrc));
+ PDMIoApicSendMsi(pDevIns->Internal.s.pVMR3, uBusDevFn, pMsi, uTagSrc);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnLock} */
+static DECLCALLBACK(int) pdmR3PciHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3PciHlp_Lock: caller='%s'/%d: rc=%Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return pdmLockEx(pDevIns->Internal.s.pVMR3, rc);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnUnlock} */
+static DECLCALLBACK(void) pdmR3PciHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3PciHlp_Unlock: caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pdmUnlock(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPR3,pfnGetBusByNo} */
+static DECLCALLBACK(PPDMDEVINS) pdmR3PciHlp_GetBusByNo(PPDMDEVINS pDevIns, uint32_t idxPdmBus)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ AssertReturn(idxPdmBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses), NULL);
+ PPDMDEVINS pRetDevIns = pVM->pdm.s.aPciBuses[idxPdmBus].pDevInsR3;
+ LogFlow(("pdmR3PciHlp_GetBusByNo: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pRetDevIns));
+ return pRetDevIns;
+}
+
+
+/**
+ * PCI Bus Device Helpers.
+ */
+const PDMPCIHLPR3 g_pdmR3DevPciHlp =
+{
+ PDM_PCIHLPR3_VERSION,
+ pdmR3PciHlp_IsaSetIrq,
+ pdmR3PciHlp_IoApicSetIrq,
+ pdmR3PciHlp_IoApicSendMsi,
+ pdmR3PciHlp_Lock,
+ pdmR3PciHlp_Unlock,
+ pdmR3PciHlp_GetBusByNo,
+ PDM_PCIHLPR3_VERSION, /* the end */
+};
+
+/** @} */
+
+
+/** @name Ring-3 IOMMU Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnLock} */
+static DECLCALLBACK(int) pdmR3IommuHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlowFunc(("caller='%s'/%d: rc=%Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return pdmLockEx(pDevIns->Internal.s.pVMR3, rc);
+}
+
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnUnlock} */
+static DECLCALLBACK(void) pdmR3IommuHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlowFunc(("caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ pdmUnlock(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnLockIsOwner} */
+static DECLCALLBACK(bool) pdmR3IommuHlp_LockIsOwner(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlowFunc(("caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return pdmLockIsOwner(pDevIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnSendMsi} */
+static DECLCALLBACK(void) pdmR3IommuHlp_SendMsi(PPDMDEVINS pDevIns, PCMSIMSG pMsi, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlowFunc(("caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ PDMIoApicSendMsi(pDevIns->Internal.s.pVMR3, NIL_PCIBDF, pMsi, uTagSrc);
+}
+
+
+/**
+ * IOMMU Device Helpers.
+ */
+const PDMIOMMUHLPR3 g_pdmR3DevIommuHlp =
+{
+ PDM_IOMMUHLPR3_VERSION,
+ pdmR3IommuHlp_Lock,
+ pdmR3IommuHlp_Unlock,
+ pdmR3IommuHlp_LockIsOwner,
+ pdmR3IommuHlp_SendMsi,
+ PDM_IOMMUHLPR3_VERSION /* the end */
+};
+
+/** @} */
+
+
+/** @name Ring-3 HPET Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMHPETHLPR3,pfnSetLegacyMode} */
+static DECLCALLBACK(int) pdmR3HpetHlp_SetLegacyMode(PPDMDEVINS pDevIns, bool fActivated)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3HpetHlp_SetLegacyMode: caller='%s'/%d: fActivated=%RTbool\n", pDevIns->pReg->szName, pDevIns->iInstance, fActivated));
+
+ size_t i;
+ int rc = VINF_SUCCESS;
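+ /* In HPET legacy replacement mode, timer 0 takes over IRQ0 (normally the
+ i8254 PIT) and timer 1 takes over IRQ8 (normally the RTC), so both
+ devices must be notified of the mode change. */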
+ static const char * const s_apszDevsToNotify[] =
+ {
+ "i8254",
+ "mc146818"
+ };
+ for (i = 0; i < RT_ELEMENTS(s_apszDevsToNotify); i++)
+ {
+ PPDMIBASE pBase;
+ rc = PDMR3QueryDevice(pDevIns->Internal.s.pVMR3->pUVM, s_apszDevsToNotify[i], 0, &pBase);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMIHPETLEGACYNOTIFY pPort = PDMIBASE_QUERY_INTERFACE(pBase, PDMIHPETLEGACYNOTIFY);
+ AssertLogRelMsgBreakStmt(pPort, ("%s\n", s_apszDevsToNotify[i]), rc = VERR_PDM_HPET_LEGACY_NOTIFY_MISSING);
+ pPort->pfnModeChanged(pPort, fActivated);
+ }
+ else if ( rc == VERR_PDM_DEVICE_NOT_FOUND
+ || rc == VERR_PDM_DEVICE_INSTANCE_NOT_FOUND)
+ rc = VINF_SUCCESS; /* the device isn't configured, ignore. */
+ else
+ AssertLogRelMsgFailedBreak(("%s -> %Rrc\n", s_apszDevsToNotify[i], rc));
+ }
+
+ /* Don't bother cleaning up, any failure here will cause a guru meditation. */
+
+ LogFlow(("pdmR3HpetHlp_SetLegacyMode: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMHPETHLPR3,pfnSetIrq} */
+static DECLCALLBACK(int) pdmR3HpetHlp_SetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3HpetHlp_SetIrq: caller='%s'/%d: iIrq=%d iLevel=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, iIrq, iLevel));
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ PDMIsaSetIrq(pVM, iIrq, iLevel, uTagSrc); /* (The API takes the lock recursively.) */
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * HPET Device Helpers.
+ */
+const PDMHPETHLPR3 g_pdmR3DevHpetHlp =
+{
+ PDM_HPETHLPR3_VERSION,
+ pdmR3HpetHlp_SetLegacyMode,
+ pdmR3HpetHlp_SetIrq,
+ PDM_HPETHLPR3_VERSION, /* the end */
+};
+
+/** @} */
+
+
+/** @name Ring-3 Raw PCI Device Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMPCIRAWHLPR3,pfnGetRCHelpers} */
+static DECLCALLBACK(PCPDMPCIRAWHLPRC) pdmR3PciRawHlp_GetRCHelpers(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+#if 0
+ if (VM_IS_RAW_MODE_ENABLED(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCPciRawHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+#else
+ RT_NOREF(pVM, pDevIns);
+#endif
+
+ LogFlow(("pdmR3PciRawHlp_GetGCHelpers: caller='%s'/%d: returns %RRv\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
+ return pRCHelpers;
+}
+
+
+/** @interface_method_impl{PDMPCIRAWHLPR3,pfnGetR0Helpers} */
+static DECLCALLBACK(PCPDMPCIRAWHLPR0) pdmR3PciRawHlp_GetR0Helpers(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ PCPDMPCIRAWHLPR0 pR0Helpers = NIL_RTR0PTR;
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0PciRawHlp", &pR0Helpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pR0Helpers);
+ LogFlow(("pdmR3PciRawHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, pR0Helpers));
+ return pR0Helpers;
+}
+
+
+/**
+ * Raw PCI Device Helpers.
+ */
+const PDMPCIRAWHLPR3 g_pdmR3DevPciRawHlp =
+{
+ PDM_PCIRAWHLPR3_VERSION,
+ pdmR3PciRawHlp_GetRCHelpers,
+ pdmR3PciRawHlp_GetR0Helpers,
+ PDM_PCIRAWHLPR3_VERSION, /* the end */
+};
+
+/** @} */
+
+
+/* none yet */
+
+/**
+ * Firmware Device Helpers.
+ */
+const PDMFWHLPR3 g_pdmR3DevFirmwareHlp =
+{
+ PDM_FWHLPR3_VERSION,
+ PDM_FWHLPR3_VERSION
+};
+
+/**
+ * DMAC Device Helpers.
+ */
+const PDMDMACHLP g_pdmR3DevDmacHlp =
+{
+ PDM_DMACHLP_VERSION
+};
+
+
+
+
+/* none yet */
+
+/**
+ * RTC Device Helpers.
+ */
+const PDMRTCHLP g_pdmR3DevRtcHlp =
+{
+ PDM_RTCHLP_VERSION
+};
+
diff --git a/src/VBox/VMM/VMMR3/PDMDevice.cpp b/src/VBox/VMM/VMMR3/PDMDevice.cpp
new file mode 100644
index 00000000..155d4294
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMDevice.cpp
@@ -0,0 +1,1291 @@
+/* $Id: PDMDevice.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, Device parts.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DEVICE
+#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vmm.h>
+
+#include <VBox/version.h>
+#include <VBox/log.h>
+#include <VBox/msi.h>
+#include <VBox/err.h>
+#include <iprt/alloc.h>
+#include <iprt/alloca.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/path.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Internal callback structure pointer.
+ * The main purpose is to define the extra data we associate
+ * with PDMDEVREGCB so we can find the VM instance and so on.
+ */
+typedef struct PDMDEVREGCBINT
+{
+ /** The callback structure. */
+ PDMDEVREGCB Core;
+ /** A bit of padding. */
+ uint32_t u32[4];
+ /** VM Handle. */
+ PVM pVM;
+ /** Pointer to the configuration node the registrations should be
+ * associated with. Can be NULL. */
+ PCFGMNODE pCfgNode;
+} PDMDEVREGCBINT;
+/** Pointer to a PDMDEVREGCBINT structure. */
+typedef PDMDEVREGCBINT *PPDMDEVREGCBINT;
+/** Pointer to a const PDMDEVREGCBINT structure. */
+typedef const PDMDEVREGCBINT *PCPDMDEVREGCBINT;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) pdmR3DevReg_Register(PPDMDEVREGCB pCallbacks, PCPDMDEVREG pReg);
+static int pdmR3DevLoadModules(PVM pVM);
+static int pdmR3DevLoad(PVM pVM, PPDMDEVREGCBINT pRegCB, const char *pszFilename, const char *pszName);
+
+
+
+
+/**
+ * This function will initialize the devices for this VM instance.
+ *
+ * First of all this means loading the builtin devices and letting them
+ * register themselves. Beyond that, any additional device modules are
+ * loaded and called for registration.
+ *
+ * Then the device configuration is enumerated, the instantiation order
+ * is determined, and finally they are instantiated.
+ *
+ * After all devices have been successfully instantiated, the primary
+ * PCI bus device is called to emulate the PCI BIOS, i.e. to make the
+ * resource assignments. If there is no PCI bus device, this step is of
+ * course skipped.
+ *
+ * Finally the init completion routines of the instantiated devices
+ * are called.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3DevInit(PVM pVM)
+{
+ LogFlow(("pdmR3DevInit:\n"));
+
+ AssertRelease(!(RT_UOFFSETOF(PDMDEVINS, achInstanceData) & 15));
+ AssertRelease(sizeof(pVM->pdm.s.pDevInstances->Internal.s) <= sizeof(pVM->pdm.s.pDevInstances->Internal.padding));
+
+ /*
+ * Load device modules.
+ */
+ int rc = pdmR3DevLoadModules(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+
+#ifdef VBOX_WITH_USB
+ /* ditto for USB Devices. */
+ rc = pdmR3UsbLoadModules(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+#endif
+
+ /*
+ * Get the RC & R0 devhlps and create the devhlp R3 task queue.
+ */
+ rc = PDMR3QueueCreateInternal(pVM, sizeof(PDMDEVHLPTASK), pVM->cCpus * 8, 0, pdmR3DevHlpQueueConsumer, true, "DevHlp",
+ &pVM->pdm.s.hDevHlpQueue);
+ AssertRCReturn(rc, rc);
+
+    /*
+     * Enumerate the device instance configurations
+     * and come up with an instantiation order.
+     */
+ /* Switch to /Devices, which contains the device instantiations. */
+ PCFGMNODE pDevicesNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices");
+
+ /*
+ * Count the device instances.
+ */
+ PCFGMNODE pCur;
+ PCFGMNODE pInstanceNode;
+ unsigned cDevs = 0;
+ for (pCur = CFGMR3GetFirstChild(pDevicesNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ for (pInstanceNode = CFGMR3GetFirstChild(pCur); pInstanceNode; pInstanceNode = CFGMR3GetNextChild(pInstanceNode))
+ cDevs++;
+ if (!cDevs)
+ {
+ Log(("PDM: No devices were configured!\n"));
+ return VINF_SUCCESS;
+ }
+ Log2(("PDM: cDevs=%u\n", cDevs));
+
+ /*
+ * Collect info on each device instance.
+ */
+ struct DEVORDER
+ {
+ /** Configuration node. */
+ PCFGMNODE pNode;
+ /** Pointer to device. */
+ PPDMDEV pDev;
+ /** Init order. */
+ uint32_t u32Order;
+ /** VBox instance number. */
+ uint32_t iInstance;
+ } *paDevs = (struct DEVORDER *)alloca(sizeof(paDevs[0]) * (cDevs + 1)); /* (One extra for swapping) */
+ Assert(paDevs);
+ unsigned i = 0;
+ for (pCur = CFGMR3GetFirstChild(pDevicesNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /* Get the device name. */
+ char szName[sizeof(paDevs[0].pDev->pReg->szName)];
+ rc = CFGMR3GetName(pCur, szName, sizeof(szName));
+ AssertMsgRCReturn(rc, ("Configuration error: device name is too long (or something)! rc=%Rrc\n", rc), rc);
+
+ /* Find the device. */
+ PPDMDEV pDev = pdmR3DevLookup(pVM, szName);
+ AssertLogRelMsgReturn(pDev, ("Configuration error: device '%s' not found!\n", szName), VERR_PDM_DEVICE_NOT_FOUND);
+
+ /* Configured priority or use default based on device class? */
+ uint32_t u32Order;
+ rc = CFGMR3QueryU32(pCur, "Priority", &u32Order);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
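+            /* Use the lowest bit set in the class mask as the default init
+               order, so lower-numbered device classes are instantiated first. */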
+ uint32_t u32 = pDev->pReg->fClass;
+ for (u32Order = 1; !(u32 & u32Order); u32Order <<= 1)
+ /* nop */;
+ }
+ else
+ AssertMsgRCReturn(rc, ("Configuration error: reading \"Priority\" for the '%s' device failed rc=%Rrc!\n", szName, rc), rc);
+
+ /* Enumerate the device instances. */
+ uint32_t const iStart = i;
+ for (pInstanceNode = CFGMR3GetFirstChild(pCur); pInstanceNode; pInstanceNode = CFGMR3GetNextChild(pInstanceNode))
+ {
+ paDevs[i].pNode = pInstanceNode;
+ paDevs[i].pDev = pDev;
+ paDevs[i].u32Order = u32Order;
+
+ /* Get the instance number. */
+ char szInstance[32];
+ rc = CFGMR3GetName(pInstanceNode, szInstance, sizeof(szInstance));
+ AssertMsgRCReturn(rc, ("Configuration error: instance name is too long (or something)! rc=%Rrc\n", rc), rc);
+ char *pszNext = NULL;
+ rc = RTStrToUInt32Ex(szInstance, &pszNext, 0, &paDevs[i].iInstance);
+            AssertMsgRCReturn(rc, ("Configuration error: RTStrToUInt32Ex failed on the instance name '%s'! rc=%Rrc\n", szInstance, rc), rc);
+ AssertMsgReturn(!*pszNext, ("Configuration error: the instance name '%s' isn't all digits. (%s)\n", szInstance, pszNext), VERR_INVALID_PARAMETER);
+
+ /* next instance */
+ i++;
+ }
+
+ /* check the number of instances */
+ if (i - iStart > pDev->pReg->cMaxInstances)
+            AssertLogRelMsgFailedReturn(("Configuration error: Too many instances of %s were configured: %u, max %u\n",
+ szName, i - iStart, pDev->pReg->cMaxInstances),
+ VERR_PDM_TOO_MANY_DEVICE_INSTANCES);
+ } /* devices */
+ Assert(i == cDevs);
+
+    /*
+     * Sort (bubble) the device array ascending on u32Order, and on the
+     * instance number within each device.
+     */
+ unsigned c = cDevs - 1;
+ while (c)
+ {
+ unsigned j = 0;
+ for (i = 0; i < c; i++)
+ if ( paDevs[i].u32Order > paDevs[i + 1].u32Order
+ || ( paDevs[i].u32Order == paDevs[i + 1].u32Order
+ && paDevs[i].iInstance > paDevs[i + 1].iInstance
+ && paDevs[i].pDev == paDevs[i + 1].pDev) )
+ {
+ paDevs[cDevs] = paDevs[i + 1];
+ paDevs[i + 1] = paDevs[i];
+ paDevs[i] = paDevs[cDevs];
+ j = i;
+ }
+ c = j;
+ }
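+    /* Example (hypothetical classes): a PCI bus device whose class sets a low
+       bit sorts ahead of two NIC instances whose class sets a higher bit,
+       giving bus#0, nic#0, nic#1 - buses come up before the devices on them. */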
+
+
+    /*
+     * Instantiate the devices.
+     */
+ for (i = 0; i < cDevs; i++)
+ {
+ PDMDEVREGR3 const * const pReg = paDevs[i].pDev->pReg;
+
+ /*
+ * Gather a bit of config.
+ */
+ /* trusted */
+ bool fTrusted;
+ rc = CFGMR3QueryBool(paDevs[i].pNode, "Trusted", &fTrusted);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ fTrusted = false;
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("configuration error: failed to query boolean \"Trusted\", rc=%Rrc\n", rc));
+ return rc;
+ }
+
+        /* RZEnabled, R0Enabled, RCEnabled */
+ bool fR0Enabled = false;
+ bool fRCEnabled = false;
+ if ( (pReg->fFlags & (PDM_DEVREG_FLAGS_R0 | PDM_DEVREG_FLAGS_RC))
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ && !PGMR3IsNemModeEnabled(pVM) /* No ring-0 in simplified memory mode. */
+#endif
+ && !SUPR3IsDriverless())
+ {
+ if (pReg->fFlags & PDM_DEVREG_FLAGS_R0)
+ {
+ if (pReg->fFlags & PDM_DEVREG_FLAGS_REQUIRE_R0)
+ fR0Enabled = true;
+ else
+ {
+ rc = CFGMR3QueryBoolDef(paDevs[i].pNode, "R0Enabled", &fR0Enabled,
+ !(pReg->fFlags & PDM_DEVREG_FLAGS_OPT_IN_R0));
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+
+ if (pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ {
+ if (pReg->fFlags & PDM_DEVREG_FLAGS_REQUIRE_RC)
+ fRCEnabled = true;
+ else
+ {
+ rc = CFGMR3QueryBoolDef(paDevs[i].pNode, "RCEnabled", &fRCEnabled,
+ !(pReg->fFlags & PDM_DEVREG_FLAGS_OPT_IN_RC));
+ AssertLogRelRCReturn(rc, rc);
+ }
+                fRCEnabled = false; /* Raw-mode is currently not supported, so always force it off. */
+ }
+ }
+
+#ifdef VBOX_WITH_DBGF_TRACING
+ DBGFTRACEREVTSRC hDbgfTraceEvtSrc = NIL_DBGFTRACEREVTSRC;
+ bool fTracingEnabled = false;
+ bool fGCPhysRwAll = false;
+ rc = CFGMR3QueryBoolDef(paDevs[i].pNode, "TracingEnabled", &fTracingEnabled,
+ false);
+ AssertLogRelRCReturn(rc, rc);
+ if (fTracingEnabled)
+ {
+ rc = CFGMR3QueryBoolDef(paDevs[i].pNode, "TraceAllGstMemRw", &fGCPhysRwAll,
+ false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /* Traced devices need to be trusted for now. */
+ if (fTrusted)
+ {
+ rc = DBGFR3TracerRegisterEvtSrc(pVM, pReg->szName, &hDbgfTraceEvtSrc);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ else
+ AssertMsgFailedReturn(("configuration error: Device tracing needs a trusted device\n"), VERR_INCOMPATIBLE_CONFIG);
+ }
+#endif
+
+ /* config node */
+ PCFGMNODE pConfigNode = CFGMR3GetChild(paDevs[i].pNode, "Config");
+ if (!pConfigNode)
+ {
+ rc = CFGMR3InsertNode(paDevs[i].pNode, "Config", &pConfigNode);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Failed to create Config node! rc=%Rrc\n", rc));
+ return rc;
+ }
+ }
+ CFGMR3SetRestrictedRoot(pConfigNode);
+
+ /*
+ * Allocate the device instance and critical section.
+ */
+ AssertLogRelReturn(paDevs[i].pDev->cInstances < pReg->cMaxInstances,
+ VERR_PDM_TOO_MANY_DEVICE_INSTANCES);
+ PPDMDEVINS pDevIns;
+ PPDMCRITSECT pCritSect;
+ if (fR0Enabled || fRCEnabled)
+ {
+            AssertLogRel(fR0Enabled /* not possible to enable just raw-mode atm. */);
+
+ rc = PDMR3LdrLoadR0(pVM->pUVM, pReg->pszR0Mod, paDevs[i].pDev->pszR0SearchPath);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pVM->pUVM, rc, RT_SRC_POS, "Failed to load ring-0 module '%s' for device '%s'",
+ pReg->pszR0Mod, pReg->szName);
+
+ PDMDEVICECREATEREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pDevInsR3 = NULL;
+ /** @todo Add tracer id in request so R0 can set up DEVINSR0 properly. */
+ Req.fFlags = pReg->fFlags;
+ Req.fClass = pReg->fClass;
+ Req.cMaxInstances = pReg->cMaxInstances;
+ Req.uSharedVersion = pReg->uSharedVersion;
+ Req.cbInstanceShared = pReg->cbInstanceShared;
+ Req.cbInstanceR3 = pReg->cbInstanceCC;
+ Req.cbInstanceRC = pReg->cbInstanceRC;
+ Req.cMaxPciDevices = pReg->cMaxPciDevices;
+ Req.cMaxMsixVectors = pReg->cMaxMsixVectors;
+ Req.iInstance = paDevs[i].iInstance;
+ Req.fRCEnabled = fRCEnabled;
+ Req.afReserved[0] = false;
+ Req.afReserved[1] = false;
+ Req.afReserved[2] = false;
+#ifdef VBOX_WITH_DBGF_TRACING
+ Req.hDbgfTracerEvtSrc = hDbgfTraceEvtSrc;
+#else
+ Req.hDbgfTracerEvtSrc = NIL_DBGFTRACEREVTSRC;
+#endif
+ rc = RTStrCopy(Req.szDevName, sizeof(Req.szDevName), pReg->szName);
+ AssertLogRelRCReturn(rc, rc);
+ rc = RTStrCopy(Req.szModName, sizeof(Req.szModName), pReg->pszR0Mod);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_PDM_DEVICE_CREATE, 0, &Req.Hdr);
+ AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_PDM_DEVICE_CREATE for %s failed: %Rrc\n", pReg->szName, rc), rc);
+
+ pDevIns = Req.pDevInsR3;
+ pCritSect = pDevIns->pCritSectRoR3;
+
+ Assert(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED);
+ AssertLogRelReturn(pDevIns->Internal.s.idxR0Device < PDM_MAX_RING0_DEVICE_INSTANCES, VERR_PDM_DEV_IPE_1);
+ AssertLogRelReturn(pVM->pdm.s.apDevRing0Instances[pDevIns->Internal.s.idxR0Device] == pDevIns, VERR_PDM_DEV_IPE_1);
+ }
+ else
+ {
+            /* The code in this else branch works by the same rules as the PDMR0Device.cpp
+               code, except there are only the ring-3 components of the device instance.
+               Changes here may need to be reflected in PDMR0Device.cpp and vice versa! */
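+            /* Rough layout of the single allocation computed below (a sketch;
+               each chunk is 64-byte aligned):
+                 [PDMDEVINS + ring-3 data][shared data][critsect][PCI devices] */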
+ uint32_t cb = RT_UOFFSETOF_DYN(PDMDEVINS, achInstanceData[pReg->cbInstanceCC]);
+ cb = RT_ALIGN_32(cb, 64);
+ uint32_t const offShared = cb;
+ cb += RT_ALIGN_32(pReg->cbInstanceShared, 64);
+ uint32_t const cbCritSect = RT_ALIGN_32(sizeof(*pCritSect), 64);
+ cb += cbCritSect;
+ uint32_t const cbMsixState = RT_ALIGN_32(pReg->cMaxMsixVectors * 16 + (pReg->cMaxMsixVectors + 7) / 8, _4K);
+ uint32_t const cbPciDev = RT_ALIGN_32(RT_UOFFSETOF_DYN(PDMPCIDEV, abMsixState[cbMsixState]), 64);
+ uint32_t const cPciDevs = RT_MIN(pReg->cMaxPciDevices, 1024);
+ uint32_t const cbPciDevs = cbPciDev * cPciDevs;
+ cb += cbPciDevs;
+ AssertLogRelMsgReturn(cb <= PDM_MAX_DEVICE_INSTANCE_SIZE_R3,
+                                   ("Device %s total instance size is too big: %u, max %u\n",
+ pReg->szName, cb, PDM_MAX_DEVICE_INSTANCE_SIZE_R3),
+ VERR_ALLOCATION_TOO_BIG);
+
+#if 0 /* Several devices demand cacheline aligned data, if not page aligned. A real problem in NEM mode. */
+ rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_DEVICE, cb, (void **)&pDevIns);
+ AssertLogRelMsgRCReturn(rc, ("Failed to allocate %zu bytes of instance data for device '%s'. rc=%Rrc\n",
+ cb, pReg->szName, rc), rc);
+#else
+ pDevIns = (PPDMDEVINS)RTMemPageAllocZ(cb);
+ AssertLogRelMsgReturn(pDevIns, ("Failed to allocate %zu bytes of instance data for device '%s'\n", cb, pReg->szName),
+ VERR_NO_PAGE_MEMORY);
+#endif
+
+ /* Initialize it: */
+ pDevIns->u32Version = PDM_DEVINSR3_VERSION;
+ pDevIns->iInstance = paDevs[i].iInstance;
+ pDevIns->cbRing3 = cb;
+ //pDevIns->fR0Enabled = false;
+ //pDevIns->fRCEnabled = false;
+ pDevIns->pvInstanceDataR3 = (uint8_t *)pDevIns + offShared;
+ pDevIns->pvInstanceDataForR3 = &pDevIns->achInstanceData[0];
+ pCritSect = (PPDMCRITSECT)((uint8_t *)pDevIns + offShared + RT_ALIGN_32(pReg->cbInstanceShared, 64));
+ pDevIns->pCritSectRoR3 = pCritSect;
+ pDevIns->cbPciDev = cbPciDev;
+ pDevIns->cPciDevs = cPciDevs;
+ for (uint32_t iPciDev = 0; iPciDev < cPciDevs; iPciDev++)
+ {
+ PPDMPCIDEV pPciDev = (PPDMPCIDEV)((uint8_t *)pDevIns->pCritSectRoR3 + cbCritSect + cbPciDev * iPciDev);
+ if (iPciDev < RT_ELEMENTS(pDevIns->apPciDevs))
+ pDevIns->apPciDevs[iPciDev] = pPciDev;
+ pPciDev->cbConfig = _4K;
+ pPciDev->cbMsixState = cbMsixState;
+ pPciDev->idxSubDev = (uint16_t)iPciDev;
+ pPciDev->Int.s.idxSubDev = (uint16_t)iPciDev;
+ pPciDev->u32Magic = PDMPCIDEV_MAGIC;
+ }
+ }
+
+ pDevIns->pHlpR3 = fTrusted ? &g_pdmR3DevHlpTrusted : &g_pdmR3DevHlpUnTrusted;
+ pDevIns->pReg = pReg;
+ pDevIns->pCfg = pConfigNode;
+ //pDevIns->IBase.pfnQueryInterface = NULL;
+ //pDevIns->fTracing = 0;
+ pDevIns->idTracing = ++pVM->pdm.s.idTracingDev;
+
+ //pDevIns->Internal.s.pNextR3 = NULL;
+ //pDevIns->Internal.s.pPerDeviceNextR3 = NULL;
+ pDevIns->Internal.s.pDevR3 = paDevs[i].pDev;
+ //pDevIns->Internal.s.pLunsR3 = NULL;
+ //pDevIns->Internal.s.pfnAsyncNotify = NULL;
+ pDevIns->Internal.s.pCfgHandle = paDevs[i].pNode;
+ pDevIns->Internal.s.pVMR3 = pVM;
+#ifdef VBOX_WITH_DBGF_TRACING
+ pDevIns->Internal.s.hDbgfTraceEvtSrc = hDbgfTraceEvtSrc;
+#else
+ pDevIns->Internal.s.hDbgfTraceEvtSrc = NIL_DBGFTRACEREVTSRC;
+#endif
+ //pDevIns->Internal.s.pHeadPciDevR3 = NULL;
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_SUSPENDED;
+ //pDevIns->Internal.s.uLastIrqTag = 0;
+
+ rc = pdmR3CritSectInitDeviceAuto(pVM, pDevIns, pCritSect, RT_SRC_POS,
+ "%s#%uAuto", pDevIns->pReg->szName, pDevIns->iInstance);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Link it into all the lists.
+ */
+ /* The global instance FIFO. */
+ PPDMDEVINS pPrev1 = pVM->pdm.s.pDevInstances;
+ if (!pPrev1)
+ pVM->pdm.s.pDevInstances = pDevIns;
+ else
+ {
+ while (pPrev1->Internal.s.pNextR3)
+ pPrev1 = pPrev1->Internal.s.pNextR3;
+ pPrev1->Internal.s.pNextR3 = pDevIns;
+ }
+
+ /* The per device instance FIFO. */
+ PPDMDEVINS pPrev2 = paDevs[i].pDev->pInstances;
+ if (!pPrev2)
+ paDevs[i].pDev->pInstances = pDevIns;
+ else
+ {
+ while (pPrev2->Internal.s.pPerDeviceNextR3)
+ pPrev2 = pPrev2->Internal.s.pPerDeviceNextR3;
+ pPrev2->Internal.s.pPerDeviceNextR3 = pDevIns;
+ }
+
+#ifdef VBOX_WITH_DBGF_TRACING
+ /*
+ * Allocate memory for the MMIO/IO port registration tracking if DBGF tracing is enabled.
+ */
+ if (hDbgfTraceEvtSrc != NIL_DBGFTRACEREVTSRC)
+ {
+ pDevIns->Internal.s.paDbgfTraceTrack = (PPDMDEVINSDBGFTRACK)RTMemAllocZ(PDM_MAX_DEVICE_DBGF_TRACING_TRACK);
+ if (!pDevIns->Internal.s.paDbgfTraceTrack)
+ {
+ LogRel(("PDM: Failed to construct '%s'/%d! %Rra\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_NO_MEMORY));
+ if (VMR3GetErrorCount(pVM->pUVM) == 0)
+ VMSetError(pVM, rc, RT_SRC_POS, "Failed to construct device '%s' instance #%u",
+ pDevIns->pReg->szName, pDevIns->iInstance);
+ paDevs[i].pDev->cInstances--;
+ return VERR_NO_MEMORY;
+ }
+
+ pDevIns->Internal.s.idxDbgfTraceTrackNext = 0;
+ pDevIns->Internal.s.cDbgfTraceTrackMax = PDM_MAX_DEVICE_DBGF_TRACING_TRACK / sizeof(PDMDEVINSDBGFTRACK);
+ pDevIns->pHlpR3 = &g_pdmR3DevHlpTracing;
+ }
+#endif
+
+ /*
+ * Call the constructor.
+ */
+ paDevs[i].pDev->cInstances++;
+ Log(("PDM: Constructing device '%s' instance %d...\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ rc = pDevIns->pReg->pfnConstruct(pDevIns, pDevIns->iInstance, pDevIns->pCfg);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDM: Failed to construct '%s'/%d! %Rra\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ if (VMR3GetErrorCount(pVM->pUVM) == 0)
+ VMSetError(pVM, rc, RT_SRC_POS, "Failed to construct device '%s' instance #%u",
+ pDevIns->pReg->szName, pDevIns->iInstance);
+ /* Because we're damn lazy, the destructor will be called even if
+ the constructor fails. So, no unlinking. */
+ paDevs[i].pDev->cInstances--;
+ return rc == VERR_VERSION_MISMATCH ? VERR_PDM_DEVICE_VERSION_MISMATCH : rc;
+ }
+
+ /*
+ * Call the ring-0 constructor if applicable.
+ */
+ if (fR0Enabled)
+ {
+ PDMDEVICEGENCALLREQ Req;
+ RT_ZERO(Req.Params);
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.enmCall = PDMDEVICEGENCALL_CONSTRUCT;
+ Req.idxR0Device = pDevIns->Internal.s.idxR0Device;
+ Req.pDevInsR3 = pDevIns;
+ rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_PDM_DEVICE_GEN_CALL, 0, &Req.Hdr);
+ pDevIns->Internal.s.fIntFlags |= PDMDEVINSINT_FLAGS_R0_CONTRUCT;
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDM: Failed to construct (ring-0) '%s'/%d! %Rra\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ if (VMR3GetErrorCount(pVM->pUVM) == 0)
+ VMSetError(pVM, rc, RT_SRC_POS, "The ring-0 constructor of device '%s' instance #%u failed",
+ pDevIns->pReg->szName, pDevIns->iInstance);
+ paDevs[i].pDev->cInstances--;
+ return rc == VERR_VERSION_MISMATCH ? VERR_PDM_DEVICE_VERSION_MISMATCH : rc;
+ }
+ }
+
+ } /* for device instances */
+
+#ifdef VBOX_WITH_USB
+ /* ditto for USB Devices. */
+ rc = pdmR3UsbInstantiateDevices(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+#endif
+
+ LogFlow(("pdmR3DevInit: returns %Rrc\n", VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Performs the init complete callback after ring-0 and raw-mode has been
+ * initialized.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3DevInitComplete(PVM pVM)
+{
+ int rc;
+
+ /*
+     * Iterate through the device instances and invoke the callback.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ {
+ if (pDevIns->pReg->pfnInitComplete)
+ {
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ rc = pDevIns->pReg->pfnInitComplete(pDevIns);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("InitComplete on device '%s'/%d failed with rc=%Rrc\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+ }
+ }
+ }
+
+#ifdef VBOX_WITH_USB
+ rc = pdmR3UsbVMInitComplete(pVM);
+ if (RT_FAILURE(rc))
+ {
+        Log(("pdmR3DevInitComplete: returns %Rrc\n", rc));
+ return rc;
+ }
+#endif
+
+    LogFlow(("pdmR3DevInitComplete: returns %Rrc\n", VINF_SUCCESS));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Looks up a device structure by name.
+ * @internal
+ */
+PPDMDEV pdmR3DevLookup(PVM pVM, const char *pszName)
+{
+ size_t cchName = strlen(pszName);
+ for (PPDMDEV pDev = pVM->pdm.s.pDevs; pDev; pDev = pDev->pNext)
+ if ( pDev->cchName == cchName
+ && !strcmp(pDev->pReg->szName, pszName))
+ return pDev;
+ return NULL;
+}
+
+
+/**
+ * Loads the device modules.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int pdmR3DevLoadModules(PVM pVM)
+{
+ /*
+ * Initialize the callback structure.
+ */
+ PDMDEVREGCBINT RegCB;
+ RegCB.Core.u32Version = PDM_DEVREG_CB_VERSION;
+ RegCB.Core.pfnRegister = pdmR3DevReg_Register;
+ RegCB.pVM = pVM;
+ RegCB.pCfgNode = NULL;
+
+ /*
+ * Register the internal VMM APIC device.
+ */
+ int rc = pdmR3DevReg_Register(&RegCB.Core, &g_DeviceAPIC);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Load the builtin module.
+ */
+ PCFGMNODE pDevicesNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM/Devices");
+ bool fLoadBuiltin;
+ rc = CFGMR3QueryBool(pDevicesNode, "LoadBuiltin", &fLoadBuiltin);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ fLoadBuiltin = true;
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Configuration error: Querying boolean \"LoadBuiltin\" failed with %Rrc\n", rc));
+ return rc;
+ }
+ if (fLoadBuiltin)
+ {
+ /* make filename */
+ char *pszFilename = pdmR3FileR3("VBoxDD", true /*fShared*/);
+ if (!pszFilename)
+ return VERR_NO_TMP_MEMORY;
+ rc = pdmR3DevLoad(pVM, &RegCB, pszFilename, "VBoxDD");
+ RTMemTmpFree(pszFilename);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* make filename */
+ pszFilename = pdmR3FileR3("VBoxDD2", true /*fShared*/);
+ if (!pszFilename)
+ return VERR_NO_TMP_MEMORY;
+ rc = pdmR3DevLoad(pVM, &RegCB, pszFilename, "VBoxDD2");
+ RTMemTmpFree(pszFilename);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Load additional device modules.
+ */
+ PCFGMNODE pCur;
+ for (pCur = CFGMR3GetFirstChild(pDevicesNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /*
+ * Get the name and path.
+ */
+ char szName[PDMMOD_NAME_LEN];
+ rc = CFGMR3GetName(pCur, &szName[0], sizeof(szName));
+ if (rc == VERR_CFGM_NOT_ENOUGH_SPACE)
+ {
+ AssertMsgFailed(("configuration error: The module name is too long, cchName=%zu.\n", CFGMR3GetNameLen(pCur)));
+ return VERR_PDM_MODULE_NAME_TOO_LONG;
+ }
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("CFGMR3GetName -> %Rrc.\n", rc));
+ return rc;
+ }
+
+        /* The path is optional; if none is given, the module name is used and the default path is prepended below. */
+ char szFilename[RTPATH_MAX];
+ rc = CFGMR3QueryString(pCur, "Path", &szFilename[0], sizeof(szFilename));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ strcpy(szFilename, szName);
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("configuration error: Failure to query the module path, rc=%Rrc.\n", rc));
+ return rc;
+ }
+
+ /* prepend path? */
+ if (!RTPathHavePath(szFilename))
+ {
+ char *psz = pdmR3FileR3(szFilename, false /*fShared*/);
+ if (!psz)
+ return VERR_NO_TMP_MEMORY;
+ size_t cch = strlen(psz) + 1;
+ if (cch > sizeof(szFilename))
+ {
+ RTMemTmpFree(psz);
+                AssertMsgFailed(("Filename too long! cch=%zu '%s'\n", cch, psz));
+ return VERR_FILENAME_TOO_LONG;
+ }
+ memcpy(szFilename, psz, cch);
+ RTMemTmpFree(psz);
+ }
+
+ /*
+         * Load the module and register its devices.
+ */
+ RegCB.pCfgNode = pCur;
+ rc = pdmR3DevLoad(pVM, &RegCB, szFilename, szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads one device module and calls the registration entry point.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegCB The registration callback stuff.
+ * @param pszFilename Module filename.
+ * @param pszName Module name.
+ */
+static int pdmR3DevLoad(PVM pVM, PPDMDEVREGCBINT pRegCB, const char *pszFilename, const char *pszName)
+{
+ /*
+ * Load it.
+ */
+ int rc = pdmR3LoadR3U(pVM->pUVM, pszFilename, pszName);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Get the registration export and call it.
+ */
+ FNPDMVBOXDEVICESREGISTER *pfnVBoxDevicesRegister;
+ rc = PDMR3LdrGetSymbolR3(pVM, pszName, "VBoxDevicesRegister", (void **)&pfnVBoxDevicesRegister);
+ if (RT_SUCCESS(rc))
+ {
+ Log(("PDM: Calling VBoxDevicesRegister (%p) of %s (%s)\n", pfnVBoxDevicesRegister, pszName, pszFilename));
+ rc = pfnVBoxDevicesRegister(&pRegCB->Core, VBOX_VERSION);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Successfully loaded device module %s (%s).\n", pszName, pszFilename));
+ else
+ {
+ VMR3SetError(pVM->pUVM, rc, RT_SRC_POS, "VBoxDevicesRegister failed with rc=%Rrc for module %s (%s)",
+ rc, pszName, pszFilename);
+ AssertMsgFailed(("VBoxDevicesRegister failed with rc=%Rrc for module %s (%s)\n", rc, pszName, pszFilename));
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("Failed to locate 'VBoxDevicesRegister' in %s (%s) rc=%Rrc\n", pszName, pszFilename, rc));
+ if (rc == VERR_SYMBOL_NOT_FOUND)
+ rc = VERR_PDM_NO_REGISTRATION_EXPORT;
+ VMR3SetError(pVM->pUVM, rc, RT_SRC_POS, "Failed to locate 'VBoxDevicesRegister' in %s (%s) rc=%Rrc",
+ pszName, pszFilename, rc);
+ }
+ }
+ else
+        AssertMsgFailed(("Failed to load %s (%s) rc=%Rrc!\n", pszName, pszFilename, rc));
+ return rc;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREGCB,pfnRegister}
+ */
+static DECLCALLBACK(int) pdmR3DevReg_Register(PPDMDEVREGCB pCallbacks, PCPDMDEVREG pReg)
+{
+ /*
+ * Validate the registration structure.
+ */
+ Assert(pReg);
+ AssertMsgReturn(pReg->u32Version == PDM_DEVREG_VERSION,
+ ("Unknown struct version %#x!\n", pReg->u32Version),
+ VERR_PDM_UNKNOWN_DEVREG_VERSION);
+
+ AssertMsgReturn( pReg->szName[0]
+ && strlen(pReg->szName) < sizeof(pReg->szName)
+ && pdmR3IsValidName(pReg->szName),
+ ("Invalid name '%.*s'\n", sizeof(pReg->szName), pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn( !(pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ || ( pReg->pszRCMod[0]
+ && strlen(pReg->pszRCMod) < RT_SIZEOFMEMB(PDMDEVICECREATEREQ, szModName)),
+ ("Invalid GC module name '%s' - (Device %s)\n", pReg->pszRCMod, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn( !(pReg->fFlags & PDM_DEVREG_FLAGS_R0)
+ || ( pReg->pszR0Mod[0]
+ && strlen(pReg->pszR0Mod) < RT_SIZEOFMEMB(PDMDEVICECREATEREQ, szModName)),
+ ("Invalid R0 module name '%s' - (Device %s)\n", pReg->pszR0Mod, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn((pReg->fFlags & PDM_DEVREG_FLAGS_HOST_BITS_MASK) == PDM_DEVREG_FLAGS_HOST_BITS_DEFAULT,
+ ("Invalid host bits flags! fFlags=%#x (Device %s)\n", pReg->fFlags, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_HOST_BITS);
+ AssertMsgReturn((pReg->fFlags & PDM_DEVREG_FLAGS_GUEST_BITS_MASK),
+ ("Invalid guest bits flags! fFlags=%#x (Device %s)\n", pReg->fFlags, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn(pReg->fClass,
+ ("No class! (Device %s)\n", pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn(pReg->cMaxInstances > 0,
+ ("Max instances %u! (Device %s)\n", pReg->cMaxInstances, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ uint32_t const cbMaxInstance = pReg->fFlags & (PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0)
+ ? PDM_MAX_DEVICE_INSTANCE_SIZE : PDM_MAX_DEVICE_INSTANCE_SIZE_R3;
+ AssertMsgReturn(pReg->cbInstanceShared <= cbMaxInstance,
+ ("Instance size %u bytes! (Max %u; Device %s)\n", pReg->cbInstanceShared, cbMaxInstance, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn(pReg->cbInstanceCC <= cbMaxInstance,
+ ("Instance size %d bytes! (Max %u; Device %s)\n", pReg->cbInstanceCC, cbMaxInstance, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertMsgReturn(pReg->pfnConstruct,
+ ("No constructor! (Device %s)\n", pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertLogRelMsgReturn((pReg->fFlags & PDM_DEVREG_FLAGS_GUEST_BITS_MASK) == PDM_DEVREG_FLAGS_GUEST_BITS_DEFAULT,
+ ("PDM: Rejected device '%s' because it didn't match the guest bits.\n", pReg->szName),
+ VERR_PDM_INVALID_DEVICE_GUEST_BITS);
+ AssertLogRelMsg(pReg->u32VersionEnd == PDM_DEVREG_VERSION,
+ ("u32VersionEnd=%#x, expected %#x. (szName=%s)\n",
+ pReg->u32VersionEnd, PDM_DEVREG_VERSION, pReg->szName));
+ AssertLogRelMsgReturn(pReg->cMaxPciDevices <= 8, ("%#x (szName=%s)\n", pReg->cMaxPciDevices, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertLogRelMsgReturn(pReg->cMaxMsixVectors <= VBOX_MSIX_MAX_ENTRIES,
+ ("%#x (szName=%s)\n", pReg->cMaxMsixVectors, pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+ AssertLogRelMsgReturn(pReg->fFlags & PDM_DEVREG_FLAGS_NEW_STYLE /* the flag is required now */,
+ ("PDM_DEVREG_FLAGS_NEW_STYLE not set for szName=%s!\n", pReg->szName),
+ VERR_PDM_INVALID_DEVICE_REGISTRATION);
+
+ /*
+     * Check for duplicates and find the FIFO insertion point at the same time.
+ */
+ PCPDMDEVREGCBINT pRegCB = (PCPDMDEVREGCBINT)pCallbacks;
+ PPDMDEV pDevPrev = NULL;
+ PPDMDEV pDev = pRegCB->pVM->pdm.s.pDevs;
+ for (; pDev; pDevPrev = pDev, pDev = pDev->pNext)
+ AssertMsgReturn(strcmp(pDev->pReg->szName, pReg->szName),
+ ("Device '%s' already exists\n", pReg->szName),
+ VERR_PDM_DEVICE_NAME_CLASH);
+
+ /*
+ * Allocate new device structure, initialize and insert it into the list.
+ */
+ int rc;
+ pDev = (PPDMDEV)MMR3HeapAlloc(pRegCB->pVM, MM_TAG_PDM_DEVICE, sizeof(*pDev));
+ if (pDev)
+ {
+ pDev->pNext = NULL;
+ pDev->cInstances = 0;
+ pDev->pInstances = NULL;
+ pDev->pReg = pReg;
+ pDev->cchName = (uint32_t)strlen(pReg->szName);
+ rc = CFGMR3QueryStringAllocDef( pRegCB->pCfgNode, "RCSearchPath", &pDev->pszRCSearchPath, NULL);
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3QueryStringAllocDef(pRegCB->pCfgNode, "R0SearchPath", &pDev->pszR0SearchPath, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ if (pDevPrev)
+ pDevPrev->pNext = pDev;
+ else
+ pRegCB->pVM->pdm.s.pDevs = pDev;
+ Log(("PDM: Registered device '%s'\n", pReg->szName));
+ return VINF_SUCCESS;
+ }
+
+ MMR3HeapFree(pDev);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
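+
+/* For illustration only (not part of the original source): a minimal sketch of
+ * a registration that would pass the validation above. The device name, state
+ * struct and constructor are invented, only a subset of the PDMDEVREG fields
+ * is shown, and the rest would be zero/NULL:
+ *
+ *     static const PDMDEVREG g_DeviceExample =
+ *     {
+ *         .u32Version       = PDM_DEVREG_VERSION,
+ *         .szName           = "example",
+ *         .fFlags           = PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_NEW_STYLE,
+ *         .fClass           = PDM_DEVREG_CLASS_MISC,
+ *         .cMaxInstances    = 1,
+ *         .cbInstanceShared = sizeof(EXAMPLESTATE),
+ *         .pfnConstruct     = exampleConstruct,
+ *         .u32VersionEnd    = PDM_DEVREG_VERSION,
+ *     };
+ */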
+
+
+/**
+ * Locates a LUN.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param ppLun Where to store the pointer to the LUN if found.
+ * @thread  Try to do this only in EMT...
+ */
+int pdmR3DevFindLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPPDMLUN ppLun)
+{
+ /*
+ * Iterate registered devices looking for the device.
+ */
+ size_t cchDevice = strlen(pszDevice);
+ for (PPDMDEV pDev = pVM->pdm.s.pDevs; pDev; pDev = pDev->pNext)
+ {
+ if ( pDev->cchName == cchDevice
+ && !memcmp(pDev->pReg->szName, pszDevice, cchDevice))
+ {
+ /*
+ * Iterate device instances.
+ */
+ for (PPDMDEVINS pDevIns = pDev->pInstances; pDevIns; pDevIns = pDevIns->Internal.s.pPerDeviceNextR3)
+ {
+ if (pDevIns->iInstance == iInstance)
+ {
+ /*
+ * Iterate luns.
+ */
+ for (PPDMLUN pLun = pDevIns->Internal.s.pLunsR3; pLun; pLun = pLun->pNext)
+ {
+ if (pLun->iLun == iLun)
+ {
+ *ppLun = pLun;
+ return VINF_SUCCESS;
+ }
+ }
+ return VERR_PDM_LUN_NOT_FOUND;
+ }
+ }
+ return VERR_PDM_DEVICE_INSTANCE_NOT_FOUND;
+ }
+ }
+ return VERR_PDM_DEVICE_NOT_FOUND;
+}
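+
+/* Usage sketch (the device name is invented):
+ *
+ *     PPDMLUN pLun;
+ *     int rc = pdmR3DevFindLun(pVM, "e1000", 0, 0, &pLun);
+ *     if (RT_SUCCESS(rc))
+ *         ...; // pLun->pTop is the topmost attached driver, NULL if nothing is attached.
+ */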
+
+
+/**
+ * Attaches a preconfigured driver to an existing device instance.
+ *
+ * This is used to change drivers and suchlike at runtime.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
+ * @param ppBase Where to store the base interface pointer. Optional.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3DeviceAttach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("PDMR3DeviceAttach: pszDevice=%p:{%s} iInstance=%d iLun=%d fFlags=%#x ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, fFlags, ppBase));
+
+ /*
+ * Find the LUN in question.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Can we attach anything at runtime?
+ */
+ PPDMDEVINS pDevIns = pLun->pDevIns;
+ if (pDevIns->pReg->pfnAttach)
+ {
+ if (!pLun->pTop)
+ {
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ rc = pDevIns->pReg->pfnAttach(pDevIns, iLun, fFlags);
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ }
+ else
+ rc = VERR_PDM_DRIVER_ALREADY_ATTACHED;
+ }
+ else
+ rc = VERR_PDM_DEVICE_NO_RT_ATTACH;
+
+ if (ppBase)
+ *ppBase = pLun->pTop ? &pLun->pTop->IBase : NULL;
+ }
+ else if (ppBase)
+ *ppBase = NULL;
+
+ if (ppBase)
+ LogFlow(("PDMR3DeviceAttach: returns %Rrc *ppBase=%p\n", rc, *ppBase));
+ else
+ LogFlow(("PDMR3DeviceAttach: returns %Rrc\n", rc));
+ return rc;
+}
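+
+/* Usage sketch (hypothetical device name): re-attach whatever driver chain is
+ * configured below LUN#0 of the first AHCI controller; fails with
+ * VERR_PDM_DRIVER_ALREADY_ATTACHED if a chain is already present:
+ *
+ *     PPDMIBASE pBase = NULL;
+ *     int rc = PDMR3DeviceAttach(pUVM, "ahci", 0, 0, 0, &pBase);
+ */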
+
+
+/**
+ * Detaches a driver chain from an existing device instance.
+ *
+ * This is used to change drivers and suchlike at runtime.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3DeviceDetach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags)
+{
+ return PDMR3DriverDetach(pUVM, pszDevice, iInstance, iLun, NULL, 0, fFlags);
+}
+
+
+/**
+ * References the critical section associated with a device for the use by a
+ * timer or similar created by the device.
+ *
+ * @returns Pointer to the critical section.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance in question.
+ *
+ * @internal
+ */
+VMMR3_INT_DECL(PPDMCRITSECT) PDMR3DevGetCritSect(PVM pVM, PPDMDEVINS pDevIns)
+{
+ VM_ASSERT_EMT(pVM); RT_NOREF_PV(pVM);
+ VM_ASSERT_STATE(pVM, VMSTATE_CREATING);
+ AssertPtr(pDevIns);
+
+ PPDMCRITSECT pCritSect = pDevIns->pCritSectRoR3;
+ AssertPtr(pCritSect);
+ pCritSect->s.fUsedByTimerOrSimilar = true;
+
+ return pCritSect;
+}
+
+
+/**
+ * Attaches a preconfigured driver to an existing device or driver instance.
+ *
+ * This is used to change drivers and suchlike at runtime. The driver or device
+ * at the end of the chain will be told to attach to whatever is configured
+ * below it.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
+ * @param ppBase Where to store the base interface pointer. Optional.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3DriverAttach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
+{
+ LogFlow(("PDMR3DriverAttach: pszDevice=%p:{%s} iInstance=%d iLun=%d fFlags=%#x ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, fFlags, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+
+ if (ppBase)
+ *ppBase = NULL;
+
+ /*
+ * Find the LUN in question.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Anything attached to the LUN?
+ */
+ PPDMDRVINS pDrvIns = pLun->pTop;
+ if (!pDrvIns)
+ {
+ /* No, ask the device to attach to the new stuff. */
+ PPDMDEVINS pDevIns = pLun->pDevIns;
+ if (pDevIns->pReg->pfnAttach)
+ {
+ PDMCritSectEnter(pVM, pDevIns->pCritSectRoR3, VERR_IGNORED);
+ rc = pDevIns->pReg->pfnAttach(pDevIns, iLun, fFlags);
+ if (RT_SUCCESS(rc) && ppBase)
+ *ppBase = pLun->pTop ? &pLun->pTop->IBase : NULL;
+ PDMCritSectLeave(pVM, pDevIns->pCritSectRoR3);
+ }
+ else
+ rc = VERR_PDM_DEVICE_NO_RT_ATTACH;
+ }
+ else
+ {
+ /* Yes, find the bottom most driver and ask it to attach to the new stuff. */
+ while (pDrvIns->Internal.s.pDown)
+ pDrvIns = pDrvIns->Internal.s.pDown;
+ if (pDrvIns->pReg->pfnAttach)
+ {
+ rc = pDrvIns->pReg->pfnAttach(pDrvIns, fFlags);
+ if (RT_SUCCESS(rc) && ppBase)
+ *ppBase = pDrvIns->Internal.s.pDown
+ ? &pDrvIns->Internal.s.pDown->IBase
+ : NULL;
+ }
+ else
+ rc = VERR_PDM_DRIVER_NO_RT_ATTACH;
+ }
+ }
+
+ if (ppBase)
+ LogFlow(("PDMR3DriverAttach: returns %Rrc *ppBase=%p\n", rc, *ppBase));
+ else
+ LogFlow(("PDMR3DriverAttach: returns %Rrc\n", rc));
+ return rc;
+}
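+
+/* Usage sketch (hypothetical names): ask the bottom of the chain hanging off
+ * "ahci"/0 LUN#0 to attach whatever is now configured below it, without
+ * signalling a hot-plug event:
+ *
+ *     PPDMIBASE pBase = NULL;
+ *     int rc = PDMR3DriverAttach(pUVM, "ahci", 0, 0, PDM_TACH_FLAGS_NOT_HOT_PLUG, &pBase);
+ */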
+
+
+/**
+ * Detaches the specified driver instance.
+ *
+ * This is used to replumb drivers at runtime for simulating hot plugging and
+ * media changes.
+ *
+ * This is a superset of PDMR3DeviceDetach. It allows detaching drivers from
+ * any driver or device by specifying the driver to start detaching at. The
+ * only prerequisite is that the driver or device above implements the
+ * pfnDetach callback (PDMDRVREG / PDMDEVREG).
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iDevIns Device instance.
+ * @param iLun The Logical Unit in which to look for the driver.
+ * @param   pszDriver       The name of the driver which to detach. If NULL
+ *                          then the entire driver chain is detached.
+ * @param iOccurrence The occurrence of that driver in the chain. This is
+ * usually 0.
+ * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3DriverDetach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
+ const char *pszDriver, unsigned iOccurrence, uint32_t fFlags)
+{
+ LogFlow(("PDMR3DriverDetach: pszDevice=%p:{%s} iDevIns=%u iLun=%u pszDriver=%p:{%s} iOccurrence=%u fFlags=%#x\n",
+ pszDevice, pszDevice, iDevIns, iLun, pszDriver, pszDriver, iOccurrence, fFlags));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+ AssertPtr(pszDevice);
+ AssertPtrNull(pszDriver);
+ Assert(iOccurrence == 0 || pszDriver);
+ Assert(!(fFlags & ~(PDM_TACH_FLAGS_NOT_HOT_PLUG)));
+
+ /*
+ * Find the LUN in question.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3DevFindLun(pVM, pszDevice, iDevIns, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Locate the driver.
+ */
+ PPDMDRVINS pDrvIns = pLun->pTop;
+ if (pDrvIns)
+ {
+ if (pszDriver)
+ {
+ while (pDrvIns)
+ {
+ if (!strcmp(pDrvIns->pReg->szName, pszDriver))
+ {
+ if (iOccurrence == 0)
+ break;
+ iOccurrence--;
+ }
+ pDrvIns = pDrvIns->Internal.s.pDown;
+ }
+ }
+ if (pDrvIns)
+ rc = pdmR3DrvDetach(pDrvIns, fFlags);
+ else
+ rc = VERR_PDM_DRIVER_INSTANCE_NOT_FOUND;
+ }
+ else
+ rc = VINF_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+
+ LogFlow(("PDMR3DriverDetach: returns %Rrc\n", rc));
+ return rc;
+}
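+
+/* Usage sketch (hypothetical names): detach the first driver named "VD" (and
+ * everything below it) from LUN#0 of the first AHCI instance:
+ *
+ *     int rc = PDMR3DriverDetach(pUVM, "ahci", 0, 0, "VD", 0, 0);
+ */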
+
+
+/**
+ * Runtime detach and reattach of a new driver chain or sub chain.
+ *
+ * This is intended to be called on a non-EMT thread, this will instantiate the
+ * new driver (sub-)chain, and then the EMTs will do the actual replumbing. The
+ * destruction of the old driver chain will be taken care of on the calling
+ * thread.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iDevIns Device instance.
+ * @param iLun The Logical Unit in which to look for the driver.
+ * @param pszDriver The name of the driver which to detach and replace.
+ * If NULL then the entire driver chain is to be
+ * reattached.
+ * @param iOccurrence The occurrence of that driver in the chain. This is
+ * usually 0.
+ * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
+ * @param pCfg The configuration of the new driver chain that is
+ * going to be attached. The subtree starts with the
+ * node containing a Driver key, a Config subtree and
+ * optionally an AttachedDriver subtree.
+ *                          If this parameter is NULL, then this call will work
+ *                          like a non-pause version of PDMR3DriverDetach.
+ * @param ppBase Where to store the base interface pointer to the new
+ * driver. Optional.
+ *
+ * @thread Any thread. The EMTs will be involved at some point though.
+ */
+VMMR3DECL(int) PDMR3DriverReattach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
+ const char *pszDriver, unsigned iOccurrence, uint32_t fFlags,
+ PCFGMNODE pCfg, PPPDMIBASE ppBase)
+{
+ NOREF(pUVM); NOREF(pszDevice); NOREF(iDevIns); NOREF(iLun); NOREF(pszDriver); NOREF(iOccurrence);
+ NOREF(fFlags); NOREF(pCfg); NOREF(ppBase);
+ return VERR_NOT_IMPLEMENTED;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMDriver.cpp b/src/VBox/VMM/VMMR3/PDMDriver.cpp
new file mode 100644
index 00000000..75e51f8d
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMDriver.cpp
@@ -0,0 +1,2347 @@
+/* $Id: PDMDriver.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, Driver parts.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DRIVER
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/version.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/ctype.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Internal callback structure pointer.
+ *
+ * The main purpose is to define the extra data we associate
+ * with PDMDRVREGCB so we can find the VM instance and so on.
+ */
+typedef struct PDMDRVREGCBINT
+{
+ /** The callback structure. */
+ PDMDRVREGCB Core;
+ /** A bit of padding. */
+ uint32_t u32[4];
+ /** VM Handle. */
+ PVM pVM;
+ /** Pointer to the configuration node the registrations should be
+ * associated with. Can be NULL. */
+ PCFGMNODE pCfgNode;
+} PDMDRVREGCBINT, *PPDMDRVREGCBINT;
+typedef const PDMDRVREGCBINT *PCPDMDRVREGCBINT;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) pdmR3DrvRegister(PCPDMDRVREGCB pCallbacks, PCPDMDRVREG pReg);
+static int pdmR3DrvLoad(PVM pVM, PPDMDRVREGCBINT pRegCB, const char *pszFilename, const char *pszName);
+
+
+/**
+ * Register drivers in a statically linked environment.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pfnCallback Driver registration callback
+ */
+VMMR3DECL(int) PDMR3DrvStaticRegistration(PVM pVM, FNPDMVBOXDRIVERSREGISTER pfnCallback)
+{
+ /*
+ * The registration callbacks.
+ */
+ PDMDRVREGCBINT RegCB;
+ RegCB.Core.u32Version = PDM_DRVREG_CB_VERSION;
+ RegCB.Core.pfnRegister = pdmR3DrvRegister;
+ RegCB.pVM = pVM;
+ RegCB.pCfgNode = NULL;
+
+ int rc = pfnCallback(&RegCB.Core, VBOX_VERSION);
+ if (RT_FAILURE(rc))
+ AssertMsgFailed(("VBoxDriversRegister failed with rc=%Rrc\n", rc));
+
+ return rc;
+}
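+
+/* Illustration (not part of the original source): the callback handed to
+ * PDMR3DrvStaticRegistration typically just forwards each statically linked
+ * registration structure; g_DrvExample is invented:
+ *
+ *     static DECLCALLBACK(int) exampleDriversRegister(PCPDMDRVREGCB pCallbacks, uint32_t u32Version)
+ *     {
+ *         NOREF(u32Version);
+ *         return pCallbacks->pfnRegister(pCallbacks, &g_DrvExample);
+ *     }
+ */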
+
+
+/**
+ * This function will initialize the drivers for this VM instance.
+ *
+ * First of all this means loading the builtin drivers and letting them
+ * register themselves. Beyond that, any additional driver modules are
+ * loaded and called for registration.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3DrvInit(PVM pVM)
+{
+ LogFlow(("pdmR3DrvInit:\n"));
+
+ AssertRelease(!(RT_UOFFSETOF(PDMDRVINS, achInstanceData) & 15));
+ PPDMDRVINS pDrvInsAssert; NOREF(pDrvInsAssert);
+ AssertCompile(sizeof(pDrvInsAssert->Internal.s) <= sizeof(pDrvInsAssert->Internal.padding));
+ AssertRelease(sizeof(pDrvInsAssert->Internal.s) <= sizeof(pDrvInsAssert->Internal.padding));
+
+ /*
+ * The registration callbacks.
+ */
+ PDMDRVREGCBINT RegCB;
+ RegCB.Core.u32Version = PDM_DRVREG_CB_VERSION;
+ RegCB.Core.pfnRegister = pdmR3DrvRegister;
+ RegCB.pVM = pVM;
+ RegCB.pCfgNode = NULL;
+
+ /*
+ * Load the builtin module
+ */
+ PCFGMNODE pDriversNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM/Drivers");
+ bool fLoadBuiltin;
+ int rc = CFGMR3QueryBool(pDriversNode, "LoadBuiltin", &fLoadBuiltin);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ fLoadBuiltin = true;
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Configuration error: Querying boolean \"LoadBuiltin\" failed with %Rrc\n", rc));
+ return rc;
+ }
+ if (fLoadBuiltin)
+ {
+ /* make filename */
+ char *pszFilename = pdmR3FileR3("VBoxDD", true /*fShared*/);
+ if (!pszFilename)
+ return VERR_NO_TMP_MEMORY;
+ rc = pdmR3DrvLoad(pVM, &RegCB, pszFilename, "VBoxDD");
+ RTMemTmpFree(pszFilename);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Load additional driver modules.
+ */
+ for (PCFGMNODE pCur = CFGMR3GetFirstChild(pDriversNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /*
+ * Get the name and path.
+ */
+ char szName[PDMMOD_NAME_LEN];
+ rc = CFGMR3GetName(pCur, &szName[0], sizeof(szName));
+ if (rc == VERR_CFGM_NOT_ENOUGH_SPACE)
+ {
+ AssertMsgFailed(("configuration error: The module name is too long, cchName=%zu.\n", CFGMR3GetNameLen(pCur)));
+ return VERR_PDM_MODULE_NAME_TOO_LONG;
+ }
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("CFGMR3GetName -> %Rrc.\n", rc));
+ return rc;
+ }
+
+        /* The path is optional; if none is given, the module name is used and the default path is prepended below. */
+ char szFilename[RTPATH_MAX];
+ rc = CFGMR3QueryString(pCur, "Path", &szFilename[0], sizeof(szFilename));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ strcpy(szFilename, szName);
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("configuration error: Failure to query the module path, rc=%Rrc.\n", rc));
+ return rc;
+ }
+
+ /* prepend path? */
+ if (!RTPathHavePath(szFilename))
+ {
+ char *psz = pdmR3FileR3(szFilename, false /*fShared*/);
+ if (!psz)
+ return VERR_NO_TMP_MEMORY;
+ size_t cch = strlen(psz) + 1;
+ if (cch > sizeof(szFilename))
+ {
+ RTMemTmpFree(psz);
+                AssertMsgFailed(("Filename too long! cch=%zu '%s'\n", cch, psz));
+ return VERR_FILENAME_TOO_LONG;
+ }
+ memcpy(szFilename, psz, cch);
+ RTMemTmpFree(psz);
+ }
+
+ /*
+         * Load the module and register its drivers.
+ */
+ RegCB.pCfgNode = pCur;
+ rc = pdmR3DrvLoad(pVM, &RegCB, szFilename, szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ LogFlow(("pdmR3DrvInit: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads one driver module and calls the registration entry point.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegCB The registration callback stuff.
+ * @param pszFilename Module filename.
+ * @param pszName Module name.
+ */
+static int pdmR3DrvLoad(PVM pVM, PPDMDRVREGCBINT pRegCB, const char *pszFilename, const char *pszName)
+{
+ /*
+ * Load it.
+ */
+ int rc = pdmR3LoadR3U(pVM->pUVM, pszFilename, pszName);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Get the registration export and call it.
+ */
+ FNPDMVBOXDRIVERSREGISTER *pfnVBoxDriversRegister;
+ rc = PDMR3LdrGetSymbolR3(pVM, pszName, "VBoxDriversRegister", (void **)&pfnVBoxDriversRegister);
+ if (RT_SUCCESS(rc))
+ {
+ Log(("PDM: Calling VBoxDriversRegister (%p) of %s (%s)\n", pfnVBoxDriversRegister, pszName, pszFilename));
+ rc = pfnVBoxDriversRegister(&pRegCB->Core, VBOX_VERSION);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Successfully loaded driver module %s (%s).\n", pszName, pszFilename));
+ else
+ AssertMsgFailed(("VBoxDriversRegister failed with rc=%Rrc\n", rc));
+ }
+ else
+ {
+ AssertMsgFailed(("Failed to locate 'VBoxDriversRegister' in %s (%s) rc=%Rrc\n", pszName, pszFilename, rc));
+ if (rc == VERR_SYMBOL_NOT_FOUND)
+ rc = VERR_PDM_NO_REGISTRATION_EXPORT;
+ }
+ }
+ else
+ AssertMsgFailed(("Failed to load %s (%s) rc=%Rrc!\n", pszName, pszFilename, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVREGCB,pfnRegister} */
+static DECLCALLBACK(int) pdmR3DrvRegister(PCPDMDRVREGCB pCallbacks, PCPDMDRVREG pReg)
+{
+ /*
+ * Validate the registration structure.
+ */
+ AssertPtrReturn(pReg, VERR_INVALID_POINTER);
+ AssertMsgReturn(pReg->u32Version == PDM_DRVREG_VERSION,
+ ("%#x\n", pReg->u32Version),
+ VERR_PDM_UNKNOWN_DRVREG_VERSION);
+ AssertReturn(pReg->szName[0], VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(RTStrEnd(pReg->szName, sizeof(pReg->szName)),
+ ("%.*s\n", sizeof(pReg->szName), pReg->szName),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(pdmR3IsValidName(pReg->szName), ("%.*s\n", sizeof(pReg->szName), pReg->szName),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn( !(pReg->fFlags & PDM_DRVREG_FLAGS_R0)
+ || ( pReg->szR0Mod[0]
+ && RTStrEnd(pReg->szR0Mod, sizeof(pReg->szR0Mod))),
+ ("%s: %.*s\n", pReg->szName, sizeof(pReg->szR0Mod), pReg->szR0Mod),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn( !(pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+ || ( pReg->szRCMod[0]
+ && RTStrEnd(pReg->szRCMod, sizeof(pReg->szRCMod))),
+ ("%s: %.*s\n", pReg->szName, sizeof(pReg->szRCMod), pReg->szRCMod),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(RT_VALID_PTR(pReg->pszDescription),
+ ("%s: %p\n", pReg->szName, pReg->pszDescription),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(!(pReg->fFlags & ~(PDM_DRVREG_FLAGS_HOST_BITS_MASK | PDM_DRVREG_FLAGS_R0 | PDM_DRVREG_FLAGS_RC)),
+ ("%s: %#x\n", pReg->szName, pReg->fFlags),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn((pReg->fFlags & PDM_DRVREG_FLAGS_HOST_BITS_MASK) == PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
+ ("%s: %#x\n", pReg->szName, pReg->fFlags),
+ VERR_PDM_INVALID_DRIVER_HOST_BITS);
+ AssertMsgReturn(pReg->cMaxInstances > 0,
+ ("%s: %#x\n", pReg->szName, pReg->cMaxInstances),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(pReg->cbInstance <= _1M,
+ ("%s: %#x\n", pReg->szName, pReg->cbInstance),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(RT_VALID_PTR(pReg->pfnConstruct),
+ ("%s: %p\n", pReg->szName, pReg->pfnConstruct),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+    AssertMsgReturn(RT_VALID_PTR(pReg->pfnRelocate) || !(pReg->fFlags & PDM_DRVREG_FLAGS_RC),
+                    ("%s: pfnRelocate=%p fFlags=%#x\n", pReg->szName, pReg->pfnRelocate, pReg->fFlags),
+                    VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(pReg->pfnSoftReset == NULL,
+ ("%s: %p\n", pReg->szName, pReg->pfnSoftReset),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+ AssertMsgReturn(pReg->u32VersionEnd == PDM_DRVREG_VERSION,
+ ("%s: %#x\n", pReg->szName, pReg->u32VersionEnd),
+ VERR_PDM_INVALID_DRIVER_REGISTRATION);
+
+ /*
+     * Check for duplicates and find the FIFO insertion point at the same time.
+ */
+ PCPDMDRVREGCBINT pRegCB = (PCPDMDRVREGCBINT)pCallbacks;
+ PPDMDRV pDrvPrev = NULL;
+ PPDMDRV pDrv = pRegCB->pVM->pdm.s.pDrvs;
+ for (; pDrv; pDrvPrev = pDrv, pDrv = pDrv->pNext)
+ {
+ if (!strcmp(pDrv->pReg->szName, pReg->szName))
+ {
+ AssertMsgFailed(("Driver '%s' already exists\n", pReg->szName));
+ return VERR_PDM_DRIVER_NAME_CLASH;
+ }
+ }
+
+ /*
+ * Allocate new driver structure and insert it into the list.
+ */
+ int rc;
+ pDrv = (PPDMDRV)MMR3HeapAlloc(pRegCB->pVM, MM_TAG_PDM_DRIVER, sizeof(*pDrv));
+ if (pDrv)
+ {
+ pDrv->pNext = NULL;
+ pDrv->cInstances = 0;
+ pDrv->iNextInstance = 0;
+ pDrv->pReg = pReg;
+ rc = CFGMR3QueryStringAllocDef( pRegCB->pCfgNode, "RCSearchPath", &pDrv->pszRCSearchPath, NULL);
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3QueryStringAllocDef(pRegCB->pCfgNode, "R0SearchPath", &pDrv->pszR0SearchPath, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ if (pDrvPrev)
+ pDrvPrev->pNext = pDrv;
+ else
+ pRegCB->pVM->pdm.s.pDrvs = pDrv;
+ Log(("PDM: Registered driver '%s'\n", pReg->szName));
+ return VINF_SUCCESS;
+ }
+ MMR3HeapFree(pDrv);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
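+
+/* For illustration only: a minimal PDMDRVREG sketch that would pass the
+ * validation in pdmR3DrvRegister above (name, state struct and constructor
+ * are invented; remaining fields zero/NULL):
+ *
+ *     static const PDMDRVREG g_DrvExample =
+ *     {
+ *         .u32Version     = PDM_DRVREG_VERSION,
+ *         .szName         = "ExampleDrv",
+ *         .pszDescription = "Example driver (illustration only).",
+ *         .fFlags         = PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
+ *         .cMaxInstances  = 1,
+ *         .cbInstance     = sizeof(EXAMPLEDRVSTATE),
+ *         .pfnConstruct   = exampleDrvConstruct,
+ *         .u32VersionEnd  = PDM_DRVREG_VERSION,
+ *     };
+ */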
+
+
+/**
+ * Looks up a driver structure by name.
+ * @internal
+ */
+PPDMDRV pdmR3DrvLookup(PVM pVM, const char *pszName)
+{
+ for (PPDMDRV pDrv = pVM->pdm.s.pDrvs; pDrv; pDrv = pDrv->pNext)
+ if (!strcmp(pDrv->pReg->szName, pszName))
+ return pDrv;
+ return NULL;
+}
+
+
+/**
+ * Transforms the driver chain as it's being instantiated.
+ *
+ * Worker for pdmR3DrvInstantiate.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvAbove The driver above, NULL if top.
+ * @param pLun The LUN.
+ * @param ppNode The AttachedDriver node, replaced if any
+ * morphing took place.
+ */
+static int pdmR3DrvMaybeTransformChain(PVM pVM, PPDMDRVINS pDrvAbove, PPDMLUN pLun, PCFGMNODE *ppNode)
+{
+ /*
+ * The typical state of affairs is that there are no injections.
+ */
+ PCFGMNODE pCurTrans = CFGMR3GetFirstChild(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM/DriverTransformations"));
+ if (!pCurTrans)
+ return VINF_SUCCESS;
+
+ /*
+ * Gather the attributes used in the matching process.
+ */
+ const char *pszDevice = pLun->pDevIns
+ ? pLun->pDevIns->Internal.s.pDevR3->pReg->szName
+ : pLun->pUsbIns->Internal.s.pUsbDev->pReg->szName;
+ char szLun[32];
+ RTStrPrintf(szLun, sizeof(szLun), "%u", pLun->iLun);
+ const char *pszAbove = pDrvAbove ? pDrvAbove->Internal.s.pDrv->pReg->szName : "<top>";
+ char *pszThisDrv;
+ int rc = CFGMR3QueryStringAlloc(*ppNode, "Driver", &pszThisDrv);
+ AssertMsgRCReturn(rc, ("Query for string value of \"Driver\" -> %Rrc\n", rc),
+ rc == VERR_CFGM_VALUE_NOT_FOUND ? VERR_PDM_CFG_MISSING_DRIVER_NAME : rc);
+
+ uint64_t uInjectTransformationAbove = 0;
+ if (pDrvAbove)
+ {
+ rc = CFGMR3QueryIntegerDef(CFGMR3GetParent(*ppNode), "InjectTransformationPtr", &uInjectTransformationAbove, 0);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+
+ /*
+ * Enumerate possible driver chain transformations.
+ */
+ unsigned cTransformations = 0;
+ for (; pCurTrans != NULL; pCurTrans = CFGMR3GetNextChild(pCurTrans))
+ {
+ char szCurTransNm[256];
+ rc = CFGMR3GetName(pCurTrans, szCurTransNm, sizeof(szCurTransNm));
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/PDM/DriverTransformations/&lt;name&gt;/Device,string,*}
+         * One or more simple wildcard patterns separated by '|' for matching
+         * the devices this transformation rule applies to. */
+ char *pszMultiPat;
+ rc = CFGMR3QueryStringAllocDef(pCurTrans, "Device", &pszMultiPat, "*");
+ AssertLogRelRCReturn(rc, rc);
+ bool fMatch = RTStrSimplePatternMultiMatch(pszMultiPat, RTSTR_MAX, pszDevice, RTSTR_MAX, NULL);
+ MMR3HeapFree(pszMultiPat);
+ if (!fMatch)
+ continue;
+
+ /** @cfgm{/PDM/DriverTransformations/&lt;name&gt;/LUN,string,*}
+ * One or more simple wildcard patterns separated by '|' for matching
+ * the LUNs this transformation rule applies to. */
+ rc = CFGMR3QueryStringAllocDef(pCurTrans, "LUN", &pszMultiPat, "*");
+ AssertLogRelRCReturn(rc, rc);
+ fMatch = RTStrSimplePatternMultiMatch(pszMultiPat, RTSTR_MAX, szLun, RTSTR_MAX, NULL);
+ MMR3HeapFree(pszMultiPat);
+ if (!fMatch)
+ continue;
+
+ /** @cfgm{/PDM/DriverTransformations/&lt;name&gt;/BelowDriver,string,*}
+ * One or more simple wildcard patterns separated by '|' for matching the
+ * drivers the transformation should be applied below. That is, when a
+ * driver matched here attaches another driver below it, the
+ * transformation will be applied. To represent the device, '&lt;top&gt;'
+ * is used. */
+ rc = CFGMR3QueryStringAllocDef(pCurTrans, "BelowDriver", &pszMultiPat, "*");
+ AssertLogRelRCReturn(rc, rc);
+ fMatch = RTStrSimplePatternMultiMatch(pszMultiPat, RTSTR_MAX, pszAbove, RTSTR_MAX, NULL);
+ MMR3HeapFree(pszMultiPat);
+ if (!fMatch)
+ continue;
+
+ /** @cfgm{/PDM/DriverTransformations/&lt;name&gt;/AboveDriver,string,*}
+ * One or more simple wildcard patterns separated by '|' for matching the
+ * drivers the transformation should be applied above or at (depending on
+ * the action). The value being matched against here is the driver that
+ * is in the process of being attached, so for mergeconfig actions this is
+ * usually what you need to match on. */
+ rc = CFGMR3QueryStringAlloc(pCurTrans, "AboveDriver", &pszMultiPat);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = VINF_SUCCESS;
+ else
+ {
+ AssertLogRelRCReturn(rc, rc);
+ fMatch = RTStrSimplePatternMultiMatch(pszMultiPat, RTSTR_MAX, pszThisDrv, RTSTR_MAX, NULL);
+ MMR3HeapFree(pszMultiPat);
+ if (!fMatch)
+ continue;
+ if (uInjectTransformationAbove == (uintptr_t)pCurTrans)
+ continue;
+ }
+
+ /*
+ * We've got a match! Now, what are we supposed to do?
+ */
+ /** @cfgm{/PDM/DriverTransformations/&lt;name&gt;/Action,string,inject}
+ * The action that the transformation takes. Possible values are:
+ * - inject
+ * - mergeconfig: This merges the content of the 'Config' key under the
+ * transformation into the driver's own 'Config' key, replacing any
+ * duplicate values.
+ * - remove
+ * - removetree
+ * - replace
+ * - replacetree
+ */
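+ /* Purely illustrative sketch (the rule and driver names are made up and
+ * not taken from any real configuration): a transformation such as
+ *
+ * PDM/DriverTransformations/Sniff/Device = "e1000|pcnet"
+ * PDM/DriverTransformations/Sniff/LUN = "0"
+ * PDM/DriverTransformations/Sniff/Action = "inject"
+ * PDM/DriverTransformations/Sniff/AttachedDriver/Driver = "NetSniffer"
+ *
+ * would turn the chain device -> DrvX into device -> NetSniffer -> DrvX
+ * for LUN#0 of the matching network devices. */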
+ char szAction[16];
+ rc = CFGMR3QueryStringDef(pCurTrans, "Action", szAction, sizeof(szAction), "inject");
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn( !strcmp(szAction, "inject")
+ || !strcmp(szAction, "mergeconfig")
+ || !strcmp(szAction, "remove")
+ || !strcmp(szAction, "removetree")
+ || !strcmp(szAction, "replace")
+ || !strcmp(szAction, "replacetree")
+ ,
+ ("Action='%s', valid values are 'inject', 'mergeconfig', 'replace', 'replacetree', 'remove', 'removetree'.\n", szAction),
+ VERR_PDM_MISCONFIGURED_DRV_TRANSFORMATION);
+ LogRel(("PDMDriver: Applying '%s' to '%s'::[%s]...'%s': %s\n", szCurTransNm, pszDevice, szLun, pszThisDrv, szAction));
+ CFGMR3Dump(*ppNode);
+ CFGMR3Dump(pCurTrans);
+
+ /* Get the attached driver to inject. */
+ PCFGMNODE pTransAttDrv = NULL;
+ if (!strcmp(szAction, "inject") || !strcmp(szAction, "replace") || !strcmp(szAction, "replacetree"))
+ {
+ pTransAttDrv = CFGMR3GetChild(pCurTrans, "AttachedDriver");
+ AssertLogRelMsgReturn(pTransAttDrv,
+ ("An %s transformation requires an AttachedDriver child node!\n", szAction),
+ VERR_PDM_MISCONFIGURED_DRV_TRANSFORMATION);
+ }
+
+
+ /*
+ * Remove the node.
+ */
+ if (!strcmp(szAction, "remove") || !strcmp(szAction, "removetree"))
+ {
+ PCFGMNODE pBelowThis = CFGMR3GetChild(*ppNode, "AttachedDriver");
+ if (!pBelowThis || !strcmp(szAction, "removetree"))
+ {
+ CFGMR3RemoveNode(*ppNode);
+ *ppNode = NULL;
+ }
+ else
+ {
+ PCFGMNODE pBelowThisCopy;
+ rc = CFGMR3DuplicateSubTree(pBelowThis, &pBelowThisCopy);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = CFGMR3ReplaceSubTree(*ppNode, pBelowThisCopy);
+ AssertLogRelRCReturnStmt(rc, CFGMR3RemoveNode(pBelowThis), rc);
+ }
+ }
+ /*
+ * Replace the driver about to be instantiated.
+ */
+ else if (!strcmp(szAction, "replace") || !strcmp(szAction, "replacetree"))
+ {
+ PCFGMNODE pTransCopy;
+ rc = CFGMR3DuplicateSubTree(pTransAttDrv, &pTransCopy);
+ AssertLogRelRCReturn(rc, rc);
+
+ PCFGMNODE pBelowThis = CFGMR3GetChild(*ppNode, "AttachedDriver");
+ if (!pBelowThis || !strcmp(szAction, "replacetree"))
+ rc = VINF_SUCCESS;
+ else
+ {
+ PCFGMNODE pBelowThisCopy;
+ rc = CFGMR3DuplicateSubTree(pBelowThis, &pBelowThisCopy);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertSubTree(pTransCopy, "AttachedDriver", pBelowThisCopy, NULL);
+ AssertLogRelRC(rc);
+ if (RT_FAILURE(rc))
+ CFGMR3RemoveNode(pBelowThisCopy);
+ }
+ }
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3ReplaceSubTree(*ppNode, pTransCopy);
+ if (RT_FAILURE(rc))
+ CFGMR3RemoveNode(pTransCopy);
+ }
+ /*
+ * Inject a driver before the driver about to be instantiated.
+ */
+ else if (!strcmp(szAction, "inject"))
+ {
+ PCFGMNODE pTransCopy;
+ rc = CFGMR3DuplicateSubTree(pTransAttDrv, &pTransCopy);
+ AssertLogRelRCReturn(rc, rc);
+
+ PCFGMNODE pThisCopy;
+ rc = CFGMR3DuplicateSubTree(*ppNode, &pThisCopy);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertSubTree(pTransCopy, "AttachedDriver", pThisCopy, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertInteger(pTransCopy, "InjectTransformationPtr", (uintptr_t)pCurTrans);
+ AssertLogRelRC(rc);
+ rc = CFGMR3InsertString(pTransCopy, "InjectTransformationNm", szCurTransNm);
+ AssertLogRelRC(rc);
+ if (RT_SUCCESS(rc))
+ rc = CFGMR3ReplaceSubTree(*ppNode, pTransCopy);
+ }
+ else
+ {
+ AssertLogRelRC(rc);
+ CFGMR3RemoveNode(pThisCopy);
+ }
+ }
+ if (RT_FAILURE(rc))
+ CFGMR3RemoveNode(pTransCopy);
+ }
+ /*
+ * Merge the Config node of the transformation with the one of the
+ * current driver.
+ */
+ else if (!strcmp(szAction, "mergeconfig"))
+ {
+ PCFGMNODE pTransConfig = CFGMR3GetChild(pCurTrans, "Config");
+ AssertLogRelReturn(pTransConfig, VERR_PDM_MISCONFIGURED_DRV_TRANSFORMATION);
+
+ PCFGMNODE pDrvConfig = CFGMR3GetChild(*ppNode, "Config");
+ if (!pDrvConfig && *ppNode)
+ CFGMR3InsertNode(*ppNode, "Config", &pDrvConfig);
+ AssertLogRelReturn(pDrvConfig, VERR_PDM_CANNOT_TRANSFORM_REMOVED_DRIVER);
+
+ rc = CFGMR3CopyTree(pDrvConfig, pTransConfig, CFGM_COPY_FLAGS_REPLACE_VALUES | CFGM_COPY_FLAGS_MERGE_KEYS);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ else
+ AssertFailed();
+
+ cTransformations++;
+ if (*ppNode)
+ CFGMR3Dump(*ppNode);
+ else
+ LogRel(("PDMDriver: The transformation removed the driver.\n"));
+ }
+
+ /*
+ * Note what happened in the release log.
+ */
+ if (cTransformations > 0)
+ LogRel(("PDMDriver: Transformations done. Applied %u driver transformations.\n", cTransformations));
+
+ return rc;
+}
+
+
+/**
+ * Instantiate a driver.
+ *
+ * @returns VBox status code, including informational statuses.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pNode The CFGM node for the driver.
+ * @param pBaseInterface The base interface.
+ * @param pDrvAbove The driver above it. NULL if it's the top-most
+ * driver.
+ * @param pLun The LUN the driver is being attached to. NULL
+ * if we're instantiating a driver chain before
+ * attaching it - untested.
+ * @param ppBaseInterface Where to return the pointer to the base
+ * interface of the newly created driver.
+ *
+ * @remarks Recursive calls to this function are normal, as the drivers will
+ * attach to anything below them during the pfnConstruct call.
+ *
+ * @todo Need to extend this interface a bit so that the driver
+ * transformation feature can attach drivers to unconfigured LUNs and
+ * at the end of chains.
+ */
+int pdmR3DrvInstantiate(PVM pVM, PCFGMNODE pNode, PPDMIBASE pBaseInterface, PPDMDRVINS pDrvAbove,
+ PPDMLUN pLun, PPDMIBASE *ppBaseInterface)
+{
+ Assert(!pDrvAbove || !pDrvAbove->Internal.s.pDown);
+ Assert(!pDrvAbove || !pDrvAbove->pDownBase);
+
+ Assert(pBaseInterface->pfnQueryInterface(pBaseInterface, PDMIBASE_IID) == pBaseInterface);
+
+ /*
+ * Do driver chain injections
+ */
+ int rc = pdmR3DrvMaybeTransformChain(pVM, pDrvAbove, pLun, &pNode);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (!pNode)
+ return VERR_PDM_NO_ATTACHED_DRIVER;
+
+ /*
+ * Find the driver.
+ */
+ char *pszName;
+ rc = CFGMR3QueryStringAlloc(pNode, "Driver", &pszName);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMDRV pDrv = pdmR3DrvLookup(pVM, pszName);
+ if ( pDrv
+ && pDrv->cInstances < pDrv->pReg->cMaxInstances)
+ {
+ /* config node */
+ PCFGMNODE pConfigNode = CFGMR3GetChild(pNode, "Config");
+ if (!pConfigNode)
+ rc = CFGMR3InsertNode(pNode, "Config", &pConfigNode);
+ if (RT_SUCCESS(rc))
+ {
+ CFGMR3SetRestrictedRoot(pConfigNode);
+
+ /*
+ * Allocate the driver instance.
+ */
+ size_t cb = RT_UOFFSETOF_DYN(PDMDRVINS, achInstanceData[pDrv->pReg->cbInstance]);
+ cb = RT_ALIGN_Z(cb, 16);
+ PPDMDRVINS pNew;
+#undef PDM_WITH_RING0_DRIVERS
+#ifdef PDM_WITH_RING0_DRIVERS
+ bool const fHyperHeap = !!(pDrv->pReg->fFlags & (PDM_DRVREG_FLAGS_R0 | PDM_DRVREG_FLAGS_RC));
+ if (fHyperHeap)
+ rc = MMHyperAlloc(pVM, cb, 64, MM_TAG_PDM_DRIVER, (void **)&pNew);
+ else
+#endif
+ rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_DRIVER, cb, (void **)&pNew);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the instance structure (declaration order).
+ */
+ pNew->u32Version = PDM_DRVINS_VERSION;
+ pNew->iInstance = pDrv->iNextInstance;
+ pNew->Internal.s.pUp = pDrvAbove;
+ //pNew->Internal.s.pDown = NULL;
+ pNew->Internal.s.pLun = pLun;
+ pNew->Internal.s.pDrv = pDrv;
+ pNew->Internal.s.pVMR3 = pVM;
+#ifdef PDM_WITH_RING0_DRIVERS
+ pNew->Internal.s.pVMR0 = pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_R0 ? pVM->pVMR0ForCall : NIL_RTR0PTR;
+ pNew->Internal.s.pVMRC = pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_RC ? pVM->pVMRC : NIL_RTRCPTR;
+#endif
+ //pNew->Internal.s.fDetaching = false;
+ pNew->Internal.s.fVMSuspended = true; /** @todo should be 'false' if the driver is attached at runtime. */
+ //pNew->Internal.s.fVMReset = false;
+#ifdef PDM_WITH_RING0_DRIVERS
+ pNew->Internal.s.fHyperHeap = fHyperHeap;
+#endif
+ //pNew->Internal.s.pfnAsyncNotify = NULL;
+ pNew->Internal.s.pCfgHandle = pNode;
+ pNew->pReg = pDrv->pReg;
+ pNew->pCfg = pConfigNode;
+ pNew->pUpBase = pBaseInterface;
+ Assert(!pDrvAbove || pBaseInterface == &pDrvAbove->IBase);
+ //pNew->pDownBase = NULL;
+ //pNew->IBase.pfnQueryInterface = NULL;
+ //pNew->fTracing = 0;
+ pNew->idTracing = ++pVM->pdm.s.idTracingOther;
+ pNew->pHlpR3 = &g_pdmR3DrvHlp;
+ pNew->pvInstanceDataR3 = &pNew->achInstanceData[0];
+#ifdef PDM_WITH_RING0_DRIVERS
+ if (pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_R0)
+ {
+ pNew->pvInstanceDataR0 = MMHyperR3ToR0(pVM, &pNew->achInstanceData[0]);
+ rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0DrvHlp", &pNew->pHlpR0);
+ AssertReleaseRCReturn(rc, rc);
+ }
+# ifdef VBOX_WITH_RAW_MODE_KEEP
+ if ( (pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+ && VM_IS_RAW_MODE_ENABLED(pVM))
+ {
+ pNew->pvInstanceDataR0 = MMHyperR3ToRC(pVM, &pNew->achInstanceData[0]);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDrvHlp", &pNew->pHlpRC);
+ AssertReleaseRCReturn(rc, rc);
+ }
+# endif
+#endif
+
+ pDrv->iNextInstance++;
+ pDrv->cInstances++;
+
+ /*
+ * Link it with the driver above / the LUN.
+ */
+ if (pDrvAbove)
+ {
+ pDrvAbove->pDownBase = &pNew->IBase;
+ pDrvAbove->Internal.s.pDown = pNew;
+ }
+ else if (pLun)
+ pLun->pTop = pNew;
+ if (pLun)
+ pLun->pBottom = pNew;
+
+ /*
+ * Invoke the constructor.
+ */
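+ /* Note: pfnConstruct typically attaches whatever is configured below it
+ * via the pfnAttach helper, which recurses back into pdmR3DrvInstantiate
+ * with pNew as pDrvAbove (see the @remarks on this function). */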
+ rc = pDrv->pReg->pfnConstruct(pNew, pNew->pCfg, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc))
+ {
+ AssertPtr(pNew->IBase.pfnQueryInterface);
+ Assert(pNew->IBase.pfnQueryInterface(&pNew->IBase, PDMIBASE_IID) == &pNew->IBase);
+
+ /* Success! */
+ *ppBaseInterface = &pNew->IBase;
+ if (pLun)
+ Log(("PDM: Attached driver %p:'%s'/%d to LUN#%d on device '%s'/%d, pDrvAbove=%p:'%s'/%d\n",
+ pNew, pDrv->pReg->szName, pNew->iInstance,
+ pLun->iLun,
+ pLun->pDevIns ? pLun->pDevIns->pReg->szName : pLun->pUsbIns->pReg->szName,
+ pLun->pDevIns ? pLun->pDevIns->iInstance : pLun->pUsbIns->iInstance,
+ pDrvAbove, pDrvAbove ? pDrvAbove->pReg->szName : "", pDrvAbove ? pDrvAbove->iInstance : UINT32_MAX));
+ else
+ Log(("PDM: Attached driver %p:'%s'/%d, pDrvAbove=%p:'%s'/%d\n",
+ pNew, pDrv->pReg->szName, pNew->iInstance,
+ pDrvAbove, pDrvAbove ? pDrvAbove->pReg->szName : "", pDrvAbove ? pDrvAbove->iInstance : UINT32_MAX));
+ }
+ else
+ {
+ pdmR3DrvDestroyChain(pNew, PDM_TACH_FLAGS_NO_CALLBACKS);
+ if (rc == VERR_VERSION_MISMATCH)
+ rc = VERR_PDM_DRIVER_VERSION_MISMATCH;
+ }
+ }
+ else
+ AssertMsgFailed(("Failed to allocate %d bytes for instantiating driver '%s'! rc=%Rrc\n", cb, pszName, rc));
+ }
+ else
+ AssertMsgFailed(("Failed to create Config node! rc=%Rrc\n", rc));
+ }
+ else if (pDrv)
+ {
+ AssertMsgFailed(("Too many instances of driver '%s', max is %u\n", pszName, pDrv->pReg->cMaxInstances));
+ rc = VERR_PDM_TOO_MANY_DRIVER_INSTANCES;
+ }
+ else
+ {
+ AssertMsgFailed(("Driver '%s' wasn't found!\n", pszName));
+ rc = VERR_PDM_DRIVER_NOT_FOUND;
+ }
+ MMR3HeapFree(pszName);
+ }
+ else
+ {
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = VERR_PDM_CFG_MISSING_DRIVER_NAME;
+ else
+ AssertMsgFailed(("Query for string value of \"Driver\" -> %Rrc\n", rc));
+ }
+ return rc;
+}
+
+
+/**
+ * Detaches a driver from whatever it's attached to.
+ * This will of course lead to the destruction of the driver and all drivers below it in the chain.
+ *
+ * @returns VINF_SUCCESS
+ * @param pDrvIns The driver instance to detach.
+ * @param fFlags Flags, combination of the PDM_TACH_FLAGS_* \#defines.
+ */
+int pdmR3DrvDetach(PPDMDRVINS pDrvIns, uint32_t fFlags)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvDetach: pDrvIns=%p '%s'/%d\n", pDrvIns, pDrvIns->pReg->szName, pDrvIns->iInstance));
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+
+ /*
+ * Check that we're not doing this recursively; that could have unwanted side effects!
+ */
+ if (pDrvIns->Internal.s.fDetaching)
+ {
+ AssertMsgFailed(("Recursive detach! '%s'/%d\n", pDrvIns->pReg->szName, pDrvIns->iInstance));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Check that we actually can detach this instance.
+ * The requirement is that the driver/device above has a detach method.
+ */
+ if ( pDrvIns->Internal.s.pUp
+ ? !pDrvIns->Internal.s.pUp->pReg->pfnDetach
+ : pDrvIns->Internal.s.pLun->pDevIns
+ ? !pDrvIns->Internal.s.pLun->pDevIns->pReg->pfnDetach
+ : !pDrvIns->Internal.s.pLun->pUsbIns->pReg->pfnDriverDetach
+ )
+ {
+ AssertMsgFailed(("Cannot detach driver instance because the driver/device above doesn't support it!\n"));
+ return VERR_PDM_DRIVER_DETACH_NOT_POSSIBLE;
+ }
+
+ /*
+ * Join paths with pdmR3DrvDestroyChain.
+ */
+ pdmR3DrvDestroyChain(pDrvIns, fFlags);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a driver chain starting with the specified driver.
+ *
+ * This is used when unplugging a device at run time.
+ *
+ * @param pDrvIns Pointer to the driver instance to start with.
+ * @param fFlags PDM_TACH_FLAGS_NOT_HOT_PLUG, PDM_TACH_FLAGS_NO_CALLBACKS
+ * or 0.
+ */
+void pdmR3DrvDestroyChain(PPDMDRVINS pDrvIns, uint32_t fFlags)
+{
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Detach the bottommost driver until we've detached pDrvIns.
+ */
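+ /* Working bottom-up preserves the invariant that a driver is destroyed
+ * only after everything that was attached below it is gone. */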
+ pDrvIns->Internal.s.fDetaching = true;
+ PPDMDRVINS pCur;
+ do
+ {
+ /* Find the bottommost driver to detach. */
+ pCur = pDrvIns;
+ while (pCur->Internal.s.pDown)
+ pCur = pCur->Internal.s.pDown;
+ LogFlow(("pdmR3DrvDestroyChain: pCur=%p '%s'/%d\n", pCur, pCur->pReg->szName, pCur->iInstance));
+
+ /*
+ * Unlink it and notify parent.
+ */
+ pCur->Internal.s.fDetaching = true;
+
+ PPDMLUN pLun = pCur->Internal.s.pLun;
+ Assert(pLun->pBottom == pCur);
+ pLun->pBottom = pCur->Internal.s.pUp;
+
+ if (pCur->Internal.s.pUp)
+ {
+ /* driver parent */
+ PPDMDRVINS pParent = pCur->Internal.s.pUp;
+ pCur->Internal.s.pUp = NULL;
+ pParent->Internal.s.pDown = NULL;
+
+ if (!(fFlags & PDM_TACH_FLAGS_NO_CALLBACKS) && pParent->pReg->pfnDetach)
+ pParent->pReg->pfnDetach(pParent, fFlags);
+
+ pParent->pDownBase = NULL;
+ }
+ else
+ {
+ /* device parent */
+ Assert(pLun->pTop == pCur);
+ pLun->pTop = NULL;
+ if (!(fFlags & PDM_TACH_FLAGS_NO_CALLBACKS))
+ {
+ if (pLun->pDevIns)
+ {
+ if (pLun->pDevIns->pReg->pfnDetach)
+ {
+ PDMCritSectEnter(pVM, pLun->pDevIns->pCritSectRoR3, VERR_IGNORED);
+ pLun->pDevIns->pReg->pfnDetach(pLun->pDevIns, pLun->iLun, fFlags);
+ PDMCritSectLeave(pVM, pLun->pDevIns->pCritSectRoR3);
+ }
+ }
+ else
+ {
+ if (pLun->pUsbIns->pReg->pfnDriverDetach)
+ {
+ /** @todo USB device locking? */
+ pLun->pUsbIns->pReg->pfnDriverDetach(pLun->pUsbIns, pLun->iLun, fFlags);
+ }
+ }
+ }
+ }
+
+ /*
+ * Call destructor.
+ */
+ pCur->pUpBase = NULL;
+ if (pCur->pReg->pfnDestruct)
+ pCur->pReg->pfnDestruct(pCur);
+ pCur->Internal.s.pDrv->cInstances--;
+
+ /*
+ * Free all resources allocated by the driver.
+ */
+ /* Queues. */
+ int rc = PDMR3QueueDestroyDriver(pVM, pCur);
+ AssertRC(rc);
+
+ /* Timers. */
+ rc = TMR3TimerDestroyDriver(pVM, pCur);
+ AssertRC(rc);
+
+ /* SSM data units. */
+ rc = SSMR3DeregisterDriver(pVM, pCur, NULL, 0);
+ AssertRC(rc);
+
+ /* PDM threads. */
+ rc = pdmR3ThreadDestroyDriver(pVM, pCur);
+ AssertRC(rc);
+
+ /* Info handlers. */
+ rc = DBGFR3InfoDeregisterDriver(pVM, pCur, NULL);
+ AssertRC(rc);
+
+ /* PDM critsects. */
+ rc = pdmR3CritSectBothDeleteDriver(pVM, pCur);
+ AssertRC(rc);
+
+ /* Block caches. */
+ PDMR3BlkCacheReleaseDriver(pVM, pCur);
+
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ /* Completion templates. */
+ pdmR3AsyncCompletionTemplateDestroyDriver(pVM, pCur);
+#endif
+
+ /* Finally, the driver instance itself. */
+#ifdef PDM_WITH_RING0_DRIVERS
+ bool const fHyperHeap = pCur->Internal.s.fHyperHeap;
+#endif
+ ASMMemFill32(pCur, RT_UOFFSETOF_DYN(PDMDRVINS, achInstanceData[pCur->pReg->cbInstance]), 0xdeadd0d0);
+#ifdef PDM_WITH_RING0_DRIVERS
+ if (fHyperHeap)
+ MMHyperFree(pVM, pCur);
+ else
+#endif
+ MMR3HeapFree(pCur);
+
+ } while (pCur != pDrvIns);
+}
+
+
+
+
+/** @name Driver Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnAttach} */
+static DECLCALLBACK(int) pdmR3DrvHlp_Attach(PPDMDRVINS pDrvIns, uint32_t fFlags, PPDMIBASE *ppBaseInterface)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DrvHlp_Attach: caller='%s'/%d: fFlags=%#x\n", pDrvIns->pReg->szName, pDrvIns->iInstance, fFlags));
+ Assert(!(fFlags & ~(PDM_TACH_FLAGS_NOT_HOT_PLUG)));
+ RT_NOREF_PV(fFlags);
+
+ /*
+ * Check that there isn't anything attached already.
+ */
+ int rc;
+ if (!pDrvIns->Internal.s.pDown)
+ {
+ Assert(pDrvIns->Internal.s.pLun->pBottom == pDrvIns);
+
+ /*
+ * Get the attached driver configuration.
+ */
+ PCFGMNODE pNode = CFGMR3GetChild(pDrvIns->Internal.s.pCfgHandle, "AttachedDriver");
+ if (pNode)
+ rc = pdmR3DrvInstantiate(pVM, pNode, &pDrvIns->IBase, pDrvIns, pDrvIns->Internal.s.pLun, ppBaseInterface);
+ else
+ rc = VERR_PDM_NO_ATTACHED_DRIVER;
+ }
+ else
+ {
+ AssertMsgFailed(("Already got a driver attached. The driver should keep track of such things!\n"));
+ rc = VERR_PDM_DRIVER_ALREADY_ATTACHED;
+ }
+
+ LogFlow(("pdmR3DrvHlp_Attach: caller='%s'/%d: return %Rrc\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnDetach} */
+static DECLCALLBACK(int) pdmR3DrvHlp_Detach(PPDMDRVINS pDrvIns, uint32_t fFlags)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_Detach: caller='%s'/%d: fFlags=%#x\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, fFlags));
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+
+ /*
+ * Anything attached?
+ */
+ int rc;
+ if (pDrvIns->Internal.s.pDown)
+ rc = pdmR3DrvDetach(pDrvIns->Internal.s.pDown, fFlags);
+ else
+ {
+ AssertMsgFailed(("Nothing attached!\n"));
+ rc = VERR_PDM_NO_DRIVER_ATTACHED;
+ }
+
+ LogFlow(("pdmR3DrvHlp_Detach: caller='%s'/%d: returns %Rrc\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnDetachSelf} */
+static DECLCALLBACK(int) pdmR3DrvHlp_DetachSelf(PPDMDRVINS pDrvIns, uint32_t fFlags)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_DetachSelf: caller='%s'/%d: fFlags=%#x\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, fFlags));
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+
+ int rc = pdmR3DrvDetach(pDrvIns, fFlags);
+
+ LogFlow(("pdmR3DrvHlp_Detach: returns %Rrc\n", rc)); /* pDrvIns is freed by now. */
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnMountPrepare} */
+static DECLCALLBACK(int) pdmR3DrvHlp_MountPrepare(PPDMDRVINS pDrvIns, const char *pszFilename, const char *pszCoreDriver)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_MountPrepare: caller='%s'/%d: pszFilename=%p:{%s} pszCoreDriver=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pszFilename, pszFilename, pszCoreDriver, pszCoreDriver));
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+
+ /*
+ * Does the caller have anything attached below itself?
+ */
+ if (pDrvIns->Internal.s.pDown)
+ {
+ AssertMsgFailed(("Cannot prepare a mount when something's attached to you!\n"));
+ return VERR_PDM_DRIVER_ALREADY_ATTACHED;
+ }
+
+ /*
+ * We're asked to prepare, so we'll start off by nuking the
+ * attached configuration tree.
+ */
+ PCFGMNODE pNode = CFGMR3GetChild(pDrvIns->Internal.s.pCfgHandle, "AttachedDriver");
+ if (pNode)
+ CFGMR3RemoveNode(pNode);
+
+ /*
+ * If there is no core driver, we'll have to probe for it.
+ */
+ if (!pszCoreDriver)
+ {
+ /** @todo implement image probing. */
+ AssertReleaseMsgFailed(("Not implemented!\n"));
+ return VERR_NOT_IMPLEMENTED;
+ }
+
+ /*
+ * Construct the basic attached driver configuration.
+ */
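+ /* The resulting subtree is minimal; with hypothetical example values:
+ *
+ * AttachedDriver/
+ * Driver = "MediaCore" (pszCoreDriver)
+ * Config/
+ * Path = "/tmp/image.iso" (pszFilename)
+ */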
+ int rc = CFGMR3InsertNode(pDrvIns->Internal.s.pCfgHandle, "AttachedDriver", &pNode);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertString(pNode, "Driver", pszCoreDriver);
+ if (RT_SUCCESS(rc))
+ {
+ PCFGMNODE pCfg;
+ rc = CFGMR3InsertNode(pNode, "Config", &pCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CFGMR3InsertString(pCfg, "Path", pszFilename);
+ if (RT_SUCCESS(rc))
+ {
+ LogFlow(("pdmR3DrvHlp_MountPrepare: caller='%s'/%d: returns %Rrc (Driver=%s)\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, rc, pszCoreDriver));
+ return rc;
+ }
+ else
+ AssertMsgFailed(("Path string insert failed, rc=%Rrc\n", rc));
+ }
+ else
+ AssertMsgFailed(("Config node failed, rc=%Rrc\n", rc));
+ }
+ else
+ AssertMsgFailed(("Driver string insert failed, rc=%Rrc\n", rc));
+ CFGMR3RemoveNode(pNode);
+ }
+ else
+ AssertMsgFailed(("AttachedDriver node insert failed, rc=%Rrc\n", rc));
+
+ LogFlow(("pdmR3DrvHlp_MountPrepare: caller='%s'/%d: returns %Rrc\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnAssertEMT} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_AssertEMT(PPDMDRVINS pDrvIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ if (VM_IS_EMT(pDrvIns->Internal.s.pVMR3))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertEMT '%s'/%d\n", pDrvIns->pReg->szName, pDrvIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnAssertOther} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_AssertOther(PPDMDRVINS pDrvIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ if (!VM_IS_EMT(pDrvIns->Internal.s.pVMR3))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertOther '%s'/%d\n", pDrvIns->pReg->szName, pDrvIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMSetErrorV} */
+static DECLCALLBACK(int) pdmR3DrvHlp_VMSetErrorV(PPDMDRVINS pDrvIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ int rc2 = VMSetErrorV(pDrvIns->Internal.s.pVMR3, rc, RT_SRC_POS_ARGS, pszFormat, va); Assert(rc2 == rc); NOREF(rc2);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMSetRuntimeErrorV} */
+static DECLCALLBACK(int) pdmR3DrvHlp_VMSetRuntimeErrorV(PPDMDRVINS pDrvIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ int rc = VMSetRuntimeErrorV(pDrvIns->Internal.s.pVMR3, fFlags, pszErrorId, pszFormat, va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMState} */
+static DECLCALLBACK(VMSTATE) pdmR3DrvHlp_VMState(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ VMSTATE enmVMState = VMR3GetState(pDrvIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DrvHlp_VMState: caller='%s'/%d: returns %d (%s)\n", pDrvIns->pReg->szName, pDrvIns->iInstance,
+ enmVMState, VMR3GetStateName(enmVMState)));
+ return enmVMState;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMTeleportedAndNotFullyResumedYet} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_VMTeleportedAndNotFullyResumedYet(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ bool fRc = VMR3TeleportedAndNotFullyResumedYet(pDrvIns->Internal.s.pVMR3);
+
+ LogFlow(("pdmR3DrvHlp_VMState: caller='%s'/%d: returns %RTbool)\n", pDrvIns->pReg->szName, pDrvIns->iInstance,
+ fRc));
+ return fRc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnGetSupDrvSession} */
+static DECLCALLBACK(PSUPDRVSESSION) pdmR3DrvHlp_GetSupDrvSession(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ PSUPDRVSESSION pSession = pDrvIns->Internal.s.pVMR3->pSession;
+ LogFlow(("pdmR3DrvHlp_GetSupDrvSession: caller='%s'/%d: returns %p)\n", pDrvIns->pReg->szName, pDrvIns->iInstance,
+ pSession));
+ return pSession;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnQueueCreate} */
+static DECLCALLBACK(int) pdmR3DrvHlp_QueueCreate(PPDMDRVINS pDrvIns, uint32_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEDRV pfnCallback, const char *pszName, PDMQUEUEHANDLE *phQueue)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_PDMQueueCreate: caller='%s'/%d: cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p pszName=%p:{%s} phQueue=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, cbItem, cItems, cMilliesInterval, pfnCallback, pszName, pszName, phQueue));
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
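+ /* Mangle the queue name when there is more than one instance of this
+ * driver, e.g. a hypothetical "Xmit" becomes "Xmit_1" for instance 1. */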
+ if (pDrvIns->iInstance > 0)
+ {
+ pszName = MMR3HeapAPrintf(pVM, MM_TAG_PDM_DRIVER_DESC, "%s_%u", pszName, pDrvIns->iInstance);
+ AssertLogRelReturn(pszName, VERR_NO_MEMORY);
+ }
+
+ int rc = PDMR3QueueCreateDriver(pVM, pDrvIns, cbItem, cItems, cMilliesInterval, pfnCallback, pszName, phQueue);
+
+ LogFlow(("pdmR3DrvHlp_PDMQueueCreate: caller='%s'/%d: returns %Rrc *phQueue=%p\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc, *phQueue));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnQueueAlloc} */
+static DECLCALLBACK(PPDMQUEUEITEMCORE) pdmR3DrvHlp_QueueAlloc(PPDMDRVINS pDrvIns, PDMQUEUEHANDLE hQueue)
+{
+ return PDMQueueAlloc(pDrvIns->Internal.s.pVMR3, hQueue, pDrvIns);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnQueueInsert} */
+static DECLCALLBACK(int) pdmR3DrvHlp_QueueInsert(PPDMDRVINS pDrvIns, PDMQUEUEHANDLE hQueue, PPDMQUEUEITEMCORE pItem)
+{
+ return PDMQueueInsert(pDrvIns->Internal.s.pVMR3, hQueue, pDrvIns, pItem);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnQueueFlushIfNecessary} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_QueueFlushIfNecessary(PPDMDRVINS pDrvIns, PDMQUEUEHANDLE hQueue)
+{
+ return PDMQueueFlushIfNecessary(pDrvIns->Internal.s.pVMR3, hQueue, pDrvIns) == VINF_SUCCESS;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnTMGetVirtualFreq} */
+static DECLCALLBACK(uint64_t) pdmR3DrvHlp_TMGetVirtualFreq(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ return TMVirtualGetFreq(pDrvIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnTMGetVirtualTime} */
+static DECLCALLBACK(uint64_t) pdmR3DrvHlp_TMGetVirtualTime(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ return TMVirtualGet(pDrvIns->Internal.s.pVMR3);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnTimerCreate} */
+static DECLCALLBACK(int) pdmR3DrvHlp_TimerCreate(PPDMDRVINS pDrvIns, TMCLOCK enmClock, PFNTMTIMERDRV pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszDesc, PTMTIMERHANDLE phTimer)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_TimerCreate: caller='%s'/%d: enmClock=%d pfnCallback=%p pvUser=%p fFlags=%#x pszDesc=%p:{%s} phTimer=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, enmClock, pfnCallback, pvUser, fFlags, pszDesc, pszDesc, phTimer));
+
+ /* Mangle the timer name if there is more than one instance of this driver, e.g. "Flush" becomes "Flush[2]" for instance 2. */
+ char szName[32];
+ AssertReturn(strlen(pszDesc) < sizeof(szName) - 3, VERR_INVALID_NAME);
+ if (pDrvIns->iInstance > 0)
+ {
+ RTStrPrintf(szName, sizeof(szName), "%s[%u]", pszDesc, pDrvIns->iInstance);
+ pszDesc = szName;
+ }
+
+ /* Clear the ring-0 flag if the driver isn't configured for ring-0. */
+ if (fFlags & TMTIMER_FLAGS_RING0)
+ {
+ AssertReturn(!(fFlags & TMTIMER_FLAGS_NO_RING0), VERR_INVALID_FLAGS);
+ Assert(pDrvIns->Internal.s.pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_R0);
+#ifdef PDM_WITH_RING0_DRIVERS
+ if (!(pDrvIns->Internal.s.fIntFlags & PDMDRVINSINT_FLAGS_R0_ENABLED)) /** @todo PDMDRVINSINT_FLAGS_R0_ENABLED? */
+#endif
+ fFlags = (fFlags & ~TMTIMER_FLAGS_RING0) | TMTIMER_FLAGS_NO_RING0;
+ }
+ else
+ fFlags |= TMTIMER_FLAGS_NO_RING0;
+
+ int rc = TMR3TimerCreateDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, enmClock, pfnCallback, pvUser, fFlags, pszDesc, phTimer);
+
+ LogFlow(("pdmR3DrvHlp_TMTimerCreate: caller='%s'/%d: returns %Rrc *phTimer=%p\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc, *phTimer));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnTimerDestroy} */
+static DECLCALLBACK(int) pdmR3DrvHlp_TimerDestroy(PPDMDRVINS pDrvIns, TMTIMERHANDLE hTimer)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_TimerDestroy: caller='%s'/%d: hTimer=%RX64\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, hTimer));
+
+ int rc = TMR3TimerDestroy(pDrvIns->Internal.s.pVMR3, hTimer);
+
+ LogFlow(("pdmR3DrvHlp_TimerDestroy: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnTimerSetMillies} */
+static DECLCALLBACK(int) pdmR3DrvHlp_TimerSetMillies(PPDMDRVINS pDrvIns, TMTIMERHANDLE hTimer, uint64_t cMilliesToNext)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return TMTimerSetMillies(pDrvIns->Internal.s.pVMR3, hTimer, cMilliesToNext);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSSMRegister} */
+static DECLCALLBACK(int) pdmR3DrvHlp_SSMRegister(PPDMDRVINS pDrvIns, uint32_t uVersion, size_t cbGuess,
+ PFNSSMDRVLIVEPREP pfnLivePrep, PFNSSMDRVLIVEEXEC pfnLiveExec, PFNSSMDRVLIVEVOTE pfnLiveVote,
+ PFNSSMDRVSAVEPREP pfnSavePrep, PFNSSMDRVSAVEEXEC pfnSaveExec, PFNSSMDRVSAVEDONE pfnSaveDone,
+ PFNSSMDRVLOADPREP pfnLoadPrep, PFNSSMDRVLOADEXEC pfnLoadExec, PFNSSMDRVLOADDONE pfnLoadDone)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_SSMRegister: caller='%s'/%d: uVersion=%#x cbGuess=%#x \n"
+ " pfnLivePrep=%p pfnLiveExec=%p pfnLiveVote=%p pfnSavePrep=%p pfnSaveExec=%p pfnSaveDone=%p pszLoadPrep=%p pfnLoadExec=%p pfnLoaddone=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, uVersion, cbGuess,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone, pfnLoadPrep, pfnLoadExec, pfnLoadDone));
+
+ int rc = SSMR3RegisterDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, pDrvIns->pReg->szName, pDrvIns->iInstance,
+ uVersion, cbGuess,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone);
+
+ LogFlow(("pdmR3DrvHlp_SSMRegister: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSSMDeregister} */
+static DECLCALLBACK(int) pdmR3DrvHlp_SSMDeregister(PPDMDRVINS pDrvIns, const char *pszName, uint32_t uInstance)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_SSMDeregister: caller='%s'/%d: pszName=%p:{%s} uInstance=%#x\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pszName, pszName, uInstance));
+
+ int rc = SSMR3DeregisterDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, pszName, uInstance);
+
+ LogFlow(("pdmR3DrvHlp_SSMDeregister: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnMMHeapFree} */
+static DECLCALLBACK(void) pdmR3DrvHlp_MMHeapFree(PPDMDRVINS pDrvIns, void *pv)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns); RT_NOREF(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_MMHeapFree: caller='%s'/%d: pv=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pv));
+
+ MMR3HeapFree(pv);
+
+ LogFlow(("pdmR3DrvHlp_MMHeapFree: caller='%s'/%d: returns\n", pDrvIns->pReg->szName, pDrvIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnDBGFInfoRegister} */
+static DECLCALLBACK(int) pdmR3DrvHlp_DBGFInfoRegister(PPDMDRVINS pDrvIns, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDRV pfnHandler)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_DBGFInfoRegister: caller='%s'/%d: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pszName, pszName, pszDesc, pszDesc, pfnHandler));
+
+ int rc = DBGFR3InfoRegisterDriver(pDrvIns->Internal.s.pVMR3, pszName, pszDesc, pfnHandler, pDrvIns);
+
+ LogFlow(("pdmR3DrvHlp_DBGFInfoRegister: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnDBGFInfoRegisterArgv} */
+static DECLCALLBACK(int) pdmR3DrvHlp_DBGFInfoRegisterArgv(PPDMDRVINS pDrvIns, const char *pszName, const char *pszDesc, PFNDBGFINFOARGVDRV pfnHandler)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_DBGFInfoRegisterArgv: caller='%s'/%d: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pszName, pszName, pszDesc, pszDesc, pfnHandler));
+
+ int rc = DBGFR3InfoRegisterDriverArgv(pDrvIns->Internal.s.pVMR3, pszName, pszDesc, pfnHandler, pDrvIns);
+
+ LogFlow(("pdmR3DrvHlp_DBGFInfoRegisterArgv: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnDBGFInfoDeregister} */
+static DECLCALLBACK(int) pdmR3DrvHlp_DBGFInfoDeregister(PPDMDRVINS pDrvIns, const char *pszName)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_DBGFInfoDeregister: caller='%s'/%d: pszName=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pszName, pszName));
+
+ int rc = DBGFR3InfoDeregisterDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, pszName);
+
+ LogFlow(("pdmR3DrvHlp_DBGFInfoDeregister: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSTAMRegister} */
+static DECLCALLBACK(void) pdmR3DrvHlp_STAMRegister(PPDMDRVINS pDrvIns, void *pvSample, STAMTYPE enmType, const char *pszName,
+ STAMUNIT enmUnit, const char *pszDesc)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
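+ /* A name starting with '/' is registered as given; anything else is put
+ * under "/Drivers/<registration name>-<instance>/". E.g. a relative name
+ * "Bytes" (hypothetical) for instance 0 of a driver "NAT" would become
+ * "/Drivers/NAT-0/Bytes". */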
+#ifdef VBOX_WITH_STATISTICS /** @todo rework this to always be compiled in */
+ if (*pszName == '/')
+ STAM_REG(pDrvIns->Internal.s.pVMR3, pvSample, enmType, pszName, enmUnit, pszDesc);
+ else
+ STAMR3RegisterF(pVM, pvSample, enmType, STAMVISIBILITY_ALWAYS, enmUnit, pszDesc,
+ "/Drivers/%s-%u/%s", pDrvIns->pReg->szName, pDrvIns->iInstance, pszName);
+#else
+ RT_NOREF(pDrvIns, pvSample, enmType, pszName, enmUnit, pszDesc, pVM);
+#endif
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSTAMRegisterV} */
+static DECLCALLBACK(void) pdmR3DrvHlp_STAMRegisterV(PPDMDRVINS pDrvIns, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ STAMUNIT enmUnit, const char *pszDesc, const char *pszName, va_list args)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ int rc;
+ if (*pszName == '/')
+ rc = STAMR3RegisterV(pVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, args);
+ else
+ {
+ /* We need to format it to check whether it starts with a
+ slash or not (will rework this later). */
+ char szFormatted[2048];
+ ssize_t cchBase = RTStrPrintf2(szFormatted, sizeof(szFormatted) - 1024, "/Drivers/%s-%u/",
+ pDrvIns->pReg->szName, pDrvIns->iInstance);
+ AssertReturnVoid(cchBase > 0);
+
+ ssize_t cch2 = RTStrPrintf2V(&szFormatted[cchBase], sizeof(szFormatted) - cchBase, pszName, args);
+ AssertReturnVoid(cch2 > 0);
+
+ rc = STAMR3Register(pVM, pvSample, enmType, enmVisibility,
+ &szFormatted[szFormatted[cchBase] == '/' ? cchBase : 0], enmUnit, pszDesc);
+ }
+ AssertRC(rc);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSTAMRegisterF} */
+static DECLCALLBACK(void) pdmR3DrvHlp_STAMRegisterF(PPDMDRVINS pDrvIns, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ STAMUNIT enmUnit, const char *pszDesc, const char *pszName, ...)
+{
+ va_list va;
+ va_start(va, pszName);
+ pdmR3DrvHlp_STAMRegisterV(pDrvIns, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, va);
+ va_end(va);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSTAMDeregister} */
+static DECLCALLBACK(int) pdmR3DrvHlp_STAMDeregister(PPDMDRVINS pDrvIns, void *pvSample)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+
+ return STAMR3DeregisterByAddr(pDrvIns->Internal.s.pVMR3->pUVM, pvSample);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSTAMDeregisterByPrefix} */
+static DECLCALLBACK(int) pdmR3DrvHlp_STAMDeregisterByPrefix(PPDMDRVINS pDrvIns, const char *pszPrefix)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+
+ if (*pszPrefix == '/')
+ return STAMR3DeregisterByPrefix(pDrvIns->Internal.s.pVMR3->pUVM, pszPrefix);
+
+ char szTmp[2048];
+ ssize_t cch = RTStrPrintf2(szTmp, sizeof(szTmp), "/Drivers/%s-%u/%s", pDrvIns->pReg->szName, pDrvIns->iInstance, pszPrefix);
+ AssertReturn(cch > 0, VERR_BUFFER_OVERFLOW);
+ return STAMR3DeregisterByPrefix(pDrvIns->Internal.s.pVMR3->pUVM, szTmp);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSUPCallVMMR0Ex} */
+static DECLCALLBACK(int) pdmR3DrvHlp_SUPCallVMMR0Ex(PPDMDRVINS pDrvIns, unsigned uOperation, void *pvArg, unsigned cbArg)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_SSMCallVMMR0Ex: caller='%s'/%d: uOperation=%u pvArg=%p cbArg=%d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, uOperation, pvArg, cbArg));
+ RT_NOREF_PV(cbArg);
+
+ int rc;
+ if ( uOperation >= VMMR0_DO_SRV_START
+ && uOperation < VMMR0_DO_SRV_END)
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pDrvIns->Internal.s.pVMR3), NIL_VMCPUID, uOperation, 0, (PSUPVMMR0REQHDR)pvArg);
+ else
+ {
+ AssertMsgFailed(("Invalid uOperation=%u\n", uOperation));
+ rc = VERR_INVALID_PARAMETER;
+ }
+
+ LogFlow(("pdmR3DrvHlp_SUPCallVMMR0Ex: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnUSBRegisterHub} */
+static DECLCALLBACK(int) pdmR3DrvHlp_USBRegisterHub(PPDMDRVINS pDrvIns, uint32_t fVersions, uint32_t cPorts, PCPDMUSBHUBREG pUsbHubReg, PPCPDMUSBHUBHLP ppUsbHubHlp)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_USBRegisterHub: caller='%s'/%d: fVersions=%#x cPorts=%#x pUsbHubReg=%p ppUsbHubHlp=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, fVersions, cPorts, pUsbHubReg, ppUsbHubHlp));
+
+#ifdef VBOX_WITH_USB
+ int rc = pdmR3UsbRegisterHub(pDrvIns->Internal.s.pVMR3, pDrvIns, fVersions, cPorts, pUsbHubReg, ppUsbHubHlp);
+#else
+ int rc = VERR_NOT_SUPPORTED;
+#endif
+
+ LogFlow(("pdmR3DrvHlp_USBRegisterHub: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnSetAsyncNotification} */
+static DECLCALLBACK(int) pdmR3DrvHlp_SetAsyncNotification(PPDMDRVINS pDrvIns, PFNPDMDRVASYNCNOTIFY pfnAsyncNotify)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT0(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_SetAsyncNotification: caller='%s'/%d: pfnAsyncNotify=%p\n", pDrvIns->pReg->szName, pDrvIns->iInstance, pfnAsyncNotify));
+
+ int rc = VINF_SUCCESS;
+ AssertStmt(pfnAsyncNotify, rc = VERR_INVALID_PARAMETER);
+ AssertStmt(!pDrvIns->Internal.s.pfnAsyncNotify, rc = VERR_WRONG_ORDER);
+ AssertStmt(pDrvIns->Internal.s.fVMSuspended || pDrvIns->Internal.s.fVMReset, rc = VERR_WRONG_ORDER);
+ VMSTATE enmVMState = VMR3GetState(pDrvIns->Internal.s.pVMR3);
+ AssertStmt( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS,
+ rc = VERR_INVALID_STATE);
+
+ if (RT_SUCCESS(rc))
+ pDrvIns->Internal.s.pfnAsyncNotify = pfnAsyncNotify;
+
+ LogFlow(("pdmR3DrvHlp_SetAsyncNotification: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnAsyncNotificationCompleted} */
+static DECLCALLBACK(void) pdmR3DrvHlp_AsyncNotificationCompleted(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ if ( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS)
+ {
+ LogFlow(("pdmR3DrvHlp_AsyncNotificationCompleted: caller='%s'/%d:\n", pDrvIns->pReg->szName, pDrvIns->iInstance));
+ VMR3AsyncPdmNotificationWakeupU(pVM->pUVM);
+ }
+ else
+ LogFlow(("pdmR3DrvHlp_AsyncNotificationCompleted: caller='%s'/%d: enmVMState=%d\n", pDrvIns->pReg->szName, pDrvIns->iInstance, enmVMState));
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnThreadCreate} */
+static DECLCALLBACK(int) pdmR3DrvHlp_ThreadCreate(PPDMDRVINS pDrvIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDRV pfnThread,
+ PFNPDMTHREADWAKEUPDRV pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_ThreadCreate: caller='%s'/%d: ppThread=%p pvUser=%p pfnThread=%p pfnWakeup=%p cbStack=%#zx enmType=%d pszName=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName, pszName));
+
+ int rc = pdmR3ThreadCreateDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName);
+
+ LogFlow(("pdmR3DrvHlp_ThreadCreate: caller='%s'/%d: returns %Rrc *ppThread=%RTthrd\n", pDrvIns->pReg->szName, pDrvIns->iInstance,
+ rc, *ppThread));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnAsyncCompletionTemplateCreate} */
+static DECLCALLBACK(int) pdmR3DrvHlp_AsyncCompletionTemplateCreate(PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser,
+ const char *pszDesc)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_AsyncCompletionTemplateCreate: caller='%s'/%d: ppTemplate=%p pfnCompleted=%p pszDesc=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, ppTemplate, pfnCompleted, pszDesc, pszDesc));
+
+ int rc = pdmR3AsyncCompletionTemplateCreateDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, ppTemplate, pfnCompleted, pvTemplateUser, pszDesc);
+
+ LogFlow(("pdmR3DrvHlp_AsyncCompletionTemplateCreate: caller='%s'/%d: returns %Rrc *ppThread=%p\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc, *ppTemplate));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnNetShaperAttach} */
+static DECLCALLBACK(int) pdmR3DrvHlp_NetShaperAttach(PPDMDRVINS pDrvIns, const char *pszBwGroup, PPDMNSFILTER pFilter)
+{
+#ifdef VBOX_WITH_NETSHAPER
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_NetShaperAttach: caller='%s'/%d: pFilter=%p pszBwGroup=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter, pszBwGroup, pszBwGroup));
+
+ int rc = PDMR3NsAttach(pDrvIns->Internal.s.pVMR3, pDrvIns, pszBwGroup, pFilter);
+
+ LogFlow(("pdmR3DrvHlp_NetShaperAttach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc));
+ return rc;
+#else
+ RT_NOREF(pDrvIns, pszBwGroup, pFilter);
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnNetShaperDetach} */
+static DECLCALLBACK(int) pdmR3DrvHlp_NetShaperDetach(PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
+{
+#ifdef VBOX_WITH_NETSHAPER
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: pFilter=%p\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter));
+
+ int rc = PDMR3NsDetach(pDrvIns->Internal.s.pVMR3, pDrvIns, pFilter);
+
+ LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc));
+ return rc;
+#else
+ RT_NOREF(pDrvIns, pFilter);
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnNetShaperAllocateBandwidth} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_NetShaperAllocateBandwidth(PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter, size_t cbTransfer)
+{
+#ifdef VBOX_WITH_NETSHAPER
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: pFilter=%p cbTransfer=%#zx\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter, cbTransfer));
+
+ bool const fRc = PDMNetShaperAllocateBandwidth(pDrvIns->Internal.s.pVMR3, pFilter, cbTransfer);
+
+ LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: returns %RTbool\n", pDrvIns->pReg->szName, pDrvIns->iInstance, fRc));
+ return fRc;
+#else
+ RT_NOREF(pDrvIns, pFilter, cbTransfer);
+ return true;
+#endif
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnLdrGetRCInterfaceSymbols} */
+static DECLCALLBACK(int) pdmR3DrvHlp_LdrGetRCInterfaceSymbols(PPDMDRVINS pDrvIns, void *pvInterface, size_t cbInterface,
+ const char *pszSymPrefix, const char *pszSymList)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_LdrGetRCInterfaceSymbols: caller='%s'/%d: pvInterface=%p cbInterface=%zu pszSymPrefix=%p:{%s} pszSymList=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pvInterface, cbInterface, pszSymPrefix, pszSymPrefix, pszSymList, pszSymList));
+
+ int rc;
+ if ( strncmp(pszSymPrefix, "drv", 3) == 0
+ && RTStrIStr(pszSymPrefix + 3, pDrvIns->pReg->szName) != NULL)
+ {
+ if (pDrvIns->pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+#ifdef PDM_WITH_RING0_DRIVERS
+ rc = PDMR3LdrGetInterfaceSymbols(pDrvIns->Internal.s.pVMR3,
+ pvInterface, cbInterface,
+ pDrvIns->pReg->szRCMod, pDrvIns->Internal.s.pDrv->pszRCSearchPath,
+ pszSymPrefix, pszSymList,
+ false /*fRing0OrRC*/);
+#else
+ {
+ AssertLogRelMsgFailed(("ring-0 drivers are not supported in this VBox version!\n"));
+ RT_NOREF(pvInterface, cbInterface, pszSymList);
+ rc = VERR_NOT_SUPPORTED;
+ }
+#endif
+ else
+ {
+ AssertMsgFailed(("Not a raw-mode enabled driver\n"));
+ rc = VERR_PERMISSION_DENIED;
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("Invalid prefix '%s' for '%s'; must start with 'drv' and contain the driver name!\n",
+ pszSymPrefix, pDrvIns->pReg->szName));
+ rc = VERR_INVALID_NAME;
+ }
+
+ LogFlow(("pdmR3DrvHlp_LdrGetRCInterfaceSymbols: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnLdrGetR0InterfaceSymbols} */
+static DECLCALLBACK(int) pdmR3DrvHlp_LdrGetR0InterfaceSymbols(PPDMDRVINS pDrvIns, void *pvInterface, size_t cbInterface,
+ const char *pszSymPrefix, const char *pszSymList)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
+ LogFlow(("pdmR3DrvHlp_LdrGetR0InterfaceSymbols: caller='%s'/%d: pvInterface=%p cbInterface=%zu pszSymPrefix=%p:{%s} pszSymList=%p:{%s}\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pvInterface, cbInterface, pszSymPrefix, pszSymPrefix, pszSymList, pszSymList));
+
+ int rc;
+ if ( strncmp(pszSymPrefix, "drv", 3) == 0
+ && RTStrIStr(pszSymPrefix + 3, pDrvIns->pReg->szName) != NULL)
+ {
+ if (pDrvIns->pReg->fFlags & PDM_DRVREG_FLAGS_R0)
+#ifdef PDM_WITH_RING0_DRIVERS
+ rc = PDMR3LdrGetInterfaceSymbols(pDrvIns->Internal.s.pVMR3,
+ pvInterface, cbInterface,
+ pDrvIns->pReg->szR0Mod, pDrvIns->Internal.s.pDrv->pszR0SearchPath,
+ pszSymPrefix, pszSymList,
+ true /*fRing0OrRC*/);
+#else
+ {
+ AssertLogRelMsgFailed(("ring-0 drivers are not supported in this VBox version!\n"));
+ RT_NOREF(pvInterface, cbInterface, pszSymList);
+ rc = VERR_NOT_SUPPORTED;
+ }
+#endif
+ else
+ {
+ AssertMsgFailed(("Not a ring-0 enabled driver\n"));
+ rc = VERR_PERMISSION_DENIED;
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("Invalid prefix '%s' for '%s'; must start with 'drv' and contain the driver name!\n",
+ pszSymPrefix, pDrvIns->pReg->szName));
+ rc = VERR_INVALID_NAME;
+ }
+
+ LogFlow(("pdmR3DrvHlp_LdrGetR0InterfaceSymbols: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectInit} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectInit(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect,
+ RT_SRC_POS_DECL, const char *pszName)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3DrvHlp_CritSectInit: caller='%s'/%d: pCritSect=%p pszName=%s\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pCritSect, pszName));
+
+ int rc = pdmR3CritSectInitDriver(pVM, pDrvIns, pCritSect, RT_SRC_POS_ARGS, "%s_%u", pszName, pDrvIns->iInstance);
+
+ LogFlow(("pdmR3DrvHlp_CritSectInit: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
+ pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectYield} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_CritSectYield(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ RT_NOREF(pDrvIns);
+ return PDMR3CritSectYield(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectEnter} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectEnter(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, int rcBusy)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectEnter(pDrvIns->Internal.s.pVMR3, pCritSect, rcBusy);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectEnterDebug} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectEnterDebug(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, int rcBusy,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectEnterDebug(pDrvIns->Internal.s.pVMR3, pCritSect, rcBusy, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectTryEnter} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectTryEnter(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectTryEnter(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectTryEnterDebug} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectTryEnterDebug(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectTryEnterDebug(pDrvIns->Internal.s.pVMR3, pCritSect, uId, RT_SRC_POS_ARGS);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectLeave} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectLeave(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectLeave(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectIsOwner} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_CritSectIsOwner(PPDMDRVINS pDrvIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectIsOwner(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectIsInitialized} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_CritSectIsInitialized(PPDMDRVINS pDrvIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ RT_NOREF(pDrvIns);
+ return PDMCritSectIsInitialized(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectHasWaiters} */
+static DECLCALLBACK(bool) pdmR3DrvHlp_CritSectHasWaiters(PPDMDRVINS pDrvIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMCritSectHasWaiters(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectGetRecursion} */
+static DECLCALLBACK(uint32_t) pdmR3DrvHlp_CritSectGetRecursion(PPDMDRVINS pDrvIns, PCPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ RT_NOREF(pDrvIns);
+ return PDMCritSectGetRecursion(pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectScheduleExitEvent} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectScheduleExitEvent(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect,
+ SUPSEMEVENT hEventToSignal)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ RT_NOREF(pDrvIns);
+ return PDMHCCritSectScheduleExitEvent(pCritSect, hEventToSignal);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCritSectDelete} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CritSectDelete(PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMR3CritSectDelete(pDrvIns->Internal.s.pVMR3, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnCallR0} */
+static DECLCALLBACK(int) pdmR3DrvHlp_CallR0(PPDMDRVINS pDrvIns, uint32_t uOperation, uint64_t u64Arg)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+#ifdef PDM_WITH_RING0_DRIVERS
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+#endif
+ LogFlow(("pdmR3DrvHlp_CallR0: caller='%s'/%d: uOperation=%#x u64Arg=%#RX64\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, uOperation, u64Arg));
+
+ /*
+ * Lazy resolve the ring-0 entry point.
+ */
+ int rc = VINF_SUCCESS;
+ PFNPDMDRVREQHANDLERR0 pfnReqHandlerR0 = pDrvIns->Internal.s.pfnReqHandlerR0;
+ if (RT_UNLIKELY(pfnReqHandlerR0 == NIL_RTR0PTR))
+ {
+ if (pDrvIns->pReg->fFlags & PDM_DRVREG_FLAGS_R0)
+ {
+#ifdef PDM_WITH_RING0_DRIVERS
+ char szSymbol[ sizeof("drvR0") + sizeof(pDrvIns->pReg->szName) + sizeof("ReqHandler")];
+ strcat(strcat(strcpy(szSymbol, "drvR0"), pDrvIns->pReg->szName), "ReqHandler");
+ szSymbol[sizeof("drvR0") - 1] = RT_C_TO_UPPER(szSymbol[sizeof("drvR0") - 1]);
+
+ rc = PDMR3LdrGetSymbolR0Lazy(pVM, pDrvIns->pReg->szR0Mod, pDrvIns->Internal.s.pDrv->pszR0SearchPath, szSymbol,
+ &pfnReqHandlerR0);
+ if (RT_SUCCESS(rc))
+ pDrvIns->Internal.s.pfnReqHandlerR0 = pfnReqHandlerR0;
+ else
+ pfnReqHandlerR0 = NIL_RTR0PTR;
+#else
+ RT_NOREF(uOperation, u64Arg);
+ rc = VERR_NOT_SUPPORTED;
+#endif
+ }
+ else
+ rc = VERR_ACCESS_DENIED;
+ }
+ if (RT_LIKELY(pfnReqHandlerR0 != NIL_RTR0PTR && RT_SUCCESS(rc)))
+ {
+#ifdef PDM_WITH_RING0_DRIVERS
+ /*
+ * Make the ring-0 call.
+ */
+ PDMDRIVERCALLREQHANDLERREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pDrvInsR0 = PDMDRVINS_2_R0PTR(pDrvIns);
+ Req.uOperation = uOperation;
+ Req.u32Alignment = 0;
+ Req.u64Arg = u64Arg;
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER, 0, &Req.Hdr);
+#else
+ rc = VERR_NOT_SUPPORTED;
+#endif
+ }
+
+ LogFlow(("pdmR3DrvHlp_CallR0: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName, pDrvIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnBlkCacheRetain} */
+static DECLCALLBACK(int) pdmR3DrvHlp_BlkCacheRetain(PPDMDRVINS pDrvIns, PPPDMBLKCACHE ppBlkCache,
+ PFNPDMBLKCACHEXFERCOMPLETEDRV pfnXferComplete,
+ PFNPDMBLKCACHEXFERENQUEUEDRV pfnXferEnqueue,
+ PFNPDMBLKCACHEXFERENQUEUEDISCARDDRV pfnXferEnqueueDiscard,
+ const char *pcszId)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return PDMR3BlkCacheRetainDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, ppBlkCache,
+ pfnXferComplete, pfnXferEnqueue, pfnXferEnqueueDiscard, pcszId);
+}
+
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3DrvHlp_VMGetSuspendReason(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3DrvHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3DrvHlp_VMGetResumeReason(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3DrvHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPR3,pfnQueryGenericUserObject} */
+static DECLCALLBACK(void *) pdmR3DrvHlp_QueryGenericUserObject(PPDMDRVINS pDrvIns, PCRTUUID pUuid)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ LogFlow(("pdmR3DrvHlp_QueryGenericUserObject: caller='%s'/%d: pUuid=%p:%RTuuid\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pUuid, pUuid));
+
+ void *pvRet;
+ PUVM pUVM = pDrvIns->Internal.s.pVMR3->pUVM;
+ if (pUVM->pVmm2UserMethods->pfnQueryGenericObject)
+ pvRet = pUVM->pVmm2UserMethods->pfnQueryGenericObject(pUVM->pVmm2UserMethods, pUVM, pUuid);
+ else
+ pvRet = NULL;
+
+ LogFlow(("pdmR3DrvHlp_QueryGenericUserObject: caller='%s'/%d: returns %#p for %RTuuid\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pvRet, pUuid));
+ return pvRet;
+}
+
+
+/**
+ * The driver helper structure.
+ */
+const PDMDRVHLPR3 g_pdmR3DrvHlp =
+{
+ PDM_DRVHLPR3_VERSION,
+ pdmR3DrvHlp_Attach,
+ pdmR3DrvHlp_Detach,
+ pdmR3DrvHlp_DetachSelf,
+ pdmR3DrvHlp_MountPrepare,
+ pdmR3DrvHlp_AssertEMT,
+ pdmR3DrvHlp_AssertOther,
+ pdmR3DrvHlp_VMSetErrorV,
+ pdmR3DrvHlp_VMSetRuntimeErrorV,
+ pdmR3DrvHlp_VMState,
+ pdmR3DrvHlp_VMTeleportedAndNotFullyResumedYet,
+ pdmR3DrvHlp_GetSupDrvSession,
+ pdmR3DrvHlp_QueueCreate,
+ pdmR3DrvHlp_QueueAlloc,
+ pdmR3DrvHlp_QueueInsert,
+ pdmR3DrvHlp_QueueFlushIfNecessary,
+ pdmR3DrvHlp_TMGetVirtualFreq,
+ pdmR3DrvHlp_TMGetVirtualTime,
+ pdmR3DrvHlp_TimerCreate,
+ pdmR3DrvHlp_TimerDestroy,
+ pdmR3DrvHlp_SSMRegister,
+ pdmR3DrvHlp_SSMDeregister,
+ SSMR3PutStruct,
+ SSMR3PutStructEx,
+ SSMR3PutBool,
+ SSMR3PutU8,
+ SSMR3PutS8,
+ SSMR3PutU16,
+ SSMR3PutS16,
+ SSMR3PutU32,
+ SSMR3PutS32,
+ SSMR3PutU64,
+ SSMR3PutS64,
+ SSMR3PutU128,
+ SSMR3PutS128,
+ SSMR3PutUInt,
+ SSMR3PutSInt,
+ SSMR3PutGCUInt,
+ SSMR3PutGCUIntReg,
+ SSMR3PutGCPhys32,
+ SSMR3PutGCPhys64,
+ SSMR3PutGCPhys,
+ SSMR3PutGCPtr,
+ SSMR3PutGCUIntPtr,
+ SSMR3PutRCPtr,
+ SSMR3PutIOPort,
+ SSMR3PutSel,
+ SSMR3PutMem,
+ SSMR3PutStrZ,
+ SSMR3GetStruct,
+ SSMR3GetStructEx,
+ SSMR3GetBool,
+ SSMR3GetBoolV,
+ SSMR3GetU8,
+ SSMR3GetU8V,
+ SSMR3GetS8,
+ SSMR3GetS8V,
+ SSMR3GetU16,
+ SSMR3GetU16V,
+ SSMR3GetS16,
+ SSMR3GetS16V,
+ SSMR3GetU32,
+ SSMR3GetU32V,
+ SSMR3GetS32,
+ SSMR3GetS32V,
+ SSMR3GetU64,
+ SSMR3GetU64V,
+ SSMR3GetS64,
+ SSMR3GetS64V,
+ SSMR3GetU128,
+ SSMR3GetU128V,
+ SSMR3GetS128,
+ SSMR3GetS128V,
+ SSMR3GetGCPhys32,
+ SSMR3GetGCPhys32V,
+ SSMR3GetGCPhys64,
+ SSMR3GetGCPhys64V,
+ SSMR3GetGCPhys,
+ SSMR3GetGCPhysV,
+ SSMR3GetUInt,
+ SSMR3GetSInt,
+ SSMR3GetGCUInt,
+ SSMR3GetGCUIntReg,
+ SSMR3GetGCPtr,
+ SSMR3GetGCUIntPtr,
+ SSMR3GetRCPtr,
+ SSMR3GetIOPort,
+ SSMR3GetSel,
+ SSMR3GetMem,
+ SSMR3GetStrZ,
+ SSMR3GetStrZEx,
+ SSMR3Skip,
+ SSMR3SkipToEndOfUnit,
+ SSMR3SetLoadError,
+ SSMR3SetLoadErrorV,
+ SSMR3SetCfgError,
+ SSMR3SetCfgErrorV,
+ SSMR3HandleGetStatus,
+ SSMR3HandleGetAfter,
+ SSMR3HandleIsLiveSave,
+ SSMR3HandleMaxDowntime,
+ SSMR3HandleHostBits,
+ SSMR3HandleRevision,
+ SSMR3HandleVersion,
+ SSMR3HandleHostOSAndArch,
+ CFGMR3Exists,
+ CFGMR3QueryType,
+ CFGMR3QuerySize,
+ CFGMR3QueryInteger,
+ CFGMR3QueryIntegerDef,
+ CFGMR3QueryString,
+ CFGMR3QueryStringDef,
+ CFGMR3QueryPassword,
+ CFGMR3QueryPasswordDef,
+ CFGMR3QueryBytes,
+ CFGMR3QueryU64,
+ CFGMR3QueryU64Def,
+ CFGMR3QueryS64,
+ CFGMR3QueryS64Def,
+ CFGMR3QueryU32,
+ CFGMR3QueryU32Def,
+ CFGMR3QueryS32,
+ CFGMR3QueryS32Def,
+ CFGMR3QueryU16,
+ CFGMR3QueryU16Def,
+ CFGMR3QueryS16,
+ CFGMR3QueryS16Def,
+ CFGMR3QueryU8,
+ CFGMR3QueryU8Def,
+ CFGMR3QueryS8,
+ CFGMR3QueryS8Def,
+ CFGMR3QueryBool,
+ CFGMR3QueryBoolDef,
+ CFGMR3QueryPort,
+ CFGMR3QueryPortDef,
+ CFGMR3QueryUInt,
+ CFGMR3QueryUIntDef,
+ CFGMR3QuerySInt,
+ CFGMR3QuerySIntDef,
+ CFGMR3QueryGCPtr,
+ CFGMR3QueryGCPtrDef,
+ CFGMR3QueryGCPtrU,
+ CFGMR3QueryGCPtrUDef,
+ CFGMR3QueryGCPtrS,
+ CFGMR3QueryGCPtrSDef,
+ CFGMR3QueryStringAlloc,
+ CFGMR3QueryStringAllocDef,
+ CFGMR3GetParent,
+ CFGMR3GetChild,
+ CFGMR3GetChildF,
+ CFGMR3GetChildFV,
+ CFGMR3GetFirstChild,
+ CFGMR3GetNextChild,
+ CFGMR3GetName,
+ CFGMR3GetNameLen,
+ CFGMR3AreChildrenValid,
+ CFGMR3GetFirstValue,
+ CFGMR3GetNextValue,
+ CFGMR3GetValueName,
+ CFGMR3GetValueNameLen,
+ CFGMR3GetValueType,
+ CFGMR3AreValuesValid,
+ CFGMR3ValidateConfig,
+ pdmR3DrvHlp_MMHeapFree,
+ pdmR3DrvHlp_DBGFInfoRegister,
+ pdmR3DrvHlp_DBGFInfoRegisterArgv,
+ pdmR3DrvHlp_DBGFInfoDeregister,
+ pdmR3DrvHlp_STAMRegister,
+ pdmR3DrvHlp_STAMRegisterF,
+ pdmR3DrvHlp_STAMRegisterV,
+ pdmR3DrvHlp_STAMDeregister,
+ pdmR3DrvHlp_SUPCallVMMR0Ex,
+ pdmR3DrvHlp_USBRegisterHub,
+ pdmR3DrvHlp_SetAsyncNotification,
+ pdmR3DrvHlp_AsyncNotificationCompleted,
+ pdmR3DrvHlp_ThreadCreate,
+ PDMR3ThreadDestroy,
+ PDMR3ThreadIAmSuspending,
+ PDMR3ThreadIAmRunning,
+ PDMR3ThreadSleep,
+ PDMR3ThreadSuspend,
+ PDMR3ThreadResume,
+ pdmR3DrvHlp_AsyncCompletionTemplateCreate,
+ PDMR3AsyncCompletionTemplateDestroy,
+ PDMR3AsyncCompletionEpCreateForFile,
+ PDMR3AsyncCompletionEpClose,
+ PDMR3AsyncCompletionEpGetSize,
+ PDMR3AsyncCompletionEpSetSize,
+ PDMR3AsyncCompletionEpSetBwMgr,
+ PDMR3AsyncCompletionEpFlush,
+ PDMR3AsyncCompletionEpRead,
+ PDMR3AsyncCompletionEpWrite,
+ pdmR3DrvHlp_NetShaperAttach,
+ pdmR3DrvHlp_NetShaperDetach,
+ pdmR3DrvHlp_NetShaperAllocateBandwidth,
+ pdmR3DrvHlp_LdrGetRCInterfaceSymbols,
+ pdmR3DrvHlp_LdrGetR0InterfaceSymbols,
+ pdmR3DrvHlp_CritSectInit,
+ pdmR3DrvHlp_CritSectYield,
+ pdmR3DrvHlp_CritSectEnter,
+ pdmR3DrvHlp_CritSectEnterDebug,
+ pdmR3DrvHlp_CritSectTryEnter,
+ pdmR3DrvHlp_CritSectTryEnterDebug,
+ pdmR3DrvHlp_CritSectLeave,
+ pdmR3DrvHlp_CritSectIsOwner,
+ pdmR3DrvHlp_CritSectIsInitialized,
+ pdmR3DrvHlp_CritSectHasWaiters,
+ pdmR3DrvHlp_CritSectGetRecursion,
+ pdmR3DrvHlp_CritSectScheduleExitEvent,
+ pdmR3DrvHlp_CritSectDelete,
+ pdmR3DrvHlp_CallR0,
+ pdmR3DrvHlp_BlkCacheRetain,
+ PDMR3BlkCacheRelease,
+ PDMR3BlkCacheClear,
+ PDMR3BlkCacheSuspend,
+ PDMR3BlkCacheResume,
+ PDMR3BlkCacheIoXferComplete,
+ PDMR3BlkCacheRead,
+ PDMR3BlkCacheWrite,
+ PDMR3BlkCacheFlush,
+ PDMR3BlkCacheDiscard,
+ pdmR3DrvHlp_VMGetSuspendReason,
+ pdmR3DrvHlp_VMGetResumeReason,
+ pdmR3DrvHlp_TimerSetMillies,
+ pdmR3DrvHlp_STAMDeregisterByPrefix,
+ pdmR3DrvHlp_QueryGenericUserObject,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ PDM_DRVHLPR3_VERSION /* u32TheEnd */
+};
+
+/** @} */
diff --git a/src/VBox/VMM/VMMR3/PDMLdr.cpp b/src/VBox/VMM/VMMR3/PDMLdr.cpp
new file mode 100644
index 00000000..13e904bf
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMLdr.cpp
@@ -0,0 +1,1782 @@
+/* $Id: PDMLdr.cpp $ */
+/** @file
+ * PDM - Pluggable Device Manager, module loader.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+//#define PDMLDR_FAKE_MODE
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_LDR
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/sup.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/VBoxTpG.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/ctype.h>
+#include <iprt/file.h>
+#include <iprt/ldr.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+
+#include <limits.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Structure which the user argument of the RTLdrGetBits() callback points to.
+ * @internal
+ */
+typedef struct PDMGETIMPORTARGS
+{
+ PVM pVM;
+ PPDMMOD pModule;
+} PDMGETIMPORTARGS, *PPDMGETIMPORTARGS;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+static DECLCALLBACK(int) pdmR3GetImportRC(RTLDRMOD hLdrMod, const char *pszModule, const char *pszSymbol, unsigned uSymbol, RTUINTPTR *pValue, void *pvUser);
+static char *pdmR3FileRC(const char *pszFile, const char *pszSearchPath);
+#endif
+static int pdmR3LoadR0U(PUVM pUVM, const char *pszFilename, const char *pszName, const char *pszSearchPath);
+static char *pdmR3FileR0(const char *pszFile, const char *pszSearchPath);
+static char *pdmR3File(const char *pszFile, const char *pszDefaultExt, const char *pszSearchPath, bool fShared);
+
+
+
+/**
+ * Loads the VMMR0.r0 module early in the init process.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3_INT_DECL(int) PDMR3LdrLoadVMMR0U(PUVM pUVM)
+{
+ return pdmR3LoadR0U(pUVM, NULL, VMMR0_MAIN_MODULE_NAME, NULL);
+}
+
+
+/**
+ * Init the module loader part of PDM.
+ *
+ * This routine will load the Host Context Ring-0 and Guest
+ * Context VMM modules.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ */
+int pdmR3LdrInitU(PUVM pUVM)
+{
+#if !defined(PDMLDR_FAKE_MODE) && defined(VBOX_WITH_RAW_MODE_KEEP)
+ /*
+ * Load the mandatory RC module; VMMR0.r0 is loaded before VM creation.
+ */
+ PVM pVM = pUVM->pVM; AssertPtr(pVM);
+ if (VM_IS_RAW_MODE_ENABLED(pVM))
+ {
+ int rc = PDMR3LdrLoadRC(pVM, NULL, VMMRC_MAIN_MODULE_NAME);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+#else
+ RT_NOREF(pUVM);
+#endif
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminate the module loader part of PDM.
+ *
+ * This will unload and free all modules.
+ *
+ * @param pUVM The user mode VM structure.
+ * @param fFinal This is clear when in the PDMR3Term/vmR3Destroy call
+ * chain, and set when called from PDMR3TermUVM.
+ *
+ * @remarks This is normally called twice during termination.
+ */
+void pdmR3LdrTermU(PUVM pUVM, bool fFinal)
+{
+ /*
+ * Free the modules.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMMOD pModule = pUVM->pdm.s.pModules;
+ pUVM->pdm.s.pModules = NULL;
+ PPDMMOD *ppNext = &pUVM->pdm.s.pModules;
+ while (pModule)
+ {
+ /* free loader item. */
+ if (pModule->hLdrMod != NIL_RTLDRMOD)
+ {
+ int rc2 = RTLdrClose(pModule->hLdrMod);
+ AssertRC(rc2);
+ pModule->hLdrMod = NIL_RTLDRMOD;
+ }
+
+ /* free bits. */
+ switch (pModule->eType)
+ {
+ case PDMMOD_TYPE_R0:
+ {
+ if (fFinal)
+ {
+ Assert(pModule->ImageBase);
+ int rc2 = SUPR3FreeModule((void *)(uintptr_t)pModule->ImageBase);
+ AssertRC(rc2);
+ pModule->ImageBase = 0;
+ break;
+ }
+
+ /* Postpone freeing ring-0 modules till the PDMR3TermUVM() phase, as VMMR0.r0 is
+ still busy when we're called the first time, very early in vmR3Destroy(). */
+ PPDMMOD pNextModule = pModule->pNext;
+
+ pModule->pNext = NULL;
+ *ppNext = pModule;
+ ppNext = &pModule->pNext;
+
+ pModule = pNextModule;
+ continue;
+ }
+
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+ case PDMMOD_TYPE_RC:
+#endif
+ case PDMMOD_TYPE_R3:
+ /* MM will free this memory for us - it's alloc only memory. :-) */
+ break;
+
+ default:
+ AssertMsgFailed(("eType=%d\n", pModule->eType));
+ break;
+ }
+ pModule->pvBits = NULL;
+
+ void *pvFree = pModule;
+ pModule = pModule->pNext;
+ RTMemFree(pvFree);
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Applies relocations to RC modules.
+ *
+ * This must be done very early in the relocation
+ * process so that components can resolve RC symbols during relocation.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) PDMR3LdrRelocateU(PUVM pUVM, RTGCINTPTR offDelta)
+{
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+ LogFlow(("PDMR3LdrRelocate: offDelta=%RGv\n", offDelta));
+ RT_NOREF1(offDelta);
+
+ /*
+ * RC Modules.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ if (pUVM->pdm.s.pModules)
+ {
+ /*
+ * The relocation has to be done in two passes so that imports
+ * can be resolved correctly. The first pass updates the
+ * ImageBase, saving the current value in OldImageBase.
+ * The second pass does the actual relocation.
+ */
+ /* pass 1 */
+ PPDMMOD pCur;
+ for (pCur = pUVM->pdm.s.pModules; pCur; pCur = pCur->pNext)
+ {
+ if (pCur->eType == PDMMOD_TYPE_RC)
+ {
+ pCur->OldImageBase = pCur->ImageBase;
+ pCur->ImageBase = MMHyperR3ToRC(pUVM->pVM, pCur->pvBits);
+ }
+ }
+
+ /* pass 2 */
+ for (pCur = pUVM->pdm.s.pModules; pCur; pCur = pCur->pNext)
+ {
+ if (pCur->eType == PDMMOD_TYPE_RC)
+ {
+ PDMGETIMPORTARGS Args;
+ Args.pVM = pUVM->pVM;
+ Args.pModule = pCur;
+ int rc = RTLdrRelocate(pCur->hLdrMod, pCur->pvBits, pCur->ImageBase, pCur->OldImageBase,
+ pdmR3GetImportRC, &Args);
+ AssertFatalMsgRC(rc, ("RTLdrRelocate failed, rc=%d\n", rc));
+ }
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+#else
+ RT_NOREF2(pUVM, offDelta);
+#endif
+}
+
+
+/**
+ * Loads a module into the host context ring-3.
+ *
+ * This is used by the driver and device init functions to load modules
+ * containing the drivers and devices. The function can be extended to
+ * load modules which are not native to the environment we're running in,
+ * but at the moment this is not required.
+ *
+ * No reference counting is kept, since we don't implement any facilities
+ * for unloading the module. But the module will naturally be released
+ * when the VM terminates.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszFilename Filename of the module binary.
+ * @param pszName Module name. Case sensitive and the length is limited!
+ */
+int pdmR3LoadR3U(PUVM pUVM, const char *pszFilename, const char *pszName)
+{
+ /*
+ * Validate input.
+ */
+ AssertMsg(RTCritSectIsInitialized(&pUVM->pdm.s.ListCritSect), ("bad init order!\n"));
+ Assert(pszFilename);
+ size_t cchFilename = strlen(pszFilename);
+ Assert(pszName);
+ size_t cchName = strlen(pszName);
+ PPDMMOD pCur;
+ if (cchName >= sizeof(pCur->szName))
+ {
+ AssertMsgFailed(("Name is too long, cchName=%d pszName='%s'\n", cchName, pszName));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Try looking up the name to see if the module already exists.
+ */
+ int rc;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (pCur = pUVM->pdm.s.pModules; pCur; pCur = pCur->pNext)
+ {
+ if (!strcmp(pCur->szName, pszName))
+ {
+ if (pCur->eType == PDMMOD_TYPE_R3)
+ rc = VINF_PDM_ALREADY_LOADED;
+ else
+ rc = VERR_PDM_MODULE_NAME_CLASH;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ AssertMsgRC(rc, ("We've already got a module '%s' loaded!\n", pszName));
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the module list node and initialize it.
+ */
+ const char *pszSuff = RTLdrGetSuff();
+ size_t cchSuff = RTPathHasSuffix(pszFilename) ? 0 : strlen(pszSuff);
+ PPDMMOD pModule = (PPDMMOD)RTMemAllocZ(RT_UOFFSETOF_DYN(PDMMOD, szFilename[cchFilename + cchSuff + 1]));
+ if (pModule)
+ {
+ pModule->eType = PDMMOD_TYPE_R3;
+ memcpy(pModule->szName, pszName, cchName); /* memory is zero'd, no need to copy terminator :-) */
+ memcpy(pModule->szFilename, pszFilename, cchFilename);
+ memcpy(&pModule->szFilename[cchFilename], pszSuff, cchSuff);
+
+ /*
+ * Load the loader item.
+ */
+ RTERRINFOSTATIC ErrInfo;
+ RTErrInfoInitStatic(&ErrInfo);
+ rc = SUPR3HardenedLdrLoadPlugIn(pModule->szFilename, &pModule->hLdrMod, &ErrInfo.Core);
+ if (RT_SUCCESS(rc))
+ {
+ pModule->pNext = pUVM->pdm.s.pModules;
+ pUVM->pdm.s.pModules = pModule;
+ }
+ else
+ {
+ /* Something went wrong, most likely the module was not found. Don't consider other, unlikely errors. */
+ rc = VMSetError(pUVM->pVM, rc, RT_SRC_POS,
+ N_("Unable to load R3 module %s (%s): %s"), pModule->szFilename, pszName, ErrInfo.Core.pszMsg);
+ RTMemFree(pModule);
+ }
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+
+/**
+ * Resolve an external symbol during RTLdrGetBits() of a RC module.
+ *
+ * @returns VBox status code.
+ * @param hLdrMod The loader module handle.
+ * @param pszModule Module name.
+ * @param pszSymbol Symbol name, NULL if uSymbol should be used.
+ * @param uSymbol Symbol ordinal, ~0 if pszSymbol should be used.
+ * @param pValue Where to store the symbol value (address).
+ * @param pvUser User argument.
+ */
+static DECLCALLBACK(int) pdmR3GetImportRC(RTLDRMOD hLdrMod, const char *pszModule, const char *pszSymbol, unsigned uSymbol,
+ RTUINTPTR *pValue, void *pvUser)
+{
+ PVM pVM = ((PPDMGETIMPORTARGS)pvUser)->pVM;
+ PPDMMOD pModule = ((PPDMGETIMPORTARGS)pvUser)->pModule;
+ NOREF(hLdrMod); NOREF(uSymbol);
+
+ /*
+ * Adjust input.
+ */
+ if (pszModule && !*pszModule)
+ pszModule = NULL;
+
+ /*
+ * Builtin module.
+ */
+ if (!pszModule || !strcmp(pszModule, "VMMRCBuiltin.rc"))
+ {
+ int rc = VINF_SUCCESS;
+ if (!strcmp(pszSymbol, "g_VM"))
+ *pValue = pVM->pVMRC;
+ else if (!strcmp(pszSymbol, "g_VCpu0"))
+ *pValue = pVM->pVMRC + pVM->offVMCPU;
+ else if (!strcmp(pszSymbol, "g_CPUM"))
+ *pValue = VM_RC_ADDR(pVM, &pVM->cpum);
+ else if ( !strncmp(pszSymbol, "g_TRPM", 6)
+ || !strncmp(pszSymbol, "g_trpm", 6)
+ || !strncmp(pszSymbol, "TRPM", 4))
+ {
+ RTRCPTR RCPtr = 0;
+ rc = TRPMR3GetImportRC(pVM, pszSymbol, &RCPtr);
+ if (RT_SUCCESS(rc))
+ *pValue = RCPtr;
+ }
+ else if ( !strncmp(pszSymbol, "VMM", 3)
+ || !strcmp(pszSymbol, "g_Logger")
+ || !strcmp(pszSymbol, "g_RelLogger"))
+ {
+ RTRCPTR RCPtr = 0;
+ rc = VMMR3GetImportRC(pVM, pszSymbol, &RCPtr);
+ if (RT_SUCCESS(rc))
+ *pValue = RCPtr;
+ }
+ else
+ {
+ AssertMsg(!pszModule, ("Unknown builtin symbol '%s' for module '%s'!\n", pszSymbol, pModule->szName)); NOREF(pModule);
+ rc = VERR_SYMBOL_NOT_FOUND;
+ }
+ if (RT_SUCCESS(rc) || pszModule)
+ {
+ if (RT_FAILURE(rc))
+ LogRel(("PDMLdr: Couldn't find symbol '%s' in module '%s'!\n", pszSymbol, pszModule));
+ return rc;
+ }
+ }
+
+ /*
+ * Search for module.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMMOD pCur = pUVM->pdm.s.pModules;
+ while (pCur)
+ {
+ if ( pCur->eType == PDMMOD_TYPE_RC
+ && ( !pszModule
+ || !strcmp(pCur->szName, pszModule))
+ )
+ {
+ /* Search for the symbol. */
+ int rc = RTLdrGetSymbolEx(pCur->hLdrMod, pCur->pvBits, pCur->ImageBase, UINT32_MAX, pszSymbol, pValue);
+ if (RT_SUCCESS(rc))
+ {
+ AssertMsg(*pValue - pCur->ImageBase < RTLdrSize(pCur->hLdrMod),
+ ("%RRv-%RRv %s %RRv\n", (RTRCPTR)pCur->ImageBase,
+ (RTRCPTR)(pCur->ImageBase + RTLdrSize(pCur->hLdrMod) - 1),
+ pszSymbol, (RTRCPTR)*pValue));
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+ if (pszModule)
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertLogRelMsgFailed(("PDMLdr: Couldn't find symbol '%s' in module '%s'!\n", pszSymbol, pszModule));
+ return VERR_SYMBOL_NOT_FOUND;
+ }
+ }
+
+ /* next */
+ pCur = pCur->pNext;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertLogRelMsgFailed(("Couldn't find module '%s' for resolving symbol '%s'!\n", pszModule, pszSymbol));
+ return VERR_SYMBOL_NOT_FOUND;
+}
+
+
+/**
+ * Loads a module into the raw-mode context (i.e. into the Hypervisor memory
+ * region).
+ *
+ * @returns VBox status code.
+ * @retval VINF_PDM_ALREADY_LOADED if the module is already loaded (name +
+ * filename match).
+ * @retval VERR_PDM_MODULE_NAME_CLASH if a different file has already been
+ * loaded with the same module name.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszFilename Filename of the module binary.
+ * @param pszName Module name. Case sensitive and the length is limited!
+ */
+VMMR3DECL(int) PDMR3LdrLoadRC(PVM pVM, const char *pszFilename, const char *pszName)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PDM_HM_IPE);
+
+ /*
+ * Find the file if not specified.
+ */
+ char *pszFile = NULL;
+ if (!pszFilename)
+ pszFilename = pszFile = pdmR3FileRC(pszName, NULL);
+
+ /*
+ * Check if a module by that name is already loaded.
+ */
+ int rc;
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMMOD pCur = pUVM->pdm.s.pModules;
+ while (pCur)
+ {
+ if (!strcmp(pCur->szName, pszName))
+ {
+ /* Name clash. Hopefully due to it being the same file. */
+ if (!strcmp(pCur->szFilename, pszFilename))
+ rc = VINF_PDM_ALREADY_LOADED;
+ else
+ {
+ rc = VERR_PDM_MODULE_NAME_CLASH;
+ AssertMsgFailed(("We've already got a module '%s' loaded!\n", pszName));
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemTmpFree(pszFile);
+ return rc;
+ }
+ /* next */
+ pCur = pCur->pNext;
+ }
+
+ /*
+ * Allocate the module list node.
+ */
+ PPDMMOD pModule = (PPDMMOD)RTMemAllocZ(sizeof(*pModule) + strlen(pszFilename));
+ if (!pModule)
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemTmpFree(pszFile);
+ return VERR_NO_MEMORY;
+ }
+ AssertMsg(strlen(pszName) + 1 < sizeof(pModule->szName),
+ ("pazName is too long (%d chars) max is %d chars.\n", strlen(pszName), sizeof(pModule->szName) - 1));
+ strcpy(pModule->szName, pszName);
+ pModule->eType = PDMMOD_TYPE_RC;
+ strcpy(pModule->szFilename, pszFilename);
+
+
+ /*
+ * Open the loader item.
+ */
+ RTERRINFOSTATIC ErrInfo;
+ RTErrInfoInitStatic(&ErrInfo);
+ rc = SUPR3HardenedVerifyPlugIn(pszFilename, &ErrInfo.Core);
+ if (RT_SUCCESS(rc))
+ {
+ RTErrInfoClear(&ErrInfo.Core);
+ rc = RTLdrOpen(pszFilename, 0, RTLDRARCH_X86_32, &pModule->hLdrMod);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Allocate space in the hypervisor.
+ */
+ size_t cb = RTLdrSize(pModule->hLdrMod);
+ cb = RT_ALIGN_Z(cb, RT_MAX(GUEST_PAGE_SIZE, HOST_PAGE_SIZE));
+ uint32_t cPages = (uint32_t)(cb >> HOST_PAGE_SHIFT);
+ if (((size_t)cPages << HOST_PAGE_SHIFT) == cb)
+ {
+ PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
+ if (paPages)
+ {
+ rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pModule->pvBits, NULL /*pR0Ptr*/, paPages);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCPTR GCPtr;
+ rc = VERR_NOT_IMPLEMENTED; //MMR3HyperMapPages(pVM, pModule->pvBits, NIL_RTR0PTR, cPages, paPages, pModule->szName, &GCPtr);
+ if (RT_SUCCESS(rc))
+ {
+ //MMR3HyperReserveFence(pVM);
+
+ /*
+ * Get relocated image bits.
+ */
+ Assert(MMHyperR3ToRC(pVM, pModule->pvBits) == GCPtr);
+ pModule->ImageBase = GCPtr;
+ PDMGETIMPORTARGS Args;
+ Args.pVM = pVM;
+ Args.pModule = pModule;
+ rc = RTLdrGetBits(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, pdmR3GetImportRC, &Args);
+ if (RT_SUCCESS(rc))
+ {
+#ifdef VBOX_WITH_DTRACE_RC
+ /*
+ * Register the tracer bits if present.
+ */
+ RTLDRADDR uValue;
+ rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, UINT32_MAX,
+ "g_VTGObjHeader", &uValue);
+ if (RT_SUCCESS(rc))
+ {
+ PVTGOBJHDR pVtgHdr = (PVTGOBJHDR)MMHyperRCToCC(pVM, (RTRCPTR)uValue);
+ if ( pVtgHdr
+ && !memcmp(pVtgHdr->szMagic, VTGOBJHDR_MAGIC, sizeof(pVtgHdr->szMagic)))
+ rc = SUPR3TracerRegisterModule(~(uintptr_t)0, pModule->szName, pVtgHdr, uValue,
+ SUP_TRACER_UMOD_FLAGS_SHARED);
+ else
+ rc = pVtgHdr ? VERR_INVALID_MAGIC : VERR_INVALID_POINTER;
+ if (RT_FAILURE(rc))
+ LogRel(("PDMLdr: Failed to register tracepoints for '%s': %Rrc\n", pModule->szName, rc));
+ }
+#endif
+
+ /*
+ * Insert the module.
+ */
+ if (pUVM->pdm.s.pModules)
+ {
+ /* We don't expect this list to be very long, so walk it rather than keeping a tail pointer. */
+ pCur = pUVM->pdm.s.pModules;
+ while (pCur->pNext)
+ pCur = pCur->pNext;
+ pCur->pNext = pModule;
+ }
+ else
+ pUVM->pdm.s.pModules = pModule; /* (pNext is zeroed by alloc) */
+ Log(("PDM: RC Module at %RRv %s (%s)\n", (RTRCPTR)pModule->ImageBase, pszName, pszFilename));
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemTmpFree(pszFile);
+ RTMemTmpFree(paPages);
+
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ AssertRC(rc);
+ SUPR3PageFreeEx(pModule->pvBits, cPages);
+ }
+ }
+ else
+ AssertMsgFailed(("SUPR3PageAlloc(%d,) -> %Rrc\n", cPages, rc));
+ RTMemTmpFree(paPages);
+ }
+ else
+ rc = VERR_NO_TMP_MEMORY;
+ }
+ else
+ rc = VERR_OUT_OF_RANGE;
+ int rc2 = RTLdrClose(pModule->hLdrMod);
+ AssertRC(rc2);
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ /* Don't consider VERR_PDM_MODULE_NAME_CLASH and VERR_NO_MEMORY above as these are very unlikely. */
+ if (RT_FAILURE(rc) && RTErrInfoIsSet(&ErrInfo.Core))
+ rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Cannot load RC module %s: %s"), pszFilename, ErrInfo.Core.pszMsg);
+ else if (RT_FAILURE(rc))
+ rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Cannot load RC module %s"), pszFilename);
+
+ RTMemFree(pModule);
+ RTMemTmpFree(pszFile);
+ return rc;
+}
+
+#endif /* VBOX_WITH_RAW_MODE_KEEP */
+
+/**
+ * Loads a module into the ring-0 context.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszFilename Filename of the module binary.
+ * @param pszName Module name. Case sensitive and the length is limited!
+ * @param pszSearchPath List of directories to search if @a pszFilename is
+ * not specified. Can be NULL, in which case the arch
+ * dependent install dir is searched.
+ */
+static int pdmR3LoadR0U(PUVM pUVM, const char *pszFilename, const char *pszName, const char *pszSearchPath)
+{
+ /*
+ * Validate input.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMMOD pCur = pUVM->pdm.s.pModules;
+ while (pCur)
+ {
+ if (!strcmp(pCur->szName, pszName))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertMsgFailed(("We've already got a module '%s' loaded!\n", pszName));
+ return VERR_PDM_MODULE_NAME_CLASH;
+ }
+ /* next */
+ pCur = pCur->pNext;
+ }
+
+ /*
+ * Find the file if not specified.
+ */
+ char *pszFile = NULL;
+ if (!pszFilename)
+ pszFilename = pszFile = pdmR3FileR0(pszName, pszSearchPath);
+
+ /*
+ * Allocate the module list node.
+ */
+ PPDMMOD pModule = (PPDMMOD)RTMemAllocZ(sizeof(*pModule) + strlen(pszFilename));
+ if (!pModule)
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemTmpFree(pszFile);
+ return VERR_NO_MEMORY;
+ }
+ AssertMsg(strlen(pszName) + 1 < sizeof(pModule->szName),
+ ("pazName is too long (%d chars) max is %d chars.\n", strlen(pszName), sizeof(pModule->szName) - 1));
+ strcpy(pModule->szName, pszName);
+ pModule->eType = PDMMOD_TYPE_R0;
+ strcpy(pModule->szFilename, pszFilename);
+
+ /*
+ * Ask the support library to load it.
+ */
+ void *pvImageBase;
+ RTERRINFOSTATIC ErrInfo;
+ RTErrInfoInitStatic(&ErrInfo);
+ int rc = SUPR3LoadModule(pszFilename, pszName, &pvImageBase, &ErrInfo.Core);
+ if (RT_SUCCESS(rc))
+ {
+ pModule->hLdrMod = NIL_RTLDRMOD;
+ pModule->ImageBase = (uintptr_t)pvImageBase;
+
+ /*
+ * Insert the module.
+ */
+ if (pUVM->pdm.s.pModules)
+ {
+ /* We don't expect this list to be very long, so walk it rather than keeping a tail pointer. */
+ pCur = pUVM->pdm.s.pModules;
+ while (pCur->pNext)
+ pCur = pCur->pNext;
+ pCur->pNext = pModule;
+ }
+ else
+ pUVM->pdm.s.pModules = pModule; /* (pNext is zeroed by alloc) */
+ Log(("PDM: R0 Module at %RHv %s (%s)\n", (RTR0PTR)pModule->ImageBase, pszName, pszFilename));
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemTmpFree(pszFile);
+ return VINF_SUCCESS;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ RTMemFree(pModule);
+ LogRel(("PDMLdr: pdmR3LoadR0U: pszName=\"%s\" rc=%Rrc szErr=\"%s\"\n", pszName, rc, ErrInfo.Core.pszMsg));
+
+ /* Don't consider VERR_PDM_MODULE_NAME_CLASH and VERR_NO_MEMORY above as these are very unlikely. */
+ if (RT_FAILURE(rc))
+ rc = VMR3SetError(pUVM, rc, RT_SRC_POS, N_("Failed to load R0 module %s: %s"), pszFilename, ErrInfo.Core.pszMsg);
+
+ RTMemTmpFree(pszFile); /* might be referenced through pszFilename in the VMSetError call above. */
+ return rc;
+}
+
+
+/**
+ * Makes sure a ring-0 module is loaded.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszModule Module name (no path).
+ * @param pszSearchPath List of directories to search for the module
+ * (assumes @a pszModule is also a filename).
+ */
+VMMR3_INT_DECL(int) PDMR3LdrLoadR0(PUVM pUVM, const char *pszModule, const char *pszSearchPath)
+{
+ /*
+ * Find the module.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ {
+ if ( pModule->eType == PDMMOD_TYPE_R0
+ && !strcmp(pModule->szName, pszModule))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ /*
+ * Okay, load it.
+ */
+ return pdmR3LoadR0U(pUVM, NULL, pszModule, pszSearchPath);
+}
+
+
+/**
+ * Get the address of a symbol in a given HC ring 3 module.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModule Module name.
+ * @param pszSymbol Symbol name. If its value is less than 64K it's treated as an
+ * ordinal value rather than a string pointer.
+ * @param ppvValue Where to store the symbol value.
+ */
+VMMR3_INT_DECL(int) PDMR3LdrGetSymbolR3(PVM pVM, const char *pszModule, const char *pszSymbol, void **ppvValue)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtr(pVM);
+ AssertPtr(pszModule);
+ AssertPtr(ppvValue);
+ PUVM pUVM = pVM->pUVM;
+ AssertMsg(RTCritSectIsInitialized(&pUVM->pdm.s.ListCritSect), ("bad init order!\n"));
+
+ /*
+ * Find the module.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ {
+ if ( pModule->eType == PDMMOD_TYPE_R3
+ && !strcmp(pModule->szName, pszModule))
+ {
+ RTUINTPTR Value = 0;
+ int rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, UINT32_MAX, pszSymbol, &Value);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ {
+ *ppvValue = (void *)(uintptr_t)Value;
+ Assert((uintptr_t)*ppvValue == Value);
+ }
+ else
+ {
+ if ((uintptr_t)pszSymbol < 0x10000)
+ AssertMsg(rc, ("Couldn't symbol '%u' in module '%s'\n", (unsigned)(uintptr_t)pszSymbol, pszModule));
+ else
+ AssertMsg(rc, ("Couldn't symbol '%s' in module '%s'\n", pszSymbol, pszModule));
+ }
+ return rc;
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertMsgFailed(("Couldn't locate module '%s'\n", pszModule));
+ return VERR_SYMBOL_NOT_FOUND;
+}
+
+
+/**
+ * Get the address of a symbol in a given HC ring 0 module.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModule Module name. If NULL the main R0 module (VMMR0.r0) is assumed.
+ * @param pszSymbol Symbol name. If its value is less than 64K it's treated as an
+ * ordinal value rather than a string pointer.
+ * @param ppvValue Where to store the symbol value.
+ */
+VMMR3DECL(int) PDMR3LdrGetSymbolR0(PVM pVM, const char *pszModule, const char *pszSymbol, PRTR0PTR ppvValue)
+{
+#ifdef PDMLDR_FAKE_MODE
+ *ppvValue = 0xdeadbeef;
+ return VINF_SUCCESS;
+
+#else
+ /*
+ * Validate input.
+ */
+ AssertPtr(pVM);
+ AssertPtrNull(pszModule);
+ AssertPtr(ppvValue);
+ PUVM pUVM = pVM->pUVM;
+ AssertMsg(RTCritSectIsInitialized(&pUVM->pdm.s.ListCritSect), ("bad init order!\n"));
+
+ if (!pszModule)
+ pszModule = VMMR0_MAIN_MODULE_NAME;
+
+ /*
+ * Find the module.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ {
+ if ( pModule->eType == PDMMOD_TYPE_R0
+ && !strcmp(pModule->szName, pszModule))
+ {
+ int rc = SUPR3GetSymbolR0((void *)(uintptr_t)pModule->ImageBase, pszSymbol, (void **)ppvValue);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgRC(rc, ("Couldn't find symbol '%s' in module '%s'\n", pszSymbol, pszModule));
+ LogRel(("PDMLdr: PDMGetSymbol: Couldn't find symbol '%s' in module '%s'\n", pszSymbol, pszModule));
+ }
+ return rc;
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertMsgFailed(("Couldn't locate module '%s'\n", pszModule));
+ return VERR_SYMBOL_NOT_FOUND;
+#endif
+}
+
+
+/**
+ * Same as PDMR3LdrGetSymbolR0, except that an attempt is made to load the module if it isn't found.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModule Module name. If NULL the main R0 module (VMMR0.r0) is assumed.
+ * @param pszSearchPath List of directories to search if @a pszModule is
+ * not qualified with a path. Can be NULL, in which
+ * case the arch dependent install dir is searched.
+ * @param pszSymbol Symbol name. If its value is less than 64K it's treated as an
+ * ordinal value rather than a string pointer.
+ * @param ppvValue Where to store the symbol value.
+ */
+VMMR3DECL(int) PDMR3LdrGetSymbolR0Lazy(PVM pVM, const char *pszModule, const char *pszSearchPath, const char *pszSymbol,
+ PRTR0PTR ppvValue)
+{
+#ifdef PDMLDR_FAKE_MODE
+ *ppvValue = 0xdeadbeef;
+ return VINF_SUCCESS;
+
+#else
+ AssertPtr(pVM);
+ AssertPtrNull(pszModule);
+ AssertPtr(ppvValue);
+ PUVM pUVM = pVM->pUVM;
+ AssertMsg(RTCritSectIsInitialized(&pUVM->pdm.s.ListCritSect), ("bad init order!\n"));
+
+ if (pszModule) /* (We don't lazy load the main R0 module.) */
+ {
+ /*
+ * Since we're lazy, we'll only check if the module is present
+ * and hand it over to PDMR3LdrGetSymbolR0 when that's done.
+ */
+ AssertMsgReturn(!strpbrk(pszModule, "/\\:\n\r\t"), ("pszModule=%s\n", pszModule), VERR_INVALID_PARAMETER);
+ PPDMMOD pModule;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ if ( pModule->eType == PDMMOD_TYPE_R0
+ && !strcmp(pModule->szName, pszModule))
+ break;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (!pModule)
+ {
+ int rc = pdmR3LoadR0U(pUVM, NULL, pszModule, pszSearchPath);
+ AssertMsgRCReturn(rc, ("pszModule=%s rc=%Rrc\n", pszModule, rc), VERR_MODULE_NOT_FOUND);
+ }
+ }
+
+ return PDMR3LdrGetSymbolR0(pVM, pszModule, pszSymbol, ppvValue);
+#endif
+}
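+
+/*
+ * Illustrative sketch (module and symbol names are hypothetical): resolving
+ * a ring-0 entry point, loading the module on demand if needed:
+ *
+ * @code
+ *      RTR0PTR pfnHandler = NIL_RTR0PTR;
+ *      int rc = PDMR3LdrGetSymbolR0Lazy(pVM, "SomeModR0.r0", NULL,    // no search path
+ *                                       "someModR0EntryPoint", &pfnHandler);
+ *      if (RT_SUCCESS(rc))
+ *          Log(("Resolved to %RHv\n", pfnHandler));
+ * @endcode
+ */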
+
+
+/**
+ * Get the address of a symbol in a given RC module.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModule Module name. If NULL the main RC module (VMMRC.rc)
+ * is assumed.
+ * @param pszSymbol Symbol name. If its value is less than 64K it's
+ * treated as an ordinal value rather than a string
+ * pointer.
+ * @param pRCPtrValue Where to store the symbol value.
+ */
+VMMR3DECL(int) PDMR3LdrGetSymbolRC(PVM pVM, const char *pszModule, const char *pszSymbol, PRTRCPTR pRCPtrValue)
+{
+#if defined(PDMLDR_FAKE_MODE) || !defined(VBOX_WITH_RAW_MODE_KEEP)
+ RT_NOREF(pVM, pszModule, pszSymbol);
+ *pRCPtrValue = NIL_RTRCPTR;
+ return VINF_SUCCESS;
+
+#else
+ /*
+ * Validate input.
+ */
+ AssertPtr(pVM);
+ AssertPtrNull(pszModule);
+ AssertPtr(pRCPtrValue);
+
+ if (!pszModule)
+ pszModule = VMMRC_MAIN_MODULE_NAME;
+
+ /*
+ * Find the module.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ {
+ if ( pModule->eType == PDMMOD_TYPE_RC
+ && !strcmp(pModule->szName, pszModule))
+ {
+ RTUINTPTR Value;
+ int rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, UINT32_MAX, pszSymbol, &Value);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ {
+ *pRCPtrValue = (RTGCPTR)Value;
+ Assert(*pRCPtrValue == Value);
+ }
+ else
+ {
+ if ((uintptr_t)pszSymbol < 0x10000)
+ AssertMsg(rc, ("Couldn't symbol '%u' in module '%s'\n", (unsigned)(uintptr_t)pszSymbol, pszModule));
+ else
+ AssertMsg(rc, ("Couldn't symbol '%s' in module '%s'\n", pszSymbol, pszModule));
+ }
+ return rc;
+ }
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertMsgFailed(("Couldn't locate module '%s'\n", pszModule));
+ return VERR_SYMBOL_NOT_FOUND;
+#endif
+}
+
+
+/**
+ * Same as PDMR3LdrGetSymbolRC, except that an attempt is made to load the module if it isn't found.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModule Module name. If NULL the main RC module (VMMRC.rc)
+ * is assumed.
+ * @param pszSearchPath List of directories to search if @a pszModule is
+ * not qualified with a path. Can be NULL, in which
+ * case the arch dependent install dir is searched.
+ * @param pszSymbol Symbol name. If its value is less than 64K it's treated as an
+ * ordinal value rather than a string pointer.
+ * @param pRCPtrValue Where to store the symbol value.
+ */
+VMMR3DECL(int) PDMR3LdrGetSymbolRCLazy(PVM pVM, const char *pszModule, const char *pszSearchPath, const char *pszSymbol,
+ PRTRCPTR pRCPtrValue)
+{
+#if defined(PDMLDR_FAKE_MODE) || !defined(VBOX_WITH_RAW_MODE_KEEP)
+ RT_NOREF(pVM, pszModule, pszSearchPath, pszSymbol);
+ *pRCPtrValue = NIL_RTRCPTR;
+ return VINF_SUCCESS;
+
+#else
+ AssertPtr(pVM);
+ if (!pszModule)
+ pszModule = VMMRC_MAIN_MODULE_NAME;
+ AssertPtr(pszModule);
+ AssertPtr(pRCPtrValue);
+
+ /*
+ * Since we're lazy, we'll only check if the module is present
+ * and hand it over to PDMR3LdrGetSymbolRC when that's done.
+ */
+ AssertMsgReturn(!strpbrk(pszModule, "/\\:\n\r\t"), ("pszModule=%s\n", pszModule), VERR_INVALID_PARAMETER);
+ PUVM pUVM = pVM->pUVM;
+ PPDMMOD pModule;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ if ( pModule->eType == PDMMOD_TYPE_RC
+ && !strcmp(pModule->szName, pszModule))
+ break;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (!pModule)
+ {
+ char *pszFilename = pdmR3FileRC(pszModule, pszSearchPath);
+ AssertMsgReturn(pszFilename, ("pszModule=%s\n", pszModule), VERR_MODULE_NOT_FOUND);
+ int rc = PDMR3LdrLoadRC(pVM, pszFilename, pszModule);
+ RTMemTmpFree(pszFilename);
+ AssertMsgRCReturn(rc, ("pszModule=%s rc=%Rrc\n", pszModule, rc), VERR_MODULE_NOT_FOUND);
+ }
+
+ return PDMR3LdrGetSymbolRC(pVM, pszModule, pszSymbol, pRCPtrValue);
+#endif
+}
+
+
+/**
+ * Constructs the full filename for a R3 image file.
+ *
+ * @returns Pointer to temporary memory containing the filename.
+ * Caller must free this using RTMemTmpFree().
+ * @returns NULL on failure.
+ *
+ * @param pszFile File name (no path).
+ * @param fShared If true, search in the shared directory (/usr/lib on Unix), else
+ * search in the private directory (/usr/lib/virtualbox on Unix).
+ * Ignored if VBOX_PATH_SHARED_LIBS is not defined.
+ */
+char *pdmR3FileR3(const char *pszFile, bool fShared)
+{
+ return pdmR3File(pszFile, NULL, NULL, fShared);
+}
+
+
+/**
+ * Constructs the full filename for a R0 image file.
+ *
+ * @returns Pointer to temporary memory containing the filename.
+ * Caller must free this using RTMemTmpFree().
+ * @returns NULL on failure.
+ *
+ * @param pszFile File name (no path).
+ * @param pszSearchPath List of directories to search if @a pszFile is
+ * not qualified with a path. Can be NULL, in which
+ * case the arch dependent install dir is searched.
+ */
+char *pdmR3FileR0(const char *pszFile, const char *pszSearchPath)
+{
+ return pdmR3File(pszFile, NULL, pszSearchPath, /*fShared=*/false);
+}
+
+
+/**
+ * Constructs the full filename for a RC image file.
+ *
+ * @returns Pointer to temporary memory containing the filename.
+ * Caller must free this using RTMemTmpFree().
+ * @returns NULL on failure.
+ *
+ * @param pszFile File name (no path).
+ * @param pszSearchPath List of directories to search if @a pszFile is
+ * not qualified with a path. Can be NULL, in which
+ * case the arch dependent install dir is searched.
+ */
+char *pdmR3FileRC(const char *pszFile, const char *pszSearchPath)
+{
+ return pdmR3File(pszFile, NULL, pszSearchPath, /*fShared=*/false);
+}
+
+
+/**
+ * Worker for pdmR3File().
+ *
+ * @returns Pointer to temporary memory containing the filename.
+ * Caller must free this using RTMemTmpFree().
+ * @returns NULL on failure.
+ *
+ * @param pszDir Directory part
+ * @param pszFile File name part
+ * @param pszDefaultExt Extension part
+ */
+static char *pdmR3FileConstruct(const char *pszDir, const char *pszFile, const char *pszDefaultExt)
+{
+ /*
+ * Allocate temp memory for return buffer.
+ */
+ size_t cchDir = strlen(pszDir);
+ size_t cchFile = strlen(pszFile);
+ size_t cchDefaultExt;
+
+ /*
+ * Default extension?
+ */
+ if (!pszDefaultExt || strchr(pszFile, '.'))
+ cchDefaultExt = 0;
+ else
+ cchDefaultExt = strlen(pszDefaultExt);
+
+ size_t cchPath = cchDir + 1 + cchFile + cchDefaultExt + 1;
+ AssertMsgReturn(cchPath <= RTPATH_MAX, ("Path too long!\n"), NULL);
+
+ char *pszRet = (char *)RTMemTmpAlloc(cchPath);
+ AssertMsgReturn(pszRet, ("Out of temporary memory!\n"), NULL);
+
+ /*
+ * Construct the filename.
+ */
+ memcpy(pszRet, pszDir, cchDir);
+ pszRet[cchDir++] = '/'; /* this works everywhere */
+ memcpy(pszRet + cchDir, pszFile, cchFile + 1);
+ if (cchDefaultExt)
+ memcpy(pszRet + cchDir + cchFile, pszDefaultExt, cchDefaultExt + 1);
+
+ return pszRet;
+}
+
+
+/**
+ * Worker for pdmR3FileRC(), pdmR3FileR0() and pdmR3FileR3().
+ *
+ * @returns Pointer to temporary memory containing the filename.
+ * Caller must free this using RTMemTmpFree().
+ * @returns NULL on failure.
+ * @param pszFile File name (no path).
+ * @param pszDefaultExt The default extension, NULL if none.
+ * @param pszSearchPath List of directories to search if @a pszFile is
+ * not qualified with a path. Can be NULL, in which
+ * case the arch dependent install dir is searched.
+ * @param fShared If true, search in the shared directory (/usr/lib on Unix), else
+ * search in the private directory (/usr/lib/virtualbox on Unix).
+ * Ignored if VBOX_PATH_SHARED_LIBS is not defined.
+ * @todo We'll have this elsewhere than in the root later!
+ * @todo Remove the fShared hack again once we don't need to link against VBoxDD anymore!
+ */
+static char *pdmR3File(const char *pszFile, const char *pszDefaultExt, const char *pszSearchPath, bool fShared)
+{
+ char szPath[RTPATH_MAX];
+ int rc;
+
+ AssertLogRelReturn(!fShared || !pszSearchPath, NULL);
+ Assert(!RTPathHavePath(pszFile));
+
+ /*
+ * If there is a path, search it.
+ */
+ if ( pszSearchPath
+ && *pszSearchPath)
+ {
+ /* Check the filename length. */
+ size_t const cchFile = strlen(pszFile);
+ if (cchFile >= sizeof(szPath))
+ return NULL;
+
+ /*
+ * Walk the search path.
+ */
+ const char *psz = pszSearchPath;
+ while (*psz)
+ {
+ /* Skip leading blanks - no directories with leading spaces, thank you. */
+ while (RT_C_IS_BLANK(*psz))
+ psz++;
+
+ /* Find the end of this element. */
+ const char *pszNext;
+ const char *pszEnd = strchr(psz, ';');
+ if (!pszEnd)
+ pszEnd = pszNext = strchr(psz, '\0');
+ else
+ pszNext = pszEnd + 1;
+ if (pszEnd != psz)
+ {
+ rc = RTPathJoinEx(szPath, sizeof(szPath), psz, pszEnd - psz, pszFile, cchFile, RTPATH_STR_F_STYLE_HOST);
+ if (RT_SUCCESS(rc))
+ {
+ if (RTFileExists(szPath))
+ {
+ size_t cchPath = strlen(szPath) + 1;
+ char *pszRet = (char *)RTMemTmpAlloc(cchPath);
+ if (pszRet)
+ memcpy(pszRet, szPath, cchPath);
+ return pszRet;
+ }
+ }
+ }
+
+ /* advance */
+ psz = pszNext;
+ }
+ }
+
+ /*
+ * Use the default location.
+ */
+ rc = fShared
+ ? RTPathSharedLibs( szPath, sizeof(szPath))
+ : RTPathAppPrivateArch(szPath, sizeof(szPath));
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("RTPath[SharedLibs|AppPrivateArch](,%d) failed rc=%d!\n", sizeof(szPath), rc));
+ return NULL;
+ }
+
+ return pdmR3FileConstruct(szPath, pszFile, pszDefaultExt);
+}
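+
+/*
+ * Note: pszSearchPath is a semicolon separated list of directories which are
+ * tried in order; a made-up example:
+ *
+ *      "/opt/extension/linux.amd64;/opt/extension"
+ *
+ * Leading blanks in each element are skipped and empty elements are ignored.
+ * If no element yields an existing file, the shared or arch dependent
+ * private install directory is used as described above.
+ */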
+
+
+/** @internal */
+typedef struct QMFEIPARG
+{
+ RTINTPTR uPC;
+
+ char *pszNearSym1;
+ size_t cchNearSym1;
+ RTINTPTR offNearSym1;
+
+ char *pszNearSym2;
+ size_t cchNearSym2;
+ RTINTPTR offNearSym2;
+} QMFEIPARG, *PQMFEIPARG;
+
+
+/**
+ * Enumeration callback function used by RTLdrEnumSymbols().
+ *
+ * @returns VBox status code. Failure will stop the enumeration.
+ * @param hLdrMod The loader module handle.
+ * @param pszSymbol Symbol name. NULL if ordinal only.
+ * @param uSymbol Symbol ordinal, ~0 if not used.
+ * @param Value Symbol value.
+ * @param pvUser The user argument specified to RTLdrEnumSymbols().
+ */
+static DECLCALLBACK(int) pdmR3QueryModFromEIPEnumSymbols(RTLDRMOD hLdrMod, const char *pszSymbol, unsigned uSymbol,
+ RTUINTPTR Value, void *pvUser)
+{
+ PQMFEIPARG pArgs = (PQMFEIPARG)pvUser;
+ NOREF(hLdrMod);
+
+ RTINTPTR off = Value - pArgs->uPC;
+ if (off <= 0) /* near1 is before or at same location. */
+ {
+ if (off > pArgs->offNearSym1)
+ {
+ pArgs->offNearSym1 = off;
+ if (pArgs->pszNearSym1 && pArgs->cchNearSym1)
+ {
+ *pArgs->pszNearSym1 = '\0';
+ if (pszSymbol)
+ strncat(pArgs->pszNearSym1, pszSymbol, pArgs->cchNearSym1);
+ else
+ {
+ char szOrd[32];
+ RTStrPrintf(szOrd, sizeof(szOrd), "#%#x", uSymbol);
+ strncat(pArgs->pszNearSym1, szOrd, pArgs->cchNearSym1);
+ }
+ }
+ }
+ }
+ else /* near2 is after */
+ {
+ if (off < pArgs->offNearSym2)
+ {
+ pArgs->offNearSym2 = off;
+ if (pArgs->pszNearSym2 && pArgs->cchNearSym2)
+ {
+ *pArgs->pszNearSym2 = '\0';
+ if (pszSymbol)
+ strncat(pArgs->pszNearSym2, pszSymbol, pArgs->cchNearSym2);
+ else
+ {
+ char szOrd[32];
+ RTStrPrintf(szOrd, sizeof(szOrd), "#%#x", uSymbol);
+ strncat(pArgs->pszNearSym2, szOrd, pArgs->cchNearSym2);
+ }
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
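+
+/*
+ * Worked example for the bookkeeping above (addresses are made up): with
+ * uPC=0x1050 and symbols "funcA" at 0x1000 and "funcB" at 0x1100, funcA
+ * gives off=-0x50 (<= 0, raises offNearSym1) and funcB gives off=0xb0
+ * (> 0, lowers offNearSym2), so the enumeration ends with
+ * pszNearSym1="funcA" and pszNearSym2="funcB".
+ */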
+
+
+/**
+ * Internal worker for PDMR3LdrQueryRCModFromPC and PDMR3LdrQueryR0ModFromPC.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uPC The program counter (eip/rip) to locate the module for.
+ * @param enmType The module type.
+ * @param pszModName Where to store the module name.
+ * @param cchModName Size of the module name buffer.
+ * @param pMod Base address of the module.
+ * @param pszNearSym1 Name of the closest symbol below (or at) @a uPC.
+ * @param cchNearSym1 Size of the buffer pointed to by pszNearSym1.
+ * @param pNearSym1 The address of pszNearSym1.
+ * @param pszNearSym2 Name of the closest symbol above @a uPC.
+ * @param cchNearSym2 Size of the buffer pointed to by pszNearSym2.
+ * @param pNearSym2 The address of pszNearSym2.
+ */
+static int pdmR3LdrQueryModFromPC(PVM pVM, RTUINTPTR uPC, PDMMODTYPE enmType,
+ char *pszModName, size_t cchModName, PRTUINTPTR pMod,
+ char *pszNearSym1, size_t cchNearSym1, PRTUINTPTR pNearSym1,
+ char *pszNearSym2, size_t cchNearSym2, PRTUINTPTR pNearSym2)
+{
+ PUVM pUVM = pVM->pUVM;
+ int rc = VERR_MODULE_NOT_FOUND;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pCur = pUVM->pdm.s.pModules; pCur; pCur = pCur->pNext)
+ {
+ if (pCur->eType != enmType)
+ continue;
+
+ /* The following RTLdrOpen call is a dirty hack to get ring-0 module information. */
+ RTLDRMOD hLdrMod = pCur->hLdrMod;
+ if (hLdrMod == NIL_RTLDRMOD && uPC >= pCur->ImageBase)
+ {
+ int rc2 = RTLdrOpen(pCur->szFilename, 0 /*fFlags*/, RTLDRARCH_HOST, &hLdrMod);
+ if (RT_FAILURE(rc2))
+ hLdrMod = NIL_RTLDRMOD;
+ }
+
+ if ( hLdrMod != NIL_RTLDRMOD
+ && uPC - pCur->ImageBase < RTLdrSize(hLdrMod))
+ {
+ if (pMod)
+ *pMod = pCur->ImageBase;
+ if (pszModName && cchModName)
+ {
+ *pszModName = '\0';
+ strncat(pszModName, pCur->szName, cchModName);
+ }
+ if (pNearSym1) *pNearSym1 = 0;
+ if (pNearSym2) *pNearSym2 = 0;
+ if (pszNearSym1) *pszNearSym1 = '\0';
+ if (pszNearSym2) *pszNearSym2 = '\0';
+
+ /*
+ * Locate the nearest symbols.
+ */
+ QMFEIPARG Args;
+ Args.uPC = uPC;
+ Args.pszNearSym1 = pszNearSym1;
+ Args.cchNearSym1 = cchNearSym1;
+ Args.offNearSym1 = RTINTPTR_MIN;
+ Args.pszNearSym2 = pszNearSym2;
+ Args.cchNearSym2 = cchNearSym2;
+ Args.offNearSym2 = RTINTPTR_MAX;
+
+ rc = RTLdrEnumSymbols(hLdrMod, RTLDR_ENUM_SYMBOL_FLAGS_ALL, pCur->pvBits, pCur->ImageBase,
+ pdmR3QueryModFromEIPEnumSymbols, &Args);
+ if (pNearSym1 && Args.offNearSym1 != RTINTPTR_MIN)
+ *pNearSym1 = Args.offNearSym1 + uPC;
+ if (pNearSym2 && Args.offNearSym2 != RTINTPTR_MAX)
+ *pNearSym2 = Args.offNearSym2 + uPC;
+
+ rc = VINF_SUCCESS;
+ }
+
+ if (hLdrMod != pCur->hLdrMod && hLdrMod != NIL_RTLDRMOD)
+ RTLdrClose(hLdrMod);
+
+ if (RT_SUCCESS(rc))
+ break;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Queries raw-mode context module information from a PC (eip/rip).
+ *
+ * This is typically used to locate a crash address.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uPC The program counter (eip/rip) to locate the module for.
+ * @param pszModName Where to store the module name.
+ * @param cchModName Size of the module name buffer.
+ * @param pMod Base address of the module.
+ * @param pszNearSym1 Name of the closest symbol below (or at) @a uPC.
+ * @param cchNearSym1 Size of the buffer pointed to by pszNearSym1.
+ * @param pNearSym1 The address of pszNearSym1.
+ * @param pszNearSym2 Name of the closest symbol above @a uPC.
+ * @param cchNearSym2 Size of the buffer pointed to by pszNearSym2.
+ * @param pNearSym2 The address of pszNearSym2.
+ */
+VMMR3_INT_DECL(int) PDMR3LdrQueryRCModFromPC(PVM pVM, RTRCPTR uPC,
+ char *pszModName, size_t cchModName, PRTRCPTR pMod,
+ char *pszNearSym1, size_t cchNearSym1, PRTRCPTR pNearSym1,
+ char *pszNearSym2, size_t cchNearSym2, PRTRCPTR pNearSym2)
+{
+ RTUINTPTR AddrMod = 0;
+ RTUINTPTR AddrNear1 = 0;
+ RTUINTPTR AddrNear2 = 0;
+ int rc = pdmR3LdrQueryModFromPC(pVM, uPC, PDMMOD_TYPE_RC,
+ pszModName, cchModName, &AddrMod,
+ pszNearSym1, cchNearSym1, &AddrNear1,
+ pszNearSym2, cchNearSym2, &AddrNear2);
+ if (RT_SUCCESS(rc))
+ {
+ if (pMod)
+ *pMod = (RTRCPTR)AddrMod;
+ if (pNearSym1)
+ *pNearSym1 = (RTRCPTR)AddrNear1;
+ if (pNearSym2)
+ *pNearSym2 = (RTRCPTR)AddrNear2;
+ }
+ return rc;
+}
+
+
+/**
+ * Queries ring-0 context module information from a PC (eip/rip).
+ *
+ * This is typically used to locate a crash address.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uPC The program counter (eip/rip) to locate the module for.
+ * @param pszModName Where to store the module name.
+ * @param cchModName Size of the module name buffer.
+ * @param pMod Base address of the module.
+ * @param pszNearSym1 Name of the closest symbol below (or at) @a uPC.
+ * @param cchNearSym1 Size of the buffer pointed to by pszNearSym1.
+ * @param pNearSym1 The address of pszNearSym1.
+ * @param pszNearSym2 Name of the closest symbol above @a uPC.
+ * @param cchNearSym2 Size of the buffer pointed to by pszNearSym2. Optional.
+ * @param pNearSym2 The address of pszNearSym2. Optional.
+ */
+VMMR3_INT_DECL(int) PDMR3LdrQueryR0ModFromPC(PVM pVM, RTR0PTR uPC,
+ char *pszModName, size_t cchModName, PRTR0PTR pMod,
+ char *pszNearSym1, size_t cchNearSym1, PRTR0PTR pNearSym1,
+ char *pszNearSym2, size_t cchNearSym2, PRTR0PTR pNearSym2)
+{
+ RTUINTPTR AddrMod = 0;
+ RTUINTPTR AddrNear1 = 0;
+ RTUINTPTR AddrNear2 = 0;
+ int rc = pdmR3LdrQueryModFromPC(pVM, uPC, PDMMOD_TYPE_R0,
+ pszModName, cchModName, &AddrMod,
+ pszNearSym1, cchNearSym1, &AddrNear1,
+ pszNearSym2, cchNearSym2, &AddrNear2);
+ if (RT_SUCCESS(rc))
+ {
+ if (pMod)
+ *pMod = (RTR0PTR)AddrMod;
+ if (pNearSym1)
+ *pNearSym1 = (RTR0PTR)AddrNear1;
+ if (pNearSym2)
+ *pNearSym2 = (RTR0PTR)AddrNear2;
+ }
+ return rc;
+}
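+
+/*
+ * A minimal usage sketch for pretty-printing a ring-0 crash address
+ * (uCrashPC is a hypothetical input):
+ *
+ * @code
+ *      char szMod[64], szSym1[128], szSym2[128];
+ *      RTR0PTR uMod = 0, uSym1 = 0, uSym2 = 0;
+ *      if (RT_SUCCESS(PDMR3LdrQueryR0ModFromPC(pVM, uCrashPC,
+ *                                              szMod, sizeof(szMod), &uMod,
+ *                                              szSym1, sizeof(szSym1), &uSym1,
+ *                                              szSym2, sizeof(szSym2), &uSym2)))
+ *          LogRel(("R0 crash at %RHv: %s!%s+%#RX64\n",
+ *                  uCrashPC, szMod, szSym1, (uint64_t)(uCrashPC - uSym1)));
+ * @endcode
+ */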
+
+
+/**
+ * Enumerate all PDM modules.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pfnCallback Function to call back for each of the modules.
+ * @param pvArg User argument.
+ */
+VMMR3DECL(int) PDMR3LdrEnumModules(PVM pVM, PFNPDMR3ENUM pfnCallback, void *pvArg)
+{
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pCur = pUVM->pdm.s.pModules; pCur; pCur = pCur->pNext)
+ {
+ rc = pfnCallback(pVM,
+ pCur->szFilename,
+ pCur->szName,
+ pCur->ImageBase,
+ pCur->eType == PDMMOD_TYPE_RC ? RTLdrSize(pCur->hLdrMod) : 0,
+ pCur->eType == PDMMOD_TYPE_RC ? PDMLDRCTX_RAW_MODE
+ : pCur->eType == PDMMOD_TYPE_R0 ? PDMLDRCTX_RING_0
+ : pCur->eType == PDMMOD_TYPE_R3 ? PDMLDRCTX_RING_3
+ : PDMLDRCTX_INVALID,
+ pvArg);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
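+
+/*
+ * Sketch of an enumeration callback (the exact FNPDMR3ENUM prototype lives in
+ * VBox/vmm/pdm.h; the parameters mirror the invocation above):
+ *
+ * @code
+ *  static DECLCALLBACK(int) pdmR3ExampleEnum(PVM pVM, const char *pszFilename, const char *pszName,
+ *                                            RTUINTPTR ImageBase, size_t cbImage, PDMLDRCTX enmCtx, void *pvArg)
+ *  {
+ *      RT_NOREF(pVM, pszFilename, cbImage, enmCtx, pvArg);
+ *      LogRel(("PDM module '%s' at %RTptr\n", pszName, ImageBase));
+ *      return VINF_SUCCESS;    // returning a failure status stops the enumeration
+ *  }
+ * @endcode
+ */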
+
+
+/**
+ * Locates a module.
+ *
+ * @returns Pointer to the module if found.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszModule The module name.
+ * @param enmType The module type.
+ * @param fLazy Lazy loading the module if set.
+ * @param pszSearchPath Search path for use when lazy loading.
+ */
+static PPDMMOD pdmR3LdrFindModule(PUVM pUVM, const char *pszModule, PDMMODTYPE enmType,
+ bool fLazy, const char *pszSearchPath)
+{
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMMOD pModule = pUVM->pdm.s.pModules; pModule; pModule = pModule->pNext)
+ if ( pModule->eType == enmType
+ && !strcmp(pModule->szName, pszModule))
+ {
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return pModule;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ if (fLazy)
+ {
+ switch (enmType)
+ {
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+ case PDMMOD_TYPE_RC:
+ {
+ char *pszFilename = pdmR3FileRC(pszModule, pszSearchPath);
+ if (pszFilename)
+ {
+ int rc = PDMR3LdrLoadRC(pUVM->pVM, pszFilename, pszModule);
+ RTMemTmpFree(pszFilename);
+ if (RT_SUCCESS(rc))
+ return pdmR3LdrFindModule(pUVM, pszModule, enmType, false, NULL);
+ }
+ break;
+ }
+#endif
+
+ case PDMMOD_TYPE_R0:
+ {
+ int rc = pdmR3LoadR0U(pUVM, NULL, pszModule, pszSearchPath);
+ if (RT_SUCCESS(rc))
+ return pdmR3LdrFindModule(pUVM, pszModule, enmType, false, NULL);
+ break;
+ }
+
+ default:
+ AssertFailed();
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Resolves a ring-0 or raw-mode context interface.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvInterface Pointer to the interface structure. The symbol list
+ * describes the layout.
+ * @param cbInterface The size of the structure pvInterface is pointing
+ * to. For bounds checking.
+ * @param pszModule The module name. If NULL we assume it's the default
+ * R0 or RC module (@a fRing0OrRC). We'll attempt to
+ * load the module if it isn't found in the module
+ * list.
+ * @param pszSearchPath The module search path. If NULL, search the
+ * architecture dependent install directory.
+ * @param pszSymPrefix What to prefix the symbols in the list with. The
+ * idea is that you define a list that goes with an
+ * interface (INTERFACE_SYM_LIST) and reuse it with
+ * each implementation.
+ * @param pszSymList The symbol list for the interface. This is a
+ * semi-colon separated list of symbol base names. As
+ * mentioned above, each is prefixed with @a
+ * pszSymPrefix before resolving. There are a couple
+ * of special symbol names that will cause us to skip
+ * ahead a little bit:
+ * - U8:whatever,
+ * - U16:whatever,
+ * - U32:whatever,
+ * - U64:whatever,
+ * - RCPTR:whatever,
+ * - R3PTR:whatever,
+ * - R0PTR:whatever,
+ * - GCPHYS:whatever,
+ * - HCPHYS:whatever.
+ * @param fRing0 Set if it's a ring-0 context interface, clear if
+ * it's raw-mode context interface.
+ */
+VMMR3_INT_DECL(int) PDMR3LdrGetInterfaceSymbols(PVM pVM, void *pvInterface, size_t cbInterface,
+ const char *pszModule, const char *pszSearchPath,
+ const char *pszSymPrefix, const char *pszSymList,
+ bool fRing0)
+{
+ bool const fNullRun = !fRing0;
+
+ /*
+ * Find the module.
+ */
+ int rc = VINF_SUCCESS;
+ PPDMMOD pModule = NULL;
+ if (!fNullRun)
+ pModule = pdmR3LdrFindModule(pVM->pUVM,
+ pszModule ? pszModule : fRing0 ? "VMMR0.r0" : "VMMRC.rc",
+ fRing0 ? PDMMOD_TYPE_R0 : PDMMOD_TYPE_RC,
+ true /*fLazy*/, pszSearchPath);
+ if (pModule || fNullRun)
+ {
+ /* Prep the symbol name. */
+ char szSymbol[256];
+ size_t const cchSymPrefix = strlen(pszSymPrefix);
+ AssertReturn(cchSymPrefix + 5 < sizeof(szSymbol), VERR_SYMBOL_NOT_FOUND);
+ memcpy(szSymbol, pszSymPrefix, cchSymPrefix);
+
+ /*
+ * Iterate the symbol list.
+ */
+ uint32_t offInterface = 0;
+ const char *pszCur = pszSymList;
+ while (pszCur)
+ {
+ /*
+ * Find the end of the current symbol name.
+ */
+ size_t cchSym;
+ const char *pszNext = strchr(pszCur, ';');
+ if (pszNext)
+ {
+ cchSym = pszNext - pszCur;
+ pszNext++;
+ }
+ else
+ cchSym = strlen(pszCur);
+ AssertBreakStmt(cchSym > 0, rc = VERR_INVALID_PARAMETER);
+
+ /* Is it a skip instruction? */
+ const char *pszColon = (const char *)memchr(pszCur, ':', cchSym);
+ if (pszColon)
+ {
+ /*
+ * String switch on the instruction and execute it, checking
+ * that we didn't overshoot the interface structure.
+ */
+#define IS_SKIP_INSTR(szInstr) \
+ ( cchSkip == sizeof(szInstr) - 1 \
+ && !memcmp(pszCur, szInstr, sizeof(szInstr) - 1) )
+
+ size_t const cchSkip = pszColon - pszCur;
+ if (IS_SKIP_INSTR("U8"))
+ offInterface += sizeof(uint8_t);
+ else if (IS_SKIP_INSTR("U16"))
+ offInterface += sizeof(uint16_t);
+ else if (IS_SKIP_INSTR("U32"))
+ offInterface += sizeof(uint32_t);
+ else if (IS_SKIP_INSTR("U64"))
+ offInterface += sizeof(uint64_t);
+ else if (IS_SKIP_INSTR("RCPTR"))
+ offInterface += sizeof(RTRCPTR);
+ else if (IS_SKIP_INSTR("R3PTR"))
+ offInterface += sizeof(RTR3PTR);
+ else if (IS_SKIP_INSTR("R0PTR"))
+ offInterface += sizeof(RTR0PTR);
+ else if (IS_SKIP_INSTR("HCPHYS"))
+ offInterface += sizeof(RTHCPHYS);
+ else if (IS_SKIP_INSTR("GCPHYS"))
+ offInterface += sizeof(RTGCPHYS);
+ else
+ AssertMsgFailedBreakStmt(("Invalid skip instruction %.*s (prefix=%s)\n", cchSym, pszCur, pszSymPrefix),
+ rc = VERR_INVALID_PARAMETER);
+ AssertMsgBreakStmt(offInterface <= cbInterface,
+ ("off=%#x cb=%#x (sym=%.*s prefix=%s)\n", offInterface, cbInterface, cchSym, pszCur, pszSymPrefix),
+ rc = VERR_BUFFER_OVERFLOW);
+#undef IS_SKIP_INSTR
+ }
+ else
+ {
+ /*
+ * Construct the symbol name, get its value, store it and
+ * advance the interface cursor.
+ */
+ AssertReturn(cchSymPrefix + cchSym < sizeof(szSymbol), VERR_SYMBOL_NOT_FOUND);
+ memcpy(&szSymbol[cchSymPrefix], pszCur, cchSym);
+ szSymbol[cchSymPrefix + cchSym] = '\0';
+
+ if (fRing0)
+ {
+ void *pvValue = NULL;
+ if (!fNullRun)
+ {
+ rc = SUPR3GetSymbolR0((void *)(RTR0PTR)pModule->ImageBase, szSymbol, &pvValue);
+ AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ }
+
+ PRTR0PTR pValue = (PRTR0PTR)((uintptr_t)pvInterface + offInterface);
+ AssertMsgBreakStmt(offInterface + sizeof(*pValue) <= cbInterface,
+ ("off=%#x cb=%#x sym=%s\n", offInterface, cbInterface, szSymbol),
+ rc = VERR_BUFFER_OVERFLOW);
+ *pValue = (RTR0PTR)pvValue;
+ Assert((void *)*pValue == pvValue);
+ offInterface += sizeof(*pValue);
+ }
+ else
+ {
+ RTUINTPTR Value = 0;
+ if (!fNullRun)
+ {
+ rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, UINT32_MAX, szSymbol, &Value);
+ AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ }
+
+ PRTRCPTR pValue = (PRTRCPTR)((uintptr_t)pvInterface + offInterface);
+ AssertMsgBreakStmt(offInterface + sizeof(*pValue) <= cbInterface,
+ ("off=%#x cb=%#x sym=%s\n", offInterface, cbInterface, szSymbol),
+ rc = VERR_BUFFER_OVERFLOW);
+ *pValue = (RTRCPTR)Value;
+ Assert(*pValue == Value);
+ offInterface += sizeof(*pValue);
+ }
+ }
+
+ /* advance */
+ pszCur = pszNext;
+ }
+
+ }
+ else
+ rc = VERR_MODULE_NOT_FOUND;
+ return rc;
+}
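+
+/*
+ * Usage sketch for the symbol list format (illustrative only; the interface
+ * type, member names and symbol prefix below are invented for this example):
+ *
+ *     typedef struct SOMEIFR0
+ *     {
+ *         uint32_t u32Version;   // skipped via 'U32:Version'
+ *         uint32_t u32Padding;   // skipped via 'U32:Padding'
+ *         RTR0PTR  pfnFirst;     // resolved from 'someIfFirst'
+ *         RTR0PTR  pfnSecond;    // resolved from 'someIfSecond'
+ *     } SOMEIFR0;
+ *
+ *     SOMEIFR0 If;
+ *     int rc = PDMR3LdrGetInterfaceSymbols(pVM, &If, sizeof(If),
+ *                                          NULL, NULL, "someIf",
+ *                                          "U32:Version;U32:Padding;First;Second",
+ *                                          true); // ring-0 interface
+ */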
+
diff --git a/src/VBox/VMM/VMMR3/PDMNetShaper.cpp b/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
new file mode 100644
index 00000000..8ef6cffc
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
@@ -0,0 +1,549 @@
+/* $Id: PDMNetShaper.cpp $ */
+/** @file
+ * PDM Network Shaper - Limit network traffic according to bandwidth group settings.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NET_SHAPER
+#include <VBox/vmm/pdm.h>
+#include "PDMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/critsect.h>
+#include <iprt/string.h>
+#include <iprt/semaphore.h>
+#include <iprt/thread.h>
+
+#include <VBox/vmm/pdmnetshaper.h>
+
+
+
+
+/**
+ * Looks up a network bandwidth group by its name.
+ *
+ * @returns Pointer to the group if found, NULL if not.
+ * @param pVM The cross context VM structure.
+ * @param pszName The name of the group to find.
+ */
+static PPDMNSBWGROUP pdmNsBwGroupFindByName(PVM pVM, const char *pszName)
+{
+ AssertPtrReturn(pszName, NULL);
+ AssertReturn(*pszName != '\0', NULL);
+
+ size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+ for (size_t i = 0; i < cGroups; i++)
+ if (RTStrCmp(pVM->pdm.s.aNsGroups[i].szName, pszName) == 0)
+ return &pVM->pdm.s.aNsGroups[i];
+ return NULL;
+}
+
+
+#ifdef VBOX_STRICT
+/**
+ * Checks if pFilter is attached to the given group by walking the list.
+ */
+DECLINLINE(bool) pdmR3NsIsFilterAttached(PPDMNSBWGROUP pGroup, PPDMNSFILTER pFilter)
+{
+ PPDMNSFILTER pCur;
+ RTListForEach(&pGroup->FilterList, pCur, PDMNSFILTER, ListEntry)
+ {
+ if (pCur == pFilter)
+ return true;
+ }
+ return false;
+}
+#endif
+
+/**
+ * Attaches a network filter driver to the named bandwidth group.
+ *
+ * @returns VBox status code.
+ * @retval VERR_ALREADY_INITIALIZED if already attached.
+ * @retval VERR_NOT_FOUND if the bandwidth group wasn't found.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ * @param pszName Name of the bandwidth group to attach to.
+ * @param pFilter Pointer to the filter to attach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsAttach(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, PPDMNSFILTER pFilter)
+{
+ /*
+ * Validate input.
+ */
+ RT_NOREF(pDrvIns);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
+
+ uint32_t iGroup = pFilter->iGroup;
+ AssertMsgReturn(iGroup == 0, ("iGroup=%d\n", iGroup), VERR_ALREADY_INITIALIZED);
+ Assert(pFilter->ListEntry.pNext == NULL);
+ Assert(pFilter->ListEntry.pPrev == NULL);
+
+ /* Resolve the group. */
+ PPDMNSBWGROUP pGroup = pdmNsBwGroupFindByName(pVM, pszName);
+ AssertMsgReturn(pGroup, ("'%s'\n", pszName), VERR_NOT_FOUND);
+
+ /*
+ * The attaching is protected by PDM::NsLock and by atomically updating iGroup.
+ */
+ int rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+ if (RT_SUCCESS(rc))
+ {
+ if (ASMAtomicCmpXchgU32(&pFilter->iGroup, (uint32_t)(pGroup - &pVM->pdm.s.aNsGroups[0]) + 1, 0))
+ {
+ Assert(pFilter->ListEntry.pNext == NULL);
+ Assert(pFilter->ListEntry.pPrev == NULL);
+ RTListAppend(&pGroup->FilterList, &pFilter->ListEntry);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pGroup->cRefs);
+ AssertMsg(cRefs > 0 && cRefs < _16K, ("%u\n", cRefs));
+ RT_NOREF_PV(cRefs);
+
+ LogFlow(("PDMR3NsAttach: Attached '%s'/%u to %s (cRefs=%u)\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pGroup->szName, cRefs));
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ AssertMsgFailed(("iGroup=%d (attach race)\n", pFilter->iGroup));
+ rc = VERR_ALREADY_INITIALIZED;
+ }
+
+ int rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+ AssertRC(rc2);
+ }
+
+ return rc;
+}
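+
+/*
+ * Usage sketch (illustrative; pszBwGroup and pThis->Filter are assumed
+ * names, and real network drivers normally reach this via a PDM driver
+ * helper during construction):
+ *
+ *     rc = PDMR3NsAttach(pVM, pDrvIns, pszBwGroup, &pThis->Filter);
+ *     AssertRCReturn(rc, rc);
+ */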
+
+
+/**
+ * Detaches a network filter driver from its current bandwidth group (if any).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ * @param pFilter Pointer to the filter to detach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsDetach(PVM pVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
+{
+ /*
+ * Validate input.
+ */
+ RT_NOREF(pDrvIns);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
+
+ /* Return quietly if the filter isn't attached, since driver/device
+ destructors are also called on constructor failure. */
+ uint32_t const iGroup = ASMAtomicUoReadU32(&pFilter->iGroup);
+ if (!iGroup)
+ return VINF_SUCCESS;
+ AssertMsgReturn(iGroup - 1 < RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups)), ("iGroup=%#x\n", iGroup),
+ VERR_INVALID_HANDLE);
+ PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[iGroup - 1];
+
+ /*
+ * The detaching is protected by PDM::NsLock and by atomically updating iGroup.
+ */
+ int rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+ if (RT_SUCCESS(rc))
+ {
+ if (ASMAtomicCmpXchgU32(&pFilter->iGroup, 0, iGroup))
+ {
+ Assert(pdmR3NsIsFilterAttached(pGroup, pFilter));
+ RTListNodeRemove(&pFilter->ListEntry);
+ Assert(pFilter->ListEntry.pNext == NULL);
+ Assert(pFilter->ListEntry.pPrev == NULL);
+ ASMAtomicWriteU32(&pFilter->iGroup, 0);
+
+ uint32_t cRefs = ASMAtomicDecU32(&pGroup->cRefs);
+ Assert(cRefs < _16K);
+ RT_NOREF_PV(cRefs);
+
+ LogFlow(("PDMR3NsDetach: Detached '%s'/%u from %s (cRefs=%u)\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, pGroup->szName, cRefs));
+ rc = VINF_SUCCESS;
+ }
+ else
+ AssertFailedStmt(rc = VERR_WRONG_ORDER);
+
+ int rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+ AssertRC(rc2);
+ }
+ else
+ AssertRC(rc);
+ return rc;
+}
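+
+/*
+ * Usage sketch (illustrative): a driver destructor can call this
+ * unconditionally, as detaching an unattached filter quietly returns
+ * VINF_SUCCESS:
+ *
+ *     PDMR3NsDetach(pVM, pDrvIns, &pThis->Filter);
+ */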
+
+
+/**
+ * Unchokes all choked filters in the given bandwidth group, kicking off any
+ * pending transmits.
+ *
+ * Used both by pdmR3NsUnchokeThread and by PDMR3NsBwGroupSetLimit, the
+ * latter only when setting cbPerSecMax to zero.
+ *
+ * @param pGroup The group whose filters should be unchoked.
+ * @note Caller owns the PDM::NsLock critsect.
+ */
+static void pdmR3NsUnchokeGroupFilters(PPDMNSBWGROUP pGroup)
+{
+ PPDMNSFILTER pFilter;
+ RTListForEach(&pGroup->FilterList, pFilter, PDMNSFILTER, ListEntry)
+ {
+ bool fChoked = ASMAtomicXchgBool(&pFilter->fChoked, false);
+ if (fChoked)
+ {
+ PPDMINETWORKDOWN pIDrvNet = pFilter->pIDrvNetR3;
+ if (pIDrvNet && pIDrvNet->pfnXmitPending != NULL)
+ {
+ Log3(("pdmR3NsUnchokeGroupFilters: Unchoked %p in %s, calling %p\n",
+ pFilter, pGroup->szName, pIDrvNet->pfnXmitPending));
+ pIDrvNet->pfnXmitPending(pIDrvNet);
+ }
+ else
+ Log3(("pdmR3NsUnchokeGroupFilters: Unchoked %p in %s (no callback)\n", pFilter, pGroup->szName));
+ }
+ }
+}
+
+
+/**
+ * Worker for PDMR3NsBwGroupSetLimit and pdmR3NetShaperInit.
+ *
+ * @returns New bucket size.
+ * @param pGroup The group to update.
+ * @param cbPerSecMax The new max bytes per second.
+ */
+static uint32_t pdmNsBwGroupSetLimit(PPDMNSBWGROUP pGroup, uint64_t cbPerSecMax)
+{
+ uint32_t const cbRet = RT_MAX(PDM_NETSHAPER_MIN_BUCKET_SIZE, cbPerSecMax * PDM_NETSHAPER_MAX_LATENCY / RT_MS_1SEC);
+ pGroup->cbBucket = cbRet;
+ pGroup->cbPerSecMax = cbPerSecMax;
+ LogFlow(("pdmNsBwGroupSetLimit: New rate limit is %#RX64 bytes per second, adjusted bucket size to %#x bytes\n",
+ cbPerSecMax, cbRet));
+ return cbRet;
+}
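+
+/*
+ * Worked example (assuming PDM_NETSHAPER_MAX_LATENCY is 100 ms): a limit of
+ * cbPerSecMax = 10 MiB/s gives a bucket of 10 MiB * 100 / 1000 = 1 MiB, so
+ * the group may burst at most 1 MiB before the token-bucket arithmetic on
+ * the transmit path starts choking its filters.
+ */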
+
+
+/**
+ * Adjusts the maximum rate for the bandwidth group.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszName Name of the bandwidth group to modify.
+ * @param cbPerSecMax Maximum number of bytes per second to be transmitted.
+ */
+VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PUVM pUVM, const char *pszName, uint64_t cbPerSecMax)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM const pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ int rc;
+ PPDMNSBWGROUP pGroup = pdmNsBwGroupFindByName(pVM, pszName);
+ if (pGroup)
+ {
+ /*
+ * Lock the group while we effect the changes.
+ */
+ rc = PDMCritSectEnter(pVM, &pGroup->Lock, VERR_IGNORED);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t const cbBucket = pdmNsBwGroupSetLimit(pGroup, cbPerSecMax);
+
+ /* Drop extra tokens */
+ if (pGroup->cbTokensLast > cbBucket)
+ pGroup->cbTokensLast = cbBucket;
+ Log(("PDMR3NsBwGroupSetLimit/%s: cbBucket=%#x cbPerSecMax=%#RX64\n", pGroup->szName, cbBucket, cbPerSecMax));
+
+ int rc2 = PDMCritSectLeave(pVM, &pGroup->Lock);
+ AssertRC(rc2);
+
+ /*
+ * If we disabled the group, we must make sure to unchoke all its
+ * filters, as the unchoke thread will ignore the group from now on.
+ *
+ * We do this after leaving the group lock to keep the locking simple.
+ * Extra pfnXmitPending calls should be harmless, ASSUMING, of course,
+ * that nobody takes offence at being called on this thread.
+ */
+ if (cbPerSecMax == 0)
+ {
+ Log(("PDMR3NsBwGroupSetLimit: cbPerSecMax was set to zero, so unchoking filters...\n"));
+ rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+ AssertRC(rc);
+
+ pdmR3NsUnchokeGroupFilters(pGroup);
+
+ rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+ AssertRC(rc2);
+ }
+ }
+ else
+ AssertRC(rc);
+ }
+ else
+ rc = VERR_NOT_FOUND;
+ return rc;
+}
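+
+/*
+ * Usage sketch (illustrative; "Default" is an assumed group name):
+ *
+ *     rc = PDMR3NsBwGroupSetLimit(pUVM, "Default", _1M); // cap at 1 MiB/s
+ *     rc = PDMR3NsBwGroupSetLimit(pUVM, "Default", 0);   // lift the cap again
+ */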
+
+
+/**
+ * I/O thread that unchokes filters and kicks off the associated pending transmits.
+ *
+ * @returns VINF_SUCCESS (ignored).
+ * @param pVM The cross context VM structure.
+ * @param pThread The PDM thread data.
+ */
+static DECLCALLBACK(int) pdmR3NsUnchokeThread(PVM pVM, PPDMTHREAD pThread)
+{
+ LogFlow(("pdmR3NsUnchokeThread: pVM=%p\n", pVM));
+ while (pThread->enmState == PDMTHREADSTATE_RUNNING)
+ {
+ int rc = RTSemEventWait(pVM->pdm.s.hNsUnchokeEvt, RT_INDEFINITE_WAIT);
+ if (pThread->enmState != PDMTHREADSTATE_RUNNING)
+ break;
+ AssertMsgStmt(RT_SUCCESS(rc) || rc == VERR_TIMEOUT /* paranoia */, ("%Rrc\n", rc),
+ RTThreadSleep(PDM_NETSHAPER_MAX_LATENCY));
+
+ /*
+ * Go over all bandwidth groups/filters and unchoke their filters.
+ *
+ * We take the main lock here to prevent any detaching or attaching
+ * from taking place while we're traversing the filter lists.
+ */
+ rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+ AssertRC(rc);
+
+ size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+ for (size_t i = 0; i < cGroups; i++)
+ {
+ PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[i];
+ if ( pGroup->cRefs > 0
+ && pGroup->cbPerSecMax > 0)
+ pdmR3NsUnchokeGroupFilters(pGroup);
+ }
+
+ rc = RTCritSectLeave(&pVM->pdm.s.NsLock);
+ AssertRC(rc);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @copydoc FNPDMTHREADWAKEUPINT
+ */
+static DECLCALLBACK(int) pdmR3NsUnchokeWakeUp(PVM pVM, PPDMTHREAD pThread)
+{
+ LogFlow(("pdmR3NsUnchokeWakeUp:\n"));
+
+ /* Wake up the thread. */
+ int rc = RTSemEventSignal(pVM->pdm.s.hNsUnchokeEvt);
+ AssertRC(rc);
+
+ RT_NOREF(pThread);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNTMTIMERINT, Wakes up pdmR3NsUnchokeThread.}
+ */
+static DECLCALLBACK(void) pdmR3NsUnchokeTimer(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ ASMAtomicWriteBool(&pVM->pdm.s.fNsUnchokeTimerArmed, false);
+
+ /* Wake up the thread. */
+ int rc = RTSemEventSignal(pVM->pdm.s.hNsUnchokeEvt);
+ AssertRC(rc);
+
+ RT_NOREF(hTimer, pvUser);
+}
+
+
+/**
+ * Terminate the network shaper, groups, lock and everything.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pdmR3NetShaperTerm(PVM pVM)
+{
+ size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+ for (size_t i = 0; i < cGroups; i++)
+ {
+ PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[i];
+ AssertMsg(pGroup->cRefs == 0, ("cRefs=%u '%s'\n", pGroup->cRefs, pGroup->szName));
+ AssertContinue(PDMCritSectIsInitialized(&pGroup->Lock));
+ PDMR3CritSectDelete(pVM, &pGroup->Lock);
+ }
+
+ RTCritSectDelete(&pVM->pdm.s.NsLock);
+}
+
+
+/**
+ * Initialize the network shaper.
+ *
+ * @returns VBox status code
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3NetShaperInit(PVM pVM)
+{
+ LogFlow(("pdmR3NetShaperInit: pVM=%p\n", pVM));
+ VM_ASSERT_EMT(pVM);
+
+ Assert(pVM->pdm.s.cNsGroups == 0);
+ pVM->pdm.s.hNsUnchokeEvt = NIL_RTSEMEVENT;
+ pVM->pdm.s.hNsUnchokeTimer = NIL_TMTIMERHANDLE;
+
+ /*
+ * Initialize the critical section protecting attaching, detaching and unchoking.
+ *
+ * This is a non-recursive lock to make sure nobody tries to mess with the groups
+ * from the pfnXmitPending callback.
+ */
+ int rc = RTCritSectInitEx(&pVM->pdm.s.NsLock, RTCRITSECT_FLAGS_NO_NESTING,
+ NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "PDMNetShaper");
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Initialize all bandwidth groups.
+ */
+ PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "NetworkShaper");
+ PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNetShaper, "BwGroups");
+ if (pCfgBwGrp)
+ {
+ uint32_t iGroup = 0;
+ for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /*
+ * Get the config data.
+ */
+ size_t cchName = CFGMR3GetNameLen(pCur);
+ AssertBreakStmt(cchName <= PDM_NET_SHAPER_MAX_NAME_LEN,
+ rc = VMR3SetError(pVM->pUVM, VERR_INVALID_NAME, RT_SRC_POS,
+ N_("Network shaper group name #%u is too long: %zu, max %u"),
+ iGroup, cchName, PDM_NET_SHAPER_MAX_NAME_LEN));
+ char szName[PDM_NET_SHAPER_MAX_NAME_LEN + 1];
+ rc = CFGMR3GetName(pCur, szName, sizeof(szName));
+ AssertRCBreak(rc);
+ AssertBreakStmt(szName[0] != '\0',
+ rc = VMR3SetError(pVM->pUVM, VERR_INVALID_NAME, RT_SRC_POS,
+ N_("Empty network shaper group name #%u"), iGroup));
+
+ uint64_t cbMax;
+ rc = CFGMR3QueryU64(pCur, "Max", &cbMax);
+ AssertRCBreakStmt(rc, rc = VMR3SetError(pVM->pUVM, rc, RT_SRC_POS,
+ N_("Failed to read 'Max' value for network shaper group '%s': %Rrc"),
+ szName, rc));
+
+ /*
+ * Initialize the group table entry.
+ */
+ AssertBreakStmt(iGroup < RT_ELEMENTS(pVM->pdm.s.aNsGroups),
+ rc = VMR3SetError(pVM->pUVM, VERR_TOO_MUCH_DATA, RT_SRC_POS, N_("Too many bandwidth groups (max %zu)"),
+ RT_ELEMENTS(pVM->pdm.s.aNsGroups)));
+
+ rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.aNsGroups[iGroup].Lock, RT_SRC_POS, "BWGRP%02u-%s", iGroup, szName);
+ AssertRCBreak(rc);
+
+ RTListInit(&pVM->pdm.s.aNsGroups[iGroup].FilterList);
+ pVM->pdm.s.aNsGroups[iGroup].cRefs = 0;
+ RTStrCopy(pVM->pdm.s.aNsGroups[iGroup].szName, sizeof(pVM->pdm.s.aNsGroups[iGroup].szName), szName);
+ pVM->pdm.s.aNsGroups[iGroup].cbTokensLast = pdmNsBwGroupSetLimit(&pVM->pdm.s.aNsGroups[iGroup], cbMax);
+ pVM->pdm.s.aNsGroups[iGroup].tsUpdatedLast = RTTimeSystemNanoTS();
+ LogFlowFunc(("PDM NetShaper Group #%u: %s - cbPerSecMax=%#RU64 cbBucket=%#x\n",
+ iGroup, pVM->pdm.s.aNsGroups[iGroup].szName, pVM->pdm.s.aNsGroups[iGroup].cbPerSecMax,
+ pVM->pdm.s.aNsGroups[iGroup].cbBucket));
+
+ /*
+ * Register statistics.
+ */
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].cbPerSecMax, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "", "/PDM/NetShaper/%u-%s/cbPerSecMax", iGroup, szName);
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].cRefs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "", "/PDM/NetShaper/%u-%s/cRefs", iGroup, szName);
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].cbBucket, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "", "/PDM/NetShaper/%u-%s/cbBucket", iGroup, szName);
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].cbTokensLast, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "", "/PDM/NetShaper/%u-%s/cbTokensLast", iGroup, szName);
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].tsUpdatedLast, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_NS, "", "/PDM/NetShaper/%u-%s/tsUpdatedLast", iGroup, szName);
+ STAMR3RegisterF(pVM, (void *)&pVM->pdm.s.aNsGroups[iGroup].cTotalChokings, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_OCCURENCES, "", "/PDM/NetShaper/%u-%s/TotalChokings", iGroup, szName);
+
+ pVM->pdm.s.cNsGroups = ++iGroup;
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * If there are any groups configured, create an unchoke thread and an
+ * associated timer for waking it up when needed. The timer runs on
+ * the real time clock.
+ */
+ if (pVM->pdm.s.cNsGroups == 0)
+ {
+ LogFlowFunc(("returns VINF_SUCCESS - no groups\n"));
+ return VINF_SUCCESS;
+ }
+
+ rc = RTSemEventCreate(&pVM->pdm.s.hNsUnchokeEvt);
+ if (RT_SUCCESS(rc))
+ {
+ rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, pdmR3NsUnchokeTimer, NULL, TMTIMER_FLAGS_NO_RING0,
+ "PDMNetShaperUnchoke", &pVM->pdm.s.hNsUnchokeTimer);
+ if (RT_SUCCESS(rc))
+ {
+ rc = PDMR3ThreadCreate(pVM, &pVM->pdm.s.pNsUnchokeThread, NULL, pdmR3NsUnchokeThread, pdmR3NsUnchokeWakeUp,
+ 0 /*cbStack*/, RTTHREADTYPE_IO, "PDMNsUnchoke");
+ if (RT_SUCCESS(rc))
+ {
+ LogFlowFunc(("returns VINF_SUCCESS (%u groups)\n", pVM->pdm.s.cNsGroups));
+ return VINF_SUCCESS;
+ }
+ }
+ }
+ }
+
+ RTCritSectDelete(&pVM->pdm.s.NsLock);
+ LogRel(("pdmR3NetShaperInit: failed rc=%Rrc\n", rc));
+ return rc;
+}
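+
+/*
+ * Example of the CFGM tree consumed by pdmR3NetShaperInit above
+ * (illustrative names and values; a 'Max' of zero disables shaping
+ * for that group):
+ *
+ *     PDM/NetworkShaper/BwGroups/Default/Max = 1048576
+ *     PDM/NetworkShaper/BwGroups/ISCSI/Max   = 0
+ */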
+
diff --git a/src/VBox/VMM/VMMR3/PDMQueue.cpp b/src/VBox/VMM/VMMR3/PDMQueue.cpp
new file mode 100644
index 00000000..18253348
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMQueue.cpp
@@ -0,0 +1,892 @@
+/* $Id: PDMQueue.cpp $ */
+/** @file
+ * PDM Queue - Transport data and tasks to EMT and R3.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_QUEUE
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <iprt/errcore.h>
+
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int pdmR3QueueDestroyLocked(PVM pVM, PDMQUEUEHANDLE hQueue, void *pvOwner);
+static DECLCALLBACK(void) pdmR3QueueTimer(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser);
+
+
+
+/**
+ * Internal worker for the queue creation APIs.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cbItem Item size.
+ * @param cItems Number of items.
+ * @param cMilliesInterval Number of milliseconds between polling the queue.
+ * If 0 then the emulation thread will be notified whenever an item arrives.
+ * @param fRZEnabled Set if the queue will be used from RC/R0;
+ * such queues can only be created by EMT0.
+ * @param pszName The queue name. Unique. Copied into the queue structure.
+ * @param enmType Owner type.
+ * @param pvOwner The queue owner pointer.
+ * @param uCallback Callback function.
+ * @param phQueue Where to store the queue handle.
+ *
+ * @thread Emulation thread only. When @a fRZEnabled is true, only EMT0.
+ * @note Caller owns ListCritSect.
+ */
+static int pdmR3QueueCreateLocked(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval, bool fRZEnabled,
+ const char *pszName, PDMQUEUETYPE enmType, void *pvOwner, uintptr_t uCallback,
+ PDMQUEUEHANDLE *phQueue)
+{
+ /*
+ * Validate and adjust the input.
+ */
+ if (fRZEnabled)
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ else
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ cbItem = RT_ALIGN(cbItem, sizeof(uint64_t));
+ AssertMsgReturn(cbItem >= sizeof(PDMQUEUEITEMCORE) && cbItem < PDMQUEUE_MAX_ITEM_SIZE, ("cbItem=%zu\n", cbItem),
+ VERR_OUT_OF_RANGE);
+ AssertMsgReturn(cItems >= 1 && cItems <= PDMQUEUE_MAX_ITEMS, ("cItems=%u\n", cItems), VERR_OUT_OF_RANGE);
+ AssertMsgReturn((uint64_t)cbItem * cItems <= (fRZEnabled ? PDMQUEUE_MAX_TOTAL_SIZE_R0 : PDMQUEUE_MAX_TOTAL_SIZE_R3),
+ ("cItems=%u cbItem=%#x -> %#RX64, max %'u\n", cItems, cbItem, (uint64_t)cbItem * cItems,
+ fRZEnabled ? PDMQUEUE_MAX_TOTAL_SIZE_R0 : PDMQUEUE_MAX_TOTAL_SIZE_R3),
+ VERR_OUT_OF_RANGE);
+ AssertReturn(!fRZEnabled || enmType == PDMQUEUETYPE_INTERNAL || enmType == PDMQUEUETYPE_DEV, VERR_INVALID_PARAMETER);
+ if (SUPR3IsDriverless())
+ fRZEnabled = false;
+
+ /* Unique name that fits within the szName field: */
+ size_t cchName = strlen(pszName);
+ AssertReturn(cchName > 0, VERR_INVALID_NAME);
+ AssertMsgReturn(cchName < RT_SIZEOFMEMB(PDMQUEUE, szName), ("'%s' is too long\n", pszName), VERR_INVALID_NAME);
+ size_t i = pVM->pdm.s.cRing3Queues;
+ while (i-- > 0 )
+ AssertMsgReturn(strcmp(pVM->pdm.s.papRing3Queues[i]->szName, pszName) != 0, ("%s\n", pszName), VERR_DUPLICATE);
+ i = pVM->pdm.s.cRing0Queues;
+ while (i-- > 0 )
+ AssertMsgReturn(strcmp(pVM->pdm.s.apRing0Queues[i]->szName, pszName) != 0, ("%s\n", pszName), VERR_DUPLICATE);
+
+ /*
+ * Allocate and initialize the queue, in ring-0 or on the ring-3 page heap.
+ */
+ PPDMQUEUE pQueue;
+ PDMQUEUEHANDLE hQueue;
+ if (fRZEnabled)
+ {
+ /* Call ring-0 to allocate and create the queue: */
+ PDMQUEUECREATEREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.cItems = cItems;
+ Req.cbItem = (uint32_t)cbItem;
+ Req.enmType = enmType;
+ Req.pvOwner = pvOwner;
+ Req.pfnCallback = (RTR3PTR)uCallback;
+ RTStrCopy(Req.szName, sizeof(Req.szName), pszName);
+ AssertCompileMembersSameSize(PDMQUEUECREATEREQ, szName, PDMQUEUE, szName);
+ Req.hQueue = NIL_PDMQUEUEHANDLE;
+
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_PDM_QUEUE_CREATE, 0, &Req.Hdr);
+ if (RT_FAILURE(rc))
+ return rc;
+ hQueue = Req.hQueue;
+ AssertReturn(hQueue < RT_ELEMENTS(pVM->pdm.s.apRing0Queues), VERR_INTERNAL_ERROR_2);
+ pQueue = pVM->pdm.s.apRing0Queues[hQueue];
+ AssertPtrReturn(pQueue, VERR_INTERNAL_ERROR_3);
+ AssertReturn(pQueue->u32Magic == PDMQUEUE_MAGIC, VERR_INTERNAL_ERROR_4);
+ AssertReturn(pQueue->cbItem == cbItem, VERR_INTERNAL_ERROR_4);
+ AssertReturn(pQueue->cItems == cItems, VERR_INTERNAL_ERROR_4);
+ AssertReturn(pQueue->enmType == enmType, VERR_INTERNAL_ERROR_4);
+ AssertReturn(pQueue->u.Gen.pvOwner == pvOwner, VERR_INTERNAL_ERROR_4);
+ AssertReturn(pQueue->u.Gen.pfnCallback == (RTR3PTR)uCallback, VERR_INTERNAL_ERROR_4);
+ }
+ else
+ {
+ /* Do it here using the paged heap: */
+ uint32_t const cbBitmap = RT_ALIGN_32(RT_ALIGN_32(cItems, 64) / 8, 64); /* keep the bitmap in its own cache line */
+ uint32_t const cbQueue = RT_OFFSETOF(PDMQUEUE, bmAlloc)
+ + cbBitmap
+ + (uint32_t)cbItem * cItems;
+ pQueue = (PPDMQUEUE)RTMemPageAllocZ(cbQueue);
+ if (!pQueue)
+ return VERR_NO_PAGE_MEMORY;
+ pdmQueueInit(pQueue, cbBitmap, (uint32_t)cbItem, cItems, pszName, enmType, (RTR3PTR)uCallback, pvOwner);
+
+ uint32_t iQueue = pVM->pdm.s.cRing3Queues;
+ if (iQueue >= pVM->pdm.s.cRing3QueuesAlloc)
+ {
+ AssertLogRelMsgReturnStmt(iQueue < _16K, ("%#x\n", iQueue), RTMemPageFree(pQueue, cbQueue), VERR_TOO_MANY_OPENS);
+
+ uint32_t const cNewAlloc = RT_ALIGN_32(iQueue, 64) + 64;
+ PPDMQUEUE *papQueuesNew = (PPDMQUEUE *)RTMemAllocZ(cNewAlloc * sizeof(papQueuesNew[0]));
+ AssertLogRelMsgReturnStmt(papQueuesNew, ("cNewAlloc=%u\n", cNewAlloc), RTMemPageFree(pQueue, cbQueue), VERR_NO_MEMORY);
+
+ if (iQueue)
+ memcpy(papQueuesNew, pVM->pdm.s.papRing3Queues, iQueue * sizeof(papQueuesNew[0]));
+ PPDMQUEUE *papQueuesOld = ASMAtomicXchgPtrT(&pVM->pdm.s.papRing3Queues, papQueuesNew, PPDMQUEUE *);
+ pVM->pdm.s.cRing3QueuesAlloc = cNewAlloc;
+ RTMemFree(papQueuesOld);
+ }
+
+ pVM->pdm.s.papRing3Queues[iQueue] = pQueue;
+ pVM->pdm.s.cRing3Queues = iQueue + 1;
+ hQueue = iQueue + RT_ELEMENTS(pVM->pdm.s.apRing0Queues);
+ }
+
+ /*
+ * Create timer?
+ */
+ if (cMilliesInterval)
+ {
+ char szName[48+6];
+ RTStrPrintf(szName, sizeof(szName), "Que/%s", pQueue->szName);
+ int rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, pdmR3QueueTimer, pQueue, TMTIMER_FLAGS_NO_RING0, szName, &pQueue->hTimer);
+ if (RT_SUCCESS(rc))
+ {
+ rc = TMTimerSetMillies(pVM, pQueue->hTimer, cMilliesInterval);
+ if (RT_SUCCESS(rc))
+ pQueue->cMilliesInterval = cMilliesInterval;
+ else
+ {
+ AssertMsgFailed(("TMTimerSetMillies failed rc=%Rrc\n", rc));
+ int rc2 = TMR3TimerDestroy(pVM, pQueue->hTimer); AssertRC(rc2);
+ pQueue->hTimer = NIL_TMTIMERHANDLE;
+ }
+ }
+ else
+ AssertMsgFailed(("TMR3TimerCreateInternal failed rc=%Rrc\n", rc));
+ if (RT_FAILURE(rc))
+ {
+ if (!fRZEnabled)
+ pdmR3QueueDestroyLocked(pVM, hQueue, pvOwner);
+ /* else: will clean up queue when VM is destroyed */
+ return rc;
+ }
+ }
+
+ /*
+ * Register the statistics.
+ */
+ STAMR3RegisterF(pVM, &pQueue->cbItem, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
+ "Item size.", "/PDM/Queue/%s/cbItem", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->cItems, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Queue size.", "/PDM/Queue/%s/cItems", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->rcOkay, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ "Non-zero means queue is busted.", "/PDM/Queue/%s/rcOkay", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->StatAllocFailures, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "PDMQueueAlloc failures.", "/PDM/Queue/%s/AllocFailures", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->StatInsert, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "Calls to PDMQueueInsert.", "/PDM/Queue/%s/Insert", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->StatFlush, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "Calls to pdmR3QueueFlush.", "/PDM/Queue/%s/Flush", pQueue->szName);
+ STAMR3RegisterF(pVM, &pQueue->StatFlushLeftovers, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Left over items after flush.", "/PDM/Queue/%s/FlushLeftovers", pQueue->szName);
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pVM, &pQueue->StatFlushPrf, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling pdmR3QueueFlush.", "/PDM/Queue/%s/FlushPrf", pQueue->szName);
+ STAMR3RegisterF(pVM, (void *)&pQueue->cStatPending, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Pending items.", "/PDM/Queue/%s/Pending", pQueue->szName);
+#endif
+
+ *phQueue = hQueue;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Create a queue with a device owner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param cbItem Size of a queue item.
+ * @param cItems Number of items in the queue.
+ * @param cMilliesInterval Number of milliseconds between polling the queue.
+ * If 0 then the emulation thread will be notified whenever an item arrives.
+ * @param pfnCallback The consumer function.
+ * @param fRZEnabled Set if the queue must be usable from RC/R0.
+ * @param pszName The queue name. Unique. Copied.
+ * @param phQueue Where to store the queue handle on success.
+ * @thread Emulation thread only. Only EMT0 when @a fRZEnabled is true.
+ */
+VMMR3_INT_DECL(int) PDMR3QueueCreateDevice(PVM pVM, PPDMDEVINS pDevIns, size_t cbItem, uint32_t cItems,
+ uint32_t cMilliesInterval, PFNPDMQUEUEDEV pfnCallback,
+ bool fRZEnabled, const char *pszName, PDMQUEUEHANDLE *phQueue)
+{
+ LogFlow(("PDMR3QueueCreateDevice: pDevIns=%p cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p fRZEnabled=%RTbool pszName=%s\n",
+ pDevIns, cbItem, cItems, cMilliesInterval, pfnCallback, fRZEnabled, pszName));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0(pVM);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ if (!(pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_R0_ENABLED))
+ fRZEnabled = false;
+
+ /*
+ * Create the queue.
+ */
+ int rc = RTCritSectEnter(&pVM->pUVM->pdm.s.ListCritSect);
+ AssertRCReturn(rc, rc);
+
+ rc = pdmR3QueueCreateLocked(pVM, cbItem, cItems, cMilliesInterval, fRZEnabled, pszName,
+ PDMQUEUETYPE_DEV, pDevIns, (uintptr_t)pfnCallback, phQueue);
+
+ RTCritSectLeave(&pVM->pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Created device queue %#RX64; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pDevIns=%p\n",
+ *phQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pDevIns));
+ return rc;
+}
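+
+/*
+ * Usage sketch (illustrative; MYDEVITEM is an invented item type that must
+ * begin with a PDMQUEUEITEMCORE, and device code would normally go through
+ * the device helper table rather than calling this API directly):
+ *
+ *     static DECLCALLBACK(bool) myDevQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
+ *     {
+ *         MYDEVITEM *pMyItem = (MYDEVITEM *)pItem;
+ *         // ... process pMyItem; return false to have it requeued ...
+ *         return true;
+ *     }
+ *
+ *     rc = PDMR3QueueCreateDevice(pVM, pDevIns, sizeof(MYDEVITEM), 32, 0,
+ *                                 myDevQueueConsumer, false, "MyDevQueue",
+ *                                 &pThis->hQueue); // 32 items, no timer, no RZ
+ */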
+
+
+/**
+ * Create a queue with a driver owner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param cbItem Size of a queue item.
+ * @param cItems Number of items in the queue.
+ * @param cMilliesInterval Number of milliseconds between polling the queue.
+ * If 0 then the emulation thread will be notified whenever an item arrives.
+ * @param pfnCallback The consumer function.
+ * @param pszName The queue name. Unique. Copied.
+ * @param phQueue Where to store the queue handle on success.
+ * @thread Emulation thread only.
+ */
+VMMR3_INT_DECL(int) PDMR3QueueCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEDRV pfnCallback, const char *pszName, PDMQUEUEHANDLE *phQueue)
+{
+ LogFlow(("PDMR3QueueCreateDriver: pDrvIns=%p cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p pszName=%s\n",
+ pDrvIns, cbItem, cItems, cMilliesInterval, pfnCallback, pszName));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0(pVM);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
+
+ /*
+ * Create the queue.
+ */
+ int rc = RTCritSectEnter(&pVM->pUVM->pdm.s.ListCritSect);
+ AssertRCReturn(rc, rc);
+
+ rc = pdmR3QueueCreateLocked(pVM, cbItem, cItems, cMilliesInterval, false /*fRZEnabled*/, pszName,
+ PDMQUEUETYPE_DRV, pDrvIns, (uintptr_t)pfnCallback, phQueue);
+
+ RTCritSectLeave(&pVM->pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Created driver queue %#RX64; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pDrvIns=%p\n",
+ *phQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pDrvIns));
+ return rc;
+}
+
+
+/**
+ * Create a queue with an internal owner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cbItem Size of a queue item.
+ * @param cItems Number of items in the queue.
+ * @param cMilliesInterval Number of milliseconds between polling the queue.
+ * If 0 then the emulation thread will be notified whenever an item arrives.
+ * @param pfnCallback The consumer function.
+ * @param fRZEnabled Set if the queue must be usable from RC/R0.
+ * @param pszName The queue name. Unique. Copied.
+ * @param phQueue Where to store the queue handle on success.
+ * @thread Emulation thread only. When @a fRZEnabled is true, only EMT0.
+ */
+VMMR3_INT_DECL(int) PDMR3QueueCreateInternal(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEINT pfnCallback, bool fRZEnabled,
+ const char *pszName, PDMQUEUEHANDLE *phQueue)
+{
+ LogFlow(("PDMR3QueueCreateInternal: cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p fRZEnabled=%RTbool pszName=%s\n",
+ cbItem, cItems, cMilliesInterval, pfnCallback, fRZEnabled, pszName));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0(pVM);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+
+ /*
+ * Create the queue.
+ */
+ int rc = RTCritSectEnter(&pVM->pUVM->pdm.s.ListCritSect);
+ AssertRCReturn(rc, rc);
+
+ rc = pdmR3QueueCreateLocked(pVM, cbItem, cItems, cMilliesInterval, fRZEnabled, pszName,
+ PDMQUEUETYPE_INTERNAL, pVM, (uintptr_t)pfnCallback, phQueue);
+
+ RTCritSectLeave(&pVM->pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Created internal queue %p; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p\n",
+ *phQueue, cbItem, cItems, cMilliesInterval, pfnCallback));
+ return rc;
+}
+
+
+/**
+ * Create a queue with an external owner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cbItem Size of a queue item.
+ * @param cItems Number of items in the queue.
+ * @param cMilliesInterval Number of milliseconds between polling the queue.
+ * If 0 then the emulation thread will be notified whenever an item arrives.
+ * @param pfnCallback The consumer function.
+ * @param pvUser The user argument to the consumer function.
+ * @param pszName The queue name. Unique. Copied into the queue structure.
+ * @param phQueue Where to store the queue handle on success.
+ * @thread Emulation thread only.
+ */
+VMMR3DECL(int) PDMR3QueueCreateExternal(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEEXT pfnCallback, void *pvUser,
+ const char *pszName, PDMQUEUEHANDLE *phQueue)
+{
+ LogFlow(("PDMR3QueueCreateExternal: cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p pszName=%s\n",
+ cbItem, cItems, cMilliesInterval, pfnCallback, pszName));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0(pVM);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+
+ /*
+ * Create the queue.
+ */
+ int rc = RTCritSectEnter(&pVM->pUVM->pdm.s.ListCritSect);
+ AssertRCReturn(rc, rc);
+
+ rc = pdmR3QueueCreateLocked(pVM, cbItem, cItems, cMilliesInterval, false /*fRZEnabled*/, pszName,
+ PDMQUEUETYPE_EXTERNAL, pvUser, (uintptr_t)pfnCallback, phQueue);
+
+ RTCritSectLeave(&pVM->pUVM->pdm.s.ListCritSect);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Created external queue %p; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pvUser=%p\n",
+ *phQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pvUser));
+ return rc;
+}
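+
+/*
+ * Producer-side sketch (illustrative; PDMQueueAlloc and PDMQueueInsert are
+ * the all-context queue APIs and are not defined in this file):
+ *
+ *     PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pVM, hQueue, pvOwner);
+ *     if (pItem)
+ *     {
+ *         // ... fill in the payload following the core ...
+ *         PDMQueueInsert(pVM, hQueue, pvOwner, pItem);
+ *     }
+ */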
+
+
+/**
+ * Destroy a queue.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param hQueue Handle to the queue that should be destroyed.
+ * @param pvOwner The owner address.
+ * @thread EMT
+ */
+static int pdmR3QueueDestroyLocked(PVM pVM, PDMQUEUEHANDLE hQueue, void *pvOwner)
+{
+ LogFlow(("pdmR3QueueDestroyLocked: hQueue=%p pvOwner=%p\n", hQueue, pvOwner));
+ Assert(RTCritSectIsOwner(&pVM->pUVM->pdm.s.ListCritSect));
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ if (hQueue == NIL_PDMQUEUEHANDLE)
+ return VINF_SUCCESS;
+
+ PPDMQUEUE pQueue;
+ bool fRZEnabled = false;
+ if (hQueue < RT_ELEMENTS(pVM->pdm.s.apRing0Queues))
+ {
+ AssertReturn(hQueue < pVM->pdm.s.cRing0Queues, VERR_INVALID_HANDLE);
+ pQueue = pVM->pdm.s.apRing0Queues[hQueue];
+ AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
+ AssertReturn(pQueue->u32Magic == PDMQUEUE_MAGIC, VERR_INVALID_HANDLE);
+ AssertReturn(pQueue->u.Gen.pvOwner == pvOwner, VERR_INVALID_HANDLE);
+
+ /* Lazy bird: Cannot dynamically delete ring-0 capable queues. */
+ AssertFailedReturn(VERR_NOT_SUPPORTED);
+ }
+ else
+ {
+ hQueue -= RT_ELEMENTS(pVM->pdm.s.apRing0Queues);
+ AssertReturn(hQueue < pVM->pdm.s.cRing3Queues, VERR_INVALID_HANDLE);
+ pQueue = pVM->pdm.s.papRing3Queues[hQueue];
+ AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
+ AssertReturn(pQueue->u32Magic == PDMQUEUE_MAGIC, VERR_INVALID_HANDLE);
+ AssertReturn(pQueue->u.Gen.pvOwner == pvOwner, VERR_INVALID_HANDLE);
+
+ /* Enter the lock here to serialize with other EMTs traversing the handles. */
+ pdmLock(pVM);
+ pVM->pdm.s.papRing3Queues[hQueue] = NULL;
+ if (hQueue + 1 == pVM->pdm.s.cRing3Queues)
+ {
+ while (hQueue > 0 && pVM->pdm.s.papRing3Queues[hQueue - 1] == NULL)
+ hQueue--;
+ pVM->pdm.s.cRing3Queues = hQueue;
+ }
+ pQueue->u32Magic = PDMQUEUE_MAGIC_DEAD;
+ pdmUnlock(pVM);
+ }
+
+ /*
+ * Deregister statistics.
+ */
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/Queue/%s/*", pQueue->szName);
+
+ /*
+ * Destroy the timer and free it.
+ */
+ if (pQueue->hTimer != NIL_TMTIMERHANDLE)
+ {
+ TMR3TimerDestroy(pVM, pQueue->hTimer);
+ pQueue->hTimer = NIL_TMTIMERHANDLE;
+ }
+ if (!fRZEnabled)
+ RTMemPageFree(pQueue, pQueue->offItems + pQueue->cbItem * pQueue->cItems);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroy a queue.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param hQueue Handle to the queue that should be destroyed.
+ * @param pvOwner The owner address.
+ * @thread EMT
+ * @note Externally visible mainly for testing purposes.
+ */
+VMMR3DECL(int) PDMR3QueueDestroy(PVM pVM, PDMQUEUEHANDLE hQueue, void *pvOwner)
+{
+ PUVM const pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ int rc = pdmR3QueueDestroyLocked(pVM, hQueue, pvOwner);
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Destroy all queues with a given owner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvOwner The owner pointer.
+ * @param enmType Owner type.
+ * @thread EMT
+ */
+static int pdmR3QueueDestroyByOwner(PVM pVM, void *pvOwner, PDMQUEUETYPE enmType)
+{
+ LogFlow(("pdmR3QueueDestroyByOwner: pvOwner=%p enmType=%d\n", pvOwner, enmType));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pvOwner, VERR_INVALID_PARAMETER);
+ AssertReturn(pvOwner != pVM, VERR_INVALID_PARAMETER);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); /* Not requiring EMT0 here as we cannot destroy RZ capable ones here. */
+
+ /*
+ * Scan and destroy.
+ */
+ PUVM const pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ uint32_t i = pVM->pdm.s.cRing0Queues;
+ while (i-- > 0)
+ {
+ PPDMQUEUE pQueue = pVM->pdm.s.apRing0Queues[i];
+ if ( pQueue
+ && pQueue->u.Gen.pvOwner == pvOwner
+ && pQueue->enmType == enmType)
+ {
+ /* Not supported at runtime. */
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_DESTROYING, VERR_WRONG_ORDER);
+ }
+ }
+
+ i = pVM->pdm.s.cRing3Queues;
+ while (i-- > 0)
+ {
+ PPDMQUEUE pQueue = pVM->pdm.s.papRing3Queues[i];
+ if ( pQueue
+ && pQueue->u.Gen.pvOwner == pvOwner
+ && pQueue->enmType == enmType)
+ pdmR3QueueDestroyLocked(pVM, i + RT_ELEMENTS(pVM->pdm.s.apRing0Queues), pvOwner);
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroy all queues owned by the specified device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) PDMR3QueueDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ LogFlow(("PDMR3QueueDestroyDevice: pDevIns=%p\n", pDevIns));
+ return pdmR3QueueDestroyByOwner(pVM, pDevIns, PDMQUEUETYPE_DEV);
+}
+
+
+/**
+ * Destroy all queues owned by the specified driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) PDMR3QueueDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("PDMR3QueueDestroyDriver: pDrvIns=%p\n", pDrvIns));
+ return pdmR3QueueDestroyByOwner(pVM, pDrvIns, PDMQUEUETYPE_DRV);
+}
+
+
+/**
+ * Free an item.
+ *
+ * @param pQueue The queue.
+ * @param pbItems Where the items area starts.
+ * @param cbItem Item size.
+ * @param pItem The item to free.
+ */
+DECLINLINE(void) pdmR3QueueFreeItem(PPDMQUEUE pQueue, uint8_t *pbItems, uint32_t cbItem, PPDMQUEUEITEMCORE pItem)
+{
+ pItem->u64View = UINT64_C(0xfeedfeedfeedfeed);
+
+ uintptr_t const offItem = (uintptr_t)pItem - (uintptr_t)pbItems;
+ uintptr_t const iItem = offItem / cbItem;
+ Assert(!(offItem % cbItem));
+ Assert(iItem < pQueue->cItems);
+ AssertReturnVoidStmt(ASMAtomicBitTestAndSet(pQueue->bmAlloc, iItem) == false, pQueue->rcOkay = VERR_INTERNAL_ERROR_4);
+ STAM_STATS({ ASMAtomicDecU32(&pQueue->cStatPending); });
+}
+
+
+
+/**
+ * Process pending items in one queue.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue needing flushing.
+ */
+static int pdmR3QueueFlush(PVM pVM, PPDMQUEUE pQueue)
+{
+ STAM_PROFILE_START(&pQueue->StatFlushPrf,p);
+
+ uint32_t const cbItem = pQueue->cbItem;
+ uint32_t const cItems = pQueue->cItems;
+ uint8_t * const pbItems = (uint8_t *)pQueue + pQueue->offItems;
+
+ /*
+ * Get the pending list and reverse it into a pointer list (items are inserted in LIFO order to avoid locking).
+ */
+ uint32_t cPending = 0;
+ PPDMQUEUEITEMCORE pHead = NULL;
+ {
+ uint32_t iCur = ASMAtomicXchgU32(&pQueue->iPending, UINT32_MAX);
+ do
+ {
+ AssertMsgReturn(iCur < cItems, ("%#x vs %#x\n", iCur, cItems), pQueue->rcOkay = VERR_INTERNAL_ERROR_5);
+ AssertReturn(ASMBitTest(pQueue->bmAlloc, iCur) == false, pQueue->rcOkay = VERR_INTERNAL_ERROR_3);
+ PPDMQUEUEITEMCORE pCur = (PPDMQUEUEITEMCORE)&pbItems[iCur * cbItem];
+
+ iCur = pCur->iNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pCur->pNext = pHead;
+ pHead = pCur;
+ cPending++;
+ } while (iCur != UINT32_MAX);
+ }
+ RT_NOREF(cPending);
+
+ /*
+ * Feed the items to the consumer function.
+ */
+ Log2(("pdmR3QueueFlush: pQueue=%p enmType=%d pHead=%p cItems=%u\n", pQueue, pQueue->enmType, pHead, cPending));
+ switch (pQueue->enmType)
+ {
+ case PDMQUEUETYPE_DEV:
+ while (pHead)
+ {
+ if (!pQueue->u.Dev.pfnCallback(pQueue->u.Dev.pDevIns, pHead))
+ break;
+ PPDMQUEUEITEMCORE pFree = pHead;
+ pHead = pHead->pNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pdmR3QueueFreeItem(pQueue, pbItems, cbItem, pFree);
+ }
+ break;
+
+ case PDMQUEUETYPE_DRV:
+ while (pHead)
+ {
+ if (!pQueue->u.Drv.pfnCallback(pQueue->u.Drv.pDrvIns, pHead))
+ break;
+ PPDMQUEUEITEMCORE pFree = pHead;
+ pHead = pHead->pNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pdmR3QueueFreeItem(pQueue, pbItems, cbItem, pFree);
+ }
+ break;
+
+ case PDMQUEUETYPE_INTERNAL:
+ while (pHead)
+ {
+ if (!pQueue->u.Int.pfnCallback(pVM, pHead))
+ break;
+ PPDMQUEUEITEMCORE pFree = pHead;
+ pHead = pHead->pNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pdmR3QueueFreeItem(pQueue, pbItems, cbItem, pFree);
+ }
+ break;
+
+ case PDMQUEUETYPE_EXTERNAL:
+ while (pHead)
+ {
+ if (!pQueue->u.Ext.pfnCallback(pQueue->u.Ext.pvUser, pHead))
+ break;
+ PPDMQUEUEITEMCORE pFree = pHead;
+ pHead = pHead->pNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pdmR3QueueFreeItem(pQueue, pbItems, cbItem, pFree);
+ }
+ break;
+
+ default:
+ AssertMsgFailed(("Invalid queue type %d\n", pQueue->enmType));
+ break;
+ }
+
+ /*
+ * Success?
+ */
+ if (!pHead)
+ { /* likely */ }
+ else
+ {
+ /*
+ * Reverse the list and turn it back into an index chain.
+ */
+ uint32_t iPendingHead = UINT32_MAX;
+ do
+ {
+ PPDMQUEUEITEMCORE pInsert = pHead;
+ pHead = pHead->pNext;
+ ASMCompilerBarrier(); /* paranoia */
+ pInsert->iNext = iPendingHead;
+ iPendingHead = ((uintptr_t)pInsert - (uintptr_t)pbItems) / cbItem;
+ } while (pHead);
+
+ /*
+ * Insert the list at the tail of the pending list. If someone races
+ * us there, we have to join the new LIFO with the old.
+ */
+ for (;;)
+ {
+ if (ASMAtomicCmpXchgU32(&pQueue->iPending, iPendingHead, UINT32_MAX))
+ break;
+
+ uint32_t const iNewPending = ASMAtomicXchgU32(&pQueue->iPending, UINT32_MAX);
+ if (iNewPending != UINT32_MAX)
+ {
+ /* Find the last entry and chain iPendingHead onto it. */
+ uint32_t iCur = iNewPending;
+ for (;;)
+ {
+ AssertReturn(iCur < cItems, pQueue->rcOkay = VERR_INTERNAL_ERROR_2);
+ AssertReturn(ASMBitTest(pQueue->bmAlloc, iCur) == false, pQueue->rcOkay = VERR_INTERNAL_ERROR_3);
+ PPDMQUEUEITEMCORE pCur = (PPDMQUEUEITEMCORE)&pbItems[iCur * cbItem];
+ iCur = pCur->iNext;
+ if (iCur == UINT32_MAX)
+ {
+ pCur->iNext = iPendingHead;
+ break;
+ }
+ }
+
+ iPendingHead = iNewPending;
+ }
+ }
+
+ STAM_REL_COUNTER_INC(&pQueue->StatFlushLeftovers);
+ }
+
+ STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Flush pending queues.
+ * This is a forced action callback.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread Emulation thread only.
+ * @note Internal, but exported for use in the testcase.
+ */
+VMMR3DECL(void) PDMR3QueueFlushAll(PVM pVM)
+{
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("PDMR3QueuesFlush:\n"));
+
+ /*
+ * Only let one EMT flushing queues at any one time to preserve the order
+ * and to avoid wasting time. The FF is always cleared here, because it's
+ * only used to get someone's attention. Queue inserts occurring during the
+ * flush are caught using the pending bit.
+ *
+ * Note! We must check the force action and pending flags after clearing
+ * the active bit!
+ */
+ VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
+ while (!ASMAtomicBitTestAndSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT))
+ {
+ ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT);
+
+ /* Scan the ring-0 queues: */
+ size_t i = pVM->pdm.s.cRing0Queues;
+ while (i-- > 0)
+ {
+ PPDMQUEUE pQueue = pVM->pdm.s.apRing0Queues[i];
+ if ( pQueue
+ && pQueue->iPending != UINT32_MAX
+ && pQueue->hTimer == NIL_TMTIMERHANDLE
+ && pQueue->rcOkay == VINF_SUCCESS)
+ pdmR3QueueFlush(pVM, pQueue);
+ }
+
+ /* Scan the ring-3 queues: */
+/** @todo Deal with destroy concurrency issues. */
+ i = pVM->pdm.s.cRing3Queues;
+ while (i-- > 0)
+ {
+ PPDMQUEUE pQueue = pVM->pdm.s.papRing3Queues[i];
+ if ( pQueue
+ && pQueue->iPending != UINT32_MAX
+ && pQueue->hTimer == NIL_TMTIMERHANDLE
+ && pQueue->rcOkay == VINF_SUCCESS)
+ pdmR3QueueFlush(pVM, pQueue);
+ }
+
+ ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT);
+
+ /* We're done if there were no inserts while we were busy. */
+ if ( !ASMBitTest(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT)
+ && !VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
+ break;
+ VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
+ }
+}
+
+
+
+/**
+ * @callback_method_impl{FNTMTIMERINT, Timer handler for one PDM queue.}
+ */
+static DECLCALLBACK(void) pdmR3QueueTimer(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ PPDMQUEUE pQueue = (PPDMQUEUE)pvUser;
+ Assert(hTimer == pQueue->hTimer);
+
+ if (pQueue->iPending != UINT32_MAX)
+ pdmR3QueueFlush(pVM, pQueue);
+
+ int rc = TMTimerSetMillies(pVM, hTimer, pQueue->cMilliesInterval);
+ AssertRC(rc);
+}
+
+
+/**
+ * Terminate the queues, freeing any resources still allocated.
+ *
+ * @param pVM The cross-context VM structure.
+ */
+DECLHIDDEN(void) pdmR3QueueTerm(PVM pVM)
+{
+ PUVM const pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+
+ if (pVM->pdm.s.papRing3Queues)
+ {
+ /*
+ * Free the R3 queue handle array.
+ */
+ PDMQUEUEHANDLE cQueues = pVM->pdm.s.cRing3Queues;
+ for (PDMQUEUEHANDLE i = 0; i < cQueues; i++)
+ if (pVM->pdm.s.papRing3Queues[i])
+ {
+ PPDMQUEUE pQueue = pVM->pdm.s.papRing3Queues[i];
+
+ pdmR3QueueDestroyLocked(pVM, RT_ELEMENTS(pVM->pdm.s.apRing0Queues) + i, pQueue->u.Gen.pvOwner);
+ Assert(!pVM->pdm.s.papRing3Queues[i]);
+ }
+
+ RTMemFree(pVM->pdm.s.papRing3Queues);
+ pVM->pdm.s.cRing3QueuesAlloc = 0;
+ pVM->pdm.s.papRing3Queues = NULL;
+ }
+
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
diff --git a/src/VBox/VMM/VMMR3/PDMR3Task.cpp b/src/VBox/VMM/VMMR3/PDMR3Task.cpp
new file mode 100644
index 00000000..f4893666
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMR3Task.cpp
@@ -0,0 +1,638 @@
+/* $Id: PDMR3Task.cpp $ */
+/** @file
+ * PDM Task - Asynchronous user mode tasks.
+ */
+
+/*
+ * Copyright (C) 2019-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_TASK
+#include "PDMInternal.h"
+#include <VBox/vmm/pdmtask.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <VBox/sup.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/semaphore.h>
+#include <iprt/thread.h>
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT}
+ */
+static DECLCALLBACK(void) pdmR3TaskInfo(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ RT_NOREF(cArgs, papszArgs); /* for now. */
+
+ uint32_t cSetsDisplayed = 0;
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.apTaskSets); i++)
+ {
+ PPDMTASKSET pTaskSet = pVM->pdm.s.apTaskSets[i];
+ if ( pTaskSet
+ && ( pTaskSet->cAllocated > 0
+ || ASMAtomicReadU64(&pTaskSet->fTriggered)))
+ {
+ if (cSetsDisplayed > 0)
+ pHlp->pfnPrintf(pHlp, "\n");
+ pHlp->pfnPrintf(pHlp,
+ "Task set #%u - handle base %u, pending %#RX64%s%s, running %d, %u of %u allocated:\n"
+ /* 123: triggered internal 0123456789abcdef 0123456789abcdef 0x0000 SomeFunctionName */
+ " Hnd: State Type pfnCallback pvUser Flags Name\n",
+ i, pTaskSet->uHandleBase, ASMAtomicReadU64(&pTaskSet->fTriggered),
+ pTaskSet->fRZEnabled ? " RZ-enabled" : "", pTaskSet->hThread != NIL_RTTHREAD ? "" : " no-thread",
+ (int)ASMAtomicReadU32(&pTaskSet->idxRunning), pTaskSet->cAllocated, RT_ELEMENTS(pTaskSet->aTasks));
+ for (unsigned j = 0; j < RT_ELEMENTS(pTaskSet->aTasks); j++)
+ {
+ PPDMTASK pTask = &pTaskSet->aTasks[j];
+ if (pTask->pvOwner)
+ {
+ const char *pszType;
+ switch (pTask->enmType)
+ {
+ case PDMTASKTYPE_DEV: pszType = " device "; break;
+ case PDMTASKTYPE_DRV: pszType = " driver "; break;
+ case PDMTASKTYPE_USB: pszType = " usbdev "; break;
+ case PDMTASKTYPE_INTERNAL: pszType = "internal"; break;
+ default: pszType = "unknown "; break;
+ }
+ pHlp->pfnPrintf(pHlp, " %3u: %s %s %p %p %#06x %s\n", pTaskSet->uHandleBase + j,
+ ASMBitTest(&pTaskSet->fTriggered, j) ? "triggered"
+ : ASMAtomicReadU32(&pTaskSet->idxRunning) == j ? " running " : " idle ",
+ pszType, pTask->pfnCallback, pTask->pvUser, pTask->fFlags, pTask->pszName);
+ }
+ }
+
+ cSetsDisplayed++;
+ }
+ }
+}
+
+
+/**
+ * Initializes the ring-0 capable tasks during VM construction.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3TaskInit(PVM pVM)
+{
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.aTaskSets); i++)
+ {
+ PPDMTASKSET pTaskSet = &pVM->pdm.s.aTaskSets[i];
+
+ pTaskSet->u32Magic = PDMTASKSET_MAGIC;
+ pTaskSet->fRZEnabled = true;
+ //pTaskSet->cAllocated = 0;
+ pTaskSet->uHandleBase = (uint16_t)(i * RT_ELEMENTS(pTaskSet->aTasks));
+ pTaskSet->hThread = NIL_RTTHREAD;
+ int rc = SUPSemEventCreate(pVM->pSession, &pTaskSet->hEventR0);
+ AssertRCReturn(rc, rc);
+ pTaskSet->hEventR3 = NIL_RTSEMEVENT;
+ //pTaskSet->fTriggered = 0;
+ pTaskSet->idxRunning = UINT32_MAX;
+ //pTaskSet->fShutdown = false;
+ pTaskSet->pVM = pVM;
+
+ pVM->pdm.s.apTaskSets[i] = pTaskSet;
+ }
+
+ int rc = DBGFR3InfoRegisterInternalArgv(pVM, "tasks", "PDM tasks", pdmR3TaskInfo, 0 /*fFlags*/);
+ AssertRC(rc);
+
+ return VINF_SUCCESS;
+}
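+
+/*
+ * The "tasks" info handler registered above can be invoked through DBGF,
+ * e.g. (illustrative; a NULL helper means the default log output):
+ *
+ *     DBGFR3Info(pVM->pUVM, "tasks", NULL, NULL);
+ */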
+
+
+/**
+ * Terminates task threads when the VM is destroyed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pdmR3TaskTerm(PVM pVM)
+{
+ /*
+ * Signal all the threads first.
+ */
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.apTaskSets); i++)
+ {
+ PPDMTASKSET pTaskSet = pVM->pdm.s.apTaskSets[i];
+ if (pTaskSet)
+ {
+ /*
+ * Set the shutdown indicator and signal the thread.
+ */
+ ASMAtomicWriteBool(&pTaskSet->fShutdown, true);
+
+ if (pTaskSet->hEventR0 != NIL_SUPSEMEVENT)
+ {
+ int rc = SUPSemEventSignal(pVM->pSession, pTaskSet->hEventR0);
+ AssertRC(rc);
+ }
+
+ if (pTaskSet->hEventR3 != NIL_RTSEMEVENT)
+ {
+ int rc = RTSemEventSignal(pTaskSet->hEventR3);
+ AssertRC(rc);
+ }
+ }
+ }
+
+ /*
+ * Wait for them to terminate and clean up semaphores.
+ */
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.apTaskSets); i++)
+ {
+ PPDMTASKSET pTaskSet = pVM->pdm.s.apTaskSets[i];
+ if (pTaskSet)
+ {
+ /*
+ * Wait for the thread to terminate.
+ */
+ if (pTaskSet->hThread != NIL_RTTHREAD)
+ {
+ int rc = RTThreadWait(pTaskSet->hThread, RT_MS_30SEC, NULL);
+ AssertLogRelMsg(RT_SUCCESS(rc), ("pTaskSet %u: thread wait failed: %Rrc\n", i, rc));
+ if (RT_SUCCESS(rc))
+ pTaskSet->hThread = NIL_RTTHREAD;
+ }
+
+ /*
+ * Destroy the semaphore.
+ */
+ if (pTaskSet->hEventR0 != NIL_SUPSEMEVENT)
+ {
+ int rc = SUPSemEventClose(pVM->pSession, pTaskSet->hEventR0);
+ AssertRC(rc);
+ pTaskSet->hEventR0 = NIL_SUPSEMEVENT;
+ }
+
+ if (pTaskSet->hEventR3 != NIL_RTSEMEVENT)
+ {
+ int rc = RTSemEventDestroy(pTaskSet->hEventR3);
+ AssertRC(rc);
+ pTaskSet->hEventR3 = NIL_RTSEMEVENT;
+ }
+ }
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNRTTHREAD,
+ * PDM Asynchronous Task Executor Thread}
+ */
+static DECLCALLBACK(int) pdmR3TaskThread(RTTHREAD ThreadSelf, void *pvUser)
+{
+ PPDMTASKSET const pTaskSet = (PPDMTASKSET)pvUser;
+ AssertPtr(pTaskSet);
+ Assert(pTaskSet->u32Magic == PDMTASKSET_MAGIC);
+ RT_NOREF(ThreadSelf);
+
+ /*
+ * Process stuff until we're told to terminate.
+ */
+ while (!ASMAtomicReadBool(&pTaskSet->fShutdown))
+ {
+ /*
+ * Process pending tasks.
+ *
+ * The outer loop runs till there are no more pending tasks.
+ *
+ * The inner loop takes one snapshot of fTriggered and processes all
+ * pending bits in the snapshot. This ensures fairness.
+ */
+ uint32_t cShutdown = 3;
+ for (;;)
+ {
+ uint64_t fTriggered = ASMAtomicReadU64(&pTaskSet->fTriggered);
+ unsigned iTask = ASMBitFirstSetU64(fTriggered);
+ if (iTask == 0)
+ break;
+ do
+ {
+ iTask--;
+ AssertBreak(iTask < RT_ELEMENTS(pTaskSet->aTasks));
+
+ if (ASMAtomicBitTestAndClear(&pTaskSet->fTriggered, iTask))
+ {
+ PPDMTASK pTask = &pTaskSet->aTasks[iTask];
+
+ /* Copy out the data we need here to try to avoid destruction race trouble. */
+ PDMTASKTYPE const enmType = pTask->enmType;
+ PFNRT const pfnCallback = pTask->pfnCallback;
+ void * const pvOwner = pTask->pvOwner;
+ void * const pvTaskUser = pTask->pvUser;
+
+ ASMAtomicWriteU32(&pTaskSet->idxRunning, iTask);
+
+ if ( pvOwner
+ && pfnCallback
+ && pvOwner == pTask->pvOwner
+ && pfnCallback == pTask->pfnCallback
+ && pvTaskUser == pTask->pvUser
+ && enmType == pTask->enmType)
+ {
+ pTask->cRuns += 1;
+ switch (pTask->enmType)
+ {
+ case PDMTASKTYPE_DEV:
+ Log2(("pdmR3TaskThread: Runs dev task %s (%#x)\n", pTask->pszName, iTask + pTaskSet->uHandleBase));
+ ((PFNPDMTASKDEV)(pfnCallback))((PPDMDEVINS)pvOwner, pvTaskUser);
+ break;
+ case PDMTASKTYPE_DRV:
+ Log2(("pdmR3TaskThread: Runs drv task %s (%#x)\n", pTask->pszName, iTask + pTaskSet->uHandleBase));
+ ((PFNPDMTASKDRV)(pfnCallback))((PPDMDRVINS)pvOwner, pvTaskUser);
+ break;
+ case PDMTASKTYPE_USB:
+ Log2(("pdmR3TaskThread: Runs USB task %s (%#x)\n", pTask->pszName, iTask + pTaskSet->uHandleBase));
+ ((PFNPDMTASKUSB)(pfnCallback))((PPDMUSBINS)pvOwner, pvTaskUser);
+ break;
+ case PDMTASKTYPE_INTERNAL:
+ Log2(("pdmR3TaskThread: Runs int task %s (%#x)\n", pTask->pszName, iTask + pTaskSet->uHandleBase));
+ ((PFNPDMTASKINT)(pfnCallback))((PVM)pvOwner, pvTaskUser);
+ break;
+ default:
+ AssertFailed();
+ }
+ }
+ else /* Note! There might be a race here during destruction. */
+ AssertMsgFailed(("%d %p %p %p\n", enmType, pvOwner, pfnCallback, pvTaskUser));
+
+ ASMAtomicWriteU32(&pTaskSet->idxRunning, UINT32_MAX);
+ }
+
+ /* Next pending task. */
+ fTriggered &= ~RT_BIT_64(iTask);
+ iTask = ASMBitFirstSetU64(fTriggered);
+ } while (iTask != 0);
+
+ /*
+ * If we're shutting down, we'll try to drain the pending tasks by
+ * looping three more times before just quitting. We don't want
+ * to get stuck here if some stuff is misbehaving.
+ */
+ if (!ASMAtomicReadBool(&pTaskSet->fShutdown))
+ { /* likely */ }
+ else if (--cShutdown == 0)
+ break;
+ }
+
+ /*
+ * Wait unless we're shutting down.
+ */
+ if (!ASMAtomicReadBool(&pTaskSet->fShutdown))
+ {
+ if (pTaskSet->fRZEnabled)
+ SUPSemEventWaitNoResume(pTaskSet->pVM->pSession, pTaskSet->hEventR0, RT_MS_15SEC);
+ else
+ RTSemEventWaitNoResume(pTaskSet->hEventR3, RT_MS_15SEC);
+ }
+ }
+
+ /*
+ * Complain about pending tasks.
+ */
+ uint64_t const fTriggered = ASMAtomicReadU64(&pTaskSet->fTriggered);
+ AssertLogRelMsg(fTriggered == 0, ("fTriggered=%#RX64 - %u %s\n", fTriggered, ASMBitFirstSetU64(fTriggered) - 1,
+ pTaskSet->aTasks[ASMBitFirstSetU64(fTriggered) - 1].pszName));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for PDMR3TaskCreate().
+ */
+DECLINLINE(PPDMTASK) pdmR3TaskAllocInSet(PPDMTASKSET pTaskSet)
+{
+ if (pTaskSet->cAllocated < RT_ELEMENTS(pTaskSet->aTasks))
+ {
+ for (size_t j = 0; j < RT_ELEMENTS(pTaskSet->aTasks); j++)
+ if (pTaskSet->aTasks[j].pvOwner == NULL)
+ return &pTaskSet->aTasks[j];
+ AssertFailed();
+ }
+ return NULL;
+}
+
+/**
+ * Creates a task.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFlags PDMTASK_F_XXX.
+ * @param pszName The task name (function name ++).
+ * @param enmType The task owner type.
+ * @param pvOwner The task owner pointer.
+ * @param pfnCallback The task callback.
+ * @param pvUser The user argument for the callback.
+ * @param phTask Where to return the task handle.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) PDMR3TaskCreate(PVM pVM, uint32_t fFlags, const char *pszName, PDMTASKTYPE enmType, void *pvOwner,
+ PFNRT pfnCallback, void *pvUser, PDMTASKHANDLE *phTask)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn(!(fFlags & ~PDMTASK_F_VALID_MASK), VERR_INVALID_FLAGS);
+ AssertPtrReturn(pvOwner, VERR_INVALID_POINTER);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); /* implicit serialization by requiring EMT(0) */
+ switch (enmType)
+ {
+ case PDMTASKTYPE_DEV:
+ case PDMTASKTYPE_DRV:
+ case PDMTASKTYPE_USB:
+ break;
+ case PDMTASKTYPE_INTERNAL:
+ AssertReturn(pvOwner == (void *)pVM, VERR_INVALID_PARAMETER);
+ break;
+ default:
+ AssertFailedReturn(VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * If the callback must be ring-0 triggerable, we are restricted to the
+ * task sets living in the VM structure. Otherwise, pick from the dynamically
+ * allocated sets living on the ring-3 heap.
+ */
+ PPDMTASKSET pTaskSet = NULL;
+ PPDMTASK pTask = NULL;
+ if (fFlags & PDMTASK_F_RZ)
+ {
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.aTaskSets); i++)
+ {
+ pTaskSet = &pVM->pdm.s.aTaskSets[i];
+ pTask = pdmR3TaskAllocInSet(pTaskSet);
+ if (pTask)
+ break;
+ }
+ }
+ else
+ {
+ for (size_t i = RT_ELEMENTS(pVM->pdm.s.aTaskSets); i < RT_ELEMENTS(pVM->pdm.s.apTaskSets); i++)
+ {
+ pTaskSet = pVM->pdm.s.apTaskSets[i];
+ if (pTaskSet)
+ {
+ pTask = pdmR3TaskAllocInSet(pTaskSet);
+ if (pTask)
+ break;
+ }
+ else
+ {
+ /*
+ * Try allocate a new set.
+ */
+ LogFlow(("PDMR3TaskCreate: Allocating new task set (%#u)...\n", i));
+ pTaskSet = (PPDMTASKSET)MMR3HeapAllocZ(pVM, MM_TAG_PDM, sizeof(*pTaskSet));
+ AssertReturn(pTaskSet, VERR_NO_MEMORY);
+
+ pTaskSet->u32Magic = PDMTASKSET_MAGIC;
+ //pTaskSet->fRZEnabled = false;
+ //pTaskSet->cAllocated = 0;
+ pTaskSet->uHandleBase = (uint16_t)(i * RT_ELEMENTS(pTaskSet->aTasks));
+ pTaskSet->hThread = NIL_RTTHREAD;
+ pTaskSet->hEventR0 = NIL_SUPSEMEVENT;
+ int rc = RTSemEventCreate(&pTaskSet->hEventR3);
+ AssertRCReturnStmt(rc, MMR3HeapFree(pTaskSet), rc);
+ //pTaskSet->fTriggered = 0;
+ pTaskSet->idxRunning = UINT32_MAX;
+ //pTaskSet->fShutdown = false;
+ pTaskSet->pVM = pVM;
+
+ pVM->pdm.s.apTaskSets[i] = pTaskSet;
+ pTask = &pTaskSet->aTasks[0];
+ break;
+ }
+ }
+ }
+ AssertLogRelReturn(pTask, VERR_OUT_OF_RESOURCES);
+
+ /*
+ * Do we need to start a worker thread? Do this first as it can fail.
+ */
+ if (pTaskSet->hThread == NIL_RTTHREAD)
+ {
+ int rc = RTThreadCreateF(&pTaskSet->hThread, pdmR3TaskThread, pTaskSet, 0 /*cbStack*/, RTTHREADTYPE_IO,
+ RTTHREADFLAGS_WAITABLE, "TaskSet%u", pTaskSet->uHandleBase / RT_ELEMENTS(pTaskSet->aTasks));
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Complete the allocation.
+ */
+ pTask->enmType = enmType;
+ pTask->fFlags = fFlags;
+ pTask->pvUser = pvUser;
+ pTask->pfnCallback = pfnCallback;
+ pTask->pszName = pszName;
+ ASMAtomicWritePtr(&pTask->pvOwner, pvOwner);
+ pTaskSet->cAllocated += 1;
+
+ uint32_t const hTask = pTaskSet->uHandleBase + (uint32_t)(pTask - &pTaskSet->aTasks[0]);
+ *phTask = hTask;
+
+ STAMR3RegisterF(pVM, &pTask->cRuns, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of times the task has been executed.", "/PDM/Tasks/%03u-%s-runs", hTask, pszName);
+ STAMR3RegisterF(pVM, (void *)&pTask->cAlreadyTrigged, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of times the task was re-triggered.", "/PDM/Tasks/%03u-%s-retriggered", hTask, pszName);
+
+ LogFlow(("PDMR3TaskCreate: Allocated %u for %s\n", hTask, pszName));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates an internal task.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fFlags PDMTASK_F_XXX.
+ * @param pszName The task name (function name ++).
+ * @param pfnCallback The task callback.
+ * @param pvUser The user argument for the callback.
+ * @param phTask Where to return the task handle.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) PDMR3TaskCreateInternal(PVM pVM, uint32_t fFlags, const char *pszName,
+ PFNPDMTASKINT pfnCallback, void *pvUser, PDMTASKHANDLE *phTask)
+{
+ return PDMR3TaskCreate(pVM, fFlags, pszName, PDMTASKTYPE_INTERNAL, pVM, (PFNRT)pfnCallback, pvUser, phTask);
+}
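+
+/*
+ * Usage sketch (illustrative only; the worker and function names are made
+ * up, and it assumes the PDMTaskTriggerInternal API from PDMAllTask.cpp):
+ * create an internal task once on EMT(0), then trigger it from any context.
+ */
+#if 0
+static DECLCALLBACK(void) vmmR3SampleTaskWorker(PVM pVM, void *pvUser)
+{
+ RT_NOREF(pVM, pvUser);
+ /* The deferred work runs here on the TaskSetN worker thread. */
+}
+
+static int vmmR3SampleTaskUsage(PVM pVM)
+{
+ PDMTASKHANDLE hTask = NIL_PDMTASKHANDLE;
+ int rc = PDMR3TaskCreateInternal(pVM, PDMTASK_F_RZ, "SampleTask", vmmR3SampleTaskWorker, NULL, &hTask);
+ AssertRCReturn(rc, rc);
+ return PDMTaskTriggerInternal(pVM, hTask); /* the callback executes asynchronously */
+}
+#endif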
+
+
+/**
+ * Worker for PDMR3TaskDestroyAllByOwner() and PDMR3TaskDestroySpecific().
+ */
+static void pdmR3TaskDestroyOne(PVM pVM, PPDMTASKSET pTaskSet, PPDMTASK pTask, size_t iTask)
+{
+ AssertPtr(pTask->pvOwner);
+
+ /*
+ * Delay if busy.
+ */
+ uint32_t cYields = 64;
+ while ( ASMAtomicReadU32(&pTaskSet->idxRunning) == iTask
+ && cYields > 0
+ && pTaskSet->hThread != NIL_RTTHREAD)
+ {
+ ASMNopPause();
+ RTThreadYield();
+ cYields--;
+ }
+
+ /*
+ * Zap it (very noisy, but whatever).
+ */
+ LogFlow(("pdmR3TaskDestroyOne: Destroying %zu %s\n", iTask + pTaskSet->uHandleBase, pTask->pszName));
+ AssertPtr(pTask->pvOwner);
+
+ char szPrefix[64];
+ RTStrPrintf(szPrefix, sizeof(szPrefix), "/PDM/Tasks/%03zu-", iTask + pTaskSet->uHandleBase);
+ STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
+
+ AssertPtr(pTask->pvOwner);
+ ASMAtomicWriteNullPtr(&pTask->pvOwner);
+ pTask->enmType = (PDMTASKTYPE)0;
+ pTask->fFlags = 0;
+ ASMAtomicWriteNullPtr((void **)&pTask->pfnCallback);
+ ASMAtomicWriteNullPtr(&pTask->pvUser);
+ ASMAtomicWriteNullPtr(&pTask->pszName);
+
+ AssertReturnVoid(pTaskSet->cAllocated > 0);
+ pTaskSet->cAllocated -= 1;
+}
+
+
+/**
+ * Destroys all tasks belonging to @a pvOwner.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmType The owner type.
+ * @param pvOwner The owner.
+ */
+VMMR3_INT_DECL(int) PDMR3TaskDestroyAllByOwner(PVM pVM, PDMTASKTYPE enmType, void *pvOwner)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn(enmType >= PDMTASKTYPE_DEV && enmType < PDMTASKTYPE_INTERNAL, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pvOwner, VERR_INVALID_POINTER);
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); /* implicit serialization by requiring EMT(0) */
+
+ /*
+ * Scan all the task sets.
+ */
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pdm.s.apTaskSets); i++)
+ {
+ PPDMTASKSET pTaskSet = pVM->pdm.s.apTaskSets[i];
+ if (pTaskSet)
+ {
+ ssize_t cLeft = pTaskSet->cAllocated;
+ for (size_t j = 0; j < RT_ELEMENTS(pTaskSet->aTasks) && cLeft > 0; j++)
+ {
+ PPDMTASK pTask = &pTaskSet->aTasks[j];
+ void * const pvTaskOwner = pTask->pvOwner;
+ if (pvTaskOwner)
+ {
+ if ( pvTaskOwner == pvOwner
+ && pTask->enmType == enmType)
+ pdmR3TaskDestroyOne(pVM, pTaskSet, pTask, j);
+ else
+ Assert(pvTaskOwner != pvOwner);
+ cLeft--;
+ }
+ }
+ }
+ else
+ break;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys the task @a hTask.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmType The owner type.
+ * @param pvOwner The owner.
+ * @param hTask Handle to the task to destroy.
+ */
+VMMR3_INT_DECL(int) PDMR3TaskDestroySpecific(PVM pVM, PDMTASKTYPE enmType, void *pvOwner, PDMTASKHANDLE hTask)
+{
+ /*
+ * Validate the input.
+ */
+ AssertReturn(enmType >= PDMTASKTYPE_DEV && enmType <= PDMTASKTYPE_INTERNAL, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pvOwner, VERR_INVALID_POINTER);
+
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ size_t const iTask = hTask % RT_ELEMENTS(pVM->pdm.s.apTaskSets[0]->aTasks);
+ size_t const iTaskSet = hTask / RT_ELEMENTS(pVM->pdm.s.apTaskSets[0]->aTasks);
+ AssertReturn(iTaskSet < RT_ELEMENTS(pVM->pdm.s.apTaskSets), VERR_INVALID_HANDLE);
+ PPDMTASKSET const pTaskSet = pVM->pdm.s.apTaskSets[iTaskSet];
+ AssertPtrReturn(pTaskSet, VERR_INVALID_HANDLE);
+ AssertReturn(pTaskSet->u32Magic == PDMTASKSET_MAGIC, VERR_INVALID_MAGIC);
+ PPDMTASK const pTask = &pTaskSet->aTasks[iTask];
+
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT); /* implicit serialization by requiring EMT(0) */
+
+ AssertReturn(pTask->pvOwner == pvOwner, VERR_NOT_OWNER);
+ AssertReturn(pTask->enmType == enmType, VERR_NOT_OWNER);
+
+ /*
+ * Do the job.
+ */
+ pdmR3TaskDestroyOne(pVM, pTaskSet, pTask, iTask);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys the internal task @a hTask.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param hTask Handle to the task to destroy.
+ */
+VMMR3_INT_DECL(int) PDMR3TaskDestroyInternal(PVM pVM, PDMTASKHANDLE hTask)
+{
+ return PDMR3TaskDestroySpecific(pVM, PDMTASKTYPE_INTERNAL, pVM, hTask);
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMThread.cpp b/src/VBox/VMM/VMMR3/PDMThread.cpp
new file mode 100644
index 00000000..f0ce41e7
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMThread.cpp
@@ -0,0 +1,1103 @@
+/* $Id: PDMThread.cpp $ */
+/** @file
+ * PDM Thread - VM Thread Management.
+ */
+
+/*
+ * Copyright (C) 2007-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+/// @todo \#define LOG_GROUP LOG_GROUP_PDM_THREAD
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/semaphore.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) pdmR3ThreadMain(RTTHREAD Thread, void *pvUser);
+
+
+/**
+ * Wrapper around ASMAtomicCmpXchgSize.
+ */
+DECLINLINE(bool) pdmR3AtomicCmpXchgState(PPDMTHREAD pThread, PDMTHREADSTATE enmNewState, PDMTHREADSTATE enmOldState)
+{
+ bool fRc;
+ ASMAtomicCmpXchgSize(&pThread->enmState, enmNewState, enmOldState, fRc);
+ return fRc;
+}
+
+
+/**
+ * Does the wakeup call.
+ *
+ * @returns VBox status code. Already asserted on failure.
+ * @param pThread The PDM thread.
+ */
+static DECLCALLBACK(int) pdmR3ThreadWakeUp(PPDMTHREAD pThread)
+{
+ RTSemEventMultiSignal(pThread->Internal.s.SleepEvent);
+
+ int rc;
+ switch (pThread->Internal.s.enmType)
+ {
+ case PDMTHREADTYPE_DEVICE:
+ rc = pThread->u.Dev.pfnWakeUp(pThread->u.Dev.pDevIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_USB:
+ rc = pThread->u.Usb.pfnWakeUp(pThread->u.Usb.pUsbIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_DRIVER:
+ rc = pThread->u.Drv.pfnWakeUp(pThread->u.Drv.pDrvIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_INTERNAL:
+ rc = pThread->u.Int.pfnWakeUp(pThread->Internal.s.pVM, pThread);
+ break;
+
+ case PDMTHREADTYPE_EXTERNAL:
+ rc = pThread->u.Ext.pfnWakeUp(pThread);
+ break;
+
+ default:
+ AssertMsgFailed(("%d\n", pThread->Internal.s.enmType));
+ rc = VERR_PDM_THREAD_IPE_1;
+ break;
+ }
+ AssertRC(rc);
+ return rc;
+}
+
+
+/**
+ * Allocates a new thread instance.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppThread Where to store the pointer to the instance.
+ */
+static int pdmR3ThreadNew(PVM pVM, PPPDMTHREAD ppThread)
+{
+ PPDMTHREAD pThread;
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_THREAD, sizeof(*pThread), (void **)&pThread);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pThread->u32Version = PDMTHREAD_VERSION;
+ pThread->enmState = PDMTHREADSTATE_INITIALIZING;
+ pThread->Thread = NIL_RTTHREAD;
+ pThread->Internal.s.pVM = pVM;
+
+ *ppThread = pThread;
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Initializes a new thread; this actually creates the thread.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppThread Where the thread instance data handle is.
+ * @param cbStack The stack size, see RTThreadCreate().
+ * @param enmType The thread type, see RTThreadCreate().
+ * @param pszName The thread name, see RTThreadCreate().
+ */
+static int pdmR3ThreadInit(PVM pVM, PPPDMTHREAD ppThread, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ PPDMTHREAD pThread = *ppThread;
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Initialize the remainder of the structure.
+ */
+ pThread->Internal.s.pVM = pVM;
+
+ int rc = RTSemEventMultiCreate(&pThread->Internal.s.BlockEvent);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemEventMultiCreate(&pThread->Internal.s.SleepEvent);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Create the thread and wait for it to initialize.
+ * The newly created thread will set the PDMTHREAD::Thread member.
+ */
+ RTTHREAD Thread;
+ rc = RTThreadCreate(&Thread, pdmR3ThreadMain, pThread, cbStack, enmType, RTTHREADFLAGS_WAITABLE, pszName);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTThreadUserWait(Thread, 60*1000);
+ if ( RT_SUCCESS(rc)
+ && pThread->enmState != PDMTHREADSTATE_SUSPENDED)
+ rc = VERR_PDM_THREAD_IPE_2;
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Insert it into the thread list.
+ */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pThread->Internal.s.pNext = NULL;
+ if (pUVM->pdm.s.pThreadsTail)
+ pUVM->pdm.s.pThreadsTail->Internal.s.pNext = pThread;
+ else
+ pUVM->pdm.s.pThreads = pThread;
+ pUVM->pdm.s.pThreadsTail = pThread;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ rc = RTThreadUserReset(Thread);
+ AssertRC(rc);
+ return rc;
+ }
+
+ /* bailout */
+ RTThreadWait(Thread, 60*1000, NULL);
+ }
+ RTSemEventMultiDestroy(pThread->Internal.s.SleepEvent);
+ pThread->Internal.s.SleepEvent = NIL_RTSEMEVENTMULTI;
+ }
+ RTSemEventMultiDestroy(pThread->Internal.s.BlockEvent);
+ pThread->Internal.s.BlockEvent = NIL_RTSEMEVENTMULTI;
+ }
+ MMR3HeapFree(pThread);
+ *ppThread = NULL;
+
+ return rc;
+}
+
+
+/**
+ * Device Helper for creating a thread associated with a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance.
+ * @param ppThread Where to store the thread 'handle'.
+ * @param pvUser The user argument to the thread function.
+ * @param pfnThread The thread function.
+ * @param pfnWakeUp The wakeup callback. This is called on the EMT thread when
+ * a state change is pending.
+ * @param cbStack See RTThreadCreate.
+ * @param enmType See RTThreadCreate.
+ * @param pszName See RTThreadCreate.
+ */
+int pdmR3ThreadCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDEV pfnThread,
+ PFNPDMTHREADWAKEUPDEV pfnWakeUp, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ int rc = pdmR3ThreadNew(pVM, ppThread);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMTHREAD pThread = *ppThread;
+ pThread->pvUser = pvUser;
+ pThread->Internal.s.enmType = PDMTHREADTYPE_DEVICE;
+ pThread->u.Dev.pDevIns = pDevIns;
+ pThread->u.Dev.pfnThread = pfnThread;
+ pThread->u.Dev.pfnWakeUp = pfnWakeUp;
+ rc = pdmR3ThreadInit(pVM, ppThread, cbStack, enmType, pszName);
+ }
+ return rc;
+}
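+
+/*
+ * Usage sketch (illustrative only, assuming the PDMDevHlpThreadCreate
+ * wrapper from pdmdev.h): device code does not call this worker directly
+ * but goes through the device helper; the callback and member names here
+ * are made up.
+ */
+#if 0
+ rc = PDMDevHlpThreadCreate(pDevIns, &pThis->pWorkerThread, pThis /*pvUser*/, devSampleThread,
+ devSampleThreadWakeUp, 0 /*cbStack*/, RTTHREADTYPE_IO, "SampleWorker");
+#endif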
+
+
+/**
+ * USB Device Helper for creating a thread associated with a USB device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance.
+ * @param ppThread Where to store the thread 'handle'.
+ * @param pvUser The user argument to the thread function.
+ * @param pfnThread The thread function.
+ * @param pfnWakeUp The wakeup callback. This is called on the EMT thread when
+ * a state change is pending.
+ * @param cbStack See RTThreadCreate.
+ * @param enmType See RTThreadCreate.
+ * @param pszName See RTThreadCreate.
+ */
+int pdmR3ThreadCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADUSB pfnThread,
+ PFNPDMTHREADWAKEUPUSB pfnWakeUp, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ int rc = pdmR3ThreadNew(pVM, ppThread);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMTHREAD pThread = *ppThread;
+ pThread->pvUser = pvUser;
+ pThread->Internal.s.enmType = PDMTHREADTYPE_USB;
+ pThread->u.Usb.pUsbIns = pUsbIns;
+ pThread->u.Usb.pfnThread = pfnThread;
+ pThread->u.Usb.pfnWakeUp = pfnWakeUp;
+ rc = pdmR3ThreadInit(pVM, ppThread, cbStack, enmType, pszName);
+ }
+ return rc;
+}
+
+
+/**
+ * Driver Helper for creating a thread associated with a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ * @param ppThread Where to store the thread 'handle'.
+ * @param pvUser The user argument to the thread function.
+ * @param pfnThread The thread function.
+ * @param pfnWakeUp The wakeup callback. This is called on the EMT thread when
+ * a state change is pending.
+ * @param cbStack See RTThreadCreate.
+ * @param enmType See RTThreadCreate.
+ * @param pszName See RTThreadCreate.
+ */
+int pdmR3ThreadCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDRV pfnThread,
+ PFNPDMTHREADWAKEUPDRV pfnWakeUp, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ int rc = pdmR3ThreadNew(pVM, ppThread);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMTHREAD pThread = *ppThread;
+ pThread->pvUser = pvUser;
+ pThread->Internal.s.enmType = PDMTHREADTYPE_DRIVER;
+ pThread->u.Drv.pDrvIns = pDrvIns;
+ pThread->u.Drv.pfnThread = pfnThread;
+ pThread->u.Drv.pfnWakeUp = pfnWakeUp;
+ rc = pdmR3ThreadInit(pVM, ppThread, cbStack, enmType, pszName);
+ }
+ return rc;
+}
+
+
+/**
+ * Creates a PDM thread for internal use in the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppThread Where to store the thread 'handle'.
+ * @param pvUser The user argument to the thread function.
+ * @param pfnThread The thread function.
+ * @param pfnWakeUp The wakeup callback. This is called on the EMT thread when
+ * a state change is pending.
+ * @param cbStack See RTThreadCreate.
+ * @param enmType See RTThreadCreate.
+ * @param pszName See RTThreadCreate.
+ */
+VMMR3DECL(int) PDMR3ThreadCreate(PVM pVM, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADINT pfnThread,
+ PFNPDMTHREADWAKEUPINT pfnWakeUp, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ int rc = pdmR3ThreadNew(pVM, ppThread);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMTHREAD pThread = *ppThread;
+ pThread->pvUser = pvUser;
+ pThread->Internal.s.enmType = PDMTHREADTYPE_INTERNAL;
+ pThread->u.Int.pfnThread = pfnThread;
+ pThread->u.Int.pfnWakeUp = pfnWakeUp;
+ rc = pdmR3ThreadInit(pVM, ppThread, cbStack, enmType, pszName);
+ }
+ return rc;
+}
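+
+/*
+ * Usage sketch (illustrative only, made-up names): the canonical shape of a
+ * thread function that cooperates with the suspend/resume machinery, using
+ * PDMR3ThreadSleep (further down in this file) for interruptible waiting.
+ */
+#if 0
+static DECLCALLBACK(int) vmmR3SampleThread(PVM pVM, PPDMTHREAD pThread)
+{
+ /* The creator waits for the initial suspend; just return to signal success. */
+ if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
+ return VINF_SUCCESS;
+
+ while (pThread->enmState == PDMTHREADSTATE_RUNNING)
+ {
+ /* ... do one unit of work ... */
+ PDMR3ThreadSleep(pThread, 100 /*ms*/); /* interrupted by state changes */
+ }
+ return VINF_SUCCESS; /* pdmR3ThreadMain handles the suspend/terminate transitions */
+}
+
+static DECLCALLBACK(int) vmmR3SampleThreadWakeUp(PVM pVM, PPDMTHREAD pThread)
+{
+ RT_NOREF(pVM, pThread);
+ return VINF_SUCCESS; /* pdmR3ThreadWakeUp has already signalled the sleep event */
+}
+#endif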
+
+
+/**
+ * Creates a PDM thread for VM use by some external party.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param ppThread Where to store the thread 'handle'.
+ * @param pvUser The user argument to the thread function.
+ * @param pfnThread The thread function.
+ * @param pfnWakeUp The wakeup callback. This is called on the EMT thread when
+ * a state change is pending.
+ * @param cbStack See RTThreadCreate.
+ * @param enmType See RTThreadCreate.
+ * @param pszName See RTThreadCreate.
+ */
+VMMR3DECL(int) PDMR3ThreadCreateExternal(PVM pVM, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADEXT pfnThread,
+ PFNPDMTHREADWAKEUPEXT pfnWakeUp, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ int rc = pdmR3ThreadNew(pVM, ppThread);
+ if (RT_SUCCESS(rc))
+ {
+ PPDMTHREAD pThread = *ppThread;
+ pThread->pvUser = pvUser;
+ pThread->Internal.s.enmType = PDMTHREADTYPE_EXTERNAL;
+ pThread->u.Ext.pfnThread = pfnThread;
+ pThread->u.Ext.pfnWakeUp = pfnWakeUp;
+ rc = pdmR3ThreadInit(pVM, ppThread, cbStack, enmType, pszName);
+ }
+ return rc;
+}
+
+
+/**
+ * Destroys a PDM thread.
+ *
+ * This will wake up the thread, tell it to terminate, and wait for it to terminate.
+ *
+ * @returns VBox status code.
+ * This reflects the success of destroying the thread and not the exit code
+ * of the thread as this is stored in *pRcThread.
+ * @param pThread The thread to destroy.
+ * @param pRcThread Where to store the thread exit code. Optional.
+ * @thread The emulation thread (EMT).
+ */
+VMMR3DECL(int) PDMR3ThreadDestroy(PPDMTHREAD pThread, int *pRcThread)
+{
+ /*
+ * Assert sanity.
+ */
+ AssertPtrReturn(pThread, VERR_INVALID_POINTER);
+ AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
+ Assert(pThread->Thread != RTThreadSelf());
+ AssertPtrNullReturn(pRcThread, VERR_INVALID_POINTER);
+ PVM pVM = pThread->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Advance the thread to the terminating state.
+ */
+ int rc = VINF_SUCCESS;
+ if (pThread->enmState <= PDMTHREADSTATE_TERMINATING)
+ {
+ for (;;)
+ {
+ PDMTHREADSTATE enmState = pThread->enmState;
+ switch (enmState)
+ {
+ case PDMTHREADSTATE_RUNNING:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ rc = pdmR3ThreadWakeUp(pThread);
+ break;
+
+ case PDMTHREADSTATE_SUSPENDED:
+ case PDMTHREADSTATE_SUSPENDING:
+ case PDMTHREADSTATE_RESUMING:
+ case PDMTHREADSTATE_INITIALIZING:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ break;
+
+ case PDMTHREADSTATE_TERMINATING:
+ case PDMTHREADSTATE_TERMINATED:
+ break;
+
+ default:
+ AssertMsgFailed(("enmState=%d\n", enmState));
+ rc = VERR_PDM_THREAD_IPE_2;
+ break;
+ }
+ break;
+ }
+ }
+ int rc2 = RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
+ AssertRC(rc2);
+
+ /*
+ * Wait for it to terminate and then do the cleanups.
+ */
+ rc2 = RTThreadWait(pThread->Thread, RT_SUCCESS(rc) ? 60*1000 : 150, pRcThread);
+ if (RT_SUCCESS(rc2))
+ {
+ /* make it invalid. */
+ pThread->u32Version = 0xffffffff;
+ pThread->enmState = PDMTHREADSTATE_INVALID;
+ pThread->Thread = NIL_RTTHREAD;
+
+ /* unlink */
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ if (pUVM->pdm.s.pThreads == pThread)
+ {
+ pUVM->pdm.s.pThreads = pThread->Internal.s.pNext;
+ if (!pThread->Internal.s.pNext)
+ pUVM->pdm.s.pThreadsTail = NULL;
+ }
+ else
+ {
+ PPDMTHREAD pPrev = pUVM->pdm.s.pThreads;
+ while (pPrev && pPrev->Internal.s.pNext != pThread)
+ pPrev = pPrev->Internal.s.pNext;
+ Assert(pPrev);
+ if (pPrev)
+ pPrev->Internal.s.pNext = pThread->Internal.s.pNext;
+ if (!pThread->Internal.s.pNext)
+ pUVM->pdm.s.pThreadsTail = pPrev;
+ }
+ pThread->Internal.s.pNext = NULL;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ /* free the resources */
+ RTSemEventMultiDestroy(pThread->Internal.s.BlockEvent);
+ pThread->Internal.s.BlockEvent = NIL_RTSEMEVENTMULTI;
+
+ RTSemEventMultiDestroy(pThread->Internal.s.SleepEvent);
+ pThread->Internal.s.SleepEvent = NIL_RTSEMEVENTMULTI;
+
+ MMR3HeapFree(pThread);
+ }
+ else if (RT_SUCCESS(rc))
+ rc = rc2;
+
+ return rc;
+}
+
+
+/**
+ * Destroys all threads associated with a device.
+ *
+ * This function is called by PDMDevice when a device is
+ * destroyed (not currently implemented).
+ *
+ * @returns VBox status code of the first failure.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns the device instance.
+ */
+int pdmR3ThreadDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = pVM->pUVM;
+
+ AssertPtr(pDevIns);
+
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMTHREAD pThread = pUVM->pdm.s.pThreads;
+ while (pThread)
+ {
+ PPDMTHREAD pNext = pThread->Internal.s.pNext;
+ if ( pThread->Internal.s.enmType == PDMTHREADTYPE_DEVICE
+ && pThread->u.Dev.pDevIns == pDevIns)
+ {
+ int rc2 = PDMR3ThreadDestroy(pThread, NULL);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ pThread = pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Destroys all threads associated with a USB device.
+ *
+ * This function is called by PDMUsb when a device is destroyed.
+ *
+ * @returns VBox status code of the first failure.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance.
+ */
+int pdmR3ThreadDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns)
+{
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = pVM->pUVM;
+
+ AssertPtr(pUsbIns);
+
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMTHREAD pThread = pUVM->pdm.s.pThreads;
+ while (pThread)
+ {
+ PPDMTHREAD pNext = pThread->Internal.s.pNext;
+ if ( pThread->Internal.s.enmType == PDMTHREADTYPE_USB
+ && pThread->u.Usb.pUsbIns == pUsbIns)
+ {
+ int rc2 = PDMR3ThreadDestroy(pThread, NULL);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ pThread = pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Destroys all threads associated with a driver.
+ *
+ * This function is called by PDMDriver when a driver is destroyed.
+ *
+ * @returns VBox status code of the first failure.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance.
+ */
+int pdmR3ThreadDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = pVM->pUVM;
+
+ AssertPtr(pDrvIns);
+
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMTHREAD pThread = pUVM->pdm.s.pThreads;
+ while (pThread)
+ {
+ PPDMTHREAD pNext = pThread->Internal.s.pNext;
+ if ( pThread->Internal.s.enmType == PDMTHREADTYPE_DRIVER
+ && pThread->u.Drv.pDrvIns == pDrvIns)
+ {
+ int rc2 = PDMR3ThreadDestroy(pThread, NULL);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ pThread = pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Called for VM power off.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pdmR3ThreadDestroyAll(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMTHREAD pThread = pUVM->pdm.s.pThreads;
+ while (pThread)
+ {
+ PPDMTHREAD pNext = pThread->Internal.s.pNext;
+ int rc2 = PDMR3ThreadDestroy(pThread, NULL);
+ AssertRC(rc2);
+ pThread = pNext;
+ }
+ Assert(!pUVM->pdm.s.pThreads && !pUVM->pdm.s.pThreadsTail);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+}
+
+
+/**
+ * Initiate termination of the thread (self) because something failed in a bad way.
+ *
+ * @param pThread The PDM thread.
+ */
+static void pdmR3ThreadBailMeOut(PPDMTHREAD pThread)
+{
+ for (;;)
+ {
+ PDMTHREADSTATE enmState = pThread->enmState;
+ switch (enmState)
+ {
+ case PDMTHREADSTATE_SUSPENDING:
+ case PDMTHREADSTATE_SUSPENDED:
+ case PDMTHREADSTATE_RESUMING:
+ case PDMTHREADSTATE_RUNNING:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ break;
+
+ case PDMTHREADSTATE_TERMINATING:
+ case PDMTHREADSTATE_TERMINATED:
+ break;
+
+ case PDMTHREADSTATE_INITIALIZING:
+ default:
+ AssertMsgFailed(("enmState=%d\n", enmState));
+ break;
+ }
+ break;
+ }
+}
+
+
+/**
+ * Called by the PDM thread in response to a wakeup call with
+ * suspending as the new state.
+ *
+ * The thread will block inside this call until the state is changed in
+ * response to a VM state change or to the device/driver/whatever calling the
+ * PDMR3ThreadResume API.
+ *
+ * @returns VBox status code.
+ * On failure, termination of the thread is initiated.
+ * @param pThread The PDM thread.
+ */
+VMMR3DECL(int) PDMR3ThreadIAmSuspending(PPDMTHREAD pThread)
+{
+ /*
+ * Assert sanity.
+ */
+ AssertPtr(pThread);
+ AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
+ Assert(pThread->Thread == RTThreadSelf() || pThread->enmState == PDMTHREADSTATE_INITIALIZING);
+ PDMTHREADSTATE enmState = pThread->enmState;
+ Assert( enmState == PDMTHREADSTATE_SUSPENDING
+ || enmState == PDMTHREADSTATE_INITIALIZING);
+
+ /*
+ * Update the state, notify the control thread (the API caller) and go to sleep.
+ */
+ int rc = VERR_WRONG_ORDER;
+ if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_SUSPENDED, enmState))
+ {
+ rc = RTThreadUserSignal(pThread->Thread);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemEventMultiWait(pThread->Internal.s.BlockEvent, RT_INDEFINITE_WAIT);
+ if ( RT_SUCCESS(rc)
+ && pThread->enmState != PDMTHREADSTATE_SUSPENDED)
+ return rc;
+
+ if (RT_SUCCESS(rc))
+ rc = VERR_PDM_THREAD_IPE_2;
+ }
+ }
+
+ AssertMsgFailed(("rc=%d enmState=%d\n", rc, pThread->enmState));
+ pdmR3ThreadBailMeOut(pThread);
+ return rc;
+}
+
+
+/**
+ * Called by the PDM thread in response to a resuming state.
+ *
+ * The purpose of this API is to tell the PDMR3ThreadResume caller that
+ * the PDM thread has successfully resumed. It will also do the
+ * state transition from the resuming to the running state.
+ *
+ * @returns VBox status code.
+ * On failure, termination of the thread is initiated.
+ * @param pThread The PDM thread.
+ */
+VMMR3DECL(int) PDMR3ThreadIAmRunning(PPDMTHREAD pThread)
+{
+ /*
+ * Assert sanity.
+ */
+ Assert(pThread->enmState == PDMTHREADSTATE_RESUMING);
+ Assert(pThread->Thread == RTThreadSelf());
+
+ /*
+ * Update the state and tell the control thread (the guy calling the resume API).
+ */
+ int rc = VERR_WRONG_ORDER;
+ if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_RUNNING, PDMTHREADSTATE_RESUMING))
+ {
+ rc = RTThreadUserSignal(pThread->Thread);
+ if (RT_SUCCESS(rc))
+ return rc;
+ }
+
+ AssertMsgFailed(("rc=%d enmState=%d\n", rc, pThread->enmState));
+ pdmR3ThreadBailMeOut(pThread);
+ return rc;
+}
+
+
+/**
+ * Called by the PDM thread instead of RTThreadSleep.
+ *
+ * The difference is that the sleep will be interrupted on state change. The
+ * thread must be in the running state, otherwise it will return immediately.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success or state change.
+ * @retval VERR_INTERRUPTED on signal or APC.
+ *
+ * @param pThread The PDM thread.
+ * @param cMillies The number of milliseconds to sleep.
+ */
+VMMR3DECL(int) PDMR3ThreadSleep(PPDMTHREAD pThread, RTMSINTERVAL cMillies)
+{
+ /*
+ * Assert sanity.
+ */
+ AssertReturn(pThread->enmState > PDMTHREADSTATE_INVALID && pThread->enmState < PDMTHREADSTATE_TERMINATED, VERR_PDM_THREAD_IPE_2);
+ AssertReturn(pThread->Thread == RTThreadSelf(), VERR_PDM_THREAD_INVALID_CALLER);
+
+ /*
+ * Reset the event semaphore, check the state and sleep.
+ */
+ RTSemEventMultiReset(pThread->Internal.s.SleepEvent);
+ if (pThread->enmState != PDMTHREADSTATE_RUNNING)
+ return VINF_SUCCESS;
+ return RTSemEventMultiWaitNoResume(pThread->Internal.s.SleepEvent, cMillies);
+}
+
+
+/**
+ * The PDM thread function.
+ *
+ * @returns return from pfnThread.
+ *
+ * @param Thread The thread handle.
+ * @param pvUser Pointer to the PDMTHREAD structure.
+ */
+static DECLCALLBACK(int) pdmR3ThreadMain(RTTHREAD Thread, void *pvUser)
+{
+ PPDMTHREAD pThread = (PPDMTHREAD)pvUser;
+ Log(("PDMThread: Initializing thread %RTthrd / %p / '%s'...\n", Thread, pThread, RTThreadGetName(Thread)));
+ pThread->Thread = Thread;
+
+ PUVM pUVM = pThread->Internal.s.pVM->pUVM;
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyPdmtInit)
+ pUVM->pVmm2UserMethods->pfnNotifyPdmtInit(pUVM->pVmm2UserMethods, pUVM);
+
+ /*
+ * The run loop.
+ *
+ * It handles simple thread functions which return when they see a suspending
+ * request and leave the PDMR3ThreadIAmSuspending and PDMR3ThreadIAmRunning
+ * parts to us.
+ */
+ int rc;
+ for (;;)
+ {
+ switch (pThread->Internal.s.enmType)
+ {
+ case PDMTHREADTYPE_DEVICE:
+ rc = pThread->u.Dev.pfnThread(pThread->u.Dev.pDevIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_USB:
+ rc = pThread->u.Usb.pfnThread(pThread->u.Usb.pUsbIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_DRIVER:
+ rc = pThread->u.Drv.pfnThread(pThread->u.Drv.pDrvIns, pThread);
+ break;
+
+ case PDMTHREADTYPE_INTERNAL:
+ rc = pThread->u.Int.pfnThread(pThread->Internal.s.pVM, pThread);
+ break;
+
+ case PDMTHREADTYPE_EXTERNAL:
+ rc = pThread->u.Ext.pfnThread(pThread);
+ break;
+
+ default:
+ AssertMsgFailed(("%d\n", pThread->Internal.s.enmType));
+ rc = VERR_PDM_THREAD_IPE_1;
+ break;
+ }
+ if (RT_FAILURE(rc))
+ break;
+
+ /*
+ * If this is a simple thread function, the state will be suspending
+ * or initializing now. If it isn't we're supposed to terminate.
+ */
+ if ( pThread->enmState != PDMTHREADSTATE_SUSPENDING
+ && pThread->enmState != PDMTHREADSTATE_INITIALIZING)
+ {
+ Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
+ break;
+ }
+ rc = PDMR3ThreadIAmSuspending(pThread);
+ if (RT_FAILURE(rc))
+ break;
+ if (pThread->enmState != PDMTHREADSTATE_RESUMING)
+ {
+ Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
+ break;
+ }
+
+ rc = PDMR3ThreadIAmRunning(pThread);
+ if (RT_FAILURE(rc))
+ break;
+ }
+
+ if (RT_FAILURE(rc))
+ LogRel(("PDMThread: Thread '%s' (%RTthrd) quit unexpectedly with rc=%Rrc.\n", RTThreadGetName(Thread), Thread, rc));
+
+ /*
+ * Advance the state to terminating and then on to terminated.
+ */
+ for (;;)
+ {
+ PDMTHREADSTATE enmState = pThread->enmState;
+ if ( enmState == PDMTHREADSTATE_TERMINATING
+ || pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ break;
+ }
+
+ ASMAtomicXchgSize(&pThread->enmState, PDMTHREADSTATE_TERMINATED);
+ int rc2 = RTThreadUserSignal(Thread); AssertRC(rc2);
+
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyPdmtTerm)
+ pUVM->pVmm2UserMethods->pfnNotifyPdmtTerm(pUVM->pVmm2UserMethods, pUVM);
+ Log(("PDMThread: Terminating thread %RTthrd / %p / '%s': %Rrc\n", Thread, pThread, RTThreadGetName(Thread), rc));
+ return rc;
+}
+
+
+/**
+ * Initiate termination of the thread because something failed in a bad way.
+ *
+ * @param pThread The PDM thread.
+ */
+static void pdmR3ThreadBailOut(PPDMTHREAD pThread)
+{
+ for (;;)
+ {
+ PDMTHREADSTATE enmState = pThread->enmState;
+ switch (enmState)
+ {
+ case PDMTHREADSTATE_SUSPENDING:
+ case PDMTHREADSTATE_SUSPENDED:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
+ break;
+
+ case PDMTHREADSTATE_RESUMING:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ break;
+
+ case PDMTHREADSTATE_RUNNING:
+ if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
+ continue;
+ pdmR3ThreadWakeUp(pThread);
+ break;
+
+ case PDMTHREADSTATE_TERMINATING:
+ case PDMTHREADSTATE_TERMINATED:
+ break;
+
+ case PDMTHREADSTATE_INITIALIZING:
+ default:
+ AssertMsgFailed(("enmState=%d\n", enmState));
+ break;
+ }
+ break;
+ }
+}
+
+
+/**
+ * Suspends the thread.
+ *
+ * This can be called at the power off / suspend notifications to suspend the
+ * PDM thread a bit early. The thread will be automatically suspended upon
+ * completion of the device/driver notification cycle.
+ *
+ * The caller is responsible for serializing the control operations on the
+ * thread. That basically means, always do these calls from the EMT.
+ *
+ * @returns VBox status code.
+ * @param pThread The PDM thread.
+ */
+VMMR3DECL(int) PDMR3ThreadSuspend(PPDMTHREAD pThread)
+{
+ /*
+ * Assert sanity.
+ */
+ AssertPtrReturn(pThread, VERR_INVALID_POINTER);
+ AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
+ Assert(pThread->Thread != RTThreadSelf());
+
+ /*
+ * This is a noop if the thread is already suspended.
+ */
+ if (pThread->enmState == PDMTHREADSTATE_SUSPENDED)
+ return VINF_SUCCESS;
+
+ /*
+ * Change the state to suspending and kick the thread.
+ */
+ int rc = RTSemEventMultiReset(pThread->Internal.s.BlockEvent);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTThreadUserReset(pThread->Thread);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VERR_WRONG_ORDER;
+ if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_SUSPENDING, PDMTHREADSTATE_RUNNING))
+ {
+ rc = pdmR3ThreadWakeUp(pThread);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Wait for the thread to reach the suspended state.
+ */
+ if (pThread->enmState != PDMTHREADSTATE_SUSPENDED)
+ rc = RTThreadUserWait(pThread->Thread, 60*1000);
+ if ( RT_SUCCESS(rc)
+ && pThread->enmState != PDMTHREADSTATE_SUSPENDED)
+ rc = VERR_PDM_THREAD_IPE_2;
+ if (RT_SUCCESS(rc))
+ return rc;
+ }
+ }
+ }
+ }
+
+ /*
+ * Something failed, initialize termination.
+ */
+ AssertMsgFailed(("PDMR3ThreadSuspend -> rc=%Rrc enmState=%d suspending '%s'\n",
+ rc, pThread->enmState, RTThreadGetName(pThread->Thread)));
+ pdmR3ThreadBailOut(pThread);
+ return rc;
+}
+
+
+/**
+ * Suspend all running threads.
+ *
+ * This is called by PDMR3Suspend() and PDMR3PowerOff() after all the devices
+ * and drivers have been notified about the suspend / power off.
+ *
+ * @return VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3ThreadSuspendAll(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect); /* This may cause deadlocks later... */
+ for (PPDMTHREAD pThread = pUVM->pdm.s.pThreads; pThread; pThread = pThread->Internal.s.pNext)
+ switch (pThread->enmState)
+ {
+ case PDMTHREADSTATE_RUNNING:
+ {
+ int rc = PDMR3ThreadSuspend(pThread);
+ AssertLogRelMsgReturnStmt(RT_SUCCESS(rc),
+ ("PDMR3ThreadSuspend -> %Rrc for '%s'\n", rc, RTThreadGetName(pThread->Thread)),
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect),
+ rc);
+ break;
+ }
+
+ /* suspend -> power off; voluntary suspend. */
+ case PDMTHREADSTATE_SUSPENDED:
+ break;
+
+ default:
+ AssertMsgFailed(("pThread=%p enmState=%d\n", pThread, pThread->enmState));
+ break;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resumes the thread.
+ *
+ * This can be called at the power on / resume notifications to resume the
+ * PDM thread a bit early. The thread will be automatically resumed upon
+ * return from these two notification callbacks (devices/drivers).
+ *
+ * The caller is responsible for serializing the control operations on the
+ * thread. That basically means, always do these calls from the EMT.
+ *
+ * @returns VBox status code.
+ * @param pThread The PDM thread.
+ */
+VMMR3DECL(int) PDMR3ThreadResume(PPDMTHREAD pThread)
+{
+ /*
+ * Assert sanity.
+ */
+ AssertPtrReturn(pThread, VERR_INVALID_POINTER);
+ AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
+ Assert(pThread->Thread != RTThreadSelf());
+
+ /*
+ * Change the state to resuming and kick the thread.
+ */
+ int rc = RTThreadUserReset(pThread->Thread);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VERR_WRONG_ORDER;
+ if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_RESUMING, PDMTHREADSTATE_SUSPENDED))
+ {
+ rc = RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Wait for the thread to reach the running state.
+ */
+ rc = RTThreadUserWait(pThread->Thread, 60*1000);
+ if ( RT_SUCCESS(rc)
+ && pThread->enmState != PDMTHREADSTATE_RUNNING)
+ rc = VERR_PDM_THREAD_IPE_2;
+ if (RT_SUCCESS(rc))
+ return rc;
+ }
+ }
+ }
+
+ /*
+ * Something failed, initialize termination.
+ */
+ AssertMsgFailed(("PDMR3ThreadResume -> rc=%Rrc enmState=%d\n", rc, pThread->enmState));
+ pdmR3ThreadBailOut(pThread);
+ return rc;
+}
+
+
+/**
+ * Resumes all threads not running.
+ *
+ * This is called by PDMR3Resume() and PDMR3PowerOn() after all the devices
+ * and drivers have been notified about the resume / power on.
+ *
+ * @return VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3ThreadResumeAll(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ for (PPDMTHREAD pThread = pUVM->pdm.s.pThreads; pThread; pThread = pThread->Internal.s.pNext)
+ switch (pThread->enmState)
+ {
+ case PDMTHREADSTATE_SUSPENDED:
+ {
+ int rc = PDMR3ThreadResume(pThread);
+ AssertRCReturn(rc, rc);
+ break;
+ }
+
+ default:
+ AssertMsgFailed(("pThread=%p enmState=%d\n", pThread, pThread->enmState));
+ break;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PDMUsb.cpp b/src/VBox/VMM/VMMR3/PDMUsb.cpp
new file mode 100644
index 00000000..d198907a
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PDMUsb.cpp
@@ -0,0 +1,2421 @@
+/* $Id: PDMUsb.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, USB part.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DRIVER
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vusb.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/version.h>
+#include <VBox/err.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/string.h>
+#include <iprt/asm.h>
+#include <iprt/alloc.h>
+#include <iprt/alloca.h>
+#include <iprt/path.h>
+#include <iprt/uuid.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Internal callback structure pointer.
+ *
+ * The main purpose is to define the extra data we associate
+ * with PDMUSBREGCB so we can find the VM instance and so on.
+ */
+typedef struct PDMUSBREGCBINT
+{
+ /** The callback structure. */
+ PDMUSBREGCB Core;
+ /** A bit of padding. */
+ uint32_t u32[4];
+ /** VM Handle. */
+ PVM pVM;
+} PDMUSBREGCBINT, *PPDMUSBREGCBINT;
+typedef const PDMUSBREGCBINT *PCPDMUSBREGCBINT;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @def PDMUSB_ASSERT_USBINS
+ * Asserts the validity of the USB device instance.
+ */
+#ifdef VBOX_STRICT
+# define PDMUSB_ASSERT_USBINS(pUsbIns) \
+ do { \
+ AssertPtr(pUsbIns); \
+ Assert(pUsbIns->u32Version == PDM_USBINS_VERSION); \
+ Assert(pUsbIns->pvInstanceDataR3 == (void *)&pUsbIns->achInstanceData[0]); \
+ } while (0)
+#else
+# define PDMUSB_ASSERT_USBINS(pUsbIns) do { } while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void pdmR3UsbDestroyDevice(PVM pVM, PPDMUSBINS pUsbIns);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+extern const PDMUSBHLP g_pdmR3UsbHlp;
+
+
+AssertCompile(sizeof(PDMUSBINSINT) <= RT_SIZEOFMEMB(PDMUSBINS, Internal.padding));
+
+
+/**
+ * Registers a USB hub driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver instance of the hub.
+ * @param fVersions Indicates the kinds of USB devices that can be attached to this HUB.
+ * @param cPorts The number of ports.
+ * @param pUsbHubReg The hub callback structure that PDMUsb uses to interact with it.
+ * @param ppUsbHubHlp The helper callback structure that the hub uses to talk to PDMUsb.
+ * @thread EMT
+ */
+int pdmR3UsbRegisterHub(PVM pVM, PPDMDRVINS pDrvIns, uint32_t fVersions, uint32_t cPorts, PCPDMUSBHUBREG pUsbHubReg, PPCPDMUSBHUBHLP ppUsbHubHlp)
+{
+ /*
+ * Validate input.
+ */
+ /* The driver must be in the USB class. */
+ if (!(pDrvIns->pReg->fClass & PDM_DRVREG_CLASS_USB))
+ {
+ LogRel(("PDMUsb: pdmR3UsbRegisterHub: fClass=%#x expected %#x to be set\n", pDrvIns->pReg->fClass, PDM_DRVREG_CLASS_USB));
+ return VERR_INVALID_PARAMETER;
+ }
+ AssertMsgReturn(!(fVersions & ~(VUSB_STDVER_11 | VUSB_STDVER_20 | VUSB_STDVER_30)), ("%#x\n", fVersions), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppUsbHubHlp, VERR_INVALID_POINTER);
+ AssertPtrReturn(pUsbHubReg, VERR_INVALID_POINTER);
+ AssertReturn(pUsbHubReg->u32Version == PDM_USBHUBREG_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pUsbHubReg->u32TheEnd == PDM_USBHUBREG_VERSION, VERR_INVALID_MAGIC);
+ AssertPtrReturn(pUsbHubReg->pfnAttachDevice, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pUsbHubReg->pfnDetachDevice, VERR_INVALID_PARAMETER);
+
+ /*
+ * Check for duplicate registration and find the last hub for FIFO registration.
+ */
+ PPDMUSBHUB pPrev = NULL;
+ for (PPDMUSBHUB pCur = pVM->pdm.s.pUsbHubs; pCur; pCur = pCur->pNext)
+ {
+ if (pCur->pDrvIns == pDrvIns)
+ return VERR_PDM_USB_HUB_EXISTS;
+ pPrev = pCur;
+ }
+
+ /*
+ * Create an internal USB hub structure.
+ */
+ PPDMUSBHUB pHub = (PPDMUSBHUB)MMR3HeapAlloc(pVM, MM_TAG_PDM_DRIVER, sizeof(*pHub));
+ if (!pHub)
+ return VERR_NO_MEMORY;
+
+ pHub->fVersions = fVersions;
+ pHub->cPorts = cPorts;
+ pHub->cAvailablePorts = cPorts;
+ pHub->pDrvIns = pDrvIns;
+ pHub->Reg = *pUsbHubReg;
+ pHub->pNext = NULL;
+
+ /* link it */
+ if (pPrev)
+ pPrev->pNext = pHub;
+ else
+ pVM->pdm.s.pUsbHubs = pHub;
+
+ Log(("PDM: Registered USB hub %p/%s\n", pDrvIns, pDrvIns->pReg->szName));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads one device module and calls the registration entry point.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegCB The registration callback stuff.
+ * @param pszFilename Module filename.
+ * @param pszName Module name.
+ */
+static int pdmR3UsbLoad(PVM pVM, PCPDMUSBREGCBINT pRegCB, const char *pszFilename, const char *pszName)
+{
+ /*
+ * Load it.
+ */
+ int rc = pdmR3LoadR3U(pVM->pUVM, pszFilename, pszName);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Get the registration export and call it.
+ */
+ FNPDMVBOXUSBREGISTER *pfnVBoxUsbRegister;
+ rc = PDMR3LdrGetSymbolR3(pVM, pszName, "VBoxUsbRegister", (void **)&pfnVBoxUsbRegister);
+ if (RT_SUCCESS(rc))
+ {
+ Log(("PDM: Calling VBoxUsbRegister (%p) of %s (%s)\n", pfnVBoxUsbRegister, pszName, pszFilename));
+ rc = pfnVBoxUsbRegister(&pRegCB->Core, VBOX_VERSION);
+ if (RT_SUCCESS(rc))
+ Log(("PDM: Successfully loaded device module %s (%s).\n", pszName, pszFilename));
+ else
+ AssertMsgFailed(("VBoxDevicesRegister failed with rc=%Rrc for module %s (%s)\n", rc, pszName, pszFilename));
+ }
+ else
+ {
+ AssertMsgFailed(("Failed to locate 'VBoxUsbRegister' in %s (%s) rc=%Rrc\n", pszName, pszFilename, rc));
+ if (rc == VERR_SYMBOL_NOT_FOUND)
+ rc = VERR_PDM_NO_REGISTRATION_EXPORT;
+ }
+ }
+ else
+ AssertMsgFailed(("Failed to load VBoxDD!\n"));
+ return rc;
+}
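+
+/*
+ * Sketch (illustrative only) of the registration entry point such a module
+ * exports; g_UsbSampleDev stands in for the module's PDMUSBREG instance.
+ */
+#if 0
+extern "C" DECLEXPORT(int) VBoxUsbRegister(PCPDMUSBREGCB pCallbacks, uint32_t u32Version)
+{
+ AssertLogRelMsgReturn(u32Version == VBOX_VERSION, ("u32Version=%#x\n", u32Version), VERR_VERSION_MISMATCH);
+ return pCallbacks->pfnRegister(pCallbacks, &g_UsbSampleDev);
+}
+#endif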
+
+
+
+/**
+ * @interface_method_impl{PDMUSBREGCB,pfnRegister}
+ */
+static DECLCALLBACK(int) pdmR3UsbReg_Register(PCPDMUSBREGCB pCallbacks, PCPDMUSBREG pReg)
+{
+ /*
+ * Validate the registration structure.
+ */
+ Assert(pReg);
+ AssertMsgReturn(pReg->u32Version == PDM_USBREG_VERSION,
+ ("Unknown struct version %#x!\n", pReg->u32Version),
+ VERR_PDM_UNKNOWN_USBREG_VERSION);
+ AssertMsgReturn( pReg->szName[0]
+ && strlen(pReg->szName) < sizeof(pReg->szName)
+ && pdmR3IsValidName(pReg->szName),
+ ("Invalid name '%.*s'\n", sizeof(pReg->szName), pReg->szName),
+ VERR_PDM_INVALID_USB_REGISTRATION);
+ AssertMsgReturn((pReg->fFlags & ~(PDM_USBREG_HIGHSPEED_CAPABLE | PDM_USBREG_SUPERSPEED_CAPABLE | PDM_USBREG_SAVED_STATE_SUPPORTED)) == 0,
+ ("fFlags=%#x\n", pReg->fFlags), VERR_PDM_INVALID_USB_REGISTRATION);
+ AssertMsgReturn(pReg->cMaxInstances > 0,
+ ("Max instances %u! (USB Device %s)\n", pReg->cMaxInstances, pReg->szName),
+ VERR_PDM_INVALID_USB_REGISTRATION);
+ AssertMsgReturn(pReg->cbInstance <= _1M,
+ ("Instance size %d bytes! (USB Device %s)\n", pReg->cbInstance, pReg->szName),
+ VERR_PDM_INVALID_USB_REGISTRATION);
+ AssertMsgReturn(pReg->pfnConstruct, ("No constructor! (USB Device %s)\n", pReg->szName),
+ VERR_PDM_INVALID_USB_REGISTRATION);
+
+ /*
+ * Check for duplicate and find FIFO entry at the same time.
+ */
+ PCPDMUSBREGCBINT pRegCB = (PCPDMUSBREGCBINT)pCallbacks;
+ PPDMUSB pUsbPrev = NULL;
+ PPDMUSB pUsb = pRegCB->pVM->pdm.s.pUsbDevs;
+ for (; pUsb; pUsbPrev = pUsb, pUsb = pUsb->pNext)
+ AssertMsgReturn(strcmp(pUsb->pReg->szName, pReg->szName),
+ ("USB Device '%s' already exists\n", pReg->szName),
+ VERR_PDM_USB_NAME_CLASH);
+
+ /*
+ * Allocate new device structure and insert it into the list.
+ */
+ pUsb = (PPDMUSB)MMR3HeapAlloc(pRegCB->pVM, MM_TAG_PDM_DEVICE, sizeof(*pUsb));
+ if (pUsb)
+ {
+ pUsb->pNext = NULL;
+ pUsb->iNextInstance = 0;
+ pUsb->pInstances = NULL;
+ pUsb->pReg = pReg;
+ pUsb->cchName = (RTUINT)strlen(pReg->szName);
+
+ if (pUsbPrev)
+ pUsbPrev->pNext = pUsb;
+ else
+ pRegCB->pVM->pdm.s.pUsbDevs = pUsb;
+ Log(("PDM: Registered USB device '%s'\n", pReg->szName));
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Load USB Device modules.
+ *
+ * This is called by pdmR3DevInit() after it has loaded its device modules.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3UsbLoadModules(PVM pVM)
+{
+ LogFlow(("pdmR3UsbLoadModules:\n"));
+
+ AssertRelease(!(RT_UOFFSETOF(PDMUSBINS, achInstanceData) & 15));
+ AssertRelease(sizeof(pVM->pdm.s.pUsbInstances->Internal.s) <= sizeof(pVM->pdm.s.pUsbInstances->Internal.padding));
+
+ /*
+ * Initialize the callback structure.
+ */
+ PDMUSBREGCBINT RegCB;
+ RegCB.Core.u32Version = PDM_USBREG_CB_VERSION;
+ RegCB.Core.pfnRegister = pdmR3UsbReg_Register;
+ RegCB.pVM = pVM;
+
+ /*
+ * Load the builtin module
+ */
+ PCFGMNODE pUsbNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM/USB/");
+ bool fLoadBuiltin;
+ int rc = CFGMR3QueryBool(pUsbNode, "LoadBuiltin", &fLoadBuiltin);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
+ fLoadBuiltin = true;
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Configuration error: Querying boolean \"LoadBuiltin\" failed with %Rrc\n", rc));
+ return rc;
+ }
+ if (fLoadBuiltin)
+ {
+ /* make filename */
+ char *pszFilename = pdmR3FileR3("VBoxDD", true /*fShared*/);
+ if (!pszFilename)
+ return VERR_NO_TMP_MEMORY;
+ rc = pdmR3UsbLoad(pVM, &RegCB, pszFilename, "VBoxDD");
+ RTMemTmpFree(pszFilename);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Load additional device modules.
+ */
+ PCFGMNODE pCur;
+ for (pCur = CFGMR3GetFirstChild(pUsbNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /*
+ * Get the name and path.
+ */
+ char szName[PDMMOD_NAME_LEN];
+ rc = CFGMR3GetName(pCur, &szName[0], sizeof(szName));
+ if (rc == VERR_CFGM_NOT_ENOUGH_SPACE)
+ {
+ AssertMsgFailed(("configuration error: The module name is too long, cchName=%zu.\n", CFGMR3GetNameLen(pCur)));
+ return VERR_PDM_MODULE_NAME_TOO_LONG;
+ }
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("CFGMR3GetName -> %Rrc.\n", rc));
+ return rc;
+ }
+
+ /* The path is optional; if absent, the module name is used and the default path is prepended below. */
+ char szFilename[RTPATH_MAX];
+ rc = CFGMR3QueryString(pCur, "Path", &szFilename[0], sizeof(szFilename));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ strcpy(szFilename, szName);
+ else if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("configuration error: Failure to query the module path, rc=%Rrc.\n", rc));
+ return rc;
+ }
+
+ /* prepend path? */
+ if (!RTPathHavePath(szFilename))
+ {
+ char *psz = pdmR3FileR3(szFilename, false /*fShared*/);
+ if (!psz)
+ return VERR_NO_TMP_MEMORY;
+ size_t cch = strlen(psz) + 1;
+ if (cch > sizeof(szFilename))
+ {
+ RTMemTmpFree(psz);
+ AssertMsgFailed(("Filename too long! cch=%d '%s'\n", cch, psz));
+ return VERR_FILENAME_TOO_LONG;
+ }
+ memcpy(szFilename, psz, cch);
+ RTMemTmpFree(psz);
+ }
+
+ /*
+ * Load the module and register its devices.
+ */
+ rc = pdmR3UsbLoad(pVM, &RegCB, szFilename, szName);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
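+
+/* Illustrative sketch (not part of the original source): the CFGM layout consumed
+ * by pdmR3UsbLoadModules() above. The key names are taken from the queries in the
+ * function; the module name "MyUsbModule" and its path are made-up examples.
+ *
+ *     PDM/USB/
+ *       LoadBuiltin = 1                    ; optional boolean, defaults to true
+ *       MyUsbModule/
+ *         Path = "/opt/vbox/MyUsbModule"   ; optional, otherwise the module name
+ *                                          ; is resolved via pdmR3FileR3()
+ */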
+
+
+/**
+ * Send the init-complete notification to all the USB devices.
+ *
+ * This is called from pdmR3DevInit() after it has done its notification round.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3UsbVMInitComplete(PVM pVM)
+{
+ for (PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ {
+ if (pUsbIns->pReg->pfnVMInitComplete)
+ {
+ int rc = pUsbIns->pReg->pfnVMInitComplete(pUsbIns);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("InitComplete on USB device '%s'/%d failed with rc=%Rrc\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+ }
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Looks up a device structure by name.
+ * @internal
+ */
+PPDMUSB pdmR3UsbLookup(PVM pVM, const char *pszName)
+{
+ size_t cchName = strlen(pszName);
+ for (PPDMUSB pUsb = pVM->pdm.s.pUsbDevs; pUsb; pUsb = pUsb->pNext)
+ if ( pUsb->cchName == cchName
+ && !strcmp(pUsb->pReg->szName, pszName))
+ return pUsb;
+ return NULL;
+}
+
+
+/**
+ * Locates a suitable hub for the specified kind of device.
+ *
+ * @returns VINF_SUCCESS and *ppHub on success.
+ * VERR_PDM_NO_USB_HUBS or VERR_PDM_NO_USB_PORTS on failure.
+ * @param pVM The cross context VM structure.
+ * @param iUsbVersion The USB device version.
+ * @param ppHub Where to store the pointer to the USB hub.
+ */
+static int pdmR3UsbFindHub(PVM pVM, uint32_t iUsbVersion, PPDMUSBHUB *ppHub)
+{
+ *ppHub = NULL;
+ if (!pVM->pdm.s.pUsbHubs)
+ return VERR_PDM_NO_USB_HUBS;
+
+ for (PPDMUSBHUB pCur = pVM->pdm.s.pUsbHubs; pCur; pCur = pCur->pNext)
+ if (pCur->cAvailablePorts > 0)
+ {
+ /* First check for an exact match. */
+ if (pCur->fVersions & iUsbVersion)
+ {
+ *ppHub = pCur;
+ break;
+ }
+ /* For high-speed USB 2.0 devices only, allow USB 1.1 fallback. */
+ if ((iUsbVersion & VUSB_STDVER_20) && (pCur->fVersions == VUSB_STDVER_11))
+ *ppHub = pCur;
+ }
+ if (*ppHub)
+ return VINF_SUCCESS;
+ return VERR_PDM_NO_USB_PORTS;
+}
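+
+/* Usage sketch (illustrative only): placing a high-speed capable device with the
+ * search above. If no hub matching VUSB_STDVER_20 has a free port, the loop may
+ * fall back to a pure USB 1.1 hub.
+ *
+ *     PPDMUSBHUB pHub;
+ *     int rc = pdmR3UsbFindHub(pVM, VUSB_STDVER_11 | VUSB_STDVER_20, &pHub);
+ *     if (RT_SUCCESS(rc))
+ *         Log(("hub=%p fVersions=%#x\n", pHub, pHub->fVersions));
+ */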
+
+
+/**
+ * Translates a USB version (a bit-mask) to USB speed (enum). Picks
+ * the highest available version.
+ *
+ * @returns VUSBSPEED enum
+ *
+ * @param iUsbVersion The USB version.
+ *
+ */
+static VUSBSPEED pdmR3UsbVer2Spd(uint32_t iUsbVersion)
+{
+ VUSBSPEED enmSpd = VUSB_SPEED_UNKNOWN;
+ Assert(iUsbVersion);
+
+ if (iUsbVersion & VUSB_STDVER_30)
+ enmSpd = VUSB_SPEED_SUPER;
+ else if (iUsbVersion & VUSB_STDVER_20)
+ enmSpd = VUSB_SPEED_HIGH;
+ else if (iUsbVersion & VUSB_STDVER_11)
+ enmSpd = VUSB_SPEED_FULL; /* Can't distinguish LS vs. FS. */
+
+ return enmSpd;
+}
+
+
+/**
+ * Translates a USB speed (enum) to USB version.
+ *
+ * @returns USB version mask
+ *
+ * @param enmSpeed The USB connection speed.
+ *
+ */
+static uint32_t pdmR3UsbSpd2Ver(VUSBSPEED enmSpeed)
+{
+ uint32_t iUsbVersion = 0;
+ Assert(enmSpeed != VUSB_SPEED_UNKNOWN);
+
+ switch (enmSpeed)
+ {
+ case VUSB_SPEED_LOW:
+ case VUSB_SPEED_FULL:
+ iUsbVersion = VUSB_STDVER_11;
+ break;
+ case VUSB_SPEED_HIGH:
+ iUsbVersion = VUSB_STDVER_20;
+ break;
+ case VUSB_SPEED_SUPER:
+ case VUSB_SPEED_SUPERPLUS:
+ default:
+ iUsbVersion = VUSB_STDVER_30;
+ break;
+ }
+
+ return iUsbVersion;
+}
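+
+/* Round-trip note (descriptive, not from the original source): for the two helpers
+ * above, pdmR3UsbSpd2Ver(VUSB_SPEED_HIGH) yields VUSB_STDVER_20, and feeding that
+ * mask back into pdmR3UsbVer2Spd() picks the highest set version and returns
+ * VUSB_SPEED_HIGH again. The round trip is lossy for low speed: VUSB_SPEED_LOW
+ * maps to VUSB_STDVER_11, which translates back as VUSB_SPEED_FULL.
+ */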
+
+
+/**
+ * Creates the device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pHub The USB hub it'll be attached to.
+ * @param pUsbDev The USB device emulation.
+ * @param iInstance -1 if not called by pdmR3UsbInstantiateDevices().
+ * @param pUuid The UUID for this device.
+ * @param ppInstanceNode Pointer to the instance CFGM node pointer. When called
+ * at runtime (iInstance == -1) the node is consumed, i.e. inserted
+ * into the tree or cleaned up, and *ppInstanceNode is set to NULL.
+ *
+ * In the pdmR3UsbInstantiateDevices() case (iInstance != -1) this is
+ * the actual instance node and will not be cleaned up.
+ *
+ * @param enmSpeed The speed the USB device is operating at.
+ * @param pszCaptureFilename Path to the file for USB traffic capturing, optional.
+ */
+static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int iInstance, PCRTUUID pUuid,
+ PCFGMNODE *ppInstanceNode, VUSBSPEED enmSpeed, const char *pszCaptureFilename)
+{
+ int rc;
+
+ AssertPtrReturn(ppInstanceNode, VERR_INVALID_POINTER);
+ AssertPtrReturn(*ppInstanceNode, VERR_INVALID_POINTER);
+
+ /*
+ * If not called by pdmR3UsbInstantiateDevices(), we'll have to fix
+ * the configuration now.
+ */
+ /* USB device node. */
+ PCFGMNODE pDevNode = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "USB/%s/", pUsbDev->pReg->szName);
+ if (!pDevNode)
+ {
+ rc = CFGMR3InsertNodeF(CFGMR3GetRoot(pVM), &pDevNode, "USB/%s/", pUsbDev->pReg->szName);
+ AssertRCReturn(rc, rc);
+ }
+
+ /* The instance node and number. */
+ PCFGMNODE pInstanceToDelete = NULL;
+ PCFGMNODE pInstanceNode = NULL;
+ if (iInstance == -1)
+ {
+ /** @todo r=bird: This code is bogus as it ASSUMES that all USB devices are
+ * capable of infinite number of instances. */
+ rc = VINF_SUCCESS; /* Shut up stupid incorrect uninitialized warning from Visual C++ 2010. */
+ for (unsigned c = 0; c < _2M; c++)
+ {
+ iInstance = pUsbDev->iNextInstance++;
+ rc = CFGMR3InsertNodeF(pDevNode, &pInstanceNode, "%d/", iInstance);
+ if (rc != VERR_CFGM_NODE_EXISTS)
+ break;
+ }
+ AssertRCReturn(rc, rc);
+
+ rc = CFGMR3ReplaceSubTree(pInstanceNode, *ppInstanceNode);
+ AssertRCReturn(rc, rc);
+ *ppInstanceNode = NULL;
+ pInstanceToDelete = pInstanceNode;
+ }
+ else
+ {
+ Assert(iInstance >= 0);
+ if (iInstance >= (int)pUsbDev->iNextInstance)
+ pUsbDev->iNextInstance = iInstance + 1;
+ pInstanceNode = *ppInstanceNode;
+ }
+
+ /* Make sure the instance config node exists. */
+ PCFGMNODE pConfig = CFGMR3GetChild(pInstanceNode, "Config");
+ if (!pConfig)
+ {
+ rc = CFGMR3InsertNode(pInstanceNode, "Config", &pConfig);
+ AssertRCReturn(rc, rc);
+ }
+ Assert(CFGMR3GetChild(pInstanceNode, "Config") == pConfig);
+
+ /* The global device config node. */
+ PCFGMNODE pGlobalConfig = CFGMR3GetChild(pDevNode, "GlobalConfig");
+ if (!pGlobalConfig)
+ {
+ rc = CFGMR3InsertNode(pDevNode, "GlobalConfig", &pGlobalConfig);
+ if (RT_FAILURE(rc))
+ {
+ CFGMR3RemoveNode(pInstanceToDelete);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ /*
+ * Allocate the device instance.
+ */
+ size_t cb = RT_UOFFSETOF_DYN(PDMUSBINS, achInstanceData[pUsbDev->pReg->cbInstance]);
+ cb = RT_ALIGN_Z(cb, 16);
+ PPDMUSBINS pUsbIns;
+ rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_USB, cb, (void **)&pUsbIns);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Failed to allocate %d bytes of instance data for USB device '%s'. rc=%Rrc\n",
+ cb, pUsbDev->pReg->szName, rc));
+ CFGMR3RemoveNode(pInstanceToDelete);
+ return rc;
+ }
+
+ /*
+ * Initialize it.
+ */
+ pUsbIns->u32Version = PDM_USBINS_VERSION;
+ //pUsbIns->Internal.s.pNext = NULL;
+ //pUsbIns->Internal.s.pPerDeviceNext = NULL;
+ pUsbIns->Internal.s.pUsbDev = pUsbDev;
+ pUsbIns->Internal.s.pVM = pVM;
+ //pUsbIns->Internal.s.pLuns = NULL;
+ pUsbIns->Internal.s.pCfg = pInstanceNode;
+ pUsbIns->Internal.s.pCfgDelete = pInstanceToDelete;
+ pUsbIns->Internal.s.pCfgGlobal = pGlobalConfig;
+ pUsbIns->Internal.s.Uuid = *pUuid;
+ //pUsbIns->Internal.s.pHub = NULL;
+ pUsbIns->Internal.s.iPort = UINT32_MAX; /* to be determined. */
+ VMSTATE const enmVMState = VMR3GetState(pVM);
+ pUsbIns->Internal.s.fVMSuspended = !VMSTATE_IS_POWERED_ON(enmVMState);
+ //pUsbIns->Internal.s.pfnAsyncNotify = NULL;
+ pUsbIns->pHlpR3 = &g_pdmR3UsbHlp;
+ pUsbIns->pReg = pUsbDev->pReg;
+ pUsbIns->pCfg = pConfig;
+ pUsbIns->pCfgGlobal = pGlobalConfig;
+ pUsbIns->iInstance = iInstance;
+ pUsbIns->pvInstanceDataR3 = &pUsbIns->achInstanceData[0];
+ pUsbIns->pszName = RTStrDup(pUsbDev->pReg->szName);
+ //pUsbIns->fTracing = 0;
+ pUsbIns->idTracing = ++pVM->pdm.s.idTracingOther;
+ pUsbIns->enmSpeed = enmSpeed;
+
+ /*
+ * Link it into all the lists.
+ */
+ /* The global instance FIFO. */
+ PPDMUSBINS pPrev1 = pVM->pdm.s.pUsbInstances;
+ if (!pPrev1)
+ pVM->pdm.s.pUsbInstances = pUsbIns;
+ else
+ {
+ while (pPrev1->Internal.s.pNext)
+ {
+ Assert(pPrev1->u32Version == PDM_USBINS_VERSION);
+ pPrev1 = pPrev1->Internal.s.pNext;
+ }
+ pPrev1->Internal.s.pNext = pUsbIns;
+ }
+
+ /* The per device instance FIFO. */
+ PPDMUSBINS pPrev2 = pUsbDev->pInstances;
+ if (!pPrev2)
+ pUsbDev->pInstances = pUsbIns;
+ else
+ {
+ while (pPrev2->Internal.s.pPerDeviceNext)
+ {
+ Assert(pPrev2->u32Version == PDM_USBINS_VERSION);
+ pPrev2 = pPrev2->Internal.s.pPerDeviceNext;
+ }
+ pPrev2->Internal.s.pPerDeviceNext = pUsbIns;
+ }
+
+ /*
+ * Call the constructor.
+ */
+ Log(("PDM: Constructing USB device '%s' instance %d...\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ rc = pUsbIns->pReg->pfnConstruct(pUsbIns, pUsbIns->iInstance, pUsbIns->pCfg, pUsbIns->pCfgGlobal);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Attach it to the hub.
+ */
+ Log(("PDM: Attaching it...\n"));
+ rc = pHub->Reg.pfnAttachDevice(pHub->pDrvIns, pUsbIns, pszCaptureFilename, &pUsbIns->Internal.s.iPort);
+ if (RT_SUCCESS(rc))
+ {
+ pHub->cAvailablePorts--;
+ Assert((int32_t)pHub->cAvailablePorts >= 0 && pHub->cAvailablePorts < pHub->cPorts);
+ pUsbIns->Internal.s.pHub = pHub;
+
+ /* Send the hot-plugged notification if applicable. */
+ if (VMSTATE_IS_POWERED_ON(enmVMState) && pUsbIns->pReg->pfnHotPlugged)
+ pUsbIns->pReg->pfnHotPlugged(pUsbIns);
+
+ Log(("PDM: Successfully attached USB device '%s' instance %d to hub %p\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pHub));
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("PDMUsb: Failed to attach USB device '%s' instance %d to hub %p: %Rrc\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pHub, rc));
+ }
+ else
+ {
+ AssertMsgFailed(("Failed to construct '%s'/%d! %Rra\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ if (rc == VERR_VERSION_MISMATCH)
+ rc = VERR_PDM_USBDEV_VERSION_MISMATCH;
+ }
+ if (VMSTATE_IS_POWERED_ON(enmVMState))
+ pdmR3UsbDestroyDevice(pVM, pUsbIns);
+ /* else: destructors are invoked later. */
+ return rc;
+}
+
+
+/**
+ * Instantiate USB devices.
+ *
+ * This is called by pdmR3DevInit() after it has instantiated the
+ * other devices and their drivers. If there aren't any hubs
+ * around, we'll silently skip the USB devices.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pdmR3UsbInstantiateDevices(PVM pVM)
+{
+ /*
+ * Any hubs?
+ */
+ if (!pVM->pdm.s.pUsbHubs)
+ {
+ Log(("PDM: No USB hubs, skipping USB device instantiation.\n"));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Count the device instances.
+ */
+ PCFGMNODE pCur;
+ PCFGMNODE pUsbNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "USB/");
+ PCFGMNODE pInstanceNode;
+ unsigned cUsbDevs = 0;
+ for (pCur = CFGMR3GetFirstChild(pUsbNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ PCFGMNODE pGlobal = CFGMR3GetChild(pCur, "GlobalConfig/");
+ for (pInstanceNode = CFGMR3GetFirstChild(pCur); pInstanceNode; pInstanceNode = CFGMR3GetNextChild(pInstanceNode))
+ if (pInstanceNode != pGlobal)
+ cUsbDevs++;
+ }
+ if (!cUsbDevs)
+ {
+ Log(("PDM: No USB devices were configured!\n"));
+ return VINF_SUCCESS;
+ }
+ Log2(("PDM: cUsbDevs=%d!\n", cUsbDevs));
+
+ /*
+ * Collect info on each USB device instance.
+ */
+ struct USBDEVORDER
+ {
+ /** Configuration node. */
+ PCFGMNODE pNode;
+ /** Pointer to the USB device. */
+ PPDMUSB pUsbDev;
+ /** Init order. */
+ uint32_t u32Order;
+ /** VBox instance number. */
+ uint32_t iInstance;
+ /** Device UUID. */
+ RTUUID Uuid;
+ } *paUsbDevs = (struct USBDEVORDER *)alloca(sizeof(paUsbDevs[0]) * (cUsbDevs + 1)); /* (One extra for swapping) */
+ Assert(paUsbDevs);
+ int rc;
+ unsigned i = 0;
+ for (pCur = CFGMR3GetFirstChild(pUsbNode); pCur; pCur = CFGMR3GetNextChild(pCur))
+ {
+ /* Get the device name. */
+ char szName[sizeof(paUsbDevs[0].pUsbDev->pReg->szName)];
+ rc = CFGMR3GetName(pCur, szName, sizeof(szName));
+ AssertMsgRCReturn(rc, ("Configuration error: device name is too long (or something)! rc=%Rrc\n", rc), rc);
+
+ /* Find the device. */
+ PPDMUSB pUsbDev = pdmR3UsbLookup(pVM, szName);
+ AssertMsgReturn(pUsbDev, ("Configuration error: device '%s' not found!\n", szName), VERR_PDM_DEVICE_NOT_FOUND);
+
+ /* Configured priority or use default? */
+ uint32_t u32Order;
+ rc = CFGMR3QueryU32(pCur, "Priority", &u32Order);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ u32Order = i << 4;
+ else
+ AssertMsgRCReturn(rc, ("Configuration error: reading \"Priority\" for the '%s' USB device failed rc=%Rrc!\n", szName, rc), rc);
+
+ /* Global config. */
+ PCFGMNODE pGlobal = CFGMR3GetChild(pCur, "GlobalConfig/");
+ if (!pGlobal)
+ {
+ rc = CFGMR3InsertNode(pCur, "GlobalConfig/", &pGlobal);
+ AssertMsgRCReturn(rc, ("Failed to create GlobalConfig node! rc=%Rrc\n", rc), rc);
+ CFGMR3SetRestrictedRoot(pGlobal);
+ }
+
+ /* Enumerate the device instances. */
+ for (pInstanceNode = CFGMR3GetFirstChild(pCur); pInstanceNode; pInstanceNode = CFGMR3GetNextChild(pInstanceNode))
+ {
+ if (pInstanceNode == pGlobal)
+ continue;
+
+ /* Use the configured UUID if present, create our own otherwise. */
+ char *pszUuid = NULL;
+
+ RTUuidClear(&paUsbDevs[i].Uuid);
+ rc = CFGMR3QueryStringAlloc(pInstanceNode, "UUID", &pszUuid);
+ if (RT_SUCCESS(rc))
+ {
+ AssertPtr(pszUuid);
+
+ rc = RTUuidFromStr(&paUsbDevs[i].Uuid, pszUuid);
+ AssertMsgRCReturn(rc, ("Failed to convert UUID from string! rc=%Rrc\n", rc), rc);
+ MMR3HeapFree(pszUuid);
+ }
+ else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = RTUuidCreate(&paUsbDevs[i].Uuid);
+
+ AssertRCReturn(rc, rc);
+ paUsbDevs[i].pNode = pInstanceNode;
+ paUsbDevs[i].pUsbDev = pUsbDev;
+ paUsbDevs[i].u32Order = u32Order;
+
+ /* Get the instance number. */
+ char szInstance[32];
+ rc = CFGMR3GetName(pInstanceNode, szInstance, sizeof(szInstance));
+ AssertMsgRCReturn(rc, ("Configuration error: instance name is too long (or something)! rc=%Rrc\n", rc), rc);
+ char *pszNext = NULL;
+ rc = RTStrToUInt32Ex(szInstance, &pszNext, 0, &paUsbDevs[i].iInstance);
+ AssertMsgRCReturn(rc, ("Configuration error: RTStrToInt32Ex failed on the instance name '%s'! rc=%Rrc\n", szInstance, rc), rc);
+ AssertMsgReturn(!*pszNext, ("Configuration error: the instance name '%s' isn't all digits. (%s)\n", szInstance, pszNext), VERR_INVALID_PARAMETER);
+
+ /* next instance */
+ i++;
+ }
+ } /* devices */
+ Assert(i == cUsbDevs);
+
+ /*
+ * Sort the device array ascending on u32Order. (bubble)
+ */
+ unsigned c = cUsbDevs - 1;
+ while (c)
+ {
+ unsigned j = 0;
+ for (i = 0; i < c; i++)
+ if (paUsbDevs[i].u32Order > paUsbDevs[i + 1].u32Order)
+ {
+ paUsbDevs[cUsbDevs] = paUsbDevs[i + 1];
+ paUsbDevs[i + 1] = paUsbDevs[i];
+ paUsbDevs[i] = paUsbDevs[cUsbDevs];
+ j = i;
+ }
+ c = j;
+ }
+
+ /*
+ * Instantiate the devices.
+ */
+ for (i = 0; i < cUsbDevs; i++)
+ {
+ /*
+ * Make sure there is a config node and mark it as restricted.
+ */
+ PCFGMNODE pConfigNode = CFGMR3GetChild(paUsbDevs[i].pNode, "Config/");
+ if (!pConfigNode)
+ {
+ rc = CFGMR3InsertNode(paUsbDevs[i].pNode, "Config", &pConfigNode);
+ AssertMsgRCReturn(rc, ("Failed to create Config node! rc=%Rrc\n", rc), rc);
+ }
+ CFGMR3SetRestrictedRoot(pConfigNode);
+
+ /*
+ * Every emulated device must support USB 1.x hubs; optionally, high-speed USB 2.0
+ * and SuperSpeed USB 3.0 hubs may also be supported. This determines where to
+ * attach the device.
+ */
+ uint32_t iUsbVersion = VUSB_STDVER_11;
+
+ if (paUsbDevs[i].pUsbDev->pReg->fFlags & PDM_USBREG_HIGHSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_20;
+ if (paUsbDevs[i].pUsbDev->pReg->fFlags & PDM_USBREG_SUPERSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_30;
+
+ /*
+ * Find a suitable hub with free ports.
+ */
+ PPDMUSBHUB pHub;
+ rc = pdmR3UsbFindHub(pVM, iUsbVersion, &pHub);
+ if (RT_FAILURE(rc))
+ {
+ Log(("pdmR3UsbFindHub failed %Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * This is how we inform the device what speed it's communicating at, and hence
+ * which descriptors it should present to the guest.
+ */
+ iUsbVersion &= pHub->fVersions;
+
+ /*
+ * Create and attach the device.
+ */
+ rc = pdmR3UsbCreateDevice(pVM, pHub, paUsbDevs[i].pUsbDev, paUsbDevs[i].iInstance, &paUsbDevs[i].Uuid,
+ &paUsbDevs[i].pNode, pdmR3UsbVer2Spd(iUsbVersion), NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+ } /* for device instances */
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates an emulated USB device instance at runtime.
+ *
+ * This will find an appropriate HUB for the USB device
+ * and try to instantiate the emulated device.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDeviceName The name of the PDM device to instantiate.
+ * @param pInstanceNode The instance CFGM node.
+ * @param pUuid The UUID to be associated with the device.
+ * @param pszCaptureFilename Path to the file for USB traffic capturing, optional.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3UsbCreateEmulatedDevice(PUVM pUVM, const char *pszDeviceName, PCFGMNODE pInstanceNode, PCRTUUID pUuid,
+ const char *pszCaptureFilename)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pszDeviceName, VERR_INVALID_POINTER);
+ AssertPtrReturn(pInstanceNode, VERR_INVALID_POINTER);
+
+ /*
+ * Find the device.
+ */
+ PPDMUSB pUsbDev = pdmR3UsbLookup(pVM, pszDeviceName);
+ if (!pUsbDev)
+ {
+ LogRel(("PDMUsb: PDMR3UsbCreateEmulatedDevice: The '%s' device wasn't found\n", pszDeviceName));
+ return VERR_PDM_NO_USBPROXY;
+ }
+
+ /*
+ * Every device must support USB 1.x hubs; optionally, high-speed USB 2.0 and
+ * SuperSpeed USB 3.0 hubs may also be supported. This determines where to attach
+ * the device.
+ */
+ uint32_t iUsbVersion = VUSB_STDVER_11;
+ if (pUsbDev->pReg->fFlags & PDM_USBREG_HIGHSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_20;
+ if (pUsbDev->pReg->fFlags & PDM_USBREG_SUPERSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_30;
+
+ /*
+ * Find a suitable hub with free ports.
+ */
+ PPDMUSBHUB pHub;
+ int rc = pdmR3UsbFindHub(pVM, iUsbVersion, &pHub);
+ if (RT_FAILURE(rc))
+ {
+ Log(("pdmR3UsbFindHub: failed %Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * This is how we inform the device what speed it's communicating at, and hence
+ * which descriptors it should present to the guest.
+ */
+ iUsbVersion &= pHub->fVersions;
+
+ /*
+ * Create and attach the device.
+ */
+ rc = pdmR3UsbCreateDevice(pVM, pHub, pUsbDev, -1, pUuid, &pInstanceNode,
+ pdmR3UsbVer2Spd(iUsbVersion), pszCaptureFilename);
+ AssertRCReturn(rc, rc);
+
+ return rc;
+}
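+
+/* Usage sketch (illustrative only; "HidMouse" is an example name and must match a
+ * registered PDM USB device):
+ *
+ *     PCFGMNODE pInstance = CFGMR3CreateTree(pUVM);
+ *     PCFGMNODE pConfig;
+ *     CFGMR3InsertNode(pInstance, "Config", &pConfig);
+ *     RTUUID Uuid;
+ *     RTUuidCreate(&Uuid);
+ *     int rc = PDMR3UsbCreateEmulatedDevice(pUVM, "HidMouse", pInstance, &Uuid,
+ *                                           NULL /*pszCaptureFilename*/);
+ */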
+
+
+/**
+ * Creates a USB proxy device instance.
+ *
+ * This will find an appropriate HUB for the USB device, create the necessary CFGM stuff
+ * and try to instantiate the proxy device.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pUuid The UUID to be associated with the device.
+ * @param pszBackend The proxy backend to use.
+ * @param pszAddress The address string.
+ * @param pSubTree The CFGM subtree to incorporate into the settings
+ * (same restrictions as for CFGMR3InsertSubTree() apply),
+ * optional.
+ * @param enmSpeed The speed the USB device is operating at.
+ * @param fMaskedIfs The interfaces to hide from the guest.
+ * @param pszCaptureFilename Path to the file for USB traffic capturing, optional.
+ */
+VMMR3DECL(int) PDMR3UsbCreateProxyDevice(PUVM pUVM, PCRTUUID pUuid, const char *pszBackend, const char *pszAddress, PCFGMNODE pSubTree,
+ VUSBSPEED enmSpeed, uint32_t fMaskedIfs, const char *pszCaptureFilename)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pUuid, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszAddress, VERR_INVALID_POINTER);
+ AssertReturn( enmSpeed == VUSB_SPEED_LOW
+ || enmSpeed == VUSB_SPEED_FULL
+ || enmSpeed == VUSB_SPEED_HIGH
+ || enmSpeed == VUSB_SPEED_SUPER
+ || enmSpeed == VUSB_SPEED_SUPERPLUS, VERR_INVALID_PARAMETER);
+
+ /*
+ * Find the USBProxy driver.
+ */
+ PPDMUSB pUsbDev = pdmR3UsbLookup(pVM, "USBProxy");
+ if (!pUsbDev)
+ {
+ LogRel(("PDMUsb: PDMR3UsbCreateProxyDevice: The USBProxy device class wasn't found\n"));
+ return VERR_PDM_NO_USBPROXY;
+ }
+
+ /*
+ * Find a suitable hub with free ports.
+ */
+ PPDMUSBHUB pHub;
+ uint32_t iUsbVersion = pdmR3UsbSpd2Ver(enmSpeed);
+ int rc = pdmR3UsbFindHub(pVM, iUsbVersion, &pHub);
+ if (RT_FAILURE(rc))
+ {
+ Log(("pdmR3UsbFindHub: failed %Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Create the CFGM instance node.
+ */
+ PCFGMNODE pInstance = CFGMR3CreateTree(pUVM);
+ AssertReturn(pInstance, VERR_NO_MEMORY);
+ do /* break loop */
+ {
+ PCFGMNODE pConfig;
+ rc = CFGMR3InsertNode(pInstance, "Config", &pConfig); AssertRCBreak(rc);
+ rc = CFGMR3InsertString(pConfig, "Address", pszAddress); AssertRCBreak(rc);
+ char szUuid[RTUUID_STR_LENGTH];
+ rc = RTUuidToStr(pUuid, &szUuid[0], sizeof(szUuid)); AssertRCBreak(rc);
+ rc = CFGMR3InsertString(pConfig, "UUID", szUuid); AssertRCBreak(rc);
+ rc = CFGMR3InsertString(pConfig, "Backend", pszBackend); AssertRCBreak(rc);
+ rc = CFGMR3InsertInteger(pConfig, "MaskedIfs", fMaskedIfs); AssertRCBreak(rc);
+ rc = CFGMR3InsertInteger(pConfig, "Force11Device", !(pHub->fVersions & iUsbVersion)); AssertRCBreak(rc);
+ if (pSubTree)
+ {
+ rc = CFGMR3InsertSubTree(pConfig, "BackendCfg", pSubTree, NULL /*ppChild*/);
+ AssertRCBreak(rc);
+ }
+ } while (0); /* break loop */
+ if (RT_FAILURE(rc))
+ {
+ CFGMR3RemoveNode(pInstance);
+ LogRel(("PDMUsb: PDMR3UsbCreateProxyDevice: failed to setup CFGM config, rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ if (enmSpeed == VUSB_SPEED_UNKNOWN)
+ enmSpeed = pdmR3UsbVer2Spd(iUsbVersion);
+
+ /*
+ * Finally, try to create it.
+ */
+ rc = pdmR3UsbCreateDevice(pVM, pHub, pUsbDev, -1, pUuid, &pInstance, enmSpeed, pszCaptureFilename);
+ if (RT_FAILURE(rc) && pInstance)
+ CFGMR3RemoveNode(pInstance);
+ return rc;
+}
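+
+/* Resulting CFGM instance tree (sketch derived from the inserts above; values in
+ * angle brackets stand in for the actual arguments):
+ *
+ *     Config/
+ *       Address       = <pszAddress>
+ *       UUID          = <pUuid as a string>
+ *       Backend       = <pszBackend>
+ *       MaskedIfs     = <fMaskedIfs>
+ *       Force11Device = <1 if the hub lacks the requested USB version, else 0>
+ *       BackendCfg/     ; optional copy of pSubTree
+ */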
+
+
+/**
+ * Destroys a hot-plugged USB device.
+ *
+ * The device must be detached from the HUB at this point.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance to destroy.
+ * @thread EMT
+ */
+static void pdmR3UsbDestroyDevice(PVM pVM, PPDMUSBINS pUsbIns)
+{
+ Assert(!pUsbIns->Internal.s.pHub);
+
+ /*
+ * Do the unplug notification.
+ */
+ /** @todo what about the drivers? */
+ if (pUsbIns->pReg->pfnHotUnplugged)
+ pUsbIns->pReg->pfnHotUnplugged(pUsbIns);
+
+ /*
+ * Destroy the luns with their driver chains and call the device destructor.
+ */
+ while (pUsbIns->Internal.s.pLuns)
+ {
+ PPDMLUN pLun = pUsbIns->Internal.s.pLuns;
+ pUsbIns->Internal.s.pLuns = pLun->pNext;
+ if (pLun->pTop)
+ pdmR3DrvDestroyChain(pLun->pTop, PDM_TACH_FLAGS_NOT_HOT_PLUG); /* Hotplugging is handled differently here atm. */
+ MMR3HeapFree(pLun);
+ }
+
+ /* finally, the device. */
+ if (pUsbIns->pReg->pfnDestruct)
+ {
+ Log(("PDM: Destructing USB device '%s' instance %d...\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ pUsbIns->pReg->pfnDestruct(pUsbIns);
+ }
+ TMR3TimerDestroyUsb(pVM, pUsbIns);
+ SSMR3DeregisterUsb(pVM, pUsbIns, NULL, 0);
+ pdmR3ThreadDestroyUsb(pVM, pUsbIns);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ pdmR3AsyncCompletionTemplateDestroyUsb(pVM, pUsbIns);
+#endif
+
+ /*
+ * Unlink it.
+ */
+ /* The global instance FIFO. */
+ if (pVM->pdm.s.pUsbInstances == pUsbIns)
+ pVM->pdm.s.pUsbInstances = pUsbIns->Internal.s.pNext;
+ else
+ {
+ PPDMUSBINS pPrev = pVM->pdm.s.pUsbInstances;
+ while (pPrev && pPrev->Internal.s.pNext != pUsbIns)
+ {
+ Assert(pPrev->u32Version == PDM_USBINS_VERSION);
+ pPrev = pPrev->Internal.s.pNext;
+ }
+ Assert(pPrev); Assert(pPrev != pUsbIns);
+ if (pPrev)
+ pPrev->Internal.s.pNext = pUsbIns->Internal.s.pNext;
+ }
+
+ /* The per device instance FIFO. */
+ PPDMUSB pUsbDev = pUsbIns->Internal.s.pUsbDev;
+ if (pUsbDev->pInstances == pUsbIns)
+ pUsbDev->pInstances = pUsbIns->Internal.s.pPerDeviceNext;
+ else
+ {
+ PPDMUSBINS pPrev = pUsbDev->pInstances;
+ while (pPrev && pPrev->Internal.s.pPerDeviceNext != pUsbIns)
+ {
+ Assert(pPrev->u32Version == PDM_USBINS_VERSION);
+ pPrev = pPrev->Internal.s.pPerDeviceNext;
+ }
+ Assert(pPrev); Assert(pPrev != pUsbIns);
+ if (pPrev)
+ pPrev->Internal.s.pPerDeviceNext = pUsbIns->Internal.s.pPerDeviceNext;
+ }
+
+ /*
+ * Trash it.
+ */
+ pUsbIns->u32Version = 0;
+ pUsbIns->pReg = NULL;
+ if (pUsbIns->pszName)
+ {
+ RTStrFree(pUsbIns->pszName);
+ pUsbIns->pszName = NULL;
+ }
+ CFGMR3RemoveNode(pUsbIns->Internal.s.pCfgDelete);
+ MMR3HeapFree(pUsbIns);
+}
+
+
+/**
+ * Detaches and destroys a USB device.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pUuid The UUID associated with the device to detach.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3UsbDetachDevice(PUVM pUVM, PCRTUUID pUuid)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+ AssertPtrReturn(pUuid, VERR_INVALID_POINTER);
+
+ /*
+ * Search the global list for it.
+ */
+ PPDMUSBINS pUsbIns = pVM->pdm.s.pUsbInstances;
+ for ( ; pUsbIns; pUsbIns = pUsbIns->Internal.s.pNext)
+ if (!RTUuidCompare(&pUsbIns->Internal.s.Uuid, pUuid))
+ break;
+ if (!pUsbIns)
+ return VERR_PDM_DEVICE_INSTANCE_NOT_FOUND; /** @todo VERR_PDM_USB_INSTANCE_NOT_FOUND */
+
+ /*
+ * Detach it from the HUB (if it's actually attached to one).
+ */
+ PPDMUSBHUB pHub = pUsbIns->Internal.s.pHub;
+ if (pHub)
+ {
+ int rc = pHub->Reg.pfnDetachDevice(pHub->pDrvIns, pUsbIns, pUsbIns->Internal.s.iPort);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PDMUsb: Failed to detach USB device '%s' instance %d from %p: %Rrc\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pHub, rc));
+ return rc;
+ }
+
+ pHub->cAvailablePorts++;
+ Assert(pHub->cAvailablePorts > 0 && pHub->cAvailablePorts <= pHub->cPorts);
+ pUsbIns->Internal.s.pHub = NULL;
+ }
+
+ /*
+ * Notify about unplugging and destroy the device with its drivers.
+ */
+ pdmR3UsbDestroyDevice(pVM, pUsbIns);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if there are any USB hubs attached.
+ *
+ * @returns true / false accordingly.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(bool) PDMR3UsbHasHub(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->pdm.s.pUsbHubs != NULL;
+}
+
+
+/**
+ * Locates a LUN.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param ppLun Where to store the pointer to the LUN if found.
+ * @thread Try to do this only in EMT...
+ */
+static int pdmR3UsbFindLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPPDMLUN ppLun)
+{
+ /*
+ * Iterate registered devices looking for the device.
+ */
+ size_t cchDevice = strlen(pszDevice);
+ for (PPDMUSB pUsbDev = pVM->pdm.s.pUsbDevs; pUsbDev; pUsbDev = pUsbDev->pNext)
+ {
+ if ( pUsbDev->cchName == cchDevice
+ && !memcmp(pUsbDev->pReg->szName, pszDevice, cchDevice))
+ {
+ /*
+ * Iterate device instances.
+ */
+ for (PPDMUSBINS pUsbIns = pUsbDev->pInstances; pUsbIns; pUsbIns = pUsbIns->Internal.s.pPerDeviceNext)
+ {
+ if (pUsbIns->iInstance == iInstance)
+ {
+ /*
+ * Iterate luns.
+ */
+ for (PPDMLUN pLun = pUsbIns->Internal.s.pLuns; pLun; pLun = pLun->pNext)
+ {
+ if (pLun->iLun == iLun)
+ {
+ *ppLun = pLun;
+ return VINF_SUCCESS;
+ }
+ }
+ return VERR_PDM_LUN_NOT_FOUND;
+ }
+ }
+ return VERR_PDM_DEVICE_INSTANCE_NOT_FOUND;
+ }
+ }
+ return VERR_PDM_DEVICE_NOT_FOUND;
+}
+
+
+/**
+ * Attaches a preconfigured driver to an existing device or driver instance.
+ *
+ * This is used to change drivers and suchlike at runtime. The driver or device
+ * at the end of the chain will be told to attach to whatever is configured
+ * below it.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iDevIns Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param fFlags Flags, combination of the PDM_TACH_FLAGS_* \#defines.
+ * @param ppBase Where to store the base interface pointer. Optional.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3UsbDriverAttach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun, uint32_t fFlags,
+ PPPDMIBASE ppBase)
+{
+ LogFlow(("PDMR3UsbDriverAttach: pszDevice=%p:{%s} iDevIns=%d iLun=%d fFlags=%#x ppBase=%p\n",
+ pszDevice, pszDevice, iDevIns, iLun, fFlags, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+
+ if (ppBase)
+ *ppBase = NULL;
+
+ /*
+ * Find the LUN in question.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3UsbFindLun(pVM, pszDevice, iDevIns, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Anything attached to the LUN?
+ */
+ PPDMDRVINS pDrvIns = pLun->pTop;
+ if (!pDrvIns)
+ {
+ /* No, ask the device to attach to the new stuff. */
+ PPDMUSBINS pUsbIns = pLun->pUsbIns;
+ if (pUsbIns->pReg->pfnDriverAttach)
+ {
+ rc = pUsbIns->pReg->pfnDriverAttach(pUsbIns, iLun, fFlags);
+ if (RT_SUCCESS(rc) && ppBase)
+ *ppBase = pLun->pTop ? &pLun->pTop->IBase : NULL;
+ }
+ else
+ rc = VERR_PDM_DEVICE_NO_RT_ATTACH;
+ }
+ else
+ {
+ /* Yes, find the bottom most driver and ask it to attach to the new stuff. */
+ while (pDrvIns->Internal.s.pDown)
+ pDrvIns = pDrvIns->Internal.s.pDown;
+ if (pDrvIns->pReg->pfnAttach)
+ {
+ rc = pDrvIns->pReg->pfnAttach(pDrvIns, fFlags);
+ if (RT_SUCCESS(rc) && ppBase)
+ *ppBase = pDrvIns->Internal.s.pDown
+ ? &pDrvIns->Internal.s.pDown->IBase
+ : NULL;
+ }
+ else
+ rc = VERR_PDM_DRIVER_NO_RT_ATTACH;
+ }
+ }
+
+ if (ppBase)
+ LogFlow(("PDMR3UsbDriverAttach: returns %Rrc *ppBase=%p\n", rc, *ppBase));
+ else
+ LogFlow(("PDMR3UsbDriverAttach: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Detaches the specified driver instance.
+ *
+ * This is used to replumb drivers at runtime for simulating hot plugging and
+ * media changes.
+ *
+ * This method allows detaching drivers from
+ * any driver or device by specifying the driver to start detaching at. The
+ * only prerequisite is that the driver or device above implements the
+ * pfnDetach callback (PDMDRVREG / PDMUSBREG).
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iDevIns Device instance.
+ * @param iLun The Logical Unit in which to look for the driver.
+ * @param pszDriver The name of the driver which to detach. If NULL
+ * then the entire driver chain is detached.
+ * @param iOccurrence The occurrence of that driver in the chain. This is
+ * usually 0.
+ * @param fFlags Flags, combination of the PDM_TACH_FLAGS_* \#defines.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3UsbDriverDetach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
+ const char *pszDriver, unsigned iOccurrence, uint32_t fFlags)
+{
+ LogFlow(("PDMR3UsbDriverDetach: pszDevice=%p:{%s} iDevIns=%u iLun=%u pszDriver=%p:{%s} iOccurrence=%u fFlags=%#x\n",
+ pszDevice, pszDevice, iDevIns, iLun, pszDriver, pszDriver, iOccurrence, fFlags));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
+ AssertPtr(pszDevice);
+ AssertPtrNull(pszDriver);
+ Assert(iOccurrence == 0 || pszDriver);
+ Assert(!(fFlags & ~(PDM_TACH_FLAGS_NOT_HOT_PLUG)));
+
+ /*
+ * Find the LUN in question.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3UsbFindLun(pVM, pszDevice, iDevIns, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Locate the driver.
+ */
+ PPDMDRVINS pDrvIns = pLun->pTop;
+ if (pDrvIns)
+ {
+ if (pszDriver)
+ {
+ while (pDrvIns)
+ {
+ if (!strcmp(pDrvIns->pReg->szName, pszDriver))
+ {
+ if (iOccurrence == 0)
+ break;
+ iOccurrence--;
+ }
+ pDrvIns = pDrvIns->Internal.s.pDown;
+ }
+ }
+ if (pDrvIns)
+ rc = pdmR3DrvDetach(pDrvIns, fFlags);
+ else
+ rc = VERR_PDM_DRIVER_INSTANCE_NOT_FOUND;
+ }
+ else
+ rc = VINF_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+
+ LogFlow(("PDMR3UsbDriverDetach: returns %Rrc\n", rc));
+ return rc;
+}
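+
+/* Replumbing sketch (illustrative only; the device name "Msd" is an example):
+ * detach the whole driver chain on LUN#0 of instance 0 and re-attach whatever
+ * the configuration now specifies below the device.
+ *
+ *     int rc = PDMR3UsbDriverDetach(pUVM, "Msd", 0, 0, NULL /*pszDriver*/,
+ *                                   0, PDM_TACH_FLAGS_NOT_HOT_PLUG);
+ *     if (RT_SUCCESS(rc))
+ *         rc = PDMR3UsbDriverAttach(pUVM, "Msd", 0, 0,
+ *                                   PDM_TACH_FLAGS_NOT_HOT_PLUG, NULL /*ppBase*/);
+ */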
+
+
+/**
+ * Query the interface of the top level driver on a LUN.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param ppBase Where to store the base interface pointer.
+ * @remark We're not doing any locking ATM, so don't try to call this while the
+ * device chain is being updated.
+ */
+VMMR3DECL(int) PDMR3UsbQueryLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+{
+ LogFlow(("PDMR3UsbQueryLun: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Find the LUN.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3UsbFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLun->pTop)
+ {
+ *ppBase = &pLun->pTop->IBase;
+ LogFlow(("PDMR3UsbQueryLun: return %Rrc and *ppBase=%p\n", VINF_SUCCESS, *ppBase));
+ return VINF_SUCCESS;
+ }
+ rc = VERR_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+ LogFlow(("PDMR3UsbQueryLun: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Query the interface of a named driver on a LUN.
+ *
+ * If the driver appears more than once in the driver chain, the first instance
+ * is returned.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDevice Device name.
+ * @param iInstance Device instance.
+ * @param iLun The Logical Unit to obtain the interface of.
+ * @param pszDriver The driver name.
+ * @param ppBase Where to store the base interface pointer.
+ *
+ * @remark We're not doing any locking ATM, so don't try to call this while the
+ * device chain is being updated.
+ */
+VMMR3DECL(int) PDMR3UsbQueryDriverOnLun(PUVM pUVM, const char *pszDevice, unsigned iInstance,
+ unsigned iLun, const char *pszDriver, PPPDMIBASE ppBase)
+{
+ LogFlow(("PDMR3QueryDriverOnLun: pszDevice=%p:{%s} iInstance=%u iLun=%u pszDriver=%p:{%s} ppBase=%p\n",
+ pszDevice, pszDevice, iInstance, iLun, pszDriver, pszDriver, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Find the LUN.
+ */
+ PPDMLUN pLun;
+ int rc = pdmR3UsbFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ if (RT_SUCCESS(rc))
+ {
+ if (pLun->pTop)
+ {
+ for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ if (!strcmp(pDrvIns->pReg->szName, pszDriver))
+ {
+ *ppBase = &pDrvIns->IBase;
+ LogFlow(("PDMR3UsbQueryDriverOnLun: return %Rrc and *ppBase=%p\n", VINF_SUCCESS, *ppBase));
+ return VINF_SUCCESS;
+ }
+ rc = VERR_PDM_DRIVER_NOT_FOUND;
+ }
+ else
+ rc = VERR_PDM_NO_DRIVER_ATTACHED_TO_LUN;
+ }
+ LogFlow(("PDMR3UsbQueryDriverOnLun: returns %Rrc\n", rc));
+ return rc;
+}
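+
+/* Query sketch (illustrative only; the device name and interface are examples):
+ * fetch the base interface of the top driver on LUN#0 and upcast it with the
+ * usual PDMIBASE query mechanism.
+ *
+ *     PPDMIBASE pBase;
+ *     int rc = PDMR3UsbQueryLun(pUVM, "HidKeyboard", 0, 0, &pBase);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         PPDMIKEYBOARDCONNECTOR pConn = PDMIBASE_QUERY_INTERFACE(pBase, PDMIKEYBOARDCONNECTOR);
+ *         ...
+ *     }
+ */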
+
+
+/** @name USB Device Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMUSBHLP,pfnDriverAttach} */
+static DECLCALLBACK(int) pdmR3UsbHlp_DriverAttach(PPDMUSBINS pUsbIns, RTUINT iLun, PPDMIBASE pBaseInterface,
+ PPDMIBASE *ppBaseInterface, const char *pszDesc)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3UsbHlp_DriverAttach: caller='%s'/%d: iLun=%d pBaseInterface=%p ppBaseInterface=%p pszDesc=%p:{%s}\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, iLun, pBaseInterface, ppBaseInterface, pszDesc, pszDesc));
+
+ /*
+ * Lookup the LUN, it might already be registered.
+ */
+ PPDMLUN pLunPrev = NULL;
+ PPDMLUN pLun = pUsbIns->Internal.s.pLuns;
+ for (; pLun; pLunPrev = pLun, pLun = pLun->pNext)
+ if (pLun->iLun == iLun)
+ break;
+
+ /*
+ * Create the LUN if it wasn't found; otherwise check whether a driver is already attached to it.
+ */
+ if (!pLun)
+ {
+ if ( !pBaseInterface
+ || !pszDesc
+ || !*pszDesc)
+ {
+ Assert(pBaseInterface);
+ Assert(pszDesc && *pszDesc);
+ return VERR_INVALID_PARAMETER;
+ }
+
+ pLun = (PPDMLUN)MMR3HeapAlloc(pVM, MM_TAG_PDM_LUN, sizeof(*pLun));
+ if (!pLun)
+ return VERR_NO_MEMORY;
+
+ pLun->iLun = iLun;
+ pLun->pNext = pLunPrev ? pLunPrev->pNext : NULL;
+ pLun->pTop = NULL;
+ pLun->pBottom = NULL;
+ pLun->pDevIns = NULL;
+ pLun->pUsbIns = pUsbIns;
+ pLun->pszDesc = pszDesc;
+ pLun->pBase = pBaseInterface;
+ if (!pLunPrev)
+ pUsbIns->Internal.s.pLuns = pLun;
+ else
+ pLunPrev->pNext = pLun;
+ Log(("pdmR3UsbHlp_DriverAttach: Registered LUN#%d '%s' with device '%s'/%d.\n",
+ iLun, pszDesc, pUsbIns->pReg->szName, pUsbIns->iInstance));
+ }
+ else if (pLun->pTop)
+ {
+ AssertMsgFailed(("Already attached! The device should keep track of such things!\n"));
+ LogFlow(("pdmR3UsbHlp_DriverAttach: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, VERR_PDM_DRIVER_ALREADY_ATTACHED));
+ return VERR_PDM_DRIVER_ALREADY_ATTACHED;
+ }
+ Assert(pLun->pBase == pBaseInterface);
+
+ /*
+ * Get the attached driver configuration.
+ */
+ int rc;
+ PCFGMNODE pNode = CFGMR3GetChildF(pUsbIns->Internal.s.pCfg, "LUN#%u", iLun);
+ if (pNode)
+ rc = pdmR3DrvInstantiate(pVM, pNode, pBaseInterface, NULL /*pDrvAbove*/, pLun, ppBaseInterface);
+ else
+ rc = VERR_PDM_NO_ATTACHED_DRIVER;
+
+ LogFlow(("pdmR3UsbHlp_DriverAttach: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnAssertEMT} */
+static DECLCALLBACK(bool) pdmR3UsbHlp_AssertEMT(PPDMUSBINS pUsbIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ if (VM_IS_EMT(pUsbIns->Internal.s.pVM))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertEMT '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ return false;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnAssertOther} */
+static DECLCALLBACK(bool) pdmR3UsbHlp_AssertOther(PPDMUSBINS pUsbIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ if (!VM_IS_EMT(pUsbIns->Internal.s.pVM))
+ return true;
+
+ char szMsg[100];
+ RTStrPrintf(szMsg, sizeof(szMsg), "AssertOther '%s'/%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance);
+ RTAssertMsg1Weak(szMsg, iLine, pszFile, pszFunction);
+ AssertBreakpoint();
+ return false;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnDBGFStopV} */
+static DECLCALLBACK(int) pdmR3UsbHlp_DBGFStopV(PPDMUSBINS pUsbIns, const char *pszFile, unsigned iLine, const char *pszFunction,
+ const char *pszFormat, va_list va)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+#ifdef LOG_ENABLED
+ va_list va2;
+ va_copy(va2, va);
+ LogFlow(("pdmR3UsbHlp_DBGFStopV: caller='%s'/%d: pszFile=%p:{%s} iLine=%d pszFunction=%p:{%s} pszFormat=%p:{%s} (%N)\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pszFile, pszFile, iLine, pszFunction, pszFunction, pszFormat, pszFormat, pszFormat, &va2));
+ va_end(va2);
+#endif
+
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3EventSrcV(pVM, DBGFEVENT_DEV_STOP, pszFile, iLine, pszFunction, pszFormat, va);
+ if (rc == VERR_DBGF_NOT_ATTACHED)
+ rc = VINF_SUCCESS;
+
+ LogFlow(("pdmR3UsbHlp_DBGFStopV: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnDBGFInfoRegisterArgv} */
+static DECLCALLBACK(int) pdmR3UsbHlp_DBGFInfoRegisterArgv(PPDMUSBINS pUsbIns, const char *pszName, const char *pszDesc,
+ PFNDBGFINFOARGVUSB pfnHandler)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ LogFlow(("pdmR3UsbHlp_DBGFInfoRegister: caller='%s'/%d: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pszName, pszName, pszDesc, pszDesc, pfnHandler));
+
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3InfoRegisterUsbArgv(pVM, pszName, pszDesc, pfnHandler, pUsbIns);
+
+ LogFlow(("pdmR3UsbHlp_DBGFInfoRegister: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnMMHeapAlloc} */
+static DECLCALLBACK(void *) pdmR3UsbHlp_MMHeapAlloc(PPDMUSBINS pUsbIns, size_t cb)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ LogFlow(("pdmR3UsbHlp_MMHeapAlloc: caller='%s'/%d: cb=%#x\n", pUsbIns->pReg->szName, pUsbIns->iInstance, cb));
+
+ void *pv = MMR3HeapAlloc(pUsbIns->Internal.s.pVM, MM_TAG_PDM_USB_USER, cb);
+
+ LogFlow(("pdmR3UsbHlp_MMHeapAlloc: caller='%s'/%d: returns %p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, pv));
+ return pv;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnMMHeapAllocZ} */
+static DECLCALLBACK(void *) pdmR3UsbHlp_MMHeapAllocZ(PPDMUSBINS pUsbIns, size_t cb)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ LogFlow(("pdmR3UsbHlp_MMHeapAllocZ: caller='%s'/%d: cb=%#x\n", pUsbIns->pReg->szName, pUsbIns->iInstance, cb));
+
+ void *pv = MMR3HeapAllocZ(pUsbIns->Internal.s.pVM, MM_TAG_PDM_USB_USER, cb);
+
+ LogFlow(("pdmR3UsbHlp_MMHeapAllocZ: caller='%s'/%d: returns %p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, pv));
+ return pv;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnMMHeapFree} */
+static DECLCALLBACK(void) pdmR3UsbHlp_MMHeapFree(PPDMUSBINS pUsbIns, void *pv)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns); RT_NOREF(pUsbIns);
+ LogFlow(("pdmR3UsbHlp_MMHeapFree: caller='%s'/%d: pv=%p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, pv));
+
+ MMR3HeapFree(pv);
+
+ LogFlow(("pdmR3UsbHlp_MMHeapFree: caller='%s'/%d: returns\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnPDMQueueCreate} */
+static DECLCALLBACK(int) pdmR3UsbHlp_PDMQueueCreate(PPDMUSBINS pUsbIns, RTUINT cbItem, RTUINT cItems, uint32_t cMilliesInterval,
+ PFNPDMQUEUEUSB pfnCallback, const char *pszName, PPDMQUEUE *ppQueue)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ LogFlow(("pdmR3UsbHlp_PDMQueueCreate: caller='%s'/%d: cbItem=%#x cItems=%#x cMilliesInterval=%u pfnCallback=%p pszName=%p:{%s} ppQueue=%p\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, cbItem, cItems, cMilliesInterval, pfnCallback, pszName, pszName, ppQueue));
+
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+
+ if (pUsbIns->iInstance > 0)
+ {
+ pszName = MMR3HeapAPrintf(pVM, MM_TAG_PDM_DEVICE_DESC, "%s_%u", pszName, pUsbIns->iInstance);
+ AssertLogRelReturn(pszName, VERR_NO_MEMORY);
+ }
+
+ RT_NOREF5(cbItem, cItems, cMilliesInterval, pfnCallback, ppQueue);
+ /** @todo int rc = PDMR3QueueCreateUsb(pVM, pUsbIns, cbItem, cItems, cMilliesInterval, pfnCallback, fGCEnabled, pszName, ppQueue); */
+ int rc = VERR_NOT_IMPLEMENTED; AssertFailed();
+
+ LogFlow(("pdmR3UsbHlp_PDMQueueCreate: caller='%s'/%d: returns %Rrc *ppQueue=%p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc, *ppQueue));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnSSMRegister} */
+static DECLCALLBACK(int) pdmR3UsbHlp_SSMRegister(PPDMUSBINS pUsbIns, uint32_t uVersion, size_t cbGuess,
+ PFNSSMUSBLIVEPREP pfnLivePrep, PFNSSMUSBLIVEEXEC pfnLiveExec, PFNSSMUSBLIVEVOTE pfnLiveVote,
+ PFNSSMUSBSAVEPREP pfnSavePrep, PFNSSMUSBSAVEEXEC pfnSaveExec, PFNSSMUSBSAVEDONE pfnSaveDone,
+ PFNSSMUSBLOADPREP pfnLoadPrep, PFNSSMUSBLOADEXEC pfnLoadExec, PFNSSMUSBLOADDONE pfnLoadDone)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ VM_ASSERT_EMT(pUsbIns->Internal.s.pVM);
+ LogFlow(("pdmR3UsbHlp_SSMRegister: caller='%s'/%d: uVersion=%#x cbGuess=%#x\n"
+ " pfnLivePrep=%p pfnLiveExec=%p pfnLiveVote=%p pfnSavePrep=%p pfnSaveExec=%p pfnSaveDone=%p pszLoadPrep=%p pfnLoadExec=%p pfnLoadDone=%p\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, uVersion, cbGuess,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone));
+
+ int rc = SSMR3RegisterUsb(pUsbIns->Internal.s.pVM, pUsbIns, pUsbIns->pReg->szName, pUsbIns->iInstance,
+ uVersion, cbGuess,
+ pfnLivePrep, pfnLiveExec, pfnLiveVote,
+ pfnSavePrep, pfnSaveExec, pfnSaveDone,
+ pfnLoadPrep, pfnLoadExec, pfnLoadDone);
+
+ LogFlow(("pdmR3UsbHlp_SSMRegister: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnSTAMRegisterV} */
+static DECLCALLBACK(void) pdmR3UsbHlp_STAMRegisterV(PPDMUSBINS pUsbIns, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ STAMUNIT enmUnit, const char *pszDesc, const char *pszName, va_list va)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+
+ int rc = STAMR3RegisterV(pVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, va);
+ AssertRC(rc);
+
+ NOREF(pVM);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerCreate} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerCreate(PPDMUSBINS pUsbIns, TMCLOCK enmClock, PFNTMTIMERUSB pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszDesc, PTMTIMERHANDLE phTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ LogFlow(("pdmR3UsbHlp_TMTimerCreate: caller='%s'/%d: enmClock=%d pfnCallback=%p pvUser=%p fFlags=%#x pszDesc=%p:{%s} phTimer=%p\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, enmClock, pfnCallback, pvUser, fFlags, pszDesc, pszDesc, phTimer));
+
+ AssertReturn(!(fFlags & TMTIMER_FLAGS_RING0), VERR_INVALID_FLAGS);
+ fFlags |= TMTIMER_FLAGS_NO_RING0;
+
+ /* Mangle the timer name if there is more than one instance of this device. */
+ char szName[32];
+ AssertReturn(strlen(pszDesc) < sizeof(szName) - 8, VERR_INVALID_NAME);
+ if (pUsbIns->iInstance > 0)
+ {
+ RTStrPrintf(szName, sizeof(szName), "%s[%u:%s]", pszDesc, pUsbIns->iInstance, pUsbIns->Internal.s.pUsbDev->pReg->szName);
+ pszDesc = szName;
+ }
+
+ int rc = TMR3TimerCreateUsb(pVM, pUsbIns, enmClock, pfnCallback, pvUser, fFlags, pszDesc, phTimer);
+
+ LogFlow(("pdmR3UsbHlp_TMTimerCreate: caller='%s'/%d: returns %Rrc *phTimer=%p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc, *phTimer));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerFromMicro} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerFromMicro(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerFromMicro(pUsbIns->Internal.s.pVM, hTimer, cMicroSecs);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerFromMilli} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerFromMilli(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerFromMilli(pUsbIns->Internal.s.pVM, hTimer, cMilliSecs);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerFromNano} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerFromNano(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerFromNano(pUsbIns->Internal.s.pVM, hTimer, cNanoSecs);
+}
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerGet} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerGet(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerGet(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerGetFreq} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerGetFreq(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerGetFreq(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerGetNano} */
+static DECLCALLBACK(uint64_t) pdmR3UsbHlp_TimerGetNano(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerGetNano(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerIsActive} */
+static DECLCALLBACK(bool) pdmR3UsbHlp_TimerIsActive(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerIsActive(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerIsLockOwner} */
+static DECLCALLBACK(bool) pdmR3UsbHlp_TimerIsLockOwner(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerIsLockOwner(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerLockClock} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerLockClock(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerLock(pUsbIns->Internal.s.pVM, hTimer, VERR_IGNORED);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerLockClock2} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerLockClock2(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM const pVM = pUsbIns->Internal.s.pVM;
+ int rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
+ if (rc == VINF_SUCCESS)
+ {
+ rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+ if (rc == VINF_SUCCESS)
+ return rc;
+ AssertRC(rc);
+ TMTimerUnlock(pVM, hTimer);
+ }
+ else
+ AssertRC(rc);
+ return rc;
+}
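+
+/* Lock-order note (descriptive, not from the original source): TimerLockClock2
+ * takes the timer clock lock first and the critical section second; the matching
+ * pdmR3UsbHlp_TimerUnlockClock2 below releases them in the reverse order, giving
+ * callers a consistent lock hierarchy.
+ */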
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSet} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSet(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t uExpire)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSet(pUsbIns->Internal.s.pVM, hTimer, uExpire);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetFrequencyHint} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetFrequencyHint(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint32_t uHz)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSetFrequencyHint(pUsbIns->Internal.s.pVM, hTimer, uHz);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetMicro} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetMicro(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSetMicro(pUsbIns->Internal.s.pVM, hTimer, cMicrosToNext);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetMillies} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetMillies(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cMilliesToNext)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSetMillies(pUsbIns->Internal.s.pVM, hTimer, cMilliesToNext);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetNano} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetNano(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSetNano(pUsbIns->Internal.s.pVM, hTimer, cNanosToNext);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetRelative} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetRelative(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerSetRelative(pUsbIns->Internal.s.pVM, hTimer, cTicksToNext, pu64Now);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerStop} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerStop(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMTimerStop(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerUnlockClock} */
+static DECLCALLBACK(void) pdmR3UsbHlp_TimerUnlockClock(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ TMTimerUnlock(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerUnlockClock2} */
+static DECLCALLBACK(void) pdmR3UsbHlp_TimerUnlockClock2(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM const pVM = pUsbIns->Internal.s.pVM;
+ TMTimerUnlock(pVM, hTimer);
+ int rc = PDMCritSectLeave(pVM, pCritSect);
+ AssertRC(rc);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSetCritSect} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSetCritSect(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMR3TimerSetCritSect(pUsbIns->Internal.s.pVM, hTimer, pCritSect);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerSave} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerSave(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMR3TimerSave(pUsbIns->Internal.s.pVM, hTimer, pSSM);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerLoad} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerLoad(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMR3TimerLoad(pUsbIns->Internal.s.pVM, hTimer, pSSM);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnTimerDestroy} */
+static DECLCALLBACK(int) pdmR3UsbHlp_TimerDestroy(PPDMUSBINS pUsbIns, TMTIMERHANDLE hTimer)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ return TMR3TimerDestroy(pUsbIns->Internal.s.pVM, hTimer);
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMSetErrorV} */
+static DECLCALLBACK(int) pdmR3UsbHlp_VMSetErrorV(PPDMUSBINS pUsbIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ int rc2 = VMSetErrorV(pUsbIns->Internal.s.pVM, rc, RT_SRC_POS_ARGS, pszFormat, va); Assert(rc2 == rc); NOREF(rc2);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMSetRuntimeErrorV} */
+static DECLCALLBACK(int) pdmR3UsbHlp_VMSetRuntimeErrorV(PPDMUSBINS pUsbIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ int rc = VMSetRuntimeErrorV(pUsbIns->Internal.s.pVM, fFlags, pszErrorId, pszFormat, va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMState} */
+static DECLCALLBACK(VMSTATE) pdmR3UsbHlp_VMState(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+
+ VMSTATE enmVMState = VMR3GetState(pUsbIns->Internal.s.pVM);
+
+ LogFlow(("pdmR3UsbHlp_VMState: caller='%s'/%d: returns %d (%s)\n", pUsbIns->pReg->szName, pUsbIns->iInstance,
+ enmVMState, VMR3GetStateName(enmVMState)));
+ return enmVMState;
+}
+
+/** @interface_method_impl{PDMUSBHLP,pfnThreadCreate} */
+static DECLCALLBACK(int) pdmR3UsbHlp_ThreadCreate(PPDMUSBINS pUsbIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADUSB pfnThread,
+ PFNPDMTHREADWAKEUPUSB pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ VM_ASSERT_EMT(pUsbIns->Internal.s.pVM);
+ LogFlow(("pdmR3UsbHlp_ThreadCreate: caller='%s'/%d: ppThread=%p pvUser=%p pfnThread=%p pfnWakeup=%p cbStack=%#zx enmType=%d pszName=%p:{%s}\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName, pszName));
+
+ int rc = pdmR3ThreadCreateUsb(pUsbIns->Internal.s.pVM, pUsbIns, ppThread, pvUser, pfnThread, pfnWakeup, cbStack, enmType, pszName);
+
+ LogFlow(("pdmR3UsbHlp_ThreadCreate: caller='%s'/%d: returns %Rrc *ppThread=%RTthrd\n", pUsbIns->pReg->szName, pUsbIns->iInstance,
+ rc, *ppThread));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnSetAsyncNotification} */
+static DECLCALLBACK(int) pdmR3UsbHlp_SetAsyncNotification(PPDMUSBINS pUsbIns, PFNPDMUSBASYNCNOTIFY pfnAsyncNotify)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ VM_ASSERT_EMT0(pUsbIns->Internal.s.pVM);
+ LogFlow(("pdmR3UsbHlp_SetAsyncNotification: caller='%s'/%d: pfnAsyncNotify=%p\n", pUsbIns->pReg->szName, pUsbIns->iInstance, pfnAsyncNotify));
+
+ int rc = VINF_SUCCESS;
+ AssertStmt(pfnAsyncNotify, rc = VERR_INVALID_PARAMETER);
+ AssertStmt(!pUsbIns->Internal.s.pfnAsyncNotify, rc = VERR_WRONG_ORDER);
+ AssertStmt(pUsbIns->Internal.s.fVMSuspended || pUsbIns->Internal.s.fVMReset, rc = VERR_WRONG_ORDER);
+ VMSTATE enmVMState = VMR3GetState(pUsbIns->Internal.s.pVM);
+ AssertStmt( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS,
+ rc = VERR_INVALID_STATE);
+
+ if (RT_SUCCESS(rc))
+ pUsbIns->Internal.s.pfnAsyncNotify = pfnAsyncNotify;
+
+ LogFlow(("pdmR3UsbHlp_SetAsyncNotification: caller='%s'/%d: returns %Rrc\n", pUsbIns->pReg->szName, pUsbIns->iInstance, rc));
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnAsyncNotificationCompleted} */
+static DECLCALLBACK(void) pdmR3UsbHlp_AsyncNotificationCompleted(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ if ( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS
+ || enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS)
+ {
+ LogFlow(("pdmR3UsbHlp_AsyncNotificationCompleted: caller='%s'/%d:\n", pUsbIns->pReg->szName, pUsbIns->iInstance));
+ VMR3AsyncPdmNotificationWakeupU(pVM->pUVM);
+ }
+ else
+ LogFlow(("pdmR3UsbHlp_AsyncNotificationCompleted: caller='%s'/%d: enmVMState=%d\n", pUsbIns->pReg->szName, pUsbIns->iInstance, enmVMState));
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3UsbHlp_VMGetSuspendReason(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3UsbHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3UsbHlp_VMGetResumeReason(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3UsbHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnQueryGenericUserObject} */
+static DECLCALLBACK(void *) pdmR3UsbHlp_QueryGenericUserObject(PPDMUSBINS pUsbIns, PCRTUUID pUuid)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ PUVM pUVM = pVM->pUVM;
+
+ void *pvRet;
+ if (pUVM->pVmm2UserMethods->pfnQueryGenericObject)
+ pvRet = pUVM->pVmm2UserMethods->pfnQueryGenericObject(pUVM->pVmm2UserMethods, pUVM, pUuid);
+ else
+ pvRet = NULL;
+
+ Log(("pdmR3UsbHlp_QueryGenericUserObject: caller='%s'/%d: returns %#p for %RTuuid\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, pvRet, pUuid));
+ return pvRet;
+}
+
+
+/**
+ * The USB device helper structure.
+ */
+const PDMUSBHLP g_pdmR3UsbHlp =
+{
+ PDM_USBHLP_VERSION,
+ pdmR3UsbHlp_DriverAttach,
+ pdmR3UsbHlp_AssertEMT,
+ pdmR3UsbHlp_AssertOther,
+ pdmR3UsbHlp_DBGFStopV,
+ pdmR3UsbHlp_DBGFInfoRegisterArgv,
+ pdmR3UsbHlp_MMHeapAlloc,
+ pdmR3UsbHlp_MMHeapAllocZ,
+ pdmR3UsbHlp_MMHeapFree,
+ pdmR3UsbHlp_PDMQueueCreate,
+ pdmR3UsbHlp_SSMRegister,
+ SSMR3PutStruct,
+ SSMR3PutStructEx,
+ SSMR3PutBool,
+ SSMR3PutU8,
+ SSMR3PutS8,
+ SSMR3PutU16,
+ SSMR3PutS16,
+ SSMR3PutU32,
+ SSMR3PutS32,
+ SSMR3PutU64,
+ SSMR3PutS64,
+ SSMR3PutU128,
+ SSMR3PutS128,
+ SSMR3PutUInt,
+ SSMR3PutSInt,
+ SSMR3PutGCUInt,
+ SSMR3PutGCUIntReg,
+ SSMR3PutGCPhys32,
+ SSMR3PutGCPhys64,
+ SSMR3PutGCPhys,
+ SSMR3PutGCPtr,
+ SSMR3PutGCUIntPtr,
+ SSMR3PutRCPtr,
+ SSMR3PutIOPort,
+ SSMR3PutSel,
+ SSMR3PutMem,
+ SSMR3PutStrZ,
+ SSMR3GetStruct,
+ SSMR3GetStructEx,
+ SSMR3GetBool,
+ SSMR3GetBoolV,
+ SSMR3GetU8,
+ SSMR3GetU8V,
+ SSMR3GetS8,
+ SSMR3GetS8V,
+ SSMR3GetU16,
+ SSMR3GetU16V,
+ SSMR3GetS16,
+ SSMR3GetS16V,
+ SSMR3GetU32,
+ SSMR3GetU32V,
+ SSMR3GetS32,
+ SSMR3GetS32V,
+ SSMR3GetU64,
+ SSMR3GetU64V,
+ SSMR3GetS64,
+ SSMR3GetS64V,
+ SSMR3GetU128,
+ SSMR3GetU128V,
+ SSMR3GetS128,
+ SSMR3GetS128V,
+ SSMR3GetGCPhys32,
+ SSMR3GetGCPhys32V,
+ SSMR3GetGCPhys64,
+ SSMR3GetGCPhys64V,
+ SSMR3GetGCPhys,
+ SSMR3GetGCPhysV,
+ SSMR3GetUInt,
+ SSMR3GetSInt,
+ SSMR3GetGCUInt,
+ SSMR3GetGCUIntReg,
+ SSMR3GetGCPtr,
+ SSMR3GetGCUIntPtr,
+ SSMR3GetRCPtr,
+ SSMR3GetIOPort,
+ SSMR3GetSel,
+ SSMR3GetMem,
+ SSMR3GetStrZ,
+ SSMR3GetStrZEx,
+ SSMR3Skip,
+ SSMR3SkipToEndOfUnit,
+ SSMR3SetLoadError,
+ SSMR3SetLoadErrorV,
+ SSMR3SetCfgError,
+ SSMR3SetCfgErrorV,
+ SSMR3HandleGetStatus,
+ SSMR3HandleGetAfter,
+ SSMR3HandleIsLiveSave,
+ SSMR3HandleMaxDowntime,
+ SSMR3HandleHostBits,
+ SSMR3HandleRevision,
+ SSMR3HandleVersion,
+ SSMR3HandleHostOSAndArch,
+ CFGMR3Exists,
+ CFGMR3QueryType,
+ CFGMR3QuerySize,
+ CFGMR3QueryInteger,
+ CFGMR3QueryIntegerDef,
+ CFGMR3QueryString,
+ CFGMR3QueryStringDef,
+ CFGMR3QueryBytes,
+ CFGMR3QueryU64,
+ CFGMR3QueryU64Def,
+ CFGMR3QueryS64,
+ CFGMR3QueryS64Def,
+ CFGMR3QueryU32,
+ CFGMR3QueryU32Def,
+ CFGMR3QueryS32,
+ CFGMR3QueryS32Def,
+ CFGMR3QueryU16,
+ CFGMR3QueryU16Def,
+ CFGMR3QueryS16,
+ CFGMR3QueryS16Def,
+ CFGMR3QueryU8,
+ CFGMR3QueryU8Def,
+ CFGMR3QueryS8,
+ CFGMR3QueryS8Def,
+ CFGMR3QueryBool,
+ CFGMR3QueryBoolDef,
+ CFGMR3QueryPort,
+ CFGMR3QueryPortDef,
+ CFGMR3QueryUInt,
+ CFGMR3QueryUIntDef,
+ CFGMR3QuerySInt,
+ CFGMR3QuerySIntDef,
+ CFGMR3QueryGCPtr,
+ CFGMR3QueryGCPtrDef,
+ CFGMR3QueryGCPtrU,
+ CFGMR3QueryGCPtrUDef,
+ CFGMR3QueryGCPtrS,
+ CFGMR3QueryGCPtrSDef,
+ CFGMR3QueryStringAlloc,
+ CFGMR3QueryStringAllocDef,
+ CFGMR3GetParent,
+ CFGMR3GetChild,
+ CFGMR3GetChildF,
+ CFGMR3GetChildFV,
+ CFGMR3GetFirstChild,
+ CFGMR3GetNextChild,
+ CFGMR3GetName,
+ CFGMR3GetNameLen,
+ CFGMR3AreChildrenValid,
+ CFGMR3GetFirstValue,
+ CFGMR3GetNextValue,
+ CFGMR3GetValueName,
+ CFGMR3GetValueNameLen,
+ CFGMR3GetValueType,
+ CFGMR3AreValuesValid,
+ CFGMR3ValidateConfig,
+ pdmR3UsbHlp_STAMRegisterV,
+ pdmR3UsbHlp_TimerCreate,
+ pdmR3UsbHlp_TimerFromMicro,
+ pdmR3UsbHlp_TimerFromMilli,
+ pdmR3UsbHlp_TimerFromNano,
+ pdmR3UsbHlp_TimerGet,
+ pdmR3UsbHlp_TimerGetFreq,
+ pdmR3UsbHlp_TimerGetNano,
+ pdmR3UsbHlp_TimerIsActive,
+ pdmR3UsbHlp_TimerIsLockOwner,
+ pdmR3UsbHlp_TimerLockClock,
+ pdmR3UsbHlp_TimerLockClock2,
+ pdmR3UsbHlp_TimerSet,
+ pdmR3UsbHlp_TimerSetFrequencyHint,
+ pdmR3UsbHlp_TimerSetMicro,
+ pdmR3UsbHlp_TimerSetMillies,
+ pdmR3UsbHlp_TimerSetNano,
+ pdmR3UsbHlp_TimerSetRelative,
+ pdmR3UsbHlp_TimerStop,
+ pdmR3UsbHlp_TimerUnlockClock,
+ pdmR3UsbHlp_TimerUnlockClock2,
+ pdmR3UsbHlp_TimerSetCritSect,
+ pdmR3UsbHlp_TimerSave,
+ pdmR3UsbHlp_TimerLoad,
+ pdmR3UsbHlp_TimerDestroy,
+ TMR3TimerSkip,
+ pdmR3UsbHlp_VMSetErrorV,
+ pdmR3UsbHlp_VMSetRuntimeErrorV,
+ pdmR3UsbHlp_VMState,
+ pdmR3UsbHlp_ThreadCreate,
+ PDMR3ThreadDestroy,
+ PDMR3ThreadIAmSuspending,
+ PDMR3ThreadIAmRunning,
+ PDMR3ThreadSleep,
+ PDMR3ThreadSuspend,
+ PDMR3ThreadResume,
+ pdmR3UsbHlp_SetAsyncNotification,
+ pdmR3UsbHlp_AsyncNotificationCompleted,
+ pdmR3UsbHlp_VMGetSuspendReason,
+ pdmR3UsbHlp_VMGetResumeReason,
+ pdmR3UsbHlp_QueryGenericUserObject,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ PDM_USBHLP_VERSION
+};
+
+/** @} */
diff --git a/src/VBox/VMM/VMMR3/PGM.cpp b/src/VBox/VMM/VMMR3/PGM.cpp
new file mode 100644
index 00000000..69d31fbe
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGM.cpp
@@ -0,0 +1,2691 @@
+/* $Id: PGM.cpp $ */
+/** @file
+ * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_pgm PGM - The Page Manager and Monitor
+ *
+ * @sa @ref grp_pgm
+ * @subpage pg_pgm_pool
+ * @subpage pg_pgm_phys
+ *
+ *
+ * @section sec_pgm_modes Paging Modes
+ *
+ * There are three memory contexts: Host Context (HC), Guest Context (GC)
+ * and intermediate context. When talking about paging HC can also be referred
+ * to as "host paging", and GC referred to as "shadow paging".
+ *
+ * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
+ * is defined by the host operating system. The shadow paging mode depends on
+ * the host paging mode and on what mode the guest is currently in. The
+ * following relation between the two is defined:
+ *
+ * @verbatim
+ Host > 32-bit | PAE | AMD64 |
+ Guest | | | |
+ ==v================================
+ 32-bit 32-bit PAE PAE
+ -------|--------|--------|--------|
+ PAE PAE PAE PAE
+ -------|--------|--------|--------|
+ AMD64 AMD64 AMD64 AMD64
+ -------|--------|--------|--------| @endverbatim
+ *
+ * All configurations except those on the diagonal (upper left) are expected to
+ * require special effort from the switcher (i.e. be a bit slower).
+ *
+ *
+ *
+ *
+ * @section sec_pgm_shw The Shadow Memory Context
+ *
+ *
+ * [..]
+ *
+ * Because guest context mappings require PDPT and PML4 entries to allow
+ * writing on AMD64, the two upper levels will have fixed flags whatever the
+ * guest is thinking of using there. So, when shadowing the PD level we will
+ * calculate the effective flags of PD and all the higher levels. In legacy
+ * PAE mode this only applies to the PWT and PCD bits (the rest are
+ * ignored/reserved/MBZ). We will ignore those bits for the present.
+ *
+ *
+ *
+ * @section sec_pgm_int The Intermediate Memory Context
+ *
+ * The world switch goes thru an intermediate memory context whose purpose is
+ * to provide different mappings of the switcher code. All guest mappings are also
+ * present in this context.
+ *
+ * The switcher code is mapped at the same location as on the host, at an
+ * identity mapped location (physical equals virtual address), and at the
+ * hypervisor location. The identity mapped location is for when the world
+ * switches that involves disabling paging.
+ *
+ * PGM maintains page tables for 32-bit, PAE and AMD64 paging modes. This
+ * simplifies switching guest CPU mode and consistency at the cost of more
+ * code to do the work. All memory used for those page tables is located below
+ * 4GB (this includes page tables for guest context mappings).
+ *
+ * Note! The intermediate memory context is also used for 64-bit guest
+ * execution on 32-bit hosts. Because we need to load 64-bit registers
+ * prior to switching to guest context, we need to be in 64-bit mode
+ * first. So, HM has some 64-bit worker routines in VMMRC.rc that get
+ * invoked via the special world switcher code in LegacyToAMD64.asm.
+ *
+ *
+ * @subsection subsec_pgm_int_gc Guest Context Mappings
+ *
+ * During assignment and relocation of a guest context mapping the intermediate
+ * memory context is used to verify the new location.
+ *
+ * Guest context mappings are currently restricted to below 4GB, for reasons
+ * of simplicity. This may change when we implement AMD64 support.
+ *
+ *
+ *
+ *
+ * @section sec_pgm_misc Misc
+ *
+ *
+ * @subsection sec_pgm_misc_A20 The A20 Gate
+ *
+ * PGM implements the A20 gate masking when translating a virtual guest address
+ * into a physical address for CPU access, i.e. PGMGstGetPage (and friends) and
+ * the code reading the guest page table entries during shadowing. The masking
+ * is done consistently for all CPU modes, paged ones included. Large pages are
+ * also masked correctly. (On current CPUs, experiments indicate that AMD does
+ * not apply A20M in paged modes and Intel only does it for the 2nd MB of
+ * memory.)
+ *
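+ * A minimal sketch of the masking itself (mirroring the per-CPU setup done in
+ * PGMR3Init below; GCPhysCpu is a hypothetical local):
+ * @verbatim
+    pPGM->GCPhysA20Mask = ~((RTGCPHYS)!pPGM->fA20Enabled << 20); /* all ones while A20 is on */
+    RTGCPHYS GCPhysCpu  = GCPhys & pPGM->GCPhysA20Mask;          /* bit 20 cleared while off */
+   @endverbatim
+ *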
+ * The A20 gate implementation is per CPU core. It can be configured on a per
+ * core basis via the keyboard device and PC architecture device. This is
+ * probably not exactly how real CPUs do it, but SMP and A20 isn't a place where
+ * guest OSes try pushing things anyway, so who cares. (On current real systems
+ * the A20M signal is probably only sent to the boot CPU and it affects all
+ * threads and probably all cores in that package.)
+ *
+ * The keyboard device and the PC architecture device don't OR their A20
+ * config bits together, rather they are currently implemented such that they
+ * mirror the CPU state. So, flipping the bit in either of them will change the
+ * A20 state. (On real hardware the bits of the two devices should probably be
+ * ORed together to indicate enabled, i.e. both need to be cleared to disable
+ * A20 masking.)
+ *
+ * The A20 state will change immediately, Transmeta fashion. There are no delays
+ * due to buses, wiring or other physical stuff. (On real hardware there are
+ * normally delays; the delays differ between the two devices and probably also
+ * between chipsets and CPU generations. Note that it's said that Transmeta CPUs
+ * do the change immediately like us; they apparently intercept/handle the
+ * port accesses in microcode. Neat.)
+ *
+ * @sa http://en.wikipedia.org/wiki/A20_line#The_80286_and_the_high_memory_area
+ *
+ *
+ * @subsection subsec_pgm_misc_diff Differences Between Legacy PAE and Long Mode PAE
+ *
+ * The differences between legacy PAE and long mode PAE are (a sketch of the
+ * first point follows the list):
+ * -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
+ * all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
+ * usual meanings while 6 is ignored (AMD). This means that upon switching to
+ * legacy PAE mode we'll have to clear these bits and when going to long mode
+ * they must be set. This applies to both intermediate and shadow contexts,
+ * however we don't need to do it for the intermediate one since we're
+ * executing with CR0.WP at that time.
+ * -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
+ * a page aligned one is required.
+ *
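+ * A rough illustration of the first point (uPdpe being a hypothetical raw PDPT
+ * entry; bit numbers as per the list above):
+ * @verbatim
+    uPdpe &= ~(RT_BIT_64(1) | RT_BIT_64(2) | RT_BIT_64(5) | RT_BIT_64(6)); /* to legacy PAE: MBZ */
+    uPdpe |=   RT_BIT_64(1) | RT_BIT_64(2) | RT_BIT_64(5);                 /* to long mode: set  */
+   @endverbatim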
+ *
+ * @section sec_pgm_handlers Access Handlers
+ *
+ * Placeholder.
+ *
+ *
+ * @subsection sec_pgm_handlers_phys Physical Access Handlers
+ *
+ * Placeholder.
+ *
+ *
+ * @subsection sec_pgm_handlers_virt Virtual Access Handlers (obsolete)
+ *
+ * We currently implement three types of virtual access handlers: ALL, WRITE
+ * and HYPERVISOR (WRITE). See PGMVIRTHANDLERKIND for some more details.
+ *
+ * The HYPERVISOR access handlers are kept in a separate tree since they don't apply
+ * to physical pages (PGMTREES::HyperVirtHandlers) and only need to be consulted in
+ * a special \#PF case. The ALL and WRITE ones are in the PGMTREES::VirtHandlers tree;
+ * the rest of this section is going to be about these handlers.
+ *
+ * We'll go thru the life cycle of a handler and try to make sense of it all;
+ * don't know how successful this is gonna be...
+ *
+ * 1. A handler is registered thru the PGMR3HandlerVirtualRegister and
+ * PGMHandlerVirtualRegisterEx APIs. We check for conflicting virtual handlers
+ * and create a new node that is inserted into the AVL tree (range key). Then
+ * a full PGM resync is flagged (clear pool, sync cr3, update virtual bit of PGMPAGE).
+ *
+ * 2. The following PGMSyncCR3/SyncCR3 operation will first invoke HandlerVirtualUpdate.
+ *
+ * 2a. HandlerVirtualUpdate will look up all the pages covered by virtual handlers
+ * via the current guest CR3 and update the physical page -> virtual handler
+ * translation. Needless to say, this doesn't exactly scale very well. If any changes
+ * are detected, it will flag a virtual bit update just like we did on registration.
+ * PGMPHYS pages with changes will have their virtual handler state reset to NONE.
+ *
+ * 2b. The virtual bit update process will iterate all the pages covered by all the
+ * virtual handlers and update the PGMPAGE virtual handler state to the max of all
+ * virtual handlers on that page.
+ *
+ * 2c. Back in SyncCR3 we will now flush the entire shadow page cache to make sure
+ * we don't miss any alias mappings of the monitored pages.
+ *
+ * 2d. SyncCR3 will then proceed with syncing the CR3 table.
+ *
+ * 3. \#PF(np,read) on a page in the range. This will cause it to be synced
+ * read-only and resumed if it's a WRITE handler. If it's an ALL handler we
+ * will call the handlers like in the next step. If the physical mapping has
+ * changed we will - some time in the future - perform a handler callback
+ * (optional) and update the physical -> virtual handler cache.
+ *
+ * 4. \#PF(,write) on a page in the range. This will cause the handler to
+ * be invoked.
+ *
+ * 5. The guest invalidates the page and changes the physical backing or
+ * unmaps it. This should cause the invalidation callback to be invoked
+ * (it might not yet be 100% perfect). Exactly what happens next... is
+ * this where we mess up and end up out of sync for a while?
+ *
+ * 6. The handler is deregistered by the client via PGMHandlerVirtualDeregister.
+ * We will then set all PGMPAGEs in the physical -> virtual handler cache for
+ * this handler to NONE and trigger a full PGM resync (basically the same
+ *    as in step 1). Which means step 2 is executed again.
+ *
+ *
+ * @subsubsection sub_sec_pgm_handler_virt_todo TODOs
+ *
+ * There are a bunch of things that need to be done to make the virtual handlers
+ * work 100% correctly and more efficiently.
+ *
+ * The first bit hasn't been implemented yet because it's going to slow the
+ * whole mess down even more, and besides it seems to be working reliably for
+ * our current uses. OTOH, some of the optimizations might end up more or less
+ * implementing the missing bits, so we'll see.
+ *
+ * On the optimization side, the first thing to do is to try to avoid unnecessary
+ * cache flushing. Then try to team up with the shadowing code to track changes
+ * in mappings by means of access to them (shadow in), updates to shadow pages,
+ * invlpg, and shadow PT discarding (perhaps).
+ *
+ * Some ideas that have popped up for optimization of current and new features:
+ * - bitmap indicating where there are virtual handlers installed.
+ * (4KB => 2**20 pages, page 2**12 => covers 32-bit address space 1:1!)
+ * - Further optimize this by min/max (needs min/max avl getters).
+ * - Shadow page table entry bit (if any left)?
+ *
+ */
+
+
+/** @page pg_pgm_phys PGM Physical Guest Memory Management
+ *
+ *
+ * Objectives:
+ * - Guest RAM over-commitment using memory ballooning,
+ * zero pages and general page sharing.
+ * - Moving or mirroring a VM onto a different physical machine.
+ *
+ *
+ * @section sec_pgmPhys_Definitions Definitions
+ *
+ * Allocation chunk - An RTR0MemObjAllocPhysNC or RTR0MemObjAllocPhys allocated
+ * memory object and the tracking machinery associated with it.
+ *
+ *
+ *
+ *
+ * @section sec_pgmPhys_AllocPage Allocating a page.
+ *
+ * Initially we map *all* guest memory to the (per VM) zero page, which
+ * means that none of the read functions will cause pages to be allocated.
+ *
+ * An exception is the access bit in page tables that have been shared. This must
+ * be handled, but we must also make sure PGMGst*Modify doesn't make
+ * unnecessary modifications.
+ *
+ * Allocation points:
+ * - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
+ * - Replacing a zero page mapping at \#PF.
+ * - Replacing a shared page mapping at \#PF.
+ * - ROM registration (currently MMR3RomRegister).
+ * - VM restore (pgmR3Load).
+ *
+ * For the first three it would make sense to keep a few pages handy
+ * until we've reached the max memory commitment for the VM.
+ *
+ * For the ROM registration, we know exactly how many pages we need
+ * and will request these from ring-0. For restore, we will save
+ * the number of non-zero pages in the saved state and allocate
+ * them up front. This would allow the ring-0 component to refuse
+ * the request if there isn't sufficient memory available for VM use.
+ *
+ * Btw. for both ROM and restore allocations we won't be requiring
+ * zeroed pages as they are going to be filled instantly.
+ *
+ *
+ * @section sec_pgmPhys_FreePage Freeing a page
+ *
+ * There are a few points where a page can be freed:
+ * - After being replaced by the zero page.
+ * - After being replaced by a shared page.
+ * - After being ballooned by the guest additions.
+ * - At reset.
+ * - At restore.
+ *
+ * When freeing one or more pages they will be returned to the ring-0
+ * component and replaced by the zero page.
+ *
+ * The reasoning for clearing out all the pages on reset is that it will
+ * return us to the exact same state as on power on, and may thereby help
+ * us reduce the memory load on the system. Further it might have a
+ * (temporary) positive influence on memory fragmentation (@see sec_pgmPhys_Fragmentation).
+ *
+ * On restore, as mentioned under the allocation topic, pages should be
+ * freed / allocated depending on how many are actually required by the
+ * new VM state. The simplest approach is to do like on reset, and free
+ * all non-ROM pages and then allocate what we need.
+ *
+ * A measure to prevent some fragmentation would be to let each allocation
+ * chunk have some affinity towards the VM having allocated the most pages
+ * from it. Also, try to make sure to allocate from allocation chunks that
+ * are almost full. Admittedly, both these measures might work counter to
+ * our intentions and it's probably not worth putting a lot of effort,
+ * CPU time or memory into this.
+ *
+ *
+ * @section sec_pgmPhys_SharePage Sharing a page
+ *
+ * The basic idea is that there will be an idle priority kernel
+ * thread walking the non-shared VM pages, hashing them and looking for
+ * pages with the same checksum. If such pages are found, it will compare
+ * them byte-by-byte to see if they actually are identical. If found to be
+ * identical it will allocate a shared page, copy the content, check that
+ * the page didn't change while doing this, and finally request both the
+ * VMs to use the shared page instead. If the page is all zeros (special
+ * checksum and byte-by-byte check) it will request the VM that owns it
+ * to replace it with the zero page.
+ *
+ * To make this efficient, we will have to make sure not to try to share a page
+ * that will change its contents soon. This part requires the most work.
+ * A simple idea would be to request the VM to write monitor the page for
+ * a while to make sure it isn't modified any time soon. Also, it may
+ * make sense to skip pages that are being write monitored since this
+ * information is readily available to the thread if it works on the
+ * per-VM guest memory structures (presently called PGMRAMRANGE).
+ *
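+ * In pseudo-C the sharing thread's inner loop could look roughly like this
+ * (a sketch only; none of these helpers exist yet):
+ * @verbatim
+    for (each non-shared page pPage in some VM)
+    {
+        uint32_t uChecksum = pgmPageChecksum(pPage);            /* hypothetical */
+        PPGMPAGE pDup      = pgmLookupByChecksum(uChecksum);    /* hypothetical */
+        if (pgmPageIsAllZeros(pPage))                           /* hypothetical */
+            requestVMToUseZeroPage(pPage);
+        else if (pDup && !memcmp(pDup, pPage, GUEST_PAGE_SIZE))
+            requestVMsToShare(pDup, pPage);                     /* re-verify before committing */
+    }
+   @endverbatim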
+ *
+ * @section sec_pgmPhys_Fragmentation Fragmentation Concerns and Counter Measures
+ *
+ * The pages are organized in allocation chunks in ring-0; this is a necessity
+ * if we wish to have an OS agnostic approach to this whole thing. (On Linux we
+ * could easily work on a page-by-page basis if we liked. Whether this is possible
+ * or efficient on NT I don't quite know.) Fragmentation within these chunks may
+ * become a problem as part of the idea here is that we wish to return memory to
+ * the host system.
+ *
+ * For instance, starting two VMs at the same time, they will both allocate the
+ * guest memory on-demand and if permitted their page allocations will be
+ * intermixed. Shut down one of the two VMs and it will be difficult to return
+ * any memory to the host system because the page allocation for the two VMs are
+ * mixed up in the same allocation chunks.
+ *
+ * To further complicate matters, when pages are freed because they have been
+ * ballooned or become shared/zero the whole idea is that the page is supposed
+ * to be reused by another VM or returned to the host system. This will cause
+ * allocation chunks to contain pages belonging to different VMs and prevent
+ * returning memory to the host when one of those VMs shuts down.
+ *
+ * The only way to really deal with this problem is to move pages. This can
+ * either be done at VM shutdown and/or by the idle priority worker thread
+ * that will be responsible for finding sharable/zero pages. The mechanisms
+ * involved for coercing a VM to move a page (or to do it for it) will be
+ * the same as when telling it to share/zero a page.
+ *
+ *
+ * @section sec_pgmPhys_Tracking Tracking Structures And Their Cost
+ *
+ * There's a difficult balance between keeping the per-page tracking structures
+ * (global and guest page) easy to use and keeping them from eating too much
+ * memory. We have limited virtual memory resources available when operating in
+ * 32-bit kernel space (on 64-bit it's quite a different story). The
+ * tracking structures will be designed such that we can deal with up
+ * to 32GB of memory on a 32-bit system and essentially unlimited amounts on 64-bit ones.
+ *
+ *
+ * @subsection subsec_pgmPhys_Tracking_Kernel Kernel Space
+ *
+ * @see pg_GMM
+ *
+ * @subsection subsec_pgmPhys_Tracking_PerVM Per-VM
+ *
+ * Fixed info is the physical address of the page (HCPhys) and the page id
+ * (described above). Theoretically we'll need 48(-12) bits for the HCPhys part.
+ * Today we're restricting ourselves to 40(-12) bits because this is the current
+ * restriction of all AMD64 implementations (I think Barcelona will up this
+ * to 48(-12) bits, not that it really matters) and I needed the bits for
+ * tracking mappings of a page. 48-12 = 36. That leaves 28 bits, which means a
+ * decent range for the page id: 2^(28+12) = 1024TB.
+ *
+ * In addition to these, we'll have to keep maintaining the page flags as we
+ * currently do. Although it wouldn't harm to optimize these quite a bit, like
+ * for instance the ROM shouldn't depend on having a write handler installed
+ * in order for it to become read-only. A RO/RW bit should be considered so
+ * that the page syncing code doesn't have to mess about checking multiple
+ * flag combinations (ROM || RW handler || write monitored) in order to
+ * figure out how to setup a shadow PTE. But this, of course, is second
+ * priority at present. Currently this requires 12 bits, but could probably
+ * be optimized to ~8.
+ *
+ * Then there's the 24 bits used to track which shadow page tables are
+ * currently mapping a page for the purpose of speeding up physical
+ * access handlers, and thereby the page pool cache. More bits for this
+ * purpose wouldn't hurt IIRC.
+ *
+ * Then there is a new field in which we need to record what kind of page
+ * this is: shared, zero, normal or write-monitored-normal. This'll
+ * require 2 bits. One bit might be needed for indicating whether a
+ * write monitored page has been written to. And yet another one or
+ * two for tracking migration status. 3-4 bits total then.
+ *
+ * Whatever is left can be used to record the shareability of a
+ * page. The page checksum will not be stored in the per-VM table as
+ * the idle thread will not be permitted to do modifications to it.
+ * It will instead have to keep its own working set of potentially
+ * shareable pages and their check sums and stuff.
+ *
+ * For the present we'll keep the current packing of the
+ * PGMRAMRANGE::aHCPhys to keep the changes simple; only, of course,
+ * we'll have to change it to a struct with a total of 128 bits at
+ * our disposal.
+ *
+ * The initial layout will be like this:
+ * @verbatim
+ RTHCPHYS HCPhys; The current stuff.
+ 63:40 Current shadow PT tracking stuff.
+ 39:12 The physical page frame number.
+ 11:0 The current flags.
+ uint32_t u28PageId : 28; The page id.
+ uint32_t u2State : 2; The page state { zero, shared, normal, write monitored }.
+ uint32_t fWrittenTo : 1; Whether a write monitored page was written to.
+ uint32_t u1Reserved : 1; Reserved for later.
+ uint32_t u32Reserved; Reserved for later, mostly sharing stats.
+ @endverbatim
+ *
+ * The final layout will be something like this:
+ * @verbatim
+ RTHCPHYS HCPhys; The current stuff.
+ 63:48 High page id (12+).
+ 47:12 The physical page frame number.
+ 11:0 Low page id.
+ uint32_t fReadOnly : 1; Whether it's readonly page (rom or monitored in some way).
+ uint32_t u3Type : 3; The page type {RESERVED, MMIO, MMIO2, ROM, shadowed ROM, RAM}.
+ uint32_t u2PhysMon : 2; Physical access handler type {none, read, write, all}.
+ uint32_t u2VirtMon : 2; Virtual access handler type {none, read, write, all}..
+ uint32_t u2State : 2; The page state { zero, shared, normal, write monitored }.
+ uint32_t fWrittenTo : 1; Whether a write monitored page was written to.
+ uint32_t u20Reserved : 20; Reserved for later, mostly sharing stats.
+ uint32_t u32Tracking; The shadow PT tracking stuff, roughly.
+ @endverbatim
+ *
+ * Cost wise, this means we'll double the cost for guest memory. There isn't any
+ * way around that, I'm afraid. It means that the cost of dealing out 32GB of memory
+ * to one or more VMs is: (32GB >> GUEST_PAGE_SHIFT) * 16 bytes, or 128MBs. Or, for
+ * another example, the VM heap cost when assigning 1GB to a VM will be: 4MB.
+ *
+ * A couple of cost examples for the total cost per-VM + kernel.
+ * 32-bit Windows and 32-bit linux:
+ * 1GB guest ram, 256K pages: 4MB + 2MB(+) = 6MB
+ * 4GB guest ram, 1M pages: 16MB + 8MB(+) = 24MB
+ * 32GB guest ram, 8M pages: 128MB + 64MB(+) = 192MB
+ * 64-bit Windows and 64-bit linux:
+ * 1GB guest ram, 256K pages: 4MB + 3MB(+) = 7MB
+ * 4GB guest ram, 1M pages: 16MB + 12MB(+) = 28MB
+ * 32GB guest ram, 8M pages: 128MB + 96MB(+) = 224MB
+ *
+ * UPDATE - 2007-09-27:
+ * Will need a ballooned flag/state too because we cannot
+ * trust the guest 100% and reporting the same page as ballooned more
+ * than once will put the GMM off balance.
+ *
+ *
+ * @section sec_pgmPhys_Serializing Serializing Access
+ *
+ * Initially, we'll try a simple scheme:
+ *
+ *    - The per-VM RAM tracking structures (PGMRAMRANGE) are only modified
+ *      by the EMT thread of that VM while in the pgm critsect (see the
+ *      sketch after this list).
+ *    - Other threads in the VM process that need to make reliable use of
+ *      the per-VM RAM tracking structures will enter the critsect.
+ *    - No process external thread or kernel thread will ever try to enter
+ *      the pgm critical section, as that just won't work.
+ *    - The idle thread (and similar threads) doesn't need 100% reliable
+ *      data when performing its tasks as the EMT thread will be the one to
+ *      do the actual changes later anyway. So, as long as it only accesses
+ *      the main ram range, it can do so by somehow preventing the VM from
+ *      being destroyed while it works on it...
+ *
+ *    - The over-commitment management, including the allocating/freeing of
+ *      chunks, is serialized by a ring-0 mutex lock (a fast one since the
+ *      more mundane mutex implementation is broken on Linux).
+ *    - A separate mutex is protecting the set of allocation chunks so
+ *      that pages can be shared or/and freed up while some other VM is
+ *      allocating more chunks. This mutex can be taken from under the other
+ *      one, but not the other way around.
+ *
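+ * For the first two points the usage pattern would simply be (assuming the
+ * PGM critsect macros from PGMInternal.h):
+ * @verbatim
+    PGM_LOCK_VOID(pVM);                /* enter the pgm critsect */
+    /* ... inspect or modify the PGMRAMRANGE structures ... */
+    PGM_UNLOCK(pVM);                   /* and leave it again */
+   @endverbatim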
+ *
+ * @section sec_pgmPhys_Request VM Request interface
+ *
+ * When in ring-0 it will become necessary to send requests to a VM so it can
+ * for instance move a page while defragmenting during VM destroy. The idle
+ * thread will make use of this interface to request VMs to setup shared
+ * pages and to perform write monitoring of pages.
+ *
+ * I would propose an interface similar to the current VMReq interface, similar
+ * in that it doesn't require locking and that the one sending the request may
+ * wait for completion if it wishes to. This shouldn't be very difficult to
+ * realize.
+ *
+ * The requests themselves are also pretty simple. They are basically:
+ * -# Check that some precondition is still true.
+ * -# Do the update.
+ * -# Update all shadow page tables involved with the page.
+ *
+ * The 3rd step is identical to what we're already doing when updating a
+ * physical handler, see pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs.
+ *
+ *
+ *
+ * @section sec_pgmPhys_MappingCaches Mapping Caches
+ *
+ * In order to be able to map in and out memory and to be able to support
+ * guests with more RAM than we've got virtual address space, we'll be employing
+ * a mapping cache. Normally ring-0 and ring-3 can share the same cache,
+ * however on 32-bit darwin the ring-0 code is running in a different memory
+ * context and therefore needs a separate cache. In raw-mode context we also
+ * need a separate cache. The 32-bit darwin mapping cache and the one for
+ * raw-mode context share a lot of code, see PGMRZDYNMAP.
+ *
+ *
+ * @subsection subsec_pgmPhys_MappingCaches_R3 Ring-3
+ *
+ * We've considered implementing the ring-3 mapping cache page based but found
+ * that this was bothersome when one had to take into account TLBs+SMP and
+ * portability (missing the necessary APIs on several platforms). There were
+ * also some performance concerns with this approach which hadn't quite been
+ * worked out.
+ *
+ * Instead, we'll be mapping allocation chunks into the VM process. This simplifies
+ * matters quite a bit since we don't need to invent any new ring-0 stuff,
+ * only some minor RTR0MEMOBJ mapping stuff. The main concern here, compared to
+ * the previous idea, is that mapping or unmapping a 1MB chunk is more
+ * costly than a single page, although how much more costly is uncertain. We'll
+ * try to address this by using a very big cache, preferably bigger than the actual
+ * VM RAM size if possible. The current VM RAM sizes should give some idea for
+ * 32-bit boxes, while on 64-bit we can probably get away with employing an
+ * unlimited cache.
+ *
+ * The cache has two parts, as already indicated: the ring-3 side and the
+ * ring-0 side.
+ *
+ * The ring-0 side will be tied to the page allocator since it will operate on the
+ * memory objects it contains. It will therefore require the first ring-0 mutex
+ * discussed in @ref sec_pgmPhys_Serializing. We'll have some double housekeeping
+ * wrt who has mapped what, I think, since both VMMR0.r0 and RTR0MemObj will keep
+ * track of mapping relations.
+ *
+ * The ring-3 part will be protected by the pgm critsect. For simplicity, we'll
+ * require anyone that desires to do changes to the mapping cache to do that
+ * from within this critsect. Alternatively, we could employ a separate critsect
+ * for serializing changes to the mapping cache as this would reduce potential
+ * contention with other threads accessing mappings unrelated to the changes
+ * that are in process. We can see about this later; contention will show
+ * up in the statistics anyway, so it'll be simple to tell.
+ *
+ * The organization of the ring-3 part will be very much like how the allocation
+ * chunks are organized in ring-0, that is in an AVL tree by chunk id. To avoid
+ * having to walk the tree all the time, we'll have a couple of lookaside entries
+ * like we do for I/O ports and MMIO in IOM.
+ *
+ * The simplified flow of a PGMPhysRead/Write function (sketched in code below):
+ * -# Enter the PGM critsect.
+ * -# Lookup GCPhys in the ram ranges and get the Page ID.
+ * -# Calc the Allocation Chunk ID from the Page ID.
+ * -# Check the lookaside entries and then the AVL tree for the Chunk ID.
+ * If not found in cache:
+ * -# Call ring-0 and request it to be mapped and supply
+ * a chunk to be unmapped if the cache is maxed out already.
+ * -# Insert the new mapping into the AVL tree (id + R3 address).
+ * -# Update the relevant lookaside entry and return the mapping address.
+ * -# Do the read/write according to monitoring flags and everything.
+ * -# Leave the critsect.
+ *
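+ * The same flow in skeletal C (a sketch; the pgmPhysGetPageId and
+ * pgmR3MapChunkCached helpers are hypothetical, the critsect macros are not):
+ * @verbatim
+    PGM_LOCK_VOID(pVM);                                        /* 1 */
+    uint32_t const idPage  = pgmPhysGetPageId(pVM, GCPhys);    /* 2: ram range lookup */
+    uint32_t const idChunk = idPage >> GMM_CHUNKID_SHIFT;      /* 3: chunk id from page id */
+    uint8_t *pbChunk = pgmR3MapChunkCached(pVM, idChunk);      /* 4: lookaside + AVL, ring-0 on miss */
+    /* 5: do the read/write at pbChunk + offset, honoring the monitoring flags */
+    PGM_UNLOCK(pVM);                                           /* 6 */
+   @endverbatim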
+ *
+ * @section sec_pgmPhys_Changes Changes
+ *
+ * Breakdown of the changes involved?
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/hm.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/vmm/uvm.h>
+#include "PGMInline.h"
+
+#include <VBox/dbg.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/env.h>
+#include <iprt/file.h>
+#include <iprt/mem.h>
+#include <iprt/rand.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#ifdef RT_OS_LINUX
+# include <iprt/linux/sysfs.h>
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Argument package for pgmR3RelocatePhysHandler, pgmR3RelocateVirtHandler and
+ * pgmR3RelocateHyperVirtHandler.
+ */
+typedef struct PGMRELOCHANDLERARGS
+{
+ RTGCINTPTR offDelta;
+ PVM pVM;
+} PGMRELOCHANDLERARGS;
+/** Pointer to a page access handler relocation argument package. */
+typedef PGMRELOCHANDLERARGS const *PCPGMRELOCHANDLERARGS;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int pgmR3InitPaging(PVM pVM);
+static int pgmR3InitStats(PVM pVM);
+static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+#ifdef VBOX_STRICT
+static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
+#endif
+
+#ifdef VBOX_WITH_DEBUGGER
+static FNDBGCCMD pgmR3CmdError;
+static FNDBGCCMD pgmR3CmdSync;
+static FNDBGCCMD pgmR3CmdSyncAlways;
+# ifdef VBOX_STRICT
+static FNDBGCCMD pgmR3CmdAssertCR3;
+# endif
+static FNDBGCCMD pgmR3CmdPhysToFile;
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_DEBUGGER
+/** Argument descriptors for '.pgmerror' and '.pgmerroroff'. */
+static const DBGCVARDESC g_aPgmErrorArgs[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 0, 1, DBGCVAR_CAT_STRING, 0, "where", "Error injection location." },
+};
+
+static const DBGCVARDESC g_aPgmPhysToFileArgs[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "file", "The file name." },
+ { 0, 1, DBGCVAR_CAT_STRING, 0, "nozero", "If present, zero pages are skipped." },
+};
+
+# ifdef DEBUG_sandervl
+static const DBGCVARDESC g_aPgmCountPhysWritesArgs[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 1, 1, DBGCVAR_CAT_STRING, 0, "enabled", "on/off." },
+ { 1, 1, DBGCVAR_CAT_NUMBER_NO_RANGE, 0, "interval", "Interval in ms." },
+};
+# endif
+
+/** Command descriptors. */
+static const DBGCCMD g_aCmds[] =
+{
+ /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
+ { "pgmsync", 0, 0, NULL, 0, 0, pgmR3CmdSync, "", "Sync the CR3 page." },
+    { "pgmerror",        0, 1,        &g_aPgmErrorArgs[0],          1,         0, pgmR3CmdError,      "",                     "Enables injection of runtime errors into parts of PGM." },
+    { "pgmerroroff",     0, 1,        &g_aPgmErrorArgs[0],          1,         0, pgmR3CmdError,      "",                     "Disables injection of runtime errors into parts of PGM." },
+# ifdef VBOX_STRICT
+ { "pgmassertcr3", 0, 0, NULL, 0, 0, pgmR3CmdAssertCR3, "", "Check the shadow CR3 mapping." },
+# ifdef VBOX_WITH_PAGE_SHARING
+ { "pgmcheckduppages", 0, 0, NULL, 0, 0, pgmR3CmdCheckDuplicatePages, "", "Check for duplicate pages in all running VMs." },
+ { "pgmsharedmodules", 0, 0, NULL, 0, 0, pgmR3CmdShowSharedModules, "", "Print shared modules info." },
+# endif
+# endif
+ { "pgmsyncalways", 0, 0, NULL, 0, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
+ { "pgmphystofile", 1, 2, &g_aPgmPhysToFileArgs[0], 2, 0, pgmR3CmdPhysToFile, "", "Save the physical memory to file." },
+};
+#endif
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+
+/**
+ * Interface that NEM uses to switch PGM into simplified memory management mode.
+ *
+ * This call occurs before PGMR3Init.
+ *
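+ * Expected usage is a single early call from NEM initialization (a sketch
+ * only; the exact call site depends on the native NEM backend):
+ * @verbatim
+    if (fBackendWantsSimplifiedMemoryMode) /* hypothetical condition */
+        PGMR3EnableNemMode(pVM);           /* must precede PGMR3Init(pVM) */
+   @endverbatim
+ *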
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM)
+{
+ AssertFatal(!PDMCritSectIsInitialized(&pVM->pgm.s.CritSectX));
+ pVM->pgm.s.fNemMode = true;
+}
+
+
+/**
+ * Checks whether the simplified memory management mode for NEM is enabled.
+ *
+ * @returns true if enabled, false if not.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) PGMR3IsNemModeEnabled(PVM pVM)
+{
+ return pVM->pgm.s.fNemMode;
+}
+
+#endif /* VBOX_WITH_PGM_NEM_MODE */
+
+/**
+ * Initiates the paging of the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3Init(PVM pVM)
+{
+ LogFlow(("PGMR3Init:\n"));
+ PCFGMNODE pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");
+ int rc;
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
+ AssertCompile(sizeof(pVM->apCpusR3[0]->pgm.s) <= sizeof(pVM->apCpusR3[0]->pgm.padding));
+ AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));
+
+ /*
+ * If we're in driveless mode we have to use the simplified memory mode.
+ */
+ bool const fDriverless = SUPR3IsDriverless();
+ if (fDriverless)
+ {
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!pVM->pgm.s.fNemMode)
+ pVM->pgm.s.fNemMode = true;
+#else
+ return VMR3SetError(pVM->pUVM, VERR_SUP_DRIVERLESS, RT_SRC_POS,
+ "Driverless requires that VBox is built with VBOX_WITH_PGM_NEM_MODE defined");
+#endif
+ }
+
+ /*
+ * Init the structure.
+ */
+ /*pVM->pgm.s.fRestoreRomPagesAtReset = false;*/
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+ {
+ pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
+ pVM->pgm.s.aHandyPages[i].fZeroed = false;
+ pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
+ pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
+ }
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLargeHandyPage); i++)
+ {
+ pVM->pgm.s.aLargeHandyPage[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
+ pVM->pgm.s.aLargeHandyPage[i].fZeroed = false;
+ pVM->pgm.s.aLargeHandyPage[i].idPage = NIL_GMM_PAGEID;
+ pVM->pgm.s.aLargeHandyPage[i].idSharedPage = NIL_GMM_PAGEID;
+ }
+
+ AssertReleaseReturn(pVM->pgm.s.cPhysHandlerTypes == 0, VERR_WRONG_ORDER);
+ for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
+ {
+ if (fDriverless)
+ pVM->pgm.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
+ pVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
+ pVM->pgm.s.aPhysHandlerTypes[i].pfnHandler = pgmR3HandlerPhysicalHandlerInvalid;
+ }
+
+ /* Init the per-CPU part. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ PPGMCPU pPGM = &pVCpu->pgm.s;
+
+ pPGM->enmShadowMode = PGMMODE_INVALID;
+ pPGM->enmGuestMode = PGMMODE_INVALID;
+ pPGM->enmGuestSlatMode = PGMSLAT_INVALID;
+ pPGM->idxGuestModeData = UINT8_MAX;
+ pPGM->idxShadowModeData = UINT8_MAX;
+ pPGM->idxBothModeData = UINT8_MAX;
+
+ pPGM->GCPhysCR3 = NIL_RTGCPHYS;
+ pPGM->GCPhysNstGstCR3 = NIL_RTGCPHYS;
+ pPGM->GCPhysPaeCR3 = NIL_RTGCPHYS;
+
+ pPGM->pGst32BitPdR3 = NULL;
+ pPGM->pGstPaePdptR3 = NULL;
+ pPGM->pGstAmd64Pml4R3 = NULL;
+ pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
+ pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
+ pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ pPGM->pGstEptPml4R3 = NULL;
+ pPGM->pGstEptPml4R0 = NIL_RTR0PTR;
+ pPGM->uEptPtr = 0;
+#endif
+ for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.apGstPaePDsR3); i++)
+ {
+ pPGM->apGstPaePDsR3[i] = NULL;
+ pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
+ pPGM->aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
+ }
+
+ pPGM->fA20Enabled = true;
+ pPGM->GCPhysA20Mask = ~((RTGCPHYS)!pPGM->fA20Enabled << 20);
+ }
+
+ pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
+ pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
+
+ rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
+#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
+ true
+#else
+ false
+#endif
+ );
+ AssertLogRelRCReturn(rc, rc);
+
+#if HC_ARCH_BITS == 32
+# ifdef RT_OS_DARWIN
+ rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE * 3);
+# else
+ rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
+# endif
+#else
+ rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
+#endif
+ AssertLogRelRCReturn(rc, rc);
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
+ pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
+
+ /*
+ * Get the configured RAM size - to estimate saved state size.
+ */
+ uint64_t cbRam;
+ rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ cbRam = 0;
+ else if (RT_SUCCESS(rc))
+ {
+ if (cbRam < GUEST_PAGE_SIZE)
+ cbRam = 0;
+ cbRam = RT_ALIGN_64(cbRam, GUEST_PAGE_SIZE);
+ }
+ else
+ {
+ AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
+ return rc;
+ }
+
+ /*
+ * Check for PCI pass-through and other configurables.
+ */
+ rc = CFGMR3QueryBoolDef(pCfgPGM, "PciPassThrough", &pVM->pgm.s.fPciPassthrough, false);
+ AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"PciPassThrough\", rc=%Rrc.\n", rc), rc);
+ AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough || pVM->pgm.s.fRamPreAlloc, VERR_INVALID_PARAMETER);
+
+ rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "PageFusionAllowed", &pVM->pgm.s.fPageFusionAllowed, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/PGM/ZeroRamPagesOnReset, boolean, true}
+ * Whether to clear RAM pages on (hard) reset. */
+ rc = CFGMR3QueryBoolDef(pCfgPGM, "ZeroRamPagesOnReset", &pVM->pgm.s.fZeroRamPagesOnReset, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Register callbacks, string formatters and the saved state data unit.
+ */
+#ifdef VBOX_STRICT
+ VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
+#endif
+ PGMRegisterStringFormatTypes();
+
+ rc = pgmR3InitSavedState(pVM, cbRam);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Initialize the PGM critical section and flush the phys TLBs
+ */
+ rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
+ AssertRCReturn(rc, rc);
+
+ PGMR3PhysChunkInvalidateTLB(pVM);
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ /*
+ * For the time being we sport a full set of handy pages in addition to the base
+ * memory to simplify things.
+ */
+ rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
+ AssertRCReturn(rc, rc);
+
+ /*
+     * Setup the zero page (HCPhysZeroPg is set by ring-0).
+ */
+ RT_ZERO(pVM->pgm.s.abZeroPg); /* paranoia */
+ if (fDriverless)
+ pVM->pgm.s.HCPhysZeroPg = _4G - GUEST_PAGE_SIZE * 2 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
+ AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
+ AssertRelease(pVM->pgm.s.HCPhysZeroPg != 0);
+
+ /*
+ * Setup the invalid MMIO page (HCPhysMmioPg is set by ring-0).
+ * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
+ */
+ ASMMemFill32(pVM->pgm.s.abMmioPg, sizeof(pVM->pgm.s.abMmioPg), 0xfeedface);
+ if (fDriverless)
+ pVM->pgm.s.HCPhysMmioPg = _4G - GUEST_PAGE_SIZE * 3 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
+ AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
+ AssertRelease(pVM->pgm.s.HCPhysMmioPg != 0);
+ pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
+
+ /*
+ * Initialize physical access handlers.
+ */
+ /** @cfgm{/PGM/MaxPhysicalAccessHandlers, uint32_t, 32, 65536, 6144}
+ * Number of physical access handlers allowed (subject to rounding). This is
+ * managed as one time allocation during initializations. The default is
+ * lower for a driverless setup. */
+ /** @todo can lower it for nested paging too, at least when there is no
+ * nested guest involved. */
+ uint32_t cAccessHandlers = 0;
+ rc = CFGMR3QueryU32Def(pCfgPGM, "MaxPhysicalAccessHandlers", &cAccessHandlers, !fDriverless ? 6144 : 640);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgStmt(cAccessHandlers >= 32, ("cAccessHandlers=%#x, min 32\n", cAccessHandlers), cAccessHandlers = 32);
+ AssertLogRelMsgStmt(cAccessHandlers <= _64K, ("cAccessHandlers=%#x, max 65536\n", cAccessHandlers), cAccessHandlers = _64K);
+ if (!fDriverless)
+ {
+ rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_HANDLER_INIT, cAccessHandlers, NULL);
+ AssertRCReturn(rc, rc);
+ AssertPtr(pVM->pgm.s.pPhysHandlerTree);
+ AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_paNodes);
+ AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc);
+ }
+ else
+ {
+ uint32_t cbTreeAndBitmap = 0;
+ uint32_t const cbTotalAligned = pgmHandlerPhysicalCalcTableSizes(&cAccessHandlers, &cbTreeAndBitmap);
+ uint8_t *pb = NULL;
+ rc = SUPR3PageAlloc(cbTotalAligned >> HOST_PAGE_SHIFT, 0, (void **)&pb);
+ AssertLogRelRCReturn(rc, rc);
+
+ pVM->pgm.s.PhysHandlerAllocator.initSlabAllocator(cAccessHandlers, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
+ (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
+ pVM->pgm.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
+ pVM->pgm.s.pPhysHandlerTree->initWithAllocator(&pVM->pgm.s.PhysHandlerAllocator);
+ }
+
+ /*
+ * Register the physical access handler protecting ROMs.
+ */
+ if (RT_SUCCESS(rc))
+ /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/, pgmPhysRomWriteHandler,
+ "ROM write protection", &pVM->pgm.s.hRomPhysHandlerType);
+
+ /*
+ * Register the physical access handler doing dirty MMIO2 tracing.
+ */
+ if (RT_SUCCESS(rc))
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
+ pgmPhysMmio2WriteHandler, "MMIO2 dirty page tracing",
+ &pVM->pgm.s.hMmio2DirtyPhysHandlerType);
+
+ /*
+ * Init the paging.
+ */
+ if (RT_SUCCESS(rc))
+ rc = pgmR3InitPaging(pVM);
+
+ /*
+ * Init the page pool.
+ */
+ if (RT_SUCCESS(rc))
+ rc = pgmR3PoolInit(pVM);
+
+ if (RT_SUCCESS(rc))
+ {
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Info & statistics
+ */
+ DBGFR3InfoRegisterInternalEx(pVM, "mode",
+ "Shows the current paging mode. "
+ "Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing is given.",
+ pgmR3InfoMode,
+ DBGFINFO_FLAGS_ALL_EMTS);
+ DBGFR3InfoRegisterInternal(pVM, "pgmcr3",
+ "Dumps all the entries in the top level paging table. No arguments.",
+ pgmR3InfoCr3);
+ DBGFR3InfoRegisterInternal(pVM, "phys",
+ "Dumps all the physical address ranges. Pass 'verbose' to get more details.",
+ pgmR3PhysInfo);
+ DBGFR3InfoRegisterInternal(pVM, "handlers",
+ "Dumps physical, virtual and hyper virtual handlers. "
+                                     "Pass 'phys', 'virt', 'hyper' as argument if only one kind is wanted. "
+ "Add 'nost' if the statistics are unwanted, use together with 'all' or explicit selection.",
+ pgmR3InfoHandlers);
+
+ pgmR3InitStats(pVM);
+
+#ifdef VBOX_WITH_DEBUGGER
+ /*
+ * Debugger commands.
+ */
+ static bool s_fRegisteredCmds = false;
+ if (!s_fRegisteredCmds)
+ {
+ int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ if (RT_SUCCESS(rc2))
+ s_fRegisteredCmds = true;
+ }
+#endif
+
+#ifdef RT_OS_LINUX
+ /*
+ * Log the /proc/sys/vm/max_map_count value on linux as that is
+ * frequently giving us grief when too low.
+ */
+ int64_t const cGuessNeeded = MMR3PhysGetRamSize(pVM) / _2M + 16384 /*guesstimate*/;
+ int64_t cMaxMapCount = 0;
+ int rc2 = RTLinuxSysFsReadIntFile(10, &cMaxMapCount, "/proc/sys/vm/max_map_count");
+ LogRel(("PGM: /proc/sys/vm/max_map_count = %RI64 (rc2=%Rrc); cGuessNeeded=%RI64\n", cMaxMapCount, rc2, cGuessNeeded));
+ if (RT_SUCCESS(rc2) && cMaxMapCount < cGuessNeeded)
+ LogRel(("PGM: WARNING!!\n"
+ "PGM: WARNING!! Please increase /proc/sys/vm/max_map_count to at least %RI64 (or reduce the amount of RAM assigned to the VM)!\n"
+                "PGM: WARNING!!\n", cGuessNeeded));
+
+#endif
+
+ return VINF_SUCCESS;
+ }
+
+ /* Almost no cleanup necessary, MM frees all memory. */
+ PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
+
+ return rc;
+}
+
+
+/**
+ * Init paging.
+ *
+ * Since we need to check what mode the host is operating in before we can choose
+ * the right paging functions for the host, we have to delay this until R0 has
+ * been initialized.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int pgmR3InitPaging(PVM pVM)
+{
+ /*
+ * Force a recalculation of modes and switcher so everyone gets notified.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
+ pVCpu->pgm.s.enmGuestMode = PGMMODE_INVALID;
+ pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_INVALID;
+ pVCpu->pgm.s.idxGuestModeData = UINT8_MAX;
+ pVCpu->pgm.s.idxShadowModeData = UINT8_MAX;
+ pVCpu->pgm.s.idxBothModeData = UINT8_MAX;
+ }
+
+ pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
+
+ /*
+ * Initialize paging workers and mode from current host mode
+ * and the guest running in real mode.
+ */
+ pVM->pgm.s.enmHostMode = SUPR3GetPagingMode();
+ switch (pVM->pgm.s.enmHostMode)
+ {
+ case SUPPAGINGMODE_32_BIT:
+ case SUPPAGINGMODE_32_BIT_GLOBAL:
+ case SUPPAGINGMODE_PAE:
+ case SUPPAGINGMODE_PAE_GLOBAL:
+ case SUPPAGINGMODE_PAE_NX:
+ case SUPPAGINGMODE_PAE_GLOBAL_NX:
+
+ case SUPPAGINGMODE_AMD64:
+ case SUPPAGINGMODE_AMD64_GLOBAL:
+ case SUPPAGINGMODE_AMD64_NX:
+ case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+ if (ARCH_BITS != 64)
+ {
+ AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
+ LogRel(("PGM: Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
+ return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
+ }
+ break;
+#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
+ case SUPPAGINGMODE_INVALID:
+ pVM->pgm.s.enmHostMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
+ break;
+#endif
+ default:
+ AssertMsgFailed(("Host mode %d is not supported\n", pVM->pgm.s.enmHostMode));
+ return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
+ }
+
+ LogFlow(("pgmR3InitPaging: returns successfully\n"));
+#if HC_ARCH_BITS == 64 && 0
+ LogRel(("PGM: HCPhysInterPD=%RHp HCPhysInterPaePDPT=%RHp HCPhysInterPaePML4=%RHp\n",
+ pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPT, pVM->pgm.s.HCPhysInterPaePML4));
+ LogRel(("PGM: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
+ MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]),
+ MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]),
+ MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
+ MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64)));
+#endif
+
+ /*
+ * Log the host paging mode. It may come in handy.
+ */
+ const char *pszHostMode;
+ switch (pVM->pgm.s.enmHostMode)
+ {
+ case SUPPAGINGMODE_32_BIT: pszHostMode = "32-bit"; break;
+ case SUPPAGINGMODE_32_BIT_GLOBAL: pszHostMode = "32-bit+PGE"; break;
+ case SUPPAGINGMODE_PAE: pszHostMode = "PAE"; break;
+ case SUPPAGINGMODE_PAE_GLOBAL: pszHostMode = "PAE+PGE"; break;
+ case SUPPAGINGMODE_PAE_NX: pszHostMode = "PAE+NXE"; break;
+ case SUPPAGINGMODE_PAE_GLOBAL_NX: pszHostMode = "PAE+PGE+NXE"; break;
+ case SUPPAGINGMODE_AMD64: pszHostMode = "AMD64"; break;
+ case SUPPAGINGMODE_AMD64_GLOBAL: pszHostMode = "AMD64+PGE"; break;
+ case SUPPAGINGMODE_AMD64_NX: pszHostMode = "AMD64+NX"; break;
+ case SUPPAGINGMODE_AMD64_GLOBAL_NX: pszHostMode = "AMD64+PGE+NX"; break;
+ default: pszHostMode = "???"; break;
+ }
+ LogRel(("PGM: Host paging mode: %s\n", pszHostMode));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Init statistics.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ */
+static int pgmR3InitStats(PVM pVM)
+{
+ PPGM pPGM = &pVM->pgm.s;
+ int rc;
+
+ /*
+ * Release statistics.
+ */
+ /* Common - misc variables */
+ STAM_REL_REG(pVM, &pPGM->cAllPages, STAMTYPE_U32, "/PGM/Page/cAllPages", STAMUNIT_COUNT, "The total number of pages.");
+ STAM_REL_REG(pVM, &pPGM->cPrivatePages, STAMTYPE_U32, "/PGM/Page/cPrivatePages", STAMUNIT_COUNT, "The number of private pages.");
+ STAM_REL_REG(pVM, &pPGM->cSharedPages, STAMTYPE_U32, "/PGM/Page/cSharedPages", STAMUNIT_COUNT, "The number of shared pages.");
+ STAM_REL_REG(pVM, &pPGM->cReusedSharedPages, STAMTYPE_U32, "/PGM/Page/cReusedSharedPages", STAMUNIT_COUNT, "The number of reused shared pages.");
+ STAM_REL_REG(pVM, &pPGM->cZeroPages, STAMTYPE_U32, "/PGM/Page/cZeroPages", STAMUNIT_COUNT, "The number of zero backed pages.");
+ STAM_REL_REG(pVM, &pPGM->cPureMmioPages, STAMTYPE_U32, "/PGM/Page/cPureMmioPages", STAMUNIT_COUNT, "The number of pure MMIO pages.");
+ STAM_REL_REG(pVM, &pPGM->cMonitoredPages, STAMTYPE_U32, "/PGM/Page/cMonitoredPages", STAMUNIT_COUNT, "The number of write monitored pages.");
+ STAM_REL_REG(pVM, &pPGM->cWrittenToPages, STAMTYPE_U32, "/PGM/Page/cWrittenToPages", STAMUNIT_COUNT, "The number of previously write monitored pages that have been written to.");
+ STAM_REL_REG(pVM, &pPGM->cWriteLockedPages, STAMTYPE_U32, "/PGM/Page/cWriteLockedPages", STAMUNIT_COUNT, "The number of write(/read) locked pages.");
+ STAM_REL_REG(pVM, &pPGM->cReadLockedPages, STAMTYPE_U32, "/PGM/Page/cReadLockedPages", STAMUNIT_COUNT, "The number of read (only) locked pages.");
+ STAM_REL_REG(pVM, &pPGM->cBalloonedPages, STAMTYPE_U32, "/PGM/Page/cBalloonedPages", STAMUNIT_COUNT, "The number of ballooned pages.");
+ STAM_REL_REG(pVM, &pPGM->cHandyPages, STAMTYPE_U32, "/PGM/Page/cHandyPages", STAMUNIT_COUNT, "The number of handy pages (not included in cAllPages).");
+ STAM_REL_REG(pVM, &pPGM->cLargePages, STAMTYPE_U32, "/PGM/Page/cLargePages", STAMUNIT_COUNT, "The number of large pages allocated (includes disabled).");
+ STAM_REL_REG(pVM, &pPGM->cLargePagesDisabled, STAMTYPE_U32, "/PGM/Page/cLargePagesDisabled", STAMUNIT_COUNT, "The number of disabled large pages.");
+ STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c, STAMTYPE_U32, "/PGM/ChunkR3Map/c", STAMUNIT_COUNT, "Number of mapped chunks.");
+ STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax, STAMTYPE_U32, "/PGM/ChunkR3Map/cMax", STAMUNIT_COUNT, "Maximum number of mapped chunks.");
+ STAM_REL_REG(pVM, &pPGM->cMappedChunks, STAMTYPE_U32, "/PGM/ChunkR3Map/Mapped", STAMUNIT_COUNT, "Number of times we mapped a chunk.");
+ STAM_REL_REG(pVM, &pPGM->cUnmappedChunks, STAMTYPE_U32, "/PGM/ChunkR3Map/Unmapped", STAMUNIT_COUNT, "Number of times we unmapped a chunk.");
+
+ STAM_REL_REG(pVM, &pPGM->StatLargePageReused, STAMTYPE_COUNTER, "/PGM/LargePage/Reused", STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
+ STAM_REL_REG(pVM, &pPGM->StatLargePageRefused, STAMTYPE_COUNTER, "/PGM/LargePage/Refused", STAMUNIT_OCCURENCES, "The number of times we couldn't use a large page.");
+ STAM_REL_REG(pVM, &pPGM->StatLargePageRecheck, STAMTYPE_COUNTER, "/PGM/LargePage/Recheck", STAMUNIT_OCCURENCES, "The number of times we've rechecked a disabled large page.");
+
+ STAM_REL_REG(pVM, &pPGM->StatShModCheck, STAMTYPE_PROFILE, "/PGM/ShMod/Check", STAMUNIT_TICKS_PER_CALL, "Profiles the shared module checking.");
+ STAM_REL_REG(pVM, &pPGM->StatMmio2QueryAndResetDirtyBitmap, STAMTYPE_PROFILE, "/PGM/Mmio2QueryAndResetDirtyBitmap", STAMUNIT_TICKS_PER_CALL, "Profiles calls to PGMR3PhysMmio2QueryAndResetDirtyBitmap (sans locking).");
+
+ /* Live save */
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.fActive, STAMTYPE_U8, "/PGM/LiveSave/fActive", STAMUNIT_COUNT, "Active or not.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cIgnoredPages, STAMTYPE_U32, "/PGM/LiveSave/cIgnoredPages", STAMUNIT_COUNT, "The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM).");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesLong, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesLong", STAMUNIT_COUNT, "Longer term dirty page average.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesShort, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesShort", STAMUNIT_COUNT, "Short term dirty page average.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cPagesPerSecond, STAMTYPE_U32, "/PGM/LiveSave/cPagesPerSecond", STAMUNIT_COUNT, "Pages per second.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cSavedPages, STAMTYPE_U64, "/PGM/LiveSave/cSavedPages", STAMUNIT_COUNT, "The total number of saved pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cReadPages", STAMUNIT_COUNT, "RAM: Ready pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cDirtyPages", STAMUNIT_COUNT, "RAM: Dirty pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cZeroPages", STAMUNIT_COUNT, "RAM: Ready zero pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cMonitoredPages", STAMUNIT_COUNT, "RAM: Write monitored pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cReadPages", STAMUNIT_COUNT, "ROM: Ready pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cDirtyPages", STAMUNIT_COUNT, "ROM: Dirty pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cZeroPages", STAMUNIT_COUNT, "ROM: Ready zero pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cMonitoredPages", STAMUNIT_COUNT, "ROM: Write monitored pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cReadPages", STAMUNIT_COUNT, "MMIO2: Ready pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cDirtyPages", STAMUNIT_COUNT, "MMIO2: Dirty pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cZeroPages", STAMUNIT_COUNT, "MMIO2: Ready zero pages.");
+ STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cMonitoredPages,STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cMonitoredPages",STAMUNIT_COUNT, "MMIO2: Write monitored pages.");
+
+#define PGM_REG_COUNTER(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
+ AssertRC(rc);
+
+#define PGM_REG_U64(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
+ AssertRC(rc);
+
+#define PGM_REG_U64_RESET(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
+ AssertRC(rc);
+
+#define PGM_REG_U32(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
+ AssertRC(rc);
+
+#define PGM_REG_COUNTER_BYTES(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, c, b); \
+ AssertRC(rc);
+
+#define PGM_REG_PROFILE(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b); \
+ AssertRC(rc);
+#define PGM_REG_PROFILE_NS(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, c, b); \
+ AssertRC(rc);
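+/* Illustrative note (a sketch, not part of the original source): each
+ * PGM_REG_* helper above wraps STAMR3RegisterF() + AssertRC(). For instance,
+ * the hypothetical call
+ * PGM_REG_COUNTER(&pStats->StatExample, "/PGM/Example", "An example counter.");
+ * expands to
+ * rc = STAMR3RegisterF(pVM, &pStats->StatExample, STAMTYPE_COUNTER,
+ * STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ * "An example counter.", "/PGM/Example");
+ * AssertRC(rc);
+ * where "StatExample" is a made-up member used only for illustration. */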
+
+#ifdef VBOX_WITH_STATISTICS
+ PGMSTATS *pStats = &pPGM->Stats;
+#endif
+
+ PGM_REG_PROFILE_NS(&pPGM->StatLargePageAlloc, "/PGM/LargePage/Alloc", "Time spent by the host OS for large page allocation.");
+ PGM_REG_COUNTER(&pPGM->StatLargePageAllocFailed, "/PGM/LargePage/AllocFailed", "Number of allocation failures.");
+ PGM_REG_COUNTER(&pPGM->StatLargePageOverflow, "/PGM/LargePage/Overflow", "The number of times allocating a large page took too long.");
+ PGM_REG_COUNTER(&pPGM->StatLargePageTlbFlush, "/PGM/LargePage/TlbFlush", "The number of times a full VCPU TLB flush was required after a large allocation.");
+ PGM_REG_COUNTER(&pPGM->StatLargePageZeroEvict, "/PGM/LargePage/ZeroEvict", "The number of zero page mappings we had to evict when allocating a large page.");
+#ifdef VBOX_WITH_STATISTICS
+ PGM_REG_PROFILE(&pStats->StatLargePageAlloc2, "/PGM/LargePage/Alloc2", "Time spent allocating large pages.");
+ PGM_REG_PROFILE(&pStats->StatLargePageSetup, "/PGM/LargePage/Setup", "Time spent setting up the newly allocated large pages.");
+ PGM_REG_PROFILE(&pStats->StatR3IsValidLargePage, "/PGM/LargePage/IsValidR3", "pgmPhysIsValidLargePage profiling - R3.");
+ PGM_REG_PROFILE(&pStats->StatRZIsValidLargePage, "/PGM/LargePage/IsValidRZ", "pgmPhysIsValidLargePage profiling - RZ.");
+
+ PGM_REG_COUNTER(&pStats->StatR3DetectedConflicts, "/PGM/R3/DetectedConflicts", "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
+ PGM_REG_PROFILE(&pStats->StatR3ResolveConflict, "/PGM/R3/ResolveConflict", "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
+ PGM_REG_COUNTER(&pStats->StatR3PhysRead, "/PGM/R3/Phys/Read", "The number of times PGMPhysRead was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysReadBytes, "/PGM/R3/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysWrite, "/PGM/R3/Phys/Write", "The number of times PGMPhysWrite was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysWriteBytes, "/PGM/R3/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysSimpleRead, "/PGM/R3/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysSimpleReadBytes, "/PGM/R3/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysSimpleWrite, "/PGM/R3/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysSimpleWriteBytes, "/PGM/R3/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
+
+ PGM_REG_COUNTER(&pStats->StatRZChunkR3MapTlbHits, "/PGM/ChunkR3Map/TlbHitsRZ", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatRZChunkR3MapTlbMisses, "/PGM/ChunkR3Map/TlbMissesRZ", "TLB misses.");
+ PGM_REG_PROFILE(&pStats->StatChunkAging, "/PGM/ChunkR3Map/Map/Aging", "Chunk aging profiling.");
+ PGM_REG_PROFILE(&pStats->StatChunkFindCandidate, "/PGM/ChunkR3Map/Map/Find", "Chunk unmap find profiling.");
+ PGM_REG_PROFILE(&pStats->StatChunkUnmap, "/PGM/ChunkR3Map/Map/Unmap", "Chunk unmap of address space profiling.");
+ PGM_REG_PROFILE(&pStats->StatChunkMap, "/PGM/ChunkR3Map/Map/Map", "Chunk map of address space profiling.");
+
+ PGM_REG_COUNTER(&pStats->StatRZPageMapTlbHits, "/PGM/RZ/Page/MapTlbHits", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatRZPageMapTlbMisses, "/PGM/RZ/Page/MapTlbMisses", "TLB misses.");
+ PGM_REG_COUNTER(&pStats->StatR3ChunkR3MapTlbHits, "/PGM/ChunkR3Map/TlbHitsR3", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatR3ChunkR3MapTlbMisses, "/PGM/ChunkR3Map/TlbMissesR3", "TLB misses.");
+ PGM_REG_COUNTER(&pStats->StatR3PageMapTlbHits, "/PGM/R3/Page/MapTlbHits", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatR3PageMapTlbMisses, "/PGM/R3/Page/MapTlbMisses", "TLB misses.");
+ PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushes, "/PGM/R3/Page/MapTlbFlushes", "TLB flushes (all contexts).");
+ PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushEntry, "/PGM/R3/Page/MapTlbFlushEntry", "TLB entry flushes (all contexts).");
+
+ PGM_REG_COUNTER(&pStats->StatRZRamRangeTlbHits, "/PGM/RZ/RamRange/TlbHits", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatRZRamRangeTlbMisses, "/PGM/RZ/RamRange/TlbMisses", "TLB misses.");
+ PGM_REG_COUNTER(&pStats->StatR3RamRangeTlbHits, "/PGM/R3/RamRange/TlbHits", "TLB hits.");
+ PGM_REG_COUNTER(&pStats->StatR3RamRangeTlbMisses, "/PGM/R3/RamRange/TlbMisses", "TLB misses.");
+
+ PGM_REG_COUNTER(&pStats->StatRZPhysHandlerReset, "/PGM/RZ/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysHandlerReset, "/PGM/R3/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
+ PGM_REG_COUNTER(&pStats->StatRZPhysHandlerLookupHits, "/PGM/RZ/PhysHandlerLookupHits", "The number of cache hits when looking up physical handlers.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysHandlerLookupHits, "/PGM/R3/PhysHandlerLookupHits", "The number of cache hits when looking up physical handlers.");
+ PGM_REG_COUNTER(&pStats->StatRZPhysHandlerLookupMisses, "/PGM/RZ/PhysHandlerLookupMisses", "The number of cache misses when looking up physical handlers.");
+ PGM_REG_COUNTER(&pStats->StatR3PhysHandlerLookupMisses, "/PGM/R3/PhysHandlerLookupMisses", "The number of cache misses when looking up physical handlers.");
+#endif /* VBOX_WITH_STATISTICS */
+ PPGMPHYSHANDLERTREE pPhysHndlTree = pVM->pgm.s.pPhysHandlerTree;
+ PGM_REG_U32(&pPhysHndlTree->m_cErrors, "/PGM/PhysHandlerTree/ErrorsTree", "Physical access handler tree errors.");
+ PGM_REG_U32(&pVM->pgm.s.PhysHandlerAllocator.m_cErrors, "/PGM/PhysHandlerTree/ErrorsAllocatorR3", "Physical access handler tree allocator errors (ring-3 only).");
+ PGM_REG_U64_RESET(&pPhysHndlTree->m_cInserts, "/PGM/PhysHandlerTree/Inserts", "Physical access handler tree inserts.");
+ PGM_REG_U32(&pVM->pgm.s.PhysHandlerAllocator.m_cNodes, "/PGM/PhysHandlerTree/MaxHandlers", "Max physical access handlers.");
+ PGM_REG_U64_RESET(&pPhysHndlTree->m_cRemovals, "/PGM/PhysHandlerTree/Removals", "Physical access handler tree removals.");
+ PGM_REG_U64_RESET(&pPhysHndlTree->m_cRebalancingOperations, "/PGM/PhysHandlerTree/RebalancingOperations", "Physical access handler tree rebalancing transformations.");
+
+#ifdef VBOX_WITH_STATISTICS
+ PGM_REG_COUNTER(&pStats->StatRZPageReplaceShared, "/PGM/RZ/Page/ReplacedShared", "Times a shared page was replaced.");
+ PGM_REG_COUNTER(&pStats->StatRZPageReplaceZero, "/PGM/RZ/Page/ReplacedZero", "Times the zero page was replaced.");
+/// @todo PGM_REG_COUNTER(&pStats->StatRZPageHandyAllocs, "/PGM/RZ/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
+ PGM_REG_COUNTER(&pStats->StatR3PageReplaceShared, "/PGM/R3/Page/ReplacedShared", "Times a shared page was replaced.");
+ PGM_REG_COUNTER(&pStats->StatR3PageReplaceZero, "/PGM/R3/Page/ReplacedZero", "Times the zero page was replaced.");
+/// @todo PGM_REG_COUNTER(&pStats->StatR3PageHandyAllocs, "/PGM/R3/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
+
+ PGM_REG_COUNTER(&pStats->StatRZPhysRead, "/PGM/RZ/Phys/Read", "The number of times PGMPhysRead was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysReadBytes, "/PGM/RZ/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
+ PGM_REG_COUNTER(&pStats->StatRZPhysWrite, "/PGM/RZ/Phys/Write", "The number of times PGMPhysWrite was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysWriteBytes, "/PGM/RZ/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
+ PGM_REG_COUNTER(&pStats->StatRZPhysSimpleRead, "/PGM/RZ/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysSimpleReadBytes, "/PGM/RZ/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
+ PGM_REG_COUNTER(&pStats->StatRZPhysSimpleWrite, "/PGM/RZ/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysSimpleWriteBytes, "/PGM/RZ/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
+
+ /* GC only: */
+ PGM_REG_COUNTER(&pStats->StatRCInvlPgConflict, "/PGM/RC/InvlPgConflict", "Number of times PGMInvalidatePage() detected a mapping conflict.");
+ PGM_REG_COUNTER(&pStats->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
+
+ PGM_REG_COUNTER(&pStats->StatRCPhysRead, "/PGM/RC/Phys/Read", "The number of times PGMPhysRead was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysReadBytes, "/PGM/RC/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
+ PGM_REG_COUNTER(&pStats->StatRCPhysWrite, "/PGM/RC/Phys/Write", "The number of times PGMPhysWrite was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysWriteBytes, "/PGM/RC/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
+ PGM_REG_COUNTER(&pStats->StatRCPhysSimpleRead, "/PGM/RC/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysSimpleReadBytes, "/PGM/RC/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
+ PGM_REG_COUNTER(&pStats->StatRCPhysSimpleWrite, "/PGM/RC/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
+ PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysSimpleWriteBytes, "/PGM/RC/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
+
+ PGM_REG_COUNTER(&pStats->StatTrackVirgin, "/PGM/Track/Virgin", "The number of first time shadowings");
+ PGM_REG_COUNTER(&pStats->StatTrackAliased, "/PGM/Track/Aliased", "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
+ PGM_REG_COUNTER(&pStats->StatTrackAliasedMany, "/PGM/Track/AliasedMany", "The number of times we're tracking using cRef2.");
+ PGM_REG_COUNTER(&pStats->StatTrackAliasedLots, "/PGM/Track/AliasedLots", "The number of times we're hitting pages which has overflowed cRef2");
+ PGM_REG_COUNTER(&pStats->StatTrackOverflows, "/PGM/Track/Overflows", "The number of times the extent list grows too long.");
+ PGM_REG_COUNTER(&pStats->StatTrackNoExtentsLeft, "/PGM/Track/NoExtentLeft", "The number of times the extent list was exhausted.");
+ PGM_REG_PROFILE(&pStats->StatTrackDeref, "/PGM/Track/Deref", "Profiling of SyncPageWorkerTrackDeref (expensive).");
+#endif
+
+#undef PGM_REG_COUNTER
+#undef PGM_REG_U64
+#undef PGM_REG_U64_RESET
+#undef PGM_REG_U32
+#undef PGM_REG_PROFILE
+#undef PGM_REG_PROFILE_NS
+
+ /*
+ * Note! The layout below matches the member layout exactly!
+ */
+
+ /*
+ * Common - stats
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PPGMCPU pPgmCpu = &pVM->apCpusR3[idCpu]->pgm.s;
+
+#define PGM_REG_COUNTER(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
+ AssertRC(rc);
+#define PGM_REG_PROFILE(a, b, c) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
+ AssertRC(rc);
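+/* Note: these per-CPU variants pass idCpu as an extra STAMR3RegisterF format
+ * argument, so the "%u" in each "/PGM/CPU%u/..." name below expands to the
+ * virtual CPU id. */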
+
+ PGM_REG_COUNTER(&pPgmCpu->cGuestModeChanges, "/PGM/CPU%u/cGuestModeChanges", "Number of guest mode changes.");
+ PGM_REG_COUNTER(&pPgmCpu->cA20Changes, "/PGM/CPU%u/cA20Changes", "Number of A20 gate changes.");
+
+#ifdef VBOX_WITH_STATISTICS
+ PGMCPUSTATS *pCpuStats = &pVM->apCpusR3[idCpu]->pgm.s.Stats;
+
+# if 0 /* rarely useful; leave for debugging. */
+ for (unsigned j = 0; j < RT_ELEMENTS(pCpuStats->StatSyncPtPD); j++)
+ STAMR3RegisterF(pVM, &pCpuStats->StatSyncPtPD[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "The number of SyncPT per PD n.", "/PGM/CPU%u/PDSyncPT/%04X", idCpu, j);
+ for (unsigned j = 0; j < RT_ELEMENTS(pCpuStats->StatSyncPagePD); j++)
+ STAMR3RegisterF(pVM, &pCpuStats->StatSyncPagePD[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "The number of SyncPage per PD n.", "/PGM/CPU%u/PDSyncPage/%04X", idCpu, j);
+# endif
+ /* R0 only: */
+ PGM_REG_PROFILE(&pCpuStats->StatR0NpMiscfg, "/PGM/CPU%u/R0/NpMiscfg", "PGMR0Trap0eHandlerNPMisconfig() profiling.");
+ PGM_REG_COUNTER(&pCpuStats->StatR0NpMiscfgSyncPage, "/PGM/CPU%u/R0/NpMiscfgSyncPage", "SyncPage calls from PGMR0Trap0eHandlerNPMisconfig().");
+
+ /* RZ only: */
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0e, "/PGM/CPU%u/RZ/Trap0e", "Profiling of the PGMTrap0eHandler() body.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Ballooned, "/PGM/CPU%u/RZ/Trap0e/Time2/Ballooned", "Profiling of the Trap0eHandler body when the cause is read access to a ballooned page.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2CSAM, "/PGM/CPU%u/RZ/Trap0e/Time2/CSAM", "Profiling of the Trap0eHandler body when the cause is CSAM.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2DirtyAndAccessed, "/PGM/CPU%u/RZ/Trap0e/Time2/DirtyAndAccessedBits", "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2GuestTrap, "/PGM/CPU%u/RZ/Trap0e/Time2/GuestTrap", "Profiling of the Trap0eHandler body when the cause is a guest trap.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2HndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerPhysical", "Profiling of the Trap0eHandler body when the cause is a physical handler.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2HndUnhandled, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerUnhandled", "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2InvalidPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/InvalidPhys", "Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2MakeWritable, "/PGM/CPU%u/RZ/Trap0e/Time2/MakeWritable", "Profiling of the Trap0eHandler body when the cause is that a page needed to be made writeable.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Misc, "/PGM/CPU%u/RZ/Trap0e/Time2/Misc", "Profiling of the Trap0eHandler body when the cause is not known.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSync, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSync", "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncHndPhys", "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndObs, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncObsHnd", "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2SyncPT, "/PGM/CPU%u/RZ/Trap0e/Time2/SyncPT", "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2WPEmulation, "/PGM/CPU%u/RZ/Trap0e/Time2/WPEmulation", "Profiling of the Trap0eHandler body when the cause is CR0.WP emulation.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsHack, "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USHack", "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be enabled.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsUnhack, "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USUnhack", "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eConflicts, "/PGM/CPU%u/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysAll, "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysAll", "Number of traps due to physical all-access handlers.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysAllOpt, "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysAllOpt", "Number of the physical all-access handler traps using the optimization.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysWrite, "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysWrite", "Number of traps due to physical write-access handlers.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersUnhandled, "/PGM/CPU%u/RZ/Trap0e/Handlers/Unhandled", "Number of traps due to access outside range of monitored page(s).");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersInvalid, "/PGM/CPU%u/RZ/Trap0e/Handlers/Invalid", "Number of traps due to access to invalid physical memory.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPRead", "Number of user mode not present read page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPWrite", "Number of user mode not present write page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/Write", "Number of user mode write page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSReserved, "/PGM/CPU%u/RZ/Trap0e/Err/User/Reserved", "Number of user mode reserved bit page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/User/NXE", "Number of user mode NXE page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/Read", "Number of user mode read page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPRead", "Number of supervisor mode not present read page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPWrite", "Number of supervisor mode not present write page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Write", "Number of supervisor mode write page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVReserved, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Reserved", "Number of supervisor mode reserved bit page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NXE", "Number of supervisor mode NXE page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPF, "/PGM/CPU%u/RZ/Trap0e/GuestPF", "Number of real guest page faults.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulInRZ, "/PGM/CPU%u/RZ/Trap0e/WP/InRZ", "Number of guest page faults due to X86_CR0_WP emulation.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulToR3, "/PGM/CPU%u/RZ/Trap0e/WP/ToR3", "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
+#if 0 /* rarely useful; leave for debugging. */
+ for (unsigned j = 0; j < RT_ELEMENTS(pCpuStats->StatRZTrap0ePD); j++)
+ STAMR3RegisterF(pVM, &pCpuStats->StatRZTrap0ePD[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "The number of traps in page directory n.", "/PGM/CPU%u/RZ/Trap0e/PD/%04X", idCpu, j);
+#endif
+ PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteHandled, "/PGM/CPU%u/RZ/CR3WriteHandled", "The number of times the Guest CR3 change was successfully handled.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteUnhandled, "/PGM/CPU%u/RZ/CR3WriteUnhandled", "The number of times the Guest CR3 change was passed back to the recompiler.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteConflict, "/PGM/CPU%u/RZ/CR3WriteConflict", "The number of times the Guest CR3 monitoring detected a conflict.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteHandled, "/PGM/CPU%u/RZ/ROMWriteHandled", "The number of times the Guest ROM change was successfully handled.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteUnhandled, "/PGM/CPU%u/RZ/ROMWriteUnhandled", "The number of times the Guest ROM change was passed back to the recompiler.");
+
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapMigrateInvlPg, "/PGM/CPU%u/RZ/DynMap/MigrateInvlPg", "invlpg count in PGMR0DynMapMigrateAutoSet.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZDynMapGCPageInl, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl", "Calls to pgmR0DynMapGCPageInlined.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Hits", "Hash table lookup hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Misses", "Misses that falls back to the code common.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamHits", "1st ram range hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPageInl, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl", "Calls to pgmRZDynMapHCPageInlined.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Hits", "Hash table lookup hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Misses", "Misses that falls back to the code common.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPage, "/PGM/CPU%u/RZ/DynMap/Page", "Calls to pgmR0DynMapPage");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetOptimize, "/PGM/CPU%u/RZ/DynMap/Page/SetOptimize", "Calls to pgmRZDynMapOptimizeAutoSet.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchFlushes, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchFlushes", "Set search restoring to subset flushes.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchHits, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchHits", "Set search hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchMisses, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchMisses", "Set search misses.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPage, "/PGM/CPU%u/RZ/DynMap/Page/HCPage", "Calls to pgmRZDynMapHCPageCommon (ring-0).");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits0, "/PGM/CPU%u/RZ/DynMap/Page/Hits0", "Hits at iPage+0");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits1, "/PGM/CPU%u/RZ/DynMap/Page/Hits1", "Hits at iPage+1");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits2, "/PGM/CPU%u/RZ/DynMap/Page/Hits2", "Hits at iPage+2");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageInvlPg, "/PGM/CPU%u/RZ/DynMap/Page/InvlPg", "invlpg count in pgmR0DynMapPageSlow.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlow, "/PGM/CPU%u/RZ/DynMap/Page/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopHits, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopHits" , "Hits in the loop path.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopMisses, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopMisses", "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
+ //PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLostHits, "/PGM/CPU%u/R0/DynMap/Page/SlowLostHits", "Lost hits.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSubsets, "/PGM/CPU%u/RZ/DynMap/Subsets", "Times PGMRZDynMapPushAutoSubset was called.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPopFlushes, "/PGM/CPU%u/RZ/DynMap/SubsetPopFlushes", "Times PGMRZDynMapPopAutoSubset flushes the subset.");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[0], "/PGM/CPU%u/RZ/DynMap/SetFilledPct000..09", "00-09% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[1], "/PGM/CPU%u/RZ/DynMap/SetFilledPct010..19", "10-19% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[2], "/PGM/CPU%u/RZ/DynMap/SetFilledPct020..29", "20-29% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[3], "/PGM/CPU%u/RZ/DynMap/SetFilledPct030..39", "30-39% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[4], "/PGM/CPU%u/RZ/DynMap/SetFilledPct040..49", "40-49% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[5], "/PGM/CPU%u/RZ/DynMap/SetFilledPct050..59", "50-59% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[6], "/PGM/CPU%u/RZ/DynMap/SetFilledPct060..69", "60-69% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[7], "/PGM/CPU%u/RZ/DynMap/SetFilledPct070..79", "70-79% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[8], "/PGM/CPU%u/RZ/DynMap/SetFilledPct080..89", "80-89% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[9], "/PGM/CPU%u/RZ/DynMap/SetFilledPct090..99", "90-99% filled (RC: min(set-size, dynmap-size))");
+ PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[10], "/PGM/CPU%u/RZ/DynMap/SetFilledPct100", "100% filled (RC: min(set-size, dynmap-size))");
+
+ /* HC only: */
+
+ /* RZ & R3: */
+ PGM_REG_PROFILE(&pCpuStats->StatRZSyncCR3, "/PGM/CPU%u/RZ/SyncCR3", "Profiling of the PGMSyncCR3() body.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZSyncCR3Handlers, "/PGM/CPU%u/RZ/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3Global, "/PGM/CPU%u/RZ/SyncCR3/Global", "The number of global CR3 syncs.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3NotGlobal, "/PGM/CPU%u/RZ/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstCacheHit, "/PGM/CPU%u/RZ/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstFreed, "/PGM/CPU%u/RZ/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstFreedSrcNP, "/PGM/CPU%u/RZ/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstNotPresent, "/PGM/CPU%u/RZ/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZSyncPT, "/PGM/CPU%u/RZ/SyncPT", "Profiling of the pfnSyncPT() body.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncPTFailed, "/PGM/CPU%u/RZ/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncPT4K, "/PGM/CPU%u/RZ/SyncPT/4K", "Nr of 4K PT syncs");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncPT4M, "/PGM/CPU%u/RZ/SyncPT/4M", "Nr of 4M PT syncs");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncPagePDNAs, "/PGM/CPU%u/RZ/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZSyncPagePDOutOfSync, "/PGM/CPU%u/RZ/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZAccessedPage, "/PGM/CPU%u/RZ/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZDirtyBitTracking, "/PGM/CPU%u/RZ/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPage, "/PGM/CPU%u/RZ/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageBig, "/PGM/CPU%u/RZ/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageSkipped, "/PGM/CPU%u/RZ/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageTrap, "/PGM/CPU%u/RZ/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageStale, "/PGM/CPU%u/RZ/DirtyPage/Stale", "The number of traps generated for dirty bit tracking (stale tlb entries).");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtiedPage, "/PGM/CPU%u/RZ/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZDirtyTrackRealPF, "/PGM/CPU%u/RZ/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageAlreadyDirty, "/PGM/CPU%u/RZ/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZInvalidatePage, "/PGM/CPU%u/RZ/InvalidatePage", "PGMInvalidatePage() profiling.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4KBPages, "/PGM/CPU%u/RZ/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPages, "/PGM/CPU%u/RZ/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPagesSkip, "/PGM/CPU%u/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNAs, "/PGM/CPU%u/RZ/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs, "/PGM/CPU%u/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%u/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSizeChanges, "/PGM/CPU%u/RZ/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB).");
+ PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSkipped, "/PGM/CPU%u/RZ/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisor, "/PGM/CPU%u/RZ/OutOfSync/SuperVisor", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncUser, "/PGM/CPU%u/RZ/OutOfSync/User", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisorWrite,"/PGM/CPU%u/RZ/OutOfSync/SuperVisorWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncUserWrite, "/PGM/CPU%u/RZ/OutOfSync/UserWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncBallloon, "/PGM/CPU%u/RZ/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
+ PGM_REG_PROFILE(&pCpuStats->StatRZPrefetch, "/PGM/CPU%u/RZ/Prefetch", "PGMPrefetchPage profiling.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZFlushTLB, "/PGM/CPU%u/RZ/FlushTLB", "Profiling of the PGMFlushTLB() body.");
+ PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBNewCR3, "/PGM/CPU%u/RZ/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
+ PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBNewCR3Global, "/PGM/CPU%u/RZ/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
+ PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBSameCR3, "/PGM/CPU%u/RZ/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
+ PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBSameCR3Global, "/PGM/CPU%u/RZ/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
+ PGM_REG_PROFILE(&pCpuStats->StatRZGstModifyPage, "/PGM/CPU%u/RZ/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
+
+ PGM_REG_PROFILE(&pCpuStats->StatR3SyncCR3, "/PGM/CPU%u/R3/SyncCR3", "Profiling of the PGMSyncCR3() body.");
+ PGM_REG_PROFILE(&pCpuStats->StatR3SyncCR3Handlers, "/PGM/CPU%u/R3/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3Global, "/PGM/CPU%u/R3/SyncCR3/Global", "The number of global CR3 syncs.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3NotGlobal, "/PGM/CPU%u/R3/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstCacheHit, "/PGM/CPU%u/R3/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstFreed, "/PGM/CPU%u/R3/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstFreedSrcNP, "/PGM/CPU%u/R3/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstNotPresent, "/PGM/CPU%u/R3/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
+ PGM_REG_PROFILE(&pCpuStats->StatR3SyncPT, "/PGM/CPU%u/R3/SyncPT", "Profiling of the pfnSyncPT() body.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncPTFailed, "/PGM/CPU%u/R3/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncPT4K, "/PGM/CPU%u/R3/SyncPT/4K", "Nr of 4K PT syncs");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncPT4M, "/PGM/CPU%u/R3/SyncPT/4M", "Nr of 4M PT syncs");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncPagePDNAs, "/PGM/CPU%u/R3/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3SyncPagePDOutOfSync, "/PGM/CPU%u/R3/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3AccessedPage, "/PGM/CPU%u/R3/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
+ PGM_REG_PROFILE(&pCpuStats->StatR3DirtyBitTracking, "/PGM/CPU%u/R3/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPage, "/PGM/CPU%u/R3/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageBig, "/PGM/CPU%u/R3/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageSkipped, "/PGM/CPU%u/R3/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageTrap, "/PGM/CPU%u/R3/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtiedPage, "/PGM/CPU%u/R3/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3DirtyTrackRealPF, "/PGM/CPU%u/R3/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3PageAlreadyDirty, "/PGM/CPU%u/R3/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
+ PGM_REG_PROFILE(&pCpuStats->StatR3InvalidatePage, "/PGM/CPU%u/R3/InvalidatePage", "PGMInvalidatePage() profiling.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4KBPages, "/PGM/CPU%u/R3/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPages, "/PGM/CPU%u/R3/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPagesSkip, "/PGM/CPU%u/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNAs, "/PGM/CPU%u/R3/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs, "/PGM/CPU%u/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%u/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSizeChanges, "/PGM/CPU%u/R3/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB).");
+ PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSkipped, "/PGM/CPU%u/R3/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncSupervisor, "/PGM/CPU%u/R3/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncUser, "/PGM/CPU%u/R3/OutOfSync/User", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncBallloon, "/PGM/CPU%u/R3/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
+ PGM_REG_PROFILE(&pCpuStats->StatR3Prefetch, "/PGM/CPU%u/R3/Prefetch", "PGMPrefetchPage profiling.");
+ PGM_REG_PROFILE(&pCpuStats->StatR3FlushTLB, "/PGM/CPU%u/R3/FlushTLB", "Profiling of the PGMFlushTLB() body.");
+ PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBNewCR3, "/PGM/CPU%u/R3/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
+ PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBNewCR3Global, "/PGM/CPU%u/R3/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
+ PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBSameCR3, "/PGM/CPU%u/R3/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
+ PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBSameCR3Global, "/PGM/CPU%u/R3/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
+ PGM_REG_PROFILE(&pCpuStats->StatR3GstModifyPage, "/PGM/CPU%u/R3/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
+#endif /* VBOX_WITH_STATISTICS */
+
+#undef PGM_REG_PROFILE
+#undef PGM_REG_COUNTER
+
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Ring-3 init finalizing.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
+{
+ /*
+ * Determine the max physical address width (MAXPHYADDR) and apply it to
+ * all the mask members and related fields.
+ */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ uint32_t cMaxPhysAddrWidth;
+ uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
+ if ( uMaxExtLeaf >= 0x80000008
+ && uMaxExtLeaf <= 0x80000fff)
+ {
+ cMaxPhysAddrWidth = ASMCpuId_EAX(0x80000008) & 0xff;
+ LogRel(("PGM: The CPU physical address width is %u bits\n", cMaxPhysAddrWidth));
+ cMaxPhysAddrWidth = RT_MIN(52, cMaxPhysAddrWidth);
+ pVM->pgm.s.fLessThan52PhysicalAddressBits = cMaxPhysAddrWidth < 52;
+ for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 52; iBit++)
+ pVM->pgm.s.HCPhysInvMmioPg |= RT_BIT_64(iBit);
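+ /* E.g. a CPU reporting 46 bits leaves bits 46..51 set in HCPhysInvMmioPg. */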
+ }
+ else
+ {
+ LogRel(("PGM: ASSUMING CPU physical address width of 48 bits (uMaxExtLeaf=%#x)\n", uMaxExtLeaf));
+ cMaxPhysAddrWidth = 48;
+ pVM->pgm.s.fLessThan52PhysicalAddressBits = true;
+ pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f000000000000); /* bits 48..51 */
+ }
+ /* Disabled the below assertion -- triggers 24 vs 39 on my Intel Skylake box for a 32-bit (Guest-type Other/Unknown) VM. */
+ //AssertMsg(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth,
+ // ("CPUM %u - PGM %u\n", pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth, cMaxPhysAddrWidth));
+#else
+ uint32_t const cMaxPhysAddrWidth = pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth;
+ LogRel(("PGM: The (guest) CPU physical address width is %u bits\n", cMaxPhysAddrWidth));
+#endif
+
+ /** @todo query from CPUM. */
+ pVM->pgm.s.GCPhysInvAddrMask = 0;
+ for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
+ pVM->pgm.s.GCPhysInvAddrMask |= RT_BIT_64(iBit);
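+ /* For example, a 48-bit MAXPHYADDR yields
+ GCPhysInvAddrMask = 0xffff000000000000 (bits 48..63 set). */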
+
+ /*
+ * Initialize the invalid paging entry masks, assuming NX is disabled.
+ */
+ uint64_t fMbzPageFrameMask = pVM->pgm.s.GCPhysInvAddrMask & UINT64_C(0x000ffffffffff000);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ uint64_t const fEptVpidCap = CPUMGetGuestIa32VmxEptVpidCap(pVM->apCpusR3[0]); /* should be identical for all VCPUs */
+ uint64_t const fGstEptMbzBigPdeMask = EPT_PDE2M_MBZ_MASK
+ | (RT_BF_GET(fEptVpidCap, VMX_BF_EPT_VPID_CAP_PDE_2M) ^ 1) << EPT_E_BIT_LEAF;
+ uint64_t const fGstEptMbzBigPdpteMask = EPT_PDPTE1G_MBZ_MASK
+ | (RT_BF_GET(fEptVpidCap, VMX_BF_EPT_VPID_CAP_PDPTE_1G) ^ 1) << EPT_E_BIT_LEAF;
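+ /* The (cap ^ 1) << EPT_E_BIT_LEAF terms above make the leaf bit must-be-zero
+ whenever the EPT/VPID capability MSR reports that 2M PDEs or 1G PDPTEs are
+ not supported. */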
+ //uint64_t const GCPhysRsvdAddrMask = pVM->pgm.s.GCPhysInvAddrMask & UINT64_C(0x000fffffffffffff); /* bits 63:52 ignored */
+#endif
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /** @todo The manuals are not entirely clear whether the physical
+ * address width is relevant. See table 5-9 in the intel
+ * manual vs the PDE4M descriptions. Write testcase (NP). */
+ pVCpu->pgm.s.fGst32BitMbzBigPdeMask = ((uint32_t)(fMbzPageFrameMask >> (32 - 13)) & X86_PDE4M_PG_HIGH_MASK)
+ | X86_PDE4M_MBZ_MASK;
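+ /* Sketch of the line above: for PSE-36 big pages, physical address bits 32
+ and up are stored in PDE bits 13..20, so the MBZ page-frame mask is shifted
+ right by (32 - 13) to line up with X86_PDE4M_PG_HIGH_MASK. */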
+
+ pVCpu->pgm.s.fGstPaeMbzPteMask = fMbzPageFrameMask | X86_PTE_PAE_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstPaeMbzPdeMask = fMbzPageFrameMask | X86_PDE_PAE_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstPaeMbzBigPdeMask = fMbzPageFrameMask | X86_PDE2M_PAE_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstPaeMbzPdpeMask = fMbzPageFrameMask | X86_PDPE_PAE_MBZ_MASK;
+
+ pVCpu->pgm.s.fGstAmd64MbzPteMask = fMbzPageFrameMask | X86_PTE_LM_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstAmd64MbzPdeMask = fMbzPageFrameMask | X86_PDE_LM_MBZ_MASK_NX;
+ pVCpu->pgm.s.fGstAmd64MbzBigPdeMask = fMbzPageFrameMask | X86_PDE2M_LM_MBZ_MASK_NX;
+ pVCpu->pgm.s.fGstAmd64MbzPdpeMask = fMbzPageFrameMask | X86_PDPE_LM_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask = fMbzPageFrameMask | X86_PDPE1G_LM_MBZ_MASK_NO_NX;
+ pVCpu->pgm.s.fGstAmd64MbzPml4eMask = fMbzPageFrameMask | X86_PML4E_MBZ_MASK_NO_NX;
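+ /* In each mask above, fMbzPageFrameMask contributes the page-frame bits
+ beyond MAXPHYADDR, while the X86_*_MBZ_MASK_* constants contribute the
+ architecturally reserved bits for that entry type (NX assumed disabled). */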
+
+ pVCpu->pgm.s.fGst64ShadowedPteMask = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D;
+ pVCpu->pgm.s.fGst64ShadowedPdeMask = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A;
+ pVCpu->pgm.s.fGst64ShadowedBigPdeMask = X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A;
+ pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask
+ = X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_G | X86_PDE4M_A | X86_PDE4M_D;
+ pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+ pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ pVCpu->pgm.s.uEptVpidCapMsr = fEptVpidCap;
+ pVCpu->pgm.s.fGstEptMbzPteMask = fMbzPageFrameMask | EPT_PTE_MBZ_MASK;
+ pVCpu->pgm.s.fGstEptMbzPdeMask = fMbzPageFrameMask | EPT_PDE_MBZ_MASK;
+ pVCpu->pgm.s.fGstEptMbzBigPdeMask = fMbzPageFrameMask | fGstEptMbzBigPdeMask;
+ pVCpu->pgm.s.fGstEptMbzPdpteMask = fMbzPageFrameMask | EPT_PDPTE_MBZ_MASK;
+ pVCpu->pgm.s.fGstEptMbzBigPdpteMask = fMbzPageFrameMask | fGstEptMbzBigPdpteMask;
+ pVCpu->pgm.s.fGstEptMbzPml4eMask = fMbzPageFrameMask | EPT_PML4E_MBZ_MASK;
+
+ /* If any of the features in the assert below are enabled, additional bits would need to be shadowed. */
+ Assert( !pVM->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt
+ && !pVM->cpum.ro.GuestFeatures.fVmxSppEpt
+ && !pVM->cpum.ro.GuestFeatures.fVmxEptXcptVe
+ && !(fEptVpidCap & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY));
+ /* We currently do -not- shadow reserved bits in guest page tables but instead trap them using non-present permissions,
+ see todo in (NestedSyncPT). */
+ pVCpu->pgm.s.fGstEptShadowedPteMask = EPT_PRESENT_MASK | EPT_E_MEMTYPE_MASK | EPT_E_IGNORE_PAT;
+ pVCpu->pgm.s.fGstEptShadowedPdeMask = EPT_PRESENT_MASK;
+ pVCpu->pgm.s.fGstEptShadowedBigPdeMask = EPT_PRESENT_MASK | EPT_E_MEMTYPE_MASK | EPT_E_IGNORE_PAT | EPT_E_LEAF;
+ pVCpu->pgm.s.fGstEptShadowedPdpteMask = EPT_PRESENT_MASK | EPT_E_MEMTYPE_MASK | EPT_E_IGNORE_PAT | EPT_E_LEAF;
+ pVCpu->pgm.s.fGstEptShadowedPml4eMask = EPT_PRESENT_MASK | EPT_PML4E_MBZ_MASK;
+ /* If mode-based execute control for EPT is enabled, we would need to include bit 10 in the present mask. */
+ pVCpu->pgm.s.fGstEptPresentMask = EPT_PRESENT_MASK;
+#endif
+ }
+
+ /*
+ * Note that AMD uses all 8 reserved PSE-36 bits for the address (40 bits in
+ * total), whereas Intel originally only documented 36 bits. More recent
+ * Intel manuals specify 40 bits just like AMD, so we use the CPU's reported
+ * width, but never less than 36 bits.
+ */
+ uint32_t u32Dummy, u32Features;
+ CPUMGetGuestCpuId(VMMGetCpu(pVM), 1, 0, -1 /*f64BitMode*/, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
+ if (u32Features & X86_CPUID_FEATURE_EDX_PSE36)
+ pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(RT_MAX(36, cMaxPhysAddrWidth)) - 1;
+ else
+ pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1;
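+ /* E.g. with PSE-36 and a 40-bit MAXPHYADDR this gives
+ GCPhys4MBPSEMask = 0xffffffffff (40 one-bits); without PSE-36 it is the
+ plain 32-bit mask 0xffffffff. */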
+
+ /*
+ * Allocate memory if we're supposed to do that.
+ */
+ int rc = VINF_SUCCESS;
+ if (pVM->pgm.s.fRamPreAlloc)
+ rc = pgmR3PhysRamPreAllocate(pVM);
+
+ //pgmLogState(pVM);
+ LogRel(("PGM: PGMR3InitFinalize: 4 MB PSE mask %RGp -> %Rrc\n", pVM->pgm.s.GCPhys4MBPSEMask, rc));
+ return rc;
+}
+
+
+/**
+ * Init phase completed callback.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat What has been completed.
+ * @thread EMT(0)
+ */
+VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ switch (enmWhat)
+ {
+ case VMINITCOMPLETED_HM:
+#ifdef VBOX_WITH_PCI_PASSTHROUGH
+ if (pVM->pgm.s.fPciPassthrough)
+ {
+ AssertLogRelReturn(pVM->pgm.s.fRamPreAlloc, VERR_PCI_PASSTHROUGH_NO_RAM_PREALLOC);
+ AssertLogRelReturn(HMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HM);
+ AssertLogRelReturn(HMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);
+
+ /*
+ * Report assignments to the IOMMU (hope that's good enough for now).
+ * Note: fPciPassthrough was already checked by the enclosing if.
+ */
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_SETUP_IOMMU, 0, NULL);
+ AssertRCReturn(rc, rc);
+ }
+#else
+ AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_PGM_PCI_PASSTHRU_MISCONFIG);
+#endif
+ break;
+
+ default:
+ /* shut up gcc */
+ break;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this component.
+ *
+ * This function will be called at init and whenever the VMM needs to
+ * relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ LogFlow(("PGMR3Relocate: offDelta=%RGv\n", offDelta));
+
+ /*
+ * Paging stuff.
+ */
+
+ /* Shadow, guest and both mode switch & relocation for each VCPU. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
+ if ( idxShw < RT_ELEMENTS(g_aPgmShadowModeData)
+ && g_aPgmShadowModeData[idxShw].pfnRelocate)
+ g_aPgmShadowModeData[idxShw].pfnRelocate(pVCpu, offDelta);
+ else
+ AssertFailed();
+
+ uintptr_t const idxGst = pVCpu->pgm.s.idxGuestModeData;
+ if ( idxGst < RT_ELEMENTS(g_aPgmGuestModeData)
+ && g_aPgmGuestModeData[idxGst].pfnRelocate)
+ g_aPgmGuestModeData[idxGst].pfnRelocate(pVCpu, offDelta);
+ else
+ AssertFailed();
+ }
+
+ /*
+ * Ram ranges.
+ */
+ if (pVM->pgm.s.pRamRangesXR3)
+ pgmR3PhysRelinkRamRanges(pVM);
+
+ /*
+ * The page pool.
+ */
+ pgmR3PoolRelocate(pVM);
+}
+
+
+/**
+ * Resets a virtual CPU when unplugged.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
+{
+ uintptr_t const idxGst = pVCpu->pgm.s.idxGuestModeData;
+ if ( idxGst < RT_ELEMENTS(g_aPgmGuestModeData)
+ && g_aPgmGuestModeData[idxGst].pfnExit)
+ {
+ int rc = g_aPgmGuestModeData[idxGst].pfnExit(pVCpu);
+ AssertReleaseRC(rc);
+ }
+ pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
+ pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
+ pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
+
+ int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
+ AssertReleaseRC(rc);
+
+ STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cGuestModeChanges);
+
+ pgmR3PoolResetUnpluggedCpu(pVM, pVCpu);
+
+ /*
+ * Re-init other members.
+ */
+ pVCpu->pgm.s.fA20Enabled = true;
+ pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
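+ /* With fA20Enabled set above, the expression evaluates to ~(RTGCPHYS)0, i.e.
+ no bits masked; only with A20 disabled would bit 20 be cleared to emulate
+ the A20 gate wrap-around. */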
+
+ /*
+ * Clear the FFs PGM owns.
+ */
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the PGM component this means that any PD write monitors
+ * need to be removed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
+{
+ LogFlow(("PGMR3Reset:\n"));
+ VM_ASSERT_EMT(pVM);
+
+ PGM_LOCK_VOID(pVM);
+
+ /*
+ * Exit the guest paging mode before the pgm pool gets reset.
+ * Important to clean up the amd64 case.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ uintptr_t const idxGst = pVCpu->pgm.s.idxGuestModeData;
+ if ( idxGst < RT_ELEMENTS(g_aPgmGuestModeData)
+ && g_aPgmGuestModeData[idxGst].pfnExit)
+ {
+ int rc = g_aPgmGuestModeData[idxGst].pfnExit(pVCpu);
+ AssertReleaseRC(rc);
+ }
+ pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
+ pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
+ }
+
+#ifdef DEBUG
+ DBGFR3_INFO_LOG_SAFE(pVM, "mappings", NULL);
+ DBGFR3_INFO_LOG_SAFE(pVM, "handlers", "all nostat");
+#endif
+
+ /*
+ * Switch mode back to real mode. (Before resetting the pgm pool!)
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
+ AssertReleaseRC(rc);
+
+ STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cGuestModeChanges);
+ STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cA20Changes);
+ }
+
+ /*
+ * Reset the shadow page pool.
+ */
+ pgmR3PoolReset(pVM);
+
+ /*
+ * Re-init various other members and clear the FFs that PGM owns.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ pVCpu->pgm.s.fGst32BitPageSizeExtension = false;
+ PGMNotifyNxeChanged(pVCpu, false);
+
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+
+ if (!pVCpu->pgm.s.fA20Enabled)
+ {
+ pVCpu->pgm.s.fA20Enabled = true;
+ pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
+#ifdef PGM_WITH_A20
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ pgmR3RefreshShadowModeAfterA20Change(pVCpu);
+ HMFlushTlb(pVCpu);
+#endif
+ }
+ }
+
+ //pgmLogState(pVM);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Memory setup after VM construction or reset.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fAtReset Indicates the context, after reset if @c true or after
+ * construction if @c false.
+ */
+VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
+{
+ if (fAtReset)
+ {
+ PGM_LOCK_VOID(pVM);
+
+ int rc = pgmR3PhysRamZeroAll(pVM);
+ AssertReleaseRC(rc);
+
+ rc = pgmR3PhysRomReset(pVM);
+ AssertReleaseRC(rc);
+
+ PGM_UNLOCK(pVM);
+ }
+}
+
+
+#ifdef VBOX_STRICT
+/**
+ * VM state change callback for clearing fNoMorePhysWrites after
+ * a snapshot has been created.
+ */
+static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
+ VMSTATE enmOldState, void *pvUser)
+{
+ if ( enmState == VMSTATE_RUNNING
+ || enmState == VMSTATE_RESUMING)
+ pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
+ RT_NOREF(pVMM, enmOldState, pvUser);
+}
+#endif
+
+/**
+ * Private API to reset fNoMorePhysWrites.
+ */
+VMMR3_INT_DECL(void) PGMR3ResetNoMorePhysWritesFlag(PVM pVM)
+{
+ pVM->pgm.s.fNoMorePhysWrites = false;
+}
+
+/**
+ * Terminates the PGM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3Term(PVM pVM)
+{
+ /* Must free shared pages here. */
+ PGM_LOCK_VOID(pVM);
+ pgmR3PhysRamTerm(pVM);
+ pgmR3PhysRomTerm(pVM);
+ PGM_UNLOCK(pVM);
+
+ PGMDeregisterStringFormatTypes();
+ return PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
+}
+
+
+/**
+ * Show paging mode.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs "all" (default), "guest", "shadow" or "host".
+ */
+static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /* digest argument. */
+ bool fGuest, fShadow, fHost;
+ if (pszArgs)
+ pszArgs = RTStrStripL(pszArgs);
+ if (!pszArgs || !*pszArgs || strstr(pszArgs, "all"))
+ fShadow = fHost = fGuest = true;
+ else
+ {
+ fShadow = fHost = fGuest = false;
+ if (strstr(pszArgs, "guest"))
+ fGuest = true;
+ if (strstr(pszArgs, "shadow"))
+ fShadow = true;
+ if (strstr(pszArgs, "host"))
+ fHost = true;
+ }
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ /* print info. */
+ if (fGuest)
+ {
+ pHlp->pfnPrintf(pHlp, "Guest paging mode (VCPU #%u): %s (changed %RU64 times), A20 %s (changed %RU64 times)\n",
+ pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmGuestMode), pVCpu->pgm.s.cGuestModeChanges.c,
+ pVCpu->pgm.s.fA20Enabled ? "enabled" : "disabled", pVCpu->pgm.s.cA20Changes.c);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID)
+ pHlp->pfnPrintf(pHlp, "Guest SLAT mode (VCPU #%u): %s\n", pVCpu->idCpu,
+ PGMGetSlatModeName(pVCpu->pgm.s.enmGuestSlatMode));
+#endif
+ }
+ if (fShadow)
+ pHlp->pfnPrintf(pHlp, "Shadow paging mode (VCPU #%u): %s\n", pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmShadowMode));
+ if (fHost)
+ {
+ const char *psz;
+ switch (pVM->pgm.s.enmHostMode)
+ {
+ case SUPPAGINGMODE_INVALID: psz = "invalid"; break;
+ case SUPPAGINGMODE_32_BIT: psz = "32-bit"; break;
+ case SUPPAGINGMODE_32_BIT_GLOBAL: psz = "32-bit+G"; break;
+ case SUPPAGINGMODE_PAE: psz = "PAE"; break;
+ case SUPPAGINGMODE_PAE_GLOBAL: psz = "PAE+G"; break;
+ case SUPPAGINGMODE_PAE_NX: psz = "PAE+NX"; break;
+ case SUPPAGINGMODE_PAE_GLOBAL_NX: psz = "PAE+G+NX"; break;
+ case SUPPAGINGMODE_AMD64: psz = "AMD64"; break;
+ case SUPPAGINGMODE_AMD64_GLOBAL: psz = "AMD64+G"; break;
+ case SUPPAGINGMODE_AMD64_NX: psz = "AMD64+NX"; break;
+ case SUPPAGINGMODE_AMD64_GLOBAL_NX: psz = "AMD64+G+NX"; break;
+ default: psz = "unknown"; break;
+ }
+ pHlp->pfnPrintf(pHlp, "Host paging mode: %s\n", psz);
+ }
+}
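+
+/* Usage sketch (hypothetical; this assumes the handler is registered with
+ * DBGF under the name "mode", which happens outside this hunk):
+ *      DBGFR3Info(pUVM, "mode", "guest shadow", NULL);
+ * Passing NULL for pHlp makes DBGF use its default output helpers. */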
+
+
+/**
+ * Dump registered MMIO ranges to the log.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ bool const fVerbose = pszArgs && strstr(pszArgs, "verbose") != NULL;
+
+ pHlp->pfnPrintf(pHlp,
+ "RAM ranges (pVM=%p)\n"
+ "%.*s %.*s\n",
+ pVM,
+ sizeof(RTGCPHYS) * 4 + 1, "GC Phys Range ",
+ sizeof(RTHCPTR) * 2, "pvHC ");
+
+ for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ pHlp->pfnPrintf(pHlp,
+ "%RGp-%RGp %RHv %s\n",
+ pCur->GCPhys,
+ pCur->GCPhysLast,
+ pCur->pvR3,
+ pCur->pszDesc);
+ if (fVerbose)
+ {
+ RTGCPHYS const cPages = pCur->cb >> X86_PAGE_SHIFT;
+ RTGCPHYS iPage = 0;
+ while (iPage < cPages)
+ {
+ RTGCPHYS const iFirstPage = iPage;
+ PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]);
+ do
+ iPage++;
+ while (iPage < cPages && (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == enmType);
+ const char *pszType;
+ const char *pszMore = NULL;
+ switch (enmType)
+ {
+ case PGMPAGETYPE_RAM:
+ pszType = "RAM";
+ break;
+
+ case PGMPAGETYPE_MMIO2:
+ pszType = "MMIO2";
+ break;
+
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ pszType = "MMIO2-alias-MMIO";
+ break;
+
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
+ pszType = "special-alias-MMIO";
+ break;
+
+ case PGMPAGETYPE_ROM_SHADOW:
+ case PGMPAGETYPE_ROM:
+ {
+ pszType = enmType == PGMPAGETYPE_ROM_SHADOW ? "ROM-shadowed" : "ROM";
+
+ RTGCPHYS const GCPhysFirstPg = iFirstPage * X86_PAGE_SIZE;
+ PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
+ while (pRom && GCPhysFirstPg > pRom->GCPhysLast)
+ pRom = pRom->pNextR3;
+ if (pRom && GCPhysFirstPg - pRom->GCPhys < pRom->cb)
+ pszMore = pRom->pszDesc;
+ break;
+ }
+
+ case PGMPAGETYPE_MMIO:
+ {
+ pszType = "MMIO";
+ PGM_LOCK_VOID(pVM);
+ PPGMPHYSHANDLER pHandler;
+ int rc = pgmHandlerPhysicalLookup(pVM, iFirstPage * X86_PAGE_SIZE, &pHandler);
+ if (RT_SUCCESS(rc))
+ pszMore = pHandler->pszDesc;
+ PGM_UNLOCK(pVM);
+ break;
+ }
+
+ case PGMPAGETYPE_INVALID:
+ pszType = "invalid";
+ break;
+
+ default:
+ pszType = "bad";
+ break;
+ }
+ if (pszMore)
+ pHlp->pfnPrintf(pHlp, " %RGp-%RGp %-20s %s\n",
+ pCur->GCPhys + iFirstPage * X86_PAGE_SIZE,
+ pCur->GCPhys + iPage * X86_PAGE_SIZE - 1,
+ pszType, pszMore);
+ else
+ pHlp->pfnPrintf(pHlp, " %RGp-%RGp %s\n",
+ pCur->GCPhys + iFirstPage * X86_PAGE_SIZE,
+ pCur->GCPhys + iPage * X86_PAGE_SIZE - 1,
+ pszType);
+ }
+ }
+ }
+}
+
+
+/**
+ * Dump the page directory to the log.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /** @todo SMP support!! */
+ PVMCPU pVCpu = pVM->apCpusR3[0];
+
+/** @todo fix this! Convert the PGMR3DumpHierarchyHC functions to do guest stuff. */
+ /* Big pages supported? */
+ const bool fPSE = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
+
+ /* Global pages supported? */
+ const bool fPGE = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);
+
+ NOREF(pszArgs);
+
+ /*
+ * Get page directory addresses.
+ */
+ PGM_LOCK_VOID(pVM);
+ PX86PD pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
+ Assert(pPDSrc);
+
+ /*
+ * Iterate the page directory.
+ */
+ for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
+ {
+ X86PDE PdeSrc = pPDSrc->a[iPD];
+ if (PdeSrc.u & X86_PDE_P)
+ {
+ if ((PdeSrc.u & X86_PDE_PS) && fPSE)
+ pHlp->pfnPrintf(pHlp,
+ "%04X - %RGp P=%d U=%d RW=%d G=%d - BIG\n",
+ iPD,
+ pgmGstGet4MBPhysPage(pVM, PdeSrc), PdeSrc.u & X86_PDE_P, !!(PdeSrc.u & X86_PDE_US),
+ !!(PdeSrc.u & X86_PDE_RW), (PdeSrc.u & X86_PDE4M_G) && fPGE);
+ else
+ pHlp->pfnPrintf(pHlp,
+ "%04X - %RGp P=%d U=%d RW=%d [G=%d]\n",
+ iPD,
+ (RTGCPHYS)(PdeSrc.u & X86_PDE_PG_MASK), PdeSrc.u & X86_PDE_P, !!(PdeSrc.u & X86_PDE_US),
+ !!(PdeSrc.u & X86_PDE_RW), (PdeSrc.u & X86_PDE4M_G) && fPGE);
+ }
+ }
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Called by pgmPoolFlushAllInt prior to flushing the pool.
+ *
+ * @returns VBox status code, fully asserted.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu)
+{
+ /* Unmap the old CR3 value before flushing everything. */
+ int rc = VINF_SUCCESS;
+ uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
+ if ( idxBth < RT_ELEMENTS(g_aPgmBothModeData)
+ && g_aPgmBothModeData[idxBth].pfnUnmapCR3)
+ {
+ rc = g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
+ AssertRC(rc);
+ }
+
+ /* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
+ uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
+ if ( idxShw < RT_ELEMENTS(g_aPgmShadowModeData)
+ && g_aPgmShadowModeData[idxShw].pfnExit)
+ {
+ rc = g_aPgmShadowModeData[idxShw].pfnExit(pVCpu);
+ AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
+ }
+
+ Assert(pVCpu->pgm.s.pShwPageCR3R3 == NULL);
+ return rc;
+}
+
+
+/**
+ * Called by pgmPoolFlushAllInt after flushing the pool.
+ *
+ * @returns VBox status code, fully asserted.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
+{
+ pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
+ int rc = PGMHCChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu), false /* fForce */);
+ Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ AssertRCReturn(rc, rc);
+ AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
+
+ Assert(pVCpu->pgm.s.pShwPageCR3R3 != NULL || pVCpu->pgm.s.enmShadowMode == PGMMODE_NONE);
+ AssertMsg( pVCpu->pgm.s.enmShadowMode >= PGMMODE_NESTED_32BIT
+ || CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu),
+ ("%RHp != %RHp %s\n", (RTHCPHYS)CPUMGetHyperCR3(pVCpu), PGMGetHyperCR3(pVCpu), PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
+ return rc;
+}
+
+
+/**
+ * Called by PGMR3PhysSetA20 after changing the A20 state.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
+{
+ /** @todo Probably doing a bit too much here. */
+ int rc = pgmR3ExitShadowModeBeforePoolFlush(pVCpu);
+ AssertReleaseRC(rc);
+ rc = pgmR3ReEnterShadowModeAfterPoolFlush(pVCpu->CTX_SUFF(pVM), pVCpu);
+ AssertReleaseRC(rc);
+}
+
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmerror' and '.pgmerroroff' commands.}
+ */
+static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, cArgs == 0 || (cArgs == 1 && paArgs[0].enmType == DBGCVAR_TYPE_STRING));
+
+ if (!cArgs)
+ {
+ /*
+ * Print the list of error injection locations with status.
+ */
+ DBGCCmdHlpPrintf(pCmdHlp, "PGM error inject locations:\n");
+ DBGCCmdHlpPrintf(pCmdHlp, " handy - %RTbool\n", pVM->pgm.s.fErrInjHandyPages);
+ }
+ else
+ {
+ /*
+ * String switch on where to inject the error.
+ */
+ bool const fNewState = !strcmp(pCmd->pszCmd, "pgmerror");
+ const char *pszWhere = paArgs[0].u.pszString;
+ if (!strcmp(pszWhere, "handy"))
+ ASMAtomicWriteBool(&pVM->pgm.s.fErrInjHandyPages, fNewState);
+ else
+ return DBGCCmdHlpPrintf(pCmdHlp, "error: Invalid 'where' value: %s.\n", pszWhere);
+ DBGCCmdHlpPrintf(pCmdHlp, "done\n");
+ }
+ return VINF_SUCCESS;
+}
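+
+/* Example (debugger console): '.pgmerror handy' arms error injection for the
+ * handy page allocation path, '.pgmerroroff handy' disarms it again, and a
+ * plain '.pgmerror' lists the injection points with their current state. */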
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsync' command.}
+ */
+static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp));
+ if (!pVCpu)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid CPU ID");
+
+ /*
+ * Force page directory sync.
+ */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+
+ int rc = DBGCCmdHlpPrintf(pCmdHlp, "Forcing page directory sync.\n");
+ if (RT_FAILURE(rc))
+ return rc;
+
+ return VINF_SUCCESS;
+}
+
+#ifdef VBOX_STRICT
+
+/**
+ * EMT callback for pgmR3CmdAssertCR3.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pcErrors Where to return the error count.
+ */
+static DECLCALLBACK(int) pgmR3CmdAssertCR3EmtWorker(PUVM pUVM, unsigned *pcErrors)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+
+ *pcErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmassertcr3' command.}
+ */
+static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+
+ int rc = DBGCCmdHlpPrintf(pCmdHlp, "Checking shadow CR3 page tables for consistency.\n");
+ if (RT_FAILURE(rc))
+ return rc;
+
+ unsigned cErrors = 0;
+ rc = VMR3ReqCallWaitU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp), (PFNRT)pgmR3CmdAssertCR3EmtWorker, 2, pUVM, &cErrors);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "VMR3ReqCallWaitU failed: %Rrc", rc);
+ if (cErrors > 0)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "PGMAssertCR3: %u error(s)", cErrors);
+ return DBGCCmdHlpPrintf(pCmdHlp, "PGMAssertCR3: OK\n");
+}
+
+#endif /* VBOX_STRICT */
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsyncalways' command.}
+ */
+static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp));
+ if (!pVCpu)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid CPU ID");
+
+ /*
+ * Force page directory sync.
+ */
+ int rc;
+ if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS)
+ {
+ ASMAtomicAndU32(&pVCpu->pgm.s.fSyncFlags, ~PGM_SYNC_ALWAYS);
+ rc = DBGCCmdHlpPrintf(pCmdHlp, "Disabled permanent forced page directory syncing.\n");
+ }
+ else
+ {
+ ASMAtomicOrU32(&pVCpu->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ rc = DBGCCmdHlpPrintf(pCmdHlp, "Enabled permanent forced page directory syncing.\n");
+ }
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmphystofile' command.}
+ */
+static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ NOREF(pCmd);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, cArgs == 1 || cArgs == 2);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, paArgs[0].enmType == DBGCVAR_TYPE_STRING);
+ if (cArgs == 2)
+ {
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, paArgs[1].enmType == DBGCVAR_TYPE_STRING);
+ if (strcmp(paArgs[1].u.pszString, "nozero"))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
+ }
+ bool fIncZeroPgs = cArgs < 2;
+
+ /*
+ * Open the output file and get the ram parameters.
+ */
+ RTFILE hFile;
+ int rc = RTFileOpen(&hFile, paArgs[0].u.pszString, RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
+
+ uint32_t cbRamHole = 0;
+ CFGMR3QueryU32Def(CFGMR3GetRootU(pUVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
+ uint64_t cbRam = 0;
+ CFGMR3QueryU64Def(CFGMR3GetRootU(pUVM), "RamSize", &cbRam, 0);
+ RTGCPHYS GCPhysEnd = cbRam + cbRamHole;
+
+ /*
+ * Dump the physical memory, page by page.
+ */
+ RTGCPHYS GCPhys = 0;
+ char abZeroPg[GUEST_PAGE_SIZE];
+ RT_ZERO(abZeroPg);
+
+ PGM_LOCK_VOID(pVM);
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ pRam && pRam->GCPhys < GCPhysEnd && RT_SUCCESS(rc);
+ pRam = pRam->pNextR3)
+ {
+ /* fill the gap */
+ if (pRam->GCPhys > GCPhys && fIncZeroPgs)
+ {
+ while (pRam->GCPhys > GCPhys && RT_SUCCESS(rc))
+ {
+ rc = RTFileWrite(hFile, abZeroPg, GUEST_PAGE_SIZE, NULL);
+ GCPhys += GUEST_PAGE_SIZE;
+ }
+ }
+
+ PCPGMPAGE pPage = &pRam->aPages[0];
+ while (GCPhys < pRam->GCPhysLast && RT_SUCCESS(rc))
+ {
+ if ( PGM_PAGE_IS_ZERO(pPage)
+ || PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ if (fIncZeroPgs)
+ {
+ rc = RTFileWrite(hFile, abZeroPg, GUEST_PAGE_SIZE, NULL);
+ if (RT_FAILURE(rc))
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ }
+ }
+ else
+ {
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_RAM:
+ case PGMPAGETYPE_ROM_SHADOW: /* trouble?? */
+ case PGMPAGETYPE_ROM:
+ case PGMPAGETYPE_MMIO2:
+ {
+ void const *pvPage;
+ PGMPAGEMAPLOCK Lock;
+ rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTFileWrite(hFile, pvPage, GUEST_PAGE_SIZE, NULL);
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ if (RT_FAILURE(rc))
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ }
+ else
+ DBGCCmdHlpPrintf(pCmdHlp, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ break;
+ }
+
+ default:
+ AssertFailed();
+ RT_FALL_THRU();
+ case PGMPAGETYPE_MMIO:
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
+ if (fIncZeroPgs)
+ {
+ rc = RTFileWrite(hFile, abZeroPg, GUEST_PAGE_SIZE, NULL);
+ if (RT_FAILURE(rc))
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ }
+ break;
+ }
+ }
+
+ /* advance */
+ GCPhys += GUEST_PAGE_SIZE;
+ pPage++;
+ }
+ }
+ PGM_UNLOCK(pVM);
+
+ RTFileClose(hFile);
+ if (RT_SUCCESS(rc))
+ return DBGCCmdHlpPrintf(pCmdHlp, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
+ return VINF_SUCCESS;
+}
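+
+/* Example (debugger console): '.pgmphystofile ram.bin' writes all guest RAM
+ * to ram.bin, emitting zero-filled pages for zero, ballooned and MMIO pages,
+ * while '.pgmphystofile ram.bin nozero' skips those pages entirely. */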
+
+#endif /* VBOX_WITH_DEBUGGER */
+
+/**
+ * pvUser argument of the pgmR3CheckIntegrity*Node callbacks.
+ */
+typedef struct PGMCHECKINTARGS
+{
+ bool fLeftToRight; /**< true: left-to-right; false: right-to-left. */
+ uint32_t cErrors;
+ PPGMPHYSHANDLER pPrevPhys;
+ PVM pVM;
+} PGMCHECKINTARGS, *PPGMCHECKINTARGS;
+
+/**
+ * Validate a node in the physical handler tree.
+ *
+ * @returns 0 on success, VERR_INVALID_POINTER if the node pointer itself is
+ *          bad; other inconsistencies merely bump the error counter.
+ * @param pNode The handler node.
+ * @param pvUser Pointer to a PGMCHECKINTARGS structure.
+ */
+static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PPGMPHYSHANDLER pNode, void *pvUser)
+{
+ PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
+
+ AssertLogRelMsgReturnStmt(!((uintptr_t)pNode & 7), ("pNode=%p\n", pNode), pArgs->cErrors++, VERR_INVALID_POINTER);
+
+ AssertLogRelMsgStmt(pNode->Key <= pNode->KeyLast,
+ ("pNode=%p %RGp-%RGp %s\n", pNode, pNode->Key, pNode->KeyLast, pNode->pszDesc),
+ pArgs->cErrors++);
+
+ AssertLogRelMsgStmt( !pArgs->pPrevPhys
+ || ( pArgs->fLeftToRight
+ ? pArgs->pPrevPhys->KeyLast < pNode->Key
+ : pArgs->pPrevPhys->KeyLast > pNode->Key),
+ ("pPrevPhys=%p %RGp-%RGp %s\n"
+ " pNode=%p %RGp-%RGp %s\n",
+ pArgs->pPrevPhys, pArgs->pPrevPhys->Key, pArgs->pPrevPhys->KeyLast, pArgs->pPrevPhys->pszDesc,
+ pNode, pNode->Key, pNode->KeyLast, pNode->pszDesc),
+ pArgs->cErrors++);
+
+ pArgs->pPrevPhys = pNode;
+ return 0;
+}
+
+
+/**
+ * Perform an integrity check on the PGM component.
+ *
+ * @returns VINF_SUCCESS if everything is fine.
+ * @returns VBox error status after asserting on integrity breach.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
+{
+ /*
+ * Check the trees.
+ */
+ PGMCHECKINTARGS Args = { true, 0, NULL, pVM };
+ int rc = pVM->pgm.s.pPhysHandlerTree->doWithAllFromLeft(&pVM->pgm.s.PhysHandlerAllocator,
+ pgmR3CheckIntegrityPhysHandlerNode, &Args);
+ AssertLogRelRCReturn(rc, rc);
+
+ Args.fLeftToRight = false;
+ Args.pPrevPhys = NULL;
+ rc = pVM->pgm.s.pPhysHandlerTree->doWithAllFromRight(&pVM->pgm.s.PhysHandlerAllocator,
+ pgmR3CheckIntegrityPhysHandlerNode, &Args);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn(pVM->pgm.s.pPhysHandlerTree->m_cErrors == 0,
+ ("m_cErrors=%#x\n", pVM->pgm.s.pPhysHandlerTree->m_cErrors),
+ VERR_INTERNAL_ERROR);
+
+ return Args.cErrors == 0 ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PGMDbg.cpp b/src/VBox/VMM/VMMR3/PGMDbg.cpp
new file mode 100644
index 00000000..5514339c
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMDbg.cpp
@@ -0,0 +1,3494 @@
+/* $Id: PGMDbg.cpp $ */
+/** @file
+ * PGM - Page Manager and Monitor - Debugger & Debugging APIs.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM
+/** @todo define VBOX_WITHOUT_PAGING_BIT_FIELDS - not so important here, should only be reading for debugging purposes. */
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/stam.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/vmm/uvm.h>
+#include "PGMInline.h"
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The max needle size that we will bother searching for.
+ * This must not be more than half a page! */
+#define MAX_NEEDLE_SIZE 256
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * State structure for the paging hierarchy dumpers.
+ */
+typedef struct PGMR3DUMPHIERARCHYSTATE
+{
+ /** Pointer to the VM. */
+ PVM pVM;
+ /** Output helpers. */
+ PCDBGFINFOHLP pHlp;
+ /** Set if PSE, PAE or long mode is enabled. */
+ bool fPse;
+ /** Set if PAE or long mode is enabled. */
+ bool fPae;
+ /** Set if long mode is enabled. */
+ bool fLme;
+ /** Set if nested paging. */
+ bool fNp;
+ /** Set if EPT. */
+ bool fEpt;
+ /** Set if NXE is enabled. */
+ bool fNxe;
+ /** The number of chars the address needs. */
+ uint8_t cchAddress;
+ /** The last reserved bit. */
+ uint8_t uLastRsvdBit;
+ /** Dump the page info as well (shadow page summary / guest physical
+ * page summary). */
+ bool fDumpPageInfo;
+ /** Whether or not to print the header. */
+ bool fPrintHeader;
+ /** Whether to print the CR3 value */
+ bool fPrintCr3;
+ /** Padding. */
+ bool afReserved[5];
+ /** The current address. */
+ uint64_t u64Address;
+ /** The first address to dump structures for. */
+ uint64_t u64FirstAddress;
+ /** The last address to dump structures for. */
+ uint64_t u64LastAddress;
+ /** Mask with the high reserved bits set. */
+ uint64_t u64HighReservedBits;
+ /** The number of leaf entries that we've printed. */
+ uint64_t cLeaves;
+} PGMR3DUMPHIERARCHYSTATE;
+/** Pointer to the paging hierarchy dumper state. */
+typedef PGMR3DUMPHIERARCHYSTATE *PPGMR3DUMPHIERARCHYSTATE;
+
+
+/**
+ * Assembly scanning function.
+ *
+ * @returns Pointer to possible match or NULL.
+ * @param pbHaystack Pointer to what we search in.
+ * @param cbHaystack Number of bytes to search.
+ * @param pbNeedle Pointer to what we search for.
+ * @param cbNeedle Size of what we're searching for.
+ */
+typedef DECLCALLBACKTYPE(uint8_t const *, FNPGMR3DBGFIXEDMEMSCAN,(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle));
+/** Pointer to a fixed size and step assembly scanner function. */
+typedef FNPGMR3DBGFIXEDMEMSCAN *PFNPGMR3DBGFIXEDMEMSCAN;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan8Wide8Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan4Wide4Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan2Wide2Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan1Wide1Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan4Wide1Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+DECLASM(uint8_t const *) pgmR3DbgFixedMemScan8Wide1Step(uint8_t const *, uint32_t, uint8_t const *, size_t);
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static char const g_aaszEptMemType[2][8][3] =
+{
+ { "--", "!1", "!2", "!3", "!4", "!5", "!6", "!7" }, /* non-leaf */
+ { "UC", "WC", "2!", "3!", "WT", "WP", "WB", "7!" } /* leaf */
+};
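+/* First index: 0 = non-leaf, 1 = leaf entry. Second index: the entry's EPT
+ * memory type field (bits 5:3); the "!n" strings mark values that are
+ * reserved in that position. */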
+
+
+/**
+ * Converts a R3 pointer to a GC physical address.
+ *
+ * Only for the debugger.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, *pGCPhys is set.
+ * @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param R3Ptr The R3 pointer to convert.
+ * @param pGCPhys Where to store the GC physical address on success.
+ */
+VMMR3DECL(int) PGMR3DbgR3Ptr2GCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys)
+{
+ NOREF(pUVM); NOREF(R3Ptr);
+ *pGCPhys = NIL_RTGCPHYS;
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Converts a R3 pointer to a HC physical address.
+ *
+ * Only for the debugger.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, *pHCPhys is set.
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical page but has no physical backing.
+ * @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param R3Ptr The R3 pointer to convert.
+ * @param pHCPhys Where to store the HC physical address on success.
+ */
+VMMR3DECL(int) PGMR3DbgR3Ptr2HCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys)
+{
+ NOREF(pUVM); NOREF(R3Ptr);
+ *pHCPhys = NIL_RTHCPHYS;
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Converts a HC physical address to a GC physical address.
+ *
+ * Only for the debugger.
+ *
+ * @returns VBox status code
+ * @retval VINF_SUCCESS on success, *pGCPhys is set.
+ * @retval VERR_INVALID_POINTER if the HC physical address is not within the GC physical memory.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param HCPhys The HC physical address to convert.
+ * @param pGCPhys Where to store the GC physical address on success.
+ */
+VMMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PUVM pUVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Validate and adjust the input a bit.
+ */
+ if (HCPhys == NIL_RTHCPHYS)
+ return VERR_INVALID_POINTER;
+ unsigned off = HCPhys & GUEST_PAGE_OFFSET_MASK;
+ HCPhys &= X86_PTE_PAE_PG_MASK;
+ if (HCPhys == 0)
+ return VERR_INVALID_POINTER;
+
+ for (PPGMRAMRANGE pRam = pUVM->pVM->pgm.s.CTX_SUFF(pRamRangesX);
+ pRam;
+ pRam = pRam->CTX_SUFF(pNext))
+ {
+ uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
+ while (iPage-- > 0)
+ if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
+ {
+ *pGCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT) + off;
+ return VINF_SUCCESS;
+ }
+ }
+ return VERR_INVALID_POINTER;
+}
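+
+/* Note: the reverse lookup above is a linear scan of every page in every RAM
+ * range, so this is strictly a debugger-speed operation and must not be used
+ * on any hot path. */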
+
+
+/**
+ * Read physical memory API for the debugger, similar to
+ * PGMPhysSimpleReadGCPhys.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pvDst Where to store what's read.
+ * @param GCPhysSrc Where to start reading from.
+ * @param cb The number of bytes to attempt reading.
+ * @param fFlags Flags, MBZ.
+ * @param pcbRead Where to store the actual number of bytes read, pass NULL if
+ * partial reads are unwanted.
+ * @todo Unused?
+ */
+VMMR3_INT_DECL(int) PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
+{
+ /* validate */
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+
+ /* try simple first. */
+ int rc = PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc, cb);
+ if (RT_SUCCESS(rc) || !pcbRead)
+ return rc;
+
+ /* partial read that failed, chop it up into pages. */
+ *pcbRead = 0;
+ rc = VINF_SUCCESS;
+ while (cb > 0)
+ {
+ size_t cbChunk = GUEST_PAGE_SIZE;
+ cbChunk -= GCPhysSrc & GUEST_PAGE_OFFSET_MASK;
+ if (cbChunk > cb)
+ cbChunk = cb;
+
+ rc = PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc, cbChunk);
+
+ /* advance */
+ if (RT_FAILURE(rc))
+ break;
+ *pcbRead += cbChunk;
+ cb -= cbChunk;
+ GCPhysSrc += cbChunk;
+ pvDst = (uint8_t *)pvDst + cbChunk;
+ }
+
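+ /* A negated VERR_ status is positive, so if some bytes were read before the
+ failure the caller gets an informational (non-fatal) status for the
+ partial read. */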
+ return *pcbRead && RT_FAILURE(rc) ? -rc : rc;
+}
+
+
+/**
+ * Write physical memory API for the debugger, similar to
+ * PGMPhysSimpleWriteGCPhys.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhysDst Where to start writing.
+ * @param pvSrc What to write.
+ * @param cb The number of bytes to attempt writing.
+ * @param fFlags Flags, MBZ.
+ * @param pcbWritten Where to store the actual number of bytes written, pass NULL
+ * if partial writes are unwanted.
+ * @todo Unused?
+ */
+VMMR3_INT_DECL(int) PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
+{
+ /* validate */
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+
+ /* try simple first. */
+ int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, pvSrc, cb);
+ if (RT_SUCCESS(rc) || !pcbWritten)
+ return rc;
+
+ /* partial write that failed, chop it up into pages. */
+ *pcbWritten = 0;
+ rc = VINF_SUCCESS;
+ while (cb > 0)
+ {
+ size_t cbChunk = GUEST_PAGE_SIZE;
+ cbChunk -= GCPhysDst & GUEST_PAGE_OFFSET_MASK;
+ if (cbChunk > cb)
+ cbChunk = cb;
+
+ rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, pvSrc, cbChunk);
+
+ /* advance */
+ if (RT_FAILURE(rc))
+ break;
+ *pcbWritten += cbChunk;
+ cb -= cbChunk;
+ GCPhysDst += cbChunk;
+ pvSrc = (uint8_t const *)pvSrc + cbChunk;
+ }
+
+ return *pcbWritten && RT_FAILURE(rc) ? -rc : rc;
+}
+
+
+/**
+ * Read virtual memory API for the debugger, similar to PGMPhysSimpleReadGCPtr.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pvDst Where to store what's read.
+ * @param GCPtrSrc Where to start reading from.
+ * @param cb The number of bytes to attempt reading.
+ * @param fFlags Flags, MBZ.
+ * @param pcbRead Where to store the actual number of bytes read, pass NULL if
+ * partial reads are unwanted.
+ * @todo Unused?
+ */
+VMMR3_INT_DECL(int) PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
+{
+ /* validate */
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+
+ /** @todo SMP support! */
+ PVMCPU pVCpu = pVM->apCpusR3[0];
+
+/** @todo deal with HMA */
+ /* try simple first. */
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb);
+ if (RT_SUCCESS(rc) || !pcbRead)
+ return rc;
+
+ /* partial read that failed, chop it up into pages. */
+ *pcbRead = 0;
+ rc = VINF_SUCCESS;
+ while (cb > 0)
+ {
+ size_t cbChunk = GUEST_PAGE_SIZE;
+ cbChunk -= GCPtrSrc & GUEST_PAGE_OFFSET_MASK;
+ if (cbChunk > cb)
+ cbChunk = cb;
+
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, pvDst, GCPtrSrc, cbChunk);
+
+ /* advance */
+ if (RT_FAILURE(rc))
+ break;
+ *pcbRead += cbChunk;
+ cb -= cbChunk;
+ GCPtrSrc += cbChunk;
+ pvDst = (uint8_t *)pvDst + cbChunk;
+ }
+
+ return *pcbRead && RT_FAILURE(rc) ? -rc : rc;
+}
+
+
+/**
+ * Write virtual memory API for the debugger, similar to
+ * PGMPhysSimpleWriteGCPtr.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPtrDst Where to start writing.
+ * @param pvSrc What to write.
+ * @param cb The number of bytes to attempt writing.
+ * @param fFlags Flags, MBZ.
+ * @param pcbWritten Where to store the actual number of bytes written, pass NULL
+ * if partial writes are unwanted.
+ * @todo Unused?
+ */
+VMMR3_INT_DECL(int) PGMR3DbgWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
+{
+ /* validate */
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+ AssertReturn(pVM, VERR_INVALID_PARAMETER);
+
+ /** @todo SMP support! */
+ PVMCPU pVCpu = pVM->apCpusR3[0];
+
+/** @todo deal with HMA */
+ /* try simple first. */
+ int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
+ if (RT_SUCCESS(rc) || !pcbWritten)
+ return rc;
+
+ /* partial write that failed, chop it up into pages. */
+ *pcbWritten = 0;
+ rc = VINF_SUCCESS;
+ while (cb > 0)
+ {
+ size_t cbChunk = GUEST_PAGE_SIZE;
+ cbChunk -= GCPtrDst & GUEST_PAGE_OFFSET_MASK;
+ if (cbChunk > cb)
+ cbChunk = cb;
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cbChunk);
+
+ /* advance */
+ if (RT_FAILURE(rc))
+ break;
+ *pcbWritten += cbChunk;
+ cb -= cbChunk;
+ GCPtrDst += cbChunk;
+ pvSrc = (uint8_t const *)pvSrc + cbChunk;
+ }
+
+ return *pcbWritten && RT_FAILURE(rc) ? -rc : rc;
+}
+
+
+#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
+/*
+ * For AMD64 and x86 we've got optimized assembly code for these search
+ * functions; the portable C versions below serve all other architectures.
+ */
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan8Wide8Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 8); RT_NOREF(cbNeedle);
+ const uint64_t uNeedle = *(const uint64_t *)pbNeedle;
+ uint64_t const *puHaystack = (uint64_t const *)pbHaystack;
+ cbHaystack /= sizeof(uint64_t);
+ while (cbHaystack-- > 0)
+ if (*puHaystack != uNeedle)
+ puHaystack++;
+ else
+ return (uint8_t const *)puHaystack;
+ return NULL;
+}
+
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan4Wide4Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 4); RT_NOREF(cbNeedle);
+ const uint32_t uNeedle = *(const uint32_t *)pbNeedle;
+ uint32_t const *puHaystack = (uint32_t const *)pbHaystack;
+ cbHaystack /= sizeof(uint32_t);
+ while (cbHaystack-- > 0)
+ if (*puHaystack != uNeedle)
+ puHaystack++;
+ else
+ return (uint8_t const *)puHaystack;
+ return NULL;
+}
+
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan2Wide2Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 2); RT_NOREF(cbNeedle);
+ const uint16_t uNeedle = *(const uint16_t *)pbNeedle;
+ uint16_t const *puHaystack = (uint16_t const *)pbHaystack;
+ cbHaystack /= sizeof(uint16_t);
+ while (cbHaystack-- > 0)
+ if (*puHaystack != uNeedle)
+ puHaystack++;
+ else
+ return (uint8_t const *)puHaystack;
+ return NULL;
+}
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan1Wide1Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 1); RT_NOREF(cbNeedle);
+ const uint8_t bNeedle = *pbNeedle;
+ while (cbHaystack-- > 0)
+ if (*pbHaystack != bNeedle)
+ pbHaystack++;
+ else
+ return pbHaystack;
+ return NULL;
+}
+
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan4Wide1Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 4); RT_NOREF(cbNeedle);
+ uint32_t const uNeedle = *(uint32_t const *)pbNeedle;
+ while (cbHaystack >= sizeof(uint32_t))
+ {
+ uint8_t const *pbHit = (uint8_t const *)memchr(pbHaystack, (uint8_t)uNeedle, cbHaystack - sizeof(uint32_t) + 1);
+ if (pbHit)
+ {
+ uint32_t const uFound = !((uintptr_t)pbHit & 3) ? *(const uint32_t *)pbHit
+ : RT_MAKE_U32_FROM_U8(pbHit[0], pbHit[1], pbHit[2], pbHit[3]);
+ if (uFound == uNeedle)
+ return pbHit;
+ cbHaystack -= (uintptr_t)pbHit - (uintptr_t)pbHaystack + 1;
+ pbHaystack = pbHit + 1;
+ }
+ else
+ break;
+ }
+ return NULL;
+}
+
+
+static DECLCALLBACK(uint8_t const *) pgmR3DbgFixedMemScan8Wide1Step(uint8_t const *pbHaystack, uint32_t cbHaystack,
+ uint8_t const *pbNeedle, size_t cbNeedle)
+{
+ Assert(cbNeedle == 8); RT_NOREF(cbNeedle);
+ uint64_t const uNeedle = *(uint64_t const *)pbNeedle;
+ while (cbHaystack >= sizeof(uint64_t))
+ {
+ uint8_t const *pbHit = (uint8_t const *)memchr(pbHaystack, (uint8_t)uNeedle, cbHaystack - sizeof(uint64_t) + 1);
+ if (pbHit)
+ {
+ uint64_t const uFound = !((uintptr_t)pbHit & 7) ? *(const uint64_t *)pbHit
+ : RT_MAKE_U64_FROM_U8(pbHit[0], pbHit[1], pbHit[2], pbHit[3],
+ pbHit[4], pbHit[5], pbHit[6], pbHit[7]);
+ if (uFound == uNeedle)
+ return pbHit;
+ cbHaystack -= (uintptr_t)pbHit - (uintptr_t)pbHaystack + 1;
+ pbHaystack = pbHit + 1;
+ }
+ else
+ break;
+ }
+ return NULL;
+}
+
+#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
+
+
+/**
+ * memchr() with alignment considerations.
+ *
+ * @returns Pointer to matching byte, NULL if none found.
+ * @param pb Where to search. Aligned.
+ * @param b What to search for.
+ * @param cb How much to search.
+ * @param uAlign The alignment restriction of the result.
+ */
+static const uint8_t *pgmR3DbgAlignedMemChr(const uint8_t *pb, uint8_t b, size_t cb, uint32_t uAlign)
+{
+ const uint8_t *pbRet;
+ if (uAlign <= 32)
+ {
+ pbRet = (const uint8_t *)memchr(pb, b, cb);
+ if ((uintptr_t)pbRet & (uAlign - 1))
+ {
+ do
+ {
+ pbRet++;
+ size_t cbLeft = cb - (pbRet - pb);
+ if (!cbLeft)
+ {
+ pbRet = NULL;
+ break;
+ }
+ pbRet = (const uint8_t *)memchr(pbRet, b, cbLeft);
+ } while ((uintptr_t)pbRet & (uAlign - 1));
+ }
+ }
+ else
+ {
+ pbRet = NULL;
+ if (cb)
+ {
+ for (;;)
+ {
+ if (*pb == b)
+ {
+ pbRet = pb;
+ break;
+ }
+ if (cb <= uAlign)
+ break;
+ cb -= uAlign;
+ pb += uAlign;
+ }
+ }
+ }
+ return pbRet;
+}
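+
+/* Design note on the above: for alignments up to 32 bytes memchr does the
+ * scanning and misaligned hits are skipped afterwards, while for larger
+ * alignments only every uAlign'th byte can match anyway, so those bytes are
+ * probed directly. */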
+
+
+/**
+ * Scans a page for a byte string, keeping track of potential
+ * cross page matches.
+ *
+ * @returns true and *poff on match.
+ * false on mismatch.
+ * @param pbPage Pointer to the current page.
+ * @param poff Input: The offset into the page (aligned).
+ * Output: The page offset of the match on success.
+ * @param cb The number of bytes to search, starting at *poff.
+ * @param uAlign The needle alignment. This is of course less than a page.
+ * @param pabNeedle The byte string to search for.
+ * @param cbNeedle The length of the byte string.
+ * @param pfnFixedMemScan Pointer to assembly scan function, if available for
+ * the given needle and alignment combination.
+ * @param pabPrev The buffer that keeps track of a partial match that we
+ * bring over from the previous page. This buffer must be
+ * at least cbNeedle - 1 big.
+ * @param pcbPrev Input: The number of partial matching bytes from the previous page.
+ * Output: The number of partial matching bytes from this page.
+ * Initialize to 0 before the first call to this function.
+ */
+static bool pgmR3DbgScanPage(const uint8_t *pbPage, int32_t *poff, uint32_t cb, uint32_t uAlign,
+ const uint8_t *pabNeedle, size_t cbNeedle, PFNPGMR3DBGFIXEDMEMSCAN pfnFixedMemScan,
+ uint8_t *pabPrev, size_t *pcbPrev)
+{
+ /*
+ * Try to complete any partial match from the previous page.
+ */
+ if (*pcbPrev > 0)
+ {
+ size_t cbPrev = *pcbPrev;
+ Assert(!*poff);
+ Assert(cbPrev < cbNeedle);
+ if (!memcmp(pbPage, pabNeedle + cbPrev, cbNeedle - cbPrev))
+ {
+ if (cbNeedle - cbPrev > cb)
+ return false;
+ *poff = -(int32_t)cbPrev;
+ return true;
+ }
+
+ /* check out the remainder of the previous page. */
+ const uint8_t *pb = pabPrev;
+ for (;;)
+ {
+ if (cbPrev <= uAlign)
+ break;
+ cbPrev -= uAlign;
+ pb = pgmR3DbgAlignedMemChr(pb + uAlign, *pabNeedle, cbPrev, uAlign);
+ if (!pb)
+ break;
+ cbPrev = *pcbPrev - (pb - pabPrev);
+ if ( !memcmp(pb + 1, &pabNeedle[1], cbPrev - 1)
+ && !memcmp(pbPage, pabNeedle + cbPrev, cbNeedle - cbPrev))
+ {
+ if (cbNeedle - cbPrev > cb)
+ return false;
+ *poff = -(int32_t)cbPrev;
+ return true;
+ }
+ }
+
+ *pcbPrev = 0;
+ }
+
+ /*
+ * Match the body of the page.
+ */
+ const uint8_t *pb = pbPage + *poff;
+ const uint8_t * const pbEnd = pb + cb;
+ for (;;)
+ {
+ AssertMsg(((uintptr_t)pb & (uAlign - 1)) == 0, ("%#p %#x\n", pb, uAlign));
+ if (pfnFixedMemScan)
+ pb = pfnFixedMemScan(pb, cb, pabNeedle, cbNeedle);
+ else
+ pb = pgmR3DbgAlignedMemChr(pb, *pabNeedle, cb, uAlign);
+ if (!pb)
+ break;
+ cb = pbEnd - pb;
+ if (cb >= cbNeedle)
+ {
+ /* match? */
+ if (!memcmp(pb + 1, &pabNeedle[1], cbNeedle - 1))
+ {
+ *poff = pb - pbPage;
+ return true;
+ }
+ }
+ else
+ {
+ /* partial match at the end of the page? */
+ if (!memcmp(pb + 1, &pabNeedle[1], cb - 1))
+ {
+ /* We're copying one byte more than we really need here, but wtf. */
+ memcpy(pabPrev, pb, cb);
+ *pcbPrev = cb;
+ return false;
+ }
+ }
+
+ /* no match, skip ahead. */
+ if (cb <= uAlign)
+ break;
+ pb += uAlign;
+ cb -= uAlign;
+ }
+
+ return false;
+}
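+
+/* Worked example of the cross-page handshake (hypothetical data): with the
+ * 4 byte needle "ABCD", a page ending in "AB" makes this function stash those
+ * two bytes in pabPrev and return false with *pcbPrev = 2. If the next page
+ * then starts with "CD", the following call returns true with *poff = -2,
+ * i.e. the match began two bytes before the current page. */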
+
+
+static PFNPGMR3DBGFIXEDMEMSCAN pgmR3DbgSelectMemScanFunction(uint32_t GCPhysAlign, size_t cbNeedle)
+{
+ switch (GCPhysAlign)
+ {
+ case 1:
+ if (cbNeedle >= 8)
+ return pgmR3DbgFixedMemScan8Wide1Step;
+ if (cbNeedle >= 4)
+ return pgmR3DbgFixedMemScan4Wide1Step;
+ return pgmR3DbgFixedMemScan1Wide1Step;
+ case 2:
+ if (cbNeedle >= 2)
+ return pgmR3DbgFixedMemScan2Wide2Step;
+ break;
+ case 4:
+ if (cbNeedle >= 4)
+ return pgmR3DbgFixedMemScan4Wide4Step;
+ break;
+ case 8:
+ if (cbNeedle >= 8)
+ return pgmR3DbgFixedMemScan8Wide8Step;
+ break;
+ }
+ return NULL;
+}
+
+
+
+/**
+ * Scans guest physical memory for a byte string.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS and *pGCPhysHit on success.
+ * @retval VERR_DBGF_MEM_NOT_FOUND if not found.
+ * @retval VERR_INVALID_POINTER if any of the pointer arguments are invalid.
+ * @retval VERR_INVALID_PARAMETER if any other arguments are invalid.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Where to start searching.
+ * @param cbRange The number of bytes to search.
+ * @param GCPhysAlign The alignment of the needle. Must be a power of two
+ * and less or equal to 4GB.
+ * @param pabNeedle The byte string to search for.
+ * @param cbNeedle The length of the byte string. Max 256 bytes.
+ * @param pGCPhysHit Where to store the address of the first occurrence on success.
+ */
+VMMR3_INT_DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange, RTGCPHYS GCPhysAlign,
+ const uint8_t *pabNeedle, size_t cbNeedle, PRTGCPHYS pGCPhysHit)
+{
+ /*
+ * Validate and adjust the input a bit.
+ */
+ if (!RT_VALID_PTR(pGCPhysHit))
+ return VERR_INVALID_POINTER;
+ *pGCPhysHit = NIL_RTGCPHYS;
+
+ if ( !RT_VALID_PTR(pabNeedle)
+ || GCPhys == NIL_RTGCPHYS)
+ return VERR_INVALID_POINTER;
+ if (!cbNeedle)
+ return VERR_INVALID_PARAMETER;
+ if (cbNeedle > MAX_NEEDLE_SIZE)
+ return VERR_INVALID_PARAMETER;
+
+ if (!cbRange)
+ return VERR_DBGF_MEM_NOT_FOUND;
+ if (GCPhys + cbNeedle - 1 < GCPhys)
+ return VERR_DBGF_MEM_NOT_FOUND;
+
+ if (!GCPhysAlign)
+ return VERR_INVALID_PARAMETER;
+ if (GCPhysAlign > UINT32_MAX)
+ return VERR_NOT_POWER_OF_TWO;
+ if (GCPhysAlign & (GCPhysAlign - 1))
+ return VERR_INVALID_PARAMETER;
+
+ if (GCPhys & (GCPhysAlign - 1))
+ {
+ RTGCPHYS Adj = GCPhysAlign - (GCPhys & (GCPhysAlign - 1));
+ if ( cbRange <= Adj
+ || GCPhys + Adj < GCPhys)
+ return VERR_DBGF_MEM_NOT_FOUND;
+ GCPhys += Adj;
+ cbRange -= Adj;
+ }
+
+ const bool fAllZero = ASMMemIsZero(pabNeedle, cbNeedle);
+ const uint32_t cIncPages = GCPhysAlign <= GUEST_PAGE_SIZE
+ ? 1
+ : GCPhysAlign >> GUEST_PAGE_SHIFT;
+ const RTGCPHYS GCPhysLast = GCPhys + cbRange - 1 >= GCPhys
+ ? GCPhys + cbRange - 1
+ : ~(RTGCPHYS)0;
+
+ PFNPGMR3DBGFIXEDMEMSCAN pfnMemScan = pgmR3DbgSelectMemScanFunction((uint32_t)GCPhysAlign, cbNeedle);
+
+ /*
+ * Search the memory - ignore MMIO and zero pages, also don't
+ * bother to match across ranges.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
+ pRam;
+ pRam = pRam->CTX_SUFF(pNext))
+ {
+ /*
+ * If the search range starts prior to the current ram range record,
+ * adjust the search range and possibly conclude the search.
+ */
+ RTGCPHYS off;
+ if (GCPhys < pRam->GCPhys)
+ {
+ if (GCPhysLast < pRam->GCPhys)
+ break;
+ GCPhys = pRam->GCPhys;
+ off = 0;
+ }
+ else
+ off = GCPhys - pRam->GCPhys;
+ if (off < pRam->cb)
+ {
+ /*
+ * Iterate the relevant pages.
+ */
+ uint8_t abPrev[MAX_NEEDLE_SIZE];
+ size_t cbPrev = 0;
+ const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPage = off >> GUEST_PAGE_SHIFT;
+ uint32_t offPage = GCPhys & GUEST_PAGE_OFFSET_MASK;
+ GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
+ for (;; offPage = 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ if ( ( !PGM_PAGE_IS_ZERO(pPage)
+ || fAllZero)
+ && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ void const *pvPage;
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
+ if (RT_SUCCESS(rc))
+ {
+ int32_t offHit = offPage;
+ bool fRc;
+ if (GCPhysAlign < GUEST_PAGE_SIZE)
+ {
+ uint32_t cbSearch = (GCPhys ^ GCPhysLast) & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
+ ? GUEST_PAGE_SIZE - (uint32_t)offPage
+ : (GCPhysLast & GUEST_PAGE_OFFSET_MASK) + 1 - (uint32_t)offPage;
+ fRc = pgmR3DbgScanPage((uint8_t const *)pvPage, &offHit, cbSearch, (uint32_t)GCPhysAlign,
+ pabNeedle, cbNeedle, pfnMemScan, &abPrev[0], &cbPrev);
+ }
+ else
+ fRc = memcmp(pvPage, pabNeedle, cbNeedle) == 0
+ && (GCPhysLast - GCPhys) >= cbNeedle;
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ if (fRc)
+ {
+ *pGCPhysHit = GCPhys + offHit;
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ cbPrev = 0; /* ignore error. */
+ }
+ else
+ cbPrev = 0;
+
+ /* advance to the next page. */
+ GCPhys += (RTGCPHYS)cIncPages << GUEST_PAGE_SHIFT;
+ if (GCPhys >= GCPhysLast) /* (may not always hit, but then we've run out of ranges.) */
+ {
+ PGM_UNLOCK(pVM);
+ return VERR_DBGF_MEM_NOT_FOUND;
+ }
+ iPage += cIncPages;
+ if ( iPage < cIncPages
+ || iPage >= cPages)
+ break;
+ }
+ }
+ }
+ PGM_UNLOCK(pVM);
+ return VERR_DBGF_MEM_NOT_FOUND;
+}
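+
+/* Usage sketch (hypothetical caller, e.g. a debugger search command worker;
+ * the needle, range and alignment below are illustrative only):
+ *      static const uint8_t s_abNeedle[] = { 'V', 'B', 'o', 'x' };
+ *      RTGCPHYS GCPhysHit;
+ *      int rc = PGMR3DbgScanPhysical(pVM, 0, _4G, 1, s_abNeedle,
+ *                                    sizeof(s_abNeedle), &GCPhysHit);
+ * Here 0 is the start address, _4G the byte range and 1 the needle alignment;
+ * rc == VINF_SUCCESS means GCPhysHit holds the address of the first match. */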
+
+
+/**
+ * Scans (guest) virtual memory for a byte string.
+ *
+ * @returns VBox status codes:
+ * @retval VINF_SUCCESS and *pGCPtrHit on success.
+ * @retval VERR_DBGF_MEM_NOT_FOUND if not found.
+ * @retval VERR_INVALID_POINTER if any of the pointer arguments are invalid.
+ * @retval VERR_INVALID_PARAMETER if any other arguments are invalid.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the CPU
+ * context to search from.
+ * @param GCPtr Where to start searching.
+ * @param cbRange The number of bytes to search.
+ * @param GCPtrAlign The alignment of the needle. Must be a power of two
+ * and less or equal to 4GB.
+ * @param pabNeedle The byte string to search for.
+ * @param cbNeedle The length of the byte string. Max 256 bytes.
+ * @param pGCPtrHit Where to store the address of the first occurrence on success.
+ */
+VMMR3_INT_DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR cbRange, RTGCPTR GCPtrAlign,
+ const uint8_t *pabNeedle, size_t cbNeedle, PRTGCUINTPTR pGCPtrHit)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * Validate and adjust the input a bit.
+ */
+ if (!RT_VALID_PTR(pGCPtrHit))
+ return VERR_INVALID_POINTER;
+ *pGCPtrHit = 0;
+
+ if (!RT_VALID_PTR(pabNeedle))
+ return VERR_INVALID_POINTER;
+ if (!cbNeedle)
+ return VERR_INVALID_PARAMETER;
+ if (cbNeedle > MAX_NEEDLE_SIZE)
+ return VERR_INVALID_PARAMETER;
+
+ if (!cbRange)
+ return VERR_DBGF_MEM_NOT_FOUND;
+ if (GCPtr + cbNeedle - 1 < GCPtr)
+ return VERR_DBGF_MEM_NOT_FOUND;
+
+ if (!GCPtrAlign)
+ return VERR_INVALID_PARAMETER;
+ if (GCPtrAlign > UINT32_MAX)
+ return VERR_NOT_POWER_OF_TWO;
+ if (GCPtrAlign & (GCPtrAlign - 1))
+ return VERR_INVALID_PARAMETER;
+
+ if (GCPtr & (GCPtrAlign - 1))
+ {
+ RTGCPTR Adj = GCPtrAlign - (GCPtr & (GCPtrAlign - 1));
+ if ( cbRange <= Adj
+ || GCPtr + Adj < GCPtr)
+ return VERR_DBGF_MEM_NOT_FOUND;
+ GCPtr += Adj;
+ cbRange -= Adj;
+ }
+
+ /* Only paged protected mode or long mode here, use the physical scan for
+ the other modes. */
+ PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ AssertReturn(PGMMODE_WITH_PAGING(enmMode), VERR_PGM_NOT_USED_IN_MODE);
+
+ /*
+ * Search the memory - ignore MMIO, zero and not-present pages.
+ */
+ const bool fAllZero = ASMMemIsZero(pabNeedle, cbNeedle);
+ RTGCPTR GCPtrMask = PGMMODE_IS_LONG_MODE(enmMode) ? UINT64_MAX : UINT32_MAX;
+ uint8_t abPrev[MAX_NEEDLE_SIZE];
+ size_t cbPrev = 0;
+ const uint32_t cIncPages = GCPtrAlign <= GUEST_PAGE_SIZE
+ ? 1
+ : GCPtrAlign >> GUEST_PAGE_SHIFT;
+ const RTGCPTR GCPtrLast = GCPtr + cbRange - 1 >= GCPtr
+ ? (GCPtr + cbRange - 1) & GCPtrMask
+ : GCPtrMask;
+ RTGCPTR cPages = (((GCPtrLast - GCPtr) + (GCPtr & GUEST_PAGE_OFFSET_MASK)) >> GUEST_PAGE_SHIFT) + 1;
+ uint32_t offPage = GCPtr & GUEST_PAGE_OFFSET_MASK;
+ GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
+
+ PFNPGMR3DBGFIXEDMEMSCAN pfnMemScan = pgmR3DbgSelectMemScanFunction((uint32_t)GCPtrAlign, cbNeedle);
+
+ VMSTATE enmVMState = pVM->enmVMState;
+ uint32_t const cYieldCountDownReload = VMSTATE_IS_RUNNING(enmVMState) ? 4096 : 65536;
+ uint32_t cYieldCountDown = cYieldCountDownReload;
+ RTGCPHYS GCPhysPrev = NIL_RTGCPHYS;
+ bool fFullWalk = true;
+ PGMPTWALK Walk;
+ PGMPTWALKGST WalkGst;
+
+ PGM_LOCK_VOID(pVM);
+ for (;; offPage = 0)
+ {
+ int rc;
+ if (fFullWalk)
+ rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &WalkGst);
+ else
+ rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &WalkGst);
+ if (RT_SUCCESS(rc) && Walk.fSucceeded)
+ {
+ fFullWalk = false;
+
+ /* Skip if same page as previous one (W10 optimization). */
+ if ( Walk.GCPhys != GCPhysPrev
+ || cbPrev != 0)
+ {
+ PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
+ if ( pPage
+ && ( !PGM_PAGE_IS_ZERO(pPage)
+ || fAllZero)
+ && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ GCPhysPrev = Walk.GCPhys;
+ void const *pvPage;
+ PGMPAGEMAPLOCK Lock;
+ rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.GCPhys, &pvPage, &Lock);
+ if (RT_SUCCESS(rc))
+ {
+ int32_t offHit = offPage;
+ bool fRc;
+ if (GCPtrAlign < GUEST_PAGE_SIZE)
+ {
+ uint32_t cbSearch = cPages > 0
+ ? GUEST_PAGE_SIZE - (uint32_t)offPage
+ : (GCPtrLast & GUEST_PAGE_OFFSET_MASK) + 1 - (uint32_t)offPage;
+ fRc = pgmR3DbgScanPage((uint8_t const *)pvPage, &offHit, cbSearch, (uint32_t)GCPtrAlign,
+ pabNeedle, cbNeedle, pfnMemScan, &abPrev[0], &cbPrev);
+ }
+ else
+ fRc = memcmp(pvPage, pabNeedle, cbNeedle) == 0
+ && (GCPtrLast - GCPtr) >= cbNeedle;
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ if (fRc)
+ {
+ *pGCPtrHit = GCPtr + offHit;
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ cbPrev = 0; /* ignore error. */
+ }
+ else
+ cbPrev = 0;
+ }
+ else
+ cbPrev = 0;
+ }
+ else
+ {
+ Assert(WalkGst.enmType != PGMPTWALKGSTTYPE_INVALID);
+ Assert(!Walk.fSucceeded);
+ cbPrev = 0; /* ignore error. */
+
+ /*
+ * Try skip as much as possible. No need to figure out that a PDE
+ * is not present 512 times!
+ */
+ uint64_t cPagesCanSkip;
+ switch (Walk.uLevel)
+ {
+ case 1:
+ /* page level, use cIncPages */
+ cPagesCanSkip = 1;
+ break;
+ case 2:
+ if (WalkGst.enmType == PGMPTWALKGSTTYPE_32BIT)
+ {
+ cPagesCanSkip = X86_PG_ENTRIES - ((GCPtr >> X86_PT_SHIFT) & X86_PT_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PD_SHIFT) - 1)));
+ }
+ else
+ {
+ cPagesCanSkip = X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PD_PAE_SHIFT) - 1)));
+ }
+ break;
+ case 3:
+ cPagesCanSkip = (X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)) * X86_PG_PAE_ENTRIES
+ - ((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PDPT_SHIFT) - 1)));
+ break;
+ case 4:
+ cPagesCanSkip = (X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64))
+ * X86_PG_PAE_ENTRIES * X86_PG_PAE_ENTRIES
+ - ((((GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)) * X86_PG_PAE_ENTRIES)
+ - (( GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PML4_SHIFT) - 1)));
+ break;
+ case 8:
+ /* The CR3 value is bad, forget the whole search. */
+ cPagesCanSkip = cPages;
+ break;
+ default:
+ AssertMsgFailed(("%d\n", Walk.uLevel));
+ cPagesCanSkip = 0;
+ break;
+ }
+ if (cPages <= cPagesCanSkip)
+ break;
+ fFullWalk = true;
+ if (cPagesCanSkip >= cIncPages)
+ {
+ cPages -= cPagesCanSkip;
+ GCPtr += (RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT;
+ continue;
+ }
+ }
+
+ /* advance to the next page. */
+ if (cPages <= cIncPages)
+ break;
+ cPages -= cIncPages;
+ GCPtr += (RTGCPTR)cIncPages << X86_PT_PAE_SHIFT;
+
+ /* Yield the PGM lock every now and then. */
+ if (!--cYieldCountDown)
+ {
+ fFullWalk = PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX);
+ cYieldCountDown = cYieldCountDownReload;
+ }
+ }
+ PGM_UNLOCK(pVM);
+ return VERR_DBGF_MEM_NOT_FOUND;
+}
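+
+/* Note: the VMCPU_ASSERT_EMT at the top means this must be called on the EMT
+ * of pVCpu; a non-EMT caller would typically funnel the call through
+ * VMR3ReqCallWaitU, mirroring the pgmR3CmdAssertCR3EmtWorker pattern in
+ * PGM.cpp. */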
+
+
+/**
+ * Initializes the dumper state.
+ *
+ * @param pState The state to initialize.
+ * @param pVM The cross context VM structure.
+ * @param fFlags The flags.
+ * @param u64FirstAddr The first address.
+ * @param u64LastAddr The last address.
+ * @param pHlp The output helpers.
+ */
+static void pgmR3DumpHierarchyInitState(PPGMR3DUMPHIERARCHYSTATE pState, PVM pVM, uint32_t fFlags,
+ uint64_t u64FirstAddr, uint64_t u64LastAddr, PCDBGFINFOHLP pHlp)
+{
+ pState->pVM = pVM;
+ pState->pHlp = pHlp ? pHlp : DBGFR3InfoLogHlp();
+ pState->fPse = !!(fFlags & (DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME));
+ pState->fPae = !!(fFlags & (DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME));
+ pState->fLme = !!(fFlags & DBGFPGDMP_FLAGS_LME);
+ pState->fNp = !!(fFlags & DBGFPGDMP_FLAGS_NP);
+ pState->fEpt = !!(fFlags & DBGFPGDMP_FLAGS_EPT);
+ pState->fNxe = !!(fFlags & DBGFPGDMP_FLAGS_NXE);
+ pState->cchAddress = pState->fLme || pState->fEpt ? 16 : 8;
+ pState->uLastRsvdBit = pState->fNxe ? 62 : 63;
+ pState->fDumpPageInfo = !!(fFlags & DBGFPGDMP_FLAGS_PAGE_INFO);
+ pState->fPrintHeader = !!(fFlags & DBGFPGDMP_FLAGS_HEADER);
+ pState->fPrintCr3 = !!(fFlags & DBGFPGDMP_FLAGS_PRINT_CR3);
+ pState->afReserved[0] = false;
+ pState->afReserved[1] = false;
+ pState->afReserved[2] = false;
+ pState->afReserved[3] = false;
+ pState->afReserved[4] = false;
+ pState->u64Address = u64FirstAddr;
+ pState->u64FirstAddress = u64FirstAddr;
+ pState->u64LastAddress = u64LastAddr;
+ pState->u64HighReservedBits = pState->uLastRsvdBit == 62 ? UINT64_C(0x7ff) << 52 : UINT64_C(0xfff) << 52;
+ pState->cLeaves = 0;
+}
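+
+/* Illustrative note (not part of the original code): for example, passing
+ * fFlags = DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE through the initializer
+ * above yields fPse=fPae=fLme=true, cchAddress=16, uLastRsvdBit=62 and
+ * u64HighReservedBits = 0x7ff << 52 (bits 62:52 reserved).
+ */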
+
+
+/**
+ * Calculates the base address of the current table and which of its entries
+ * fall within the dump range (the simple way out, too tired to think of a
+ * more elegant solution).
+ *
+ * @returns The base address of this page table/directory/whatever.
+ * @param pState The state where we get the current address.
+ * @param cShift The shift count for the table entries.
+ * @param cEntries The number of table entries.
+ * @param piFirst Where to return the table index of the first
+ * entry to dump.
+ * @param piLast Where to return the table index of the last
+ * entry.
+ */
+static uint64_t pgmR3DumpHierarchyCalcRange(PPGMR3DUMPHIERARCHYSTATE pState, uint32_t cShift, uint32_t cEntries,
+ uint32_t *piFirst, uint32_t *piLast)
+{
+ const uint64_t iBase = (pState->u64Address >> cShift) & ~(uint64_t)(cEntries - 1);
+ const uint64_t iFirst = pState->u64FirstAddress >> cShift;
+ const uint64_t iLast = pState->u64LastAddress >> cShift;
+
+ if ( iBase >= iFirst
+ && iBase + cEntries - 1 <= iLast)
+ {
+ /* full range. */
+ *piFirst = 0;
+ *piLast = cEntries - 1;
+ }
+ else if ( iBase + cEntries - 1 < iFirst
+ || iBase > iLast)
+ {
+ /* no match */
+ *piFirst = cEntries;
+ *piLast = 0;
+ }
+ else
+ {
+ /* partial overlap */
+ *piFirst = iBase <= iFirst
+ ? iFirst - iBase
+ : 0;
+ *piLast = iBase + cEntries - 1 <= iLast
+ ? cEntries - 1
+ : iLast - iBase;
+ }
+
+ return iBase << cShift;
+}
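+
+/* Illustrative note (not part of the original code): a worked example of the
+ * clipping above. For a PAE page table (cShift=12, cEntries=512) with
+ * u64Address=u64FirstAddress=0x1000 and u64LastAddress=0x3fff:
+ *   iBase  = (0x1000 >> 12) & ~511 = 0   (the table spans 0x000000..0x1fffff)
+ *   iFirst = 0x1000 >> 12 = 1,  iLast = 0x3fff >> 12 = 3
+ * which is a partial overlap, so *piFirst=1, *piLast=3 and 0 is returned.
+ */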
+
+
+/**
+ * Maps/finds the shadow page.
+ *
+ * @returns VBox status code.
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the shadow page.
+ * @param pszDesc The description.
+ * @param ppv Where to return the pointer.
+ */
+static int pgmR3DumpHierarchyShwMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, const char *pszDesc, void const **ppv)
+{
+ PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys);
+ if (pPoolPage)
+ {
+ *ppv = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & GUEST_PAGE_OFFSET_MASK);
+ return VINF_SUCCESS;
+ }
+ pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! %s at HCPhys=%RHp was not found in the page pool!\n",
+ pState->cchAddress, pState->u64Address, pszDesc, HCPhys);
+ return VERR_PGM_POOL_GET_PAGE_FAILED;
+}
+
+
+/**
+ * Dumps a shadow page table summary.
+ *
+ * @param pState The dumper state.
+ * @param HCPhys The page address.
+ */
+static void pgmR3DumpHierarchyShwTablePageInfo(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
+{
+ PGM_LOCK_VOID(pState->pVM);
+ char szPage[80];
+ PPGMPOOLPAGE pPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.CTX_SUFF(pPool), HCPhys);
+ if (pPage)
+ RTStrPrintf(szPage, sizeof(szPage), " idx=0i%u", pPage->idx);
+ else
+ strcpy(szPage, " not found");
+ PGM_UNLOCK(pState->pVM);
+ pState->pHlp->pfnPrintf(pState->pHlp, "%s", szPage);
+}
+
+
+/**
+ * Figures out which guest page this is and dumps a summary.
+ *
+ * @param pState The dumper state.
+ * @param HCPhys The page address.
+ * @param cbPage The page size.
+ */
+static void pgmR3DumpHierarchyShwGuestPageInfo(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, uint32_t cbPage)
+{
+ char szPage[80];
+ RTGCPHYS GCPhys;
+ int rc = PGMR3DbgHCPhys2GCPhys(pState->pVM->pUVM, HCPhys, &GCPhys);
+ if (RT_SUCCESS(rc))
+ {
+ PGM_LOCK_VOID(pState->pVM);
+ PCPGMPAGE pPage = pgmPhysGetPage(pState->pVM, GCPhys);
+ if (pPage)
+ RTStrPrintf(szPage, sizeof(szPage), "%R[pgmpage]", pPage);
+ else
+ strcpy(szPage, "not found");
+ PGM_UNLOCK(pState->pVM);
+ pState->pHlp->pfnPrintf(pState->pHlp, " -> %RGp %s", GCPhys, szPage);
+ }
+ else
+ pState->pHlp->pfnPrintf(pState->pHlp, " not found");
+ NOREF(cbPage);
+}
+
+
+/**
+ * Dumps an EPT shadow page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The page table address.
+ */
+static int pgmR3DumpHierarchyShwEptPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
+{
+ PCEPTPT pPT = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "EPT level 1", (void const **)&pPT);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ PVM const pVM = pState->pVM;
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PT_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPT->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PT_SHIFT);
+ if ( (u & (EPT_E_WRITE | EPT_E_MEMTYPE_MASK | EPT_E_READ | EPT_E_EXECUTE))
+ != (EPT_E_WRITE | EPT_E_MEMTYPE_INVALID_3)
+ || (u & EPT_E_PG_MASK) != pVM->pgm.s.HCPhysInvMmioPg)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 1 | %c%c%c %s %c L %c %c %c %c %c %c %c 4K %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[1][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? 'I' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, u & EPT_E_PG_MASK, _4K);
+ //if ((u >> 52) & 0x7ff)
+ // pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx%s", (u >> 52) & 0x7ff, pState->fLme ? "" : "!");
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ }
+ else
+ {
+ const char *pszDesc = "???";
+ PGM_LOCK_VOID(pVM);
+ PPGMPHYSHANDLER pHandler;
+ int rc3 = pgmHandlerPhysicalLookup(pVM, u64BaseAddress, &pHandler);
+ if (RT_SUCCESS(rc3))
+ pszDesc = pHandler->pszDesc;
+ PGM_UNLOCK(pVM);
+
+ pState->pHlp->pfnPrintf(pState->pHlp, "%016llx 1 | invalid / MMIO optimization (%s)\n",
+ pState->u64Address, pszDesc);
+ }
+ pState->cLeaves++;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps an EPT shadow page directory table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwEptPD(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPD pPD = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "EPT level 2", (void const **)&pPD);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PD_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPD->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PD_SHIFT);
+ if (u & EPT_E_LEAF)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 2 | %c%c%c %s %c L %c %c %c %c %c %c %c 2M %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[1][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? 'I' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, u & EPT_PDE2M_PG_MASK, _2M);
+ //if ((u >> 52) & 0x7ff)
+ // pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx%s", (u >> 52) & 0x7ff, pState->fLme ? "" : "!");
+ if (u & EPT_PDE2M_MBZ_MASK)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 20:12=%02llx!", (u >> 12) & 0x1ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 2 | %c%c%c %s %c - %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[0][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, u & EPT_E_PG_MASK);
+ //if ((u >> 52) & 0x7ff)
+ // pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx!", (u >> 52) & 0x7ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwEptPT(pState, u & EPT_E_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Dumps an EPT shadow page directory pointer table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory pointer table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwEptPDPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPDPT pPDPT = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "EPT level 3", (void const **)&pPDPT);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PDPT_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPDPT->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PDPT_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 3 | %c%c%c %s %c %c %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[!!(u & EPT_E_LEAF)][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_LEAF ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, u & EPT_E_PG_MASK);
+ //if ((u >> 52) & 0x7ff)
+ // pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx", (u >> 52) & 0x7ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwEptPD(pState, u & EPT_E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Dumps an EPT shadow PML4 table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwEptPML4(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPML4 pPML4 = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "EPT level 4", (void const **)&pPML4);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth);
+ cMaxDepth--;
+
+ uint32_t iFirst = (pState->u64FirstAddress >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+ uint32_t iLast = (pState->u64LastAddress >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPML4->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = (uint64_t)i << X86_PML4_SHIFT;
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 4 | %c%c%c %s %c %c %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[!!(u & EPT_E_LEAF)][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_LEAF ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, u & EPT_E_PG_MASK);
+ //if ((u >> 52) & 0x7ff)
+ // pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx!", (u >> 52) & 0x7ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwEptPDPT(pState, u & EPT_E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Dumps a PAE shadow page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The page table address.
+ */
+static int pgmR3DumpHierarchyShwPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
+{
+ PCPGMSHWPTPAE pPT = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PT_PAE_SHIFT, X86_PG_PAE_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ if (PGMSHWPTEPAE_GET_U(pPT->a[i]) & X86_PTE_P)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PT_PAE_SHIFT);
+ if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
+ {
+ X86PTEPAE Pte;
+ Pte.u = PGMSHWPTEPAE_GET_U(pPT->a[i]);
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 4M a p ? */
+ ? "%016llx 1 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx"
+ : "%08llx 1 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx",
+ pState->u64Address,
+ Pte.n.u1Write ? 'W' : 'R',
+ Pte.n.u1User ? 'U' : 'S',
+ Pte.n.u1Accessed ? 'A' : '-',
+ Pte.n.u1Dirty ? 'D' : '-',
+ Pte.n.u1Global ? 'G' : '-',
+ Pte.n.u1WriteThru ? "WT" : "--",
+ Pte.n.u1CacheDisable? "CD" : "--",
+ Pte.n.u1PAT ? "AT" : "--",
+ Pte.n.u1NoExecute ? "NX" : "--",
+ Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
+ Pte.u & RT_BIT(10) ? '1' : '0',
+ Pte.u & PGM_PTFLAGS_CSAM_VALIDATED? 'v' : '-',
+ Pte.u & X86_PTE_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, Pte.u & X86_PTE_PAE_PG_MASK, _4K);
+ if ((Pte.u >> 52) & 0x7ff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx%s", (Pte.u >> 52) & 0x7ff, pState->fLme ? "" : "!");
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ }
+ else if ( (PGMSHWPTEPAE_GET_U(pPT->a[i]) & (pState->pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX))
+ == (pState->pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX))
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme
+ ? "%016llx 1 | invalid / MMIO optimization\n"
+ : "%08llx 1 | invalid / MMIO optimization\n",
+ pState->u64Address);
+ else
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme
+ ? "%016llx 1 | invalid: %RX64\n"
+ : "%08llx 1 | invalid: %RX64\n",
+ pState->u64Address, PGMSHWPTEPAE_GET_U(pPT->a[i]));
+ pState->cLeaves++;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps a PAE shadow page directory table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwPaePD(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCX86PDPAE pPD = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_PAE_SHIFT, X86_PG_PAE_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDEPAE Pde = pPD->a[i];
+ if (Pde.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PD_PAE_SHIFT);
+ if (Pde.b.u1Size)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 2M a p ? phys*/
+ ? "%016llx 2 | P %c %c %c %c %c %s %s %s %s 2M %c%c%c %016llx"
+ : "%08llx 2 | P %c %c %c %c %c %s %s %s %s 2M %c%c%c %016llx",
+ pState->u64Address,
+ Pde.b.u1Write ? 'W' : 'R',
+ Pde.b.u1User ? 'U' : 'S',
+ Pde.b.u1Accessed ? 'A' : '-',
+ Pde.b.u1Dirty ? 'D' : '-',
+ Pde.b.u1Global ? 'G' : '-',
+ Pde.b.u1WriteThru ? "WT" : "--",
+ Pde.b.u1CacheDisable? "CD" : "--",
+ Pde.b.u1PAT ? "AT" : "--",
+ Pde.b.u1NoExecute ? "NX" : "--",
+ Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-',
+ '-',
+ Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
+ Pde.u & X86_PDE2M_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, Pde.u & X86_PDE2M_PAE_PG_MASK, _2M);
+ if ((Pde.u >> 52) & 0x7ff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx%s", (Pde.u >> 52) & 0x7ff, pState->fLme ? "" : "!");
+ if ((Pde.u >> 13) & 0xff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 20:13=%02llx%s", (Pde.u >> 13) & 0x0ff, pState->fLme ? "" : "!");
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 4M a p ? phys */
+ ? "%016llx 2 | P %c %c %c %c %c %s %s .. %s .. %c%c%c %016llx"
+ : "%08llx 2 | P %c %c %c %c %c %s %s .. %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pde.n.u1Write ? 'W' : 'R',
+ Pde.n.u1User ? 'U' : 'S',
+ Pde.n.u1Accessed ? 'A' : '-',
+ Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
+ Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
+ Pde.n.u1WriteThru ? "WT" : "--",
+ Pde.n.u1CacheDisable? "CD" : "--",
+ Pde.n.u1NoExecute ? "NX" : "--",
+ Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-',
+ '-',
+ Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
+ Pde.u & X86_PDE_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, Pde.u & X86_PDE_PAE_PG_MASK);
+ if ((Pde.u >> 52) & 0x7ff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx!", (Pde.u >> 52) & 0x7ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Dumps a PAE shadow page directory pointer table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory pointer table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwPaePDPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ /* Fend off addresses that are out of range in PAE mode - simplifies the code below. */
+ if (!pState->fLme && pState->u64Address >= _4G)
+ return VINF_SUCCESS;
+
+ PCX86PDPT pPDPT = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory pointer table", (void const **)&pPDPT);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PDPT_SHIFT,
+ pState->fLme ? X86_PG_AMD64_PDPE_ENTRIES : X86_PG_PAE_PDPE_ENTRIES,
+ &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDPE Pdpe = pPDPT->a[i];
+ if (Pdpe.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PDPT_SHIFT);
+ if (pState->fLme)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /*P R S A D G WT CD AT NX .. a p ? */
+ "%016llx 3 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pdpe.lm.u1Write ? 'W' : 'R',
+ Pdpe.lm.u1User ? 'U' : 'S',
+ Pdpe.lm.u1Accessed ? 'A' : '-',
+ Pdpe.lm.u3Reserved & 1? '?' : '.', /* ignored */
+ Pdpe.lm.u3Reserved & 4? '!' : '.', /* mbz */
+ Pdpe.lm.u1WriteThru ? "WT" : "--",
+ Pdpe.lm.u1CacheDisable? "CD" : "--",
+ Pdpe.lm.u3Reserved & 2? "!" : "..",/* mbz */
+ Pdpe.lm.u1NoExecute ? "NX" : "--",
+ Pdpe.u & RT_BIT(9) ? '1' : '0',
+ Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
+ Pdpe.u & RT_BIT(11) ? '1' : '0',
+ Pdpe.u & X86_PDPE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, Pdpe.u & X86_PDPE_PG_MASK);
+ if ((Pdpe.u >> 52) & 0x7ff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx", (Pdpe.u >> 52) & 0x7ff);
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX .. a p ? */
+ "%08llx 3 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pdpe.n.u2Reserved & 1? '!' : '.', /* mbz */
+ Pdpe.n.u2Reserved & 2? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 1? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 2? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 8? '!' : '.', /* mbz */
+ Pdpe.n.u1WriteThru ? "WT" : "--",
+ Pdpe.n.u1CacheDisable? "CD" : "--",
+ Pdpe.n.u4Reserved & 2? "!" : "..",/* mbz */
+ Pdpe.lm.u1NoExecute ? "!!" : "..",/* mbz */
+ Pdpe.u & RT_BIT(9) ? '1' : '0',
+ Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
+ Pdpe.u & RT_BIT(11) ? '1' : '0',
+ Pdpe.u & X86_PDPE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, Pdpe.u & X86_PDPE_PG_MASK);
+ if ((Pdpe.u >> 52) & 0xfff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 63:52=%03llx!", (Pdpe.u >> 52) & 0xfff);
+ }
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwPaePD(pState, Pdpe.u & X86_PDPE_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * Dumps a 64-bit shadow PML4 table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShwPaePML4(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCX86PML4 pPML4 = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page map level 4", (void const **)&pPML4);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth);
+ cMaxDepth--;
+
+ /*
+ * This is a bit tricky as we're working on unsigned addresses while the
+ * AMD64 spec uses signed tricks.
+ */
+ uint32_t iFirst = (pState->u64FirstAddress >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ uint32_t iLast = (pState->u64LastAddress >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ if ( pState->u64LastAddress <= UINT64_C(0x00007fffffffffff)
+ || pState->u64FirstAddress >= UINT64_C(0xffff800000000000))
+ { /* Simple, nothing to adjust */ }
+ else if (pState->u64FirstAddress <= UINT64_C(0x00007fffffffffff))
+ iLast = X86_PG_AMD64_ENTRIES / 2 - 1;
+ else if (pState->u64LastAddress >= UINT64_C(0xffff800000000000))
+ iFirst = X86_PG_AMD64_ENTRIES / 2;
+ else
+ iFirst = X86_PG_AMD64_ENTRIES; /* neither address is canonical */
+
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PML4E Pml4e = pPML4->a[i];
+ if (Pml4e.n.u1Present)
+ {
+ pState->u64Address = ((uint64_t)i << X86_PML4_SHIFT)
+ | (i >= RT_ELEMENTS(pPML4->a) / 2 ? UINT64_C(0xffff000000000000) : 0);
+ pState->pHlp->pfnPrintf(pState->pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
+ "%016llx 4 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pml4e.n.u1Write ? 'W' : 'R',
+ Pml4e.n.u1User ? 'U' : 'S',
+ Pml4e.n.u1Accessed ? 'A' : '-',
+ Pml4e.n.u3Reserved & 1? '?' : '.', /* ignored */
+ Pml4e.n.u3Reserved & 4? '!' : '.', /* mbz */
+ Pml4e.n.u1WriteThru ? "WT" : "--",
+ Pml4e.n.u1CacheDisable? "CD" : "--",
+ Pml4e.n.u3Reserved & 2? "!" : "..",/* mbz */
+ Pml4e.n.u1NoExecute ? "NX" : "--",
+ Pml4e.u & RT_BIT(9) ? '1' : '0',
+ Pml4e.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
+ Pml4e.u & RT_BIT(11) ? '1' : '0',
+ Pml4e.u & X86_PML4E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, Pml4e.u & X86_PML4E_PG_MASK);
+ if ((Pml4e.u >> 52) & 0x7ff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 62:52=%03llx!", (Pml4e.u >> 52) & 0x7ff);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShwPaePDPT(pState, Pml4e.u & X86_PML4E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ return rc;
+}
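+
+/* Illustrative note (not part of the original code): the canonical-address
+ * clipping above in numbers. For u64FirstAddress=0xffff800000000000 and
+ * u64LastAddress=UINT64_MAX the indices come straight from the shifts:
+ *   iFirst = (0xffff800000000000 >> 39) & 511 = 256,  iLast = 511,
+ * i.e. exactly the upper half of the PML4. A range starting in the lower
+ * canonical half but ending above it is clipped to iLast=255, so the
+ * non-canonical hole is never walked.
+ */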
+
+
+/**
+ * Dumps a 32-bit shadow page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the table.
+ */
+static int pgmR3DumpHierarchyShw32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
+{
+ PCX86PT pPT = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PT_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PTE Pte = pPT->a[i];
+ if (Pte.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + (i << X86_PT_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d */
+ "%08llx 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x",
+ pState->u64Address,
+ Pte.n.u1Write ? 'W' : 'R',
+ Pte.n.u1User ? 'U' : 'S',
+ Pte.n.u1Accessed ? 'A' : '-',
+ Pte.n.u1Dirty ? 'D' : '-',
+ Pte.n.u1Global ? 'G' : '-',
+ Pte.n.u1WriteThru ? "WT" : "--",
+ Pte.n.u1CacheDisable? "CD" : "--",
+ Pte.n.u1PAT ? "AT" : "--",
+ Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
+ Pte.u & RT_BIT(10) ? '1' : '0',
+ Pte.u & PGM_PTFLAGS_CSAM_VALIDATED ? 'v' : '-',
+ Pte.u & X86_PDE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, Pte.u & X86_PDE_PG_MASK, _4K);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps a 32-bit shadow page directory and page tables.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyShw32BitPD(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ if (pState->u64Address >= _4G)
+ return VINF_SUCCESS;
+
+ PCX86PD pPD = NULL;
+ int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDE Pde = pPD->a[i];
+ if (Pde.n.u1Present)
+ {
+ pState->u64Address = (uint32_t)i << X86_PD_SHIFT;
+ if (Pde.b.u1Size && pState->fPse)
+ {
+ uint64_t u64Phys = ((uint64_t)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT)
+ | (Pde.u & X86_PDE4M_PG_MASK);
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d phys */
+ "%08llx 2 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08llx",
+ pState->u64Address,
+ Pde.b.u1Write ? 'W' : 'R',
+ Pde.b.u1User ? 'U' : 'S',
+ Pde.b.u1Accessed ? 'A' : '-',
+ Pde.b.u1Dirty ? 'D' : '-',
+ Pde.b.u1Global ? 'G' : '-',
+ Pde.b.u1WriteThru ? "WT" : "--",
+ Pde.b.u1CacheDisable? "CD" : "--",
+ Pde.b.u1PAT ? "AT" : "--",
+ Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-',
+ '-',
+ Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
+ u64Phys);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwGuestPageInfo(pState, u64Phys, _4M);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d phys */
+ "%08llx 2 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x",
+ pState->u64Address,
+ Pde.n.u1Write ? 'W' : 'R',
+ Pde.n.u1User ? 'U' : 'S',
+ Pde.n.u1Accessed ? 'A' : '-',
+ Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
+ Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
+ Pde.n.u1WriteThru ? "WT" : "--",
+ Pde.n.u1CacheDisable? "CD" : "--",
+ Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-',
+ '-',
+ Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
+ Pde.u & X86_PDE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, Pde.u & X86_PDE_PG_MASK);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyShw32BitPT(pState, Pde.u & X86_PDE_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Internal worker that initiates the actual dump.
+ *
+ * @returns VBox status code.
+ * @param pState The dumper state.
+ * @param cr3 The CR3 value.
+ * @param cMaxDepth The max depth.
+ */
+static int pgmR3DumpHierarchyShwDoIt(PPGMR3DUMPHIERARCHYSTATE pState, uint64_t cr3, unsigned cMaxDepth)
+{
+ int rc;
+ unsigned const cch = pState->cchAddress;
+ uint64_t const cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK /** @todo this should be X86_CR3_EPT_PAGE_MASK */
+ : pState->fLme ? X86_CR3_AMD64_PAGE_MASK
+ : pState->fPae ? X86_CR3_PAE_PAGE_MASK
+ : X86_CR3_PAGE_MASK;
+ if (pState->fPrintCr3)
+ {
+ const char * const pszMode = pState->fEpt ? "Extended Page Tables"
+ : pState->fLme ? "Long Mode"
+ : pState->fPae ? "PAE Mode"
+ : pState->fPse ? "32-bit w/ PSE"
+ : "32-bit";
+ pState->pHlp->pfnPrintf(pState->pHlp, "cr3=%0*llx", cch, cr3);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyShwTablePageInfo(pState, cr3 & X86_CR3_AMD64_PAGE_MASK);
+ pState->pHlp->pfnPrintf(pState->pHlp, " %s%s%s\n",
+ pszMode,
+ pState->fNp ? " + Nested Paging" : "",
+ pState->fNxe ? " + NX" : "");
+ }
+
+
+ if (pState->fEpt)
+ {
+ if (pState->fPrintHeader)
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ "%-*s R - Readable\n"
+ "%-*s |W - Writeable\n"
+ "%-*s ||X - Executable\n"
+ "%-*s ||| EMT - EPT memory type\n"
+ "%-*s ||| | I - Ignored PAT?\n"
+ "%-*s ||| | | L - leaf\n"
+ "%-*s ||| | | | A - accessed\n"
+ "%-*s ||| | | | | D - dirty\n"
+ "%-*s ||| | | | | | U - user execute\n"
+ "%-*s ||| | | | | | | w - Paging writable\n"
+ "%-*s ||| | | | | | | | k - Supervisor shadow stack writable\n"
+ "%-*s ||| | | | | | | | | v - Suppress #VE\n"
+ "%-*s Level ||| | | | | | | | | | page\n"
+ /* xxxx n **** RWX MT I L A D U w k v 4K xxxxxxxxxxxxx
+ RWX 7 - - - - - - - - 0123456701234567 */
+ ,
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
+ cch, "", cch, "", cch, "", cch, "", cch, "Address");
+ /** @todo assumes 4-level EPT tables for now. */
+ rc = pgmR3DumpHierarchyShwEptPML4(pState, cr3 & cr3Mask, cMaxDepth);
+ }
+ else
+ {
+ if (pState->fPrintHeader)
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ "%-*s P - Present\n"
+ "%-*s | R/W - Read (0) / Write (1)\n"
+ "%-*s | | U/S - User (1) / Supervisor (0)\n"
+ "%-*s | | | A - Accessed\n"
+ "%-*s | | | | D - Dirty\n"
+ "%-*s | | | | | G - Global\n"
+ "%-*s | | | | | | WT - Write thru\n"
+ "%-*s | | | | | | | CD - Cache disable\n"
+ "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
+ "%-*s | | | | | | | | | NX - No execute (K8)\n"
+ "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
+ "%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
+ "%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
+ "%-*s Level | | | | | | | | | | | | Page\n"
+ /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
+ - W U - - - -- -- -- -- -- 010 */
+ ,
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address");
+ if (pState->fLme)
+ rc = pgmR3DumpHierarchyShwPaePML4(pState, cr3 & cr3Mask, cMaxDepth);
+ else if (pState->fPae)
+ rc = pgmR3DumpHierarchyShwPaePDPT(pState, cr3 & cr3Mask, cMaxDepth);
+ else
+ rc = pgmR3DumpHierarchyShw32BitPD(pState, cr3 & cr3Mask, cMaxDepth);
+ }
+
+ if (!pState->cLeaves)
+ pState->pHlp->pfnPrintf(pState->pHlp, "not present\n");
+ return rc;
+}
+
+
+/**
+ * dbgfR3PagingDumpEx worker.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cr3 The CR3 register value.
+ * @param fFlags The flags, DBGFPGDMP_FLAGS_XXX.
+ * @param u64FirstAddr The start address.
+ * @param u64LastAddr The address to stop after.
+ * @param cMaxDepth The max depth.
+ * @param pHlp The output callbacks. Defaults to log if NULL.
+ *
+ * @internal
+ */
+VMMR3_INT_DECL(int) PGMR3DumpHierarchyShw(PVM pVM, uint64_t cr3, uint32_t fFlags, uint64_t u64FirstAddr, uint64_t u64LastAddr,
+ uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
+{
+ /* Minimal validation as we're only supposed to service DBGF. */
+ AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & (DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3)), VERR_INVALID_PARAMETER);
+ AssertReturn(fFlags & DBGFPGDMP_FLAGS_SHADOW, VERR_INVALID_PARAMETER);
+
+ PGMR3DUMPHIERARCHYSTATE State;
+ pgmR3DumpHierarchyInitState(&State, pVM, fFlags, u64FirstAddr, u64LastAddr, pHlp);
+ PGM_LOCK_VOID(pVM);
+ int rc = pgmR3DumpHierarchyShwDoIt(&State, cr3, cMaxDepth);
+ PGM_UNLOCK(pVM);
+ return rc;
+}
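+
+// Illustrative note (not part of the original code): a hypothetical DBGF-side
+// call dumping the low 4 GiB of an EPT shadow hierarchy to the log:
+//     PGMR3DumpHierarchyShw(pVM, uEptCr3,
+//                           DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_EPT
+//                           | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3,
+//                           0, UINT32_MAX, 4 /* cMaxDepth */, NULL /* pHlp: log */);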
+
+
+/**
+ * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pVM The cross context VM structure.
+ * @param cr3 The root of the hierarchy.
+ * @param cr4 The cr4, only PAE and PSE are currently used.
+ * @param fLongMode Set if long mode, false if not long mode.
+ * @param cMaxDepth Number of levels to dump.
+ * @param pHlp Pointer to the output functions.
+ *
+ * @deprecated Use DBGFR3PagingDumpEx.
+ */
+VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
+{
+ if (!cMaxDepth)
+ return VINF_SUCCESS;
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ uint32_t fFlags = DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3 | DBGFPGDMP_FLAGS_PAGE_INFO | DBGFPGDMP_FLAGS_SHADOW;
+ fFlags |= cr4 & (X86_CR4_PAE | X86_CR4_PSE);
+ if (fLongMode)
+ fFlags |= DBGFPGDMP_FLAGS_LME;
+
+ return DBGFR3PagingDumpEx(pVM->pUVM, pVCpu->idCpu, fFlags, cr3, 0, fLongMode ? UINT64_MAX : UINT32_MAX, cMaxDepth, pHlp);
+}
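+
+// Illustrative note (not part of the original code): a minimal, hypothetical
+// use of the deprecated wrapper above, dumping three levels of a 32-bit w/ PSE
+// hierarchy to the log (pHlp=NULL falls back to the log helpers):
+//     PGMR3DumpHierarchyHC(pVM, uCr3, X86_CR4_PSE, false /* fLongMode */, 3, NULL);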
+
+
+/**
+ * Maps the guest page.
+ *
+ * @returns VBox status code.
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the guest page.
+ * @param pszDesc The description.
+ * @param ppv Where to return the pointer.
+ * @param pLock Where to return the mapping lock. Hand this to
+ * PGMPhysReleasePageMappingLock when done.
+ */
+static int pgmR3DumpHierarchyGstMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTGCPHYS GCPhys, const char *pszDesc,
+ void const **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ int rc = PGMPhysGCPhys2CCPtrReadOnly(pState->pVM, GCPhys, ppv, pLock);
+ if (RT_FAILURE(rc))
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! Failed to map %s at GCPhys=%RGp: %Rrc!\n",
+ pState->cchAddress, pState->u64Address, pszDesc, GCPhys, rc);
+ return rc;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Figures out which guest page this is and dumps a summary.
+ *
+ * @param pState The dumper state.
+ * @param GCPhys The page address.
+ * @param cbPage The page size.
+ */
+static void pgmR3DumpHierarchyGstPageInfo(PPGMR3DUMPHIERARCHYSTATE pState, RTGCPHYS GCPhys, uint32_t cbPage)
+{
+ char szPage[80];
+ PGM_LOCK_VOID(pState->pVM);
+ PCPGMPAGE pPage = pgmPhysGetPage(pState->pVM, GCPhys);
+ if (pPage)
+ RTStrPrintf(szPage, sizeof(szPage), " %R[pgmpage]", pPage);
+ else
+ strcpy(szPage, " not found");
+ PGM_UNLOCK(pState->pVM);
+ pState->pHlp->pfnPrintf(pState->pHlp, "%s", szPage);
+ NOREF(cbPage);
+}
+
+
+/**
+ * Checks the entry for reserved bits.
+ *
+ * @param pState The dumper state.
+ * @param u64Entry The entry to check.
+ */
+static void pgmR3DumpHierarchyGstCheckReservedHighBits(PPGMR3DUMPHIERARCHYSTATE pState, uint64_t u64Entry)
+{
+ uint32_t uRsvd = (u64Entry & pState->u64HighReservedBits) >> 52;
+ if (uRsvd)
+ pState->pHlp->pfnPrintf(pState->pHlp, " %u:52=%03x%s",
+ pState->uLastRsvdBit, uRsvd, pState->fLme ? "" : "!");
+ /** @todo check the valid physical bits as well. */
+}
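+
+/* Illustrative note (not part of the original code): with NXE set the mask
+ * checked above is 0x7ff << 52 (bits 62:52). An entry with, say, bit 55 set
+ * thus yields uRsvd = 0x008 and the function prints " 62:52=008", with a
+ * trailing '!' appended outside long mode.
+ */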
+
+
+/**
+ * Dumps an EPT guest page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The page table address.
+ */
+static int pgmR3DumpHierarchyGstEptPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
+{
+ PCEPTPT pPT = NULL;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, HCPhys, "Guest EPT level 1", (void const **)&pPT, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PT_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPT->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PT_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 1 | %c%c%c %s %c L %c %c %c %c %c %c %c 4K %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[1][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? 'I' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u & EPT_E_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps an EPT guest page directory table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstEptPD(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPD pPD = NULL;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, HCPhys, "Guest EPT level 2", (void const **)&pPD, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PD_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPD->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PD_SHIFT);
+ if (u & EPT_E_LEAF)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 2 | %c%c%c %s %c L %c %c %c %c %c %c %c 2M %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[1][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? 'I' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u & EPT_PDE2M_PG_MASK, _2M);
+ if (u & EPT_PDE2M_MBZ_MASK)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 20:12=%02llx!", (u >> 12) & 0x1ff);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 2 | %c%c%c %s %c - %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[0][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u & EPT_E_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstEptPT(pState, u & EPT_E_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps an EPT guest page directory pointer table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the page directory pointer table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstEptPDPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPDPT pPDPT = NULL;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, HCPhys, "Guest EPT level 3", (void const **)&pPDPT, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, EPT_PDPT_SHIFT, EPT_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPDPT->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << EPT_PDPT_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 3 | %c%c%c %s %c %c %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[!!(u & EPT_E_LEAF)][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_LEAF ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u & EPT_E_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstEptPD(pState, u & EPT_E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps an EPT guest PML4 table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param HCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstEptPML4(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, unsigned cMaxDepth)
+{
+ PCEPTPML4 pPML4 = NULL;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, HCPhys, "Guest EPT level 4", (void const **)&pPML4, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth);
+ cMaxDepth--;
+
+ uint32_t iFirst = (pState->u64FirstAddress >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+ uint32_t iLast = (pState->u64LastAddress >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ uint64_t const u = pPML4->a[i].u;
+ if (u & EPT_PRESENT_MASK)
+ {
+ pState->u64Address = (uint64_t)i << X86_PML4_SHIFT;
+ pState->pHlp->pfnPrintf(pState->pHlp, /* R W X MT I L A D U w k s v */
+ "%016llx 4 | %c%c%c %s %c %c %c %c %c %c %c %c %c %016llx",
+ pState->u64Address,
+ u & EPT_E_READ ? 'R' : '-',
+ u & EPT_E_WRITE ? 'W' : '-',
+ u & EPT_E_EXECUTE ? 'X' : '-',
+ g_aaszEptMemType[!!(u & EPT_E_LEAF)][(u >> EPT_E_MEMTYPE_SHIFT) & EPT_E_MEMTYPE_SMASK],
+ u & EPT_E_IGNORE_PAT ? '!' : '-',
+ u & EPT_E_LEAF ? '!' : '-',
+ u & EPT_E_ACCESSED ? 'A' : '-',
+ u & EPT_E_DIRTY ? 'D' : '-',
+ u & EPT_E_USER_EXECUTE ? 'U' : '-',
+ u & EPT_E_PAGING_WRITE ? 'w' : '-',
+ u & EPT_E_SUPER_SHW_STACK ? 'k' : '-',
+ u & EPT_E_SUBPAGE_WRITE_PERM ? 's' : '-',
+ u & EPT_E_SUPPRESS_VE ? 'v' : '-',
+ u & EPT_E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u & EPT_E_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstEptPDPT(pState, u & EPT_E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps a PAE guest page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The page table address.
+ */
+static int pgmR3DumpHierarchyGstPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTGCPHYS GCPhys)
+{
+ PCX86PTPAE pPT;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page table", (void const **)&pPT, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PT_PAE_SHIFT, X86_PG_PAE_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PTEPAE Pte = pPT->a[i];
+ if (Pte.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PT_PAE_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 4M a p ? */
+ ? "%016llx 3 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx"
+ : "%08llx 2 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx",
+ pState->u64Address,
+ Pte.n.u1Write ? 'W' : 'R',
+ Pte.n.u1User ? 'U' : 'S',
+ Pte.n.u1Accessed ? 'A' : '-',
+ Pte.n.u1Dirty ? 'D' : '-',
+ Pte.n.u1Global ? 'G' : '-',
+ Pte.n.u1WriteThru ? "WT" : "--",
+ Pte.n.u1CacheDisable? "CD" : "--",
+ Pte.n.u1PAT ? "AT" : "--",
+ Pte.n.u1NoExecute ? "NX" : "--",
+ Pte.u & RT_BIT(9) ? '1' : '0',
+ Pte.u & RT_BIT(10) ? '1' : '0',
+ Pte.u & RT_BIT(11) ? '1' : '0',
+ Pte.u & X86_PTE_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pte.u & X86_PTE_PAE_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pte.u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps a PAE guest page directory table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstPaePD(PPGMR3DUMPHIERARCHYSTATE pState, RTGCPHYS GCPhys, unsigned cMaxDepth)
+{
+ PCX86PDPAE pPD;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page directory", (void const **)&pPD, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_PAE_SHIFT, X86_PG_PAE_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDEPAE Pde = pPD->a[i];
+ if (Pde.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PD_PAE_SHIFT);
+ if (Pde.b.u1Size)
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 2M a p ? phys*/
+ ? "%016llx 2 | P %c %c %c %c %c %s %s %s %s 2M %c%c%c %016llx"
+ : "%08llx 1 | P %c %c %c %c %c %s %s %s %s 2M %c%c%c %016llx",
+ pState->u64Address,
+ Pde.b.u1Write ? 'W' : 'R',
+ Pde.b.u1User ? 'U' : 'S',
+ Pde.b.u1Accessed ? 'A' : '-',
+ Pde.b.u1Dirty ? 'D' : '-',
+ Pde.b.u1Global ? 'G' : '-',
+ Pde.b.u1WriteThru ? "WT" : "--",
+ Pde.b.u1CacheDisable ? "CD" : "--",
+ Pde.b.u1PAT ? "AT" : "--",
+ Pde.b.u1NoExecute ? "NX" : "--",
+ Pde.u & RT_BIT_64(9) ? '1' : '0',
+ Pde.u & RT_BIT_64(10) ? '1' : '0',
+ Pde.u & RT_BIT_64(11) ? '1' : '0',
+ Pde.u & X86_PDE2M_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pde.u & X86_PDE2M_PAE_PG_MASK, _2M);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pde.u);
+ if ((Pde.u >> 13) & 0xff)
+ pState->pHlp->pfnPrintf(pState->pHlp, " 20:13=%02llx%s", (Pde.u >> 13) & 0x0ff, pState->fLme ? "" : "!");
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ pState->fLme /*P R S A D G WT CD AT NX 4M a p ? phys */
+ ? "%016llx 2 | P %c %c %c %c %c %s %s .. %s .. %c%c%c %016llx"
+ : "%08llx 1 | P %c %c %c %c %c %s %s .. %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pde.n.u1Write ? 'W' : 'R',
+ Pde.n.u1User ? 'U' : 'S',
+ Pde.n.u1Accessed ? 'A' : '-',
+ Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
+ Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
+ Pde.n.u1WriteThru ? "WT" : "--",
+ Pde.n.u1CacheDisable ? "CD" : "--",
+ Pde.n.u1NoExecute ? "NX" : "--",
+ Pde.u & RT_BIT_64(9) ? '1' : '0',
+ Pde.u & RT_BIT_64(10) ? '1' : '0',
+ Pde.u & RT_BIT_64(11) ? '1' : '0',
+ Pde.u & X86_PDE_PAE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pde.u & X86_PDE_PAE_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pde.u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps a PAE guest page directory pointer table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstPaePDPT(PPGMR3DUMPHIERARCHYSTATE pState, RTGCPHYS GCPhys, unsigned cMaxDepth)
+{
+ /* Fend off addresses that are out of range in PAE mode - simplifies the code below. */
+ if (!pState->fLme && pState->u64Address >= _4G)
+ return VINF_SUCCESS;
+
+ PCX86PDPT pPDPT;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page directory pointer table", (void const **)&pPDPT, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PDPT_SHIFT,
+ pState->fLme ? X86_PG_AMD64_PDPE_ENTRIES : X86_PG_PAE_PDPE_ENTRIES,
+ &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDPE Pdpe = pPDPT->a[i];
+ if (Pdpe.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + ((uint64_t)i << X86_PDPT_SHIFT);
+ if (pState->fLme)
+ {
+ /** @todo Do 1G pages. */
+ pState->pHlp->pfnPrintf(pState->pHlp, /*P R S A D G WT CD AT NX .. a p ? */
+ "%016llx 1 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pdpe.lm.u1Write ? 'W' : 'R',
+ Pdpe.lm.u1User ? 'U' : 'S',
+ Pdpe.lm.u1Accessed ? 'A' : '-',
+ Pdpe.lm.u3Reserved & 1 ? '?' : '.', /* ignored */
+ Pdpe.lm.u3Reserved & 4 ? '!' : '.', /* mbz */
+ Pdpe.lm.u1WriteThru ? "WT" : "--",
+ Pdpe.lm.u1CacheDisable ? "CD" : "--",
+ Pdpe.lm.u3Reserved & 2 ? "!" : "..",/* mbz */
+ Pdpe.lm.u1NoExecute ? "NX" : "--",
+ Pdpe.u & RT_BIT_64(9) ? '1' : '0',
+ Pdpe.u & RT_BIT_64(10) ? '1' : '0',
+ Pdpe.u & RT_BIT_64(11) ? '1' : '0',
+ Pdpe.u & X86_PDPE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pdpe.u & X86_PDPE_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pdpe.u);
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX .. a p ? */
+ "%08llx 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pdpe.n.u2Reserved & 1 ? '!' : '.', /* mbz */
+ Pdpe.n.u2Reserved & 2 ? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 1 ? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 2 ? '!' : '.', /* mbz */
+ Pdpe.n.u4Reserved & 8 ? '!' : '.', /* mbz */
+ Pdpe.n.u1WriteThru ? "WT" : "--",
+ Pdpe.n.u1CacheDisable ? "CD" : "--",
+ Pdpe.n.u4Reserved & 2 ? "!" : "..", /* mbz */
+ Pdpe.lm.u1NoExecute ? "!!" : "..",/* mbz */
+ Pdpe.u & RT_BIT_64(9) ? '1' : '0',
+ Pdpe.u & RT_BIT_64(10) ? '1' : '0',
+ Pdpe.u & RT_BIT_64(11) ? '1' : '0',
+ Pdpe.u & X86_PDPE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pdpe.u & X86_PDPE_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pdpe.u);
+ }
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstPaePD(pState, Pdpe.u & X86_PDPE_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps a 32-bit guest page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGstPaePML4(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS GCPhys, unsigned cMaxDepth)
+{
+ PCX86PML4 pPML4;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page map level 4", (void const **)&pPML4, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth);
+ cMaxDepth--;
+
+ /*
+ * This is a bit tricky as we're working on unsigned addresses while the
+ * AMD64 spec uses signed tricks.
+ */
+ uint32_t iFirst = (pState->u64FirstAddress >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ uint32_t iLast = (pState->u64LastAddress >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ if ( pState->u64LastAddress <= UINT64_C(0x00007fffffffffff)
+ || pState->u64FirstAddress >= UINT64_C(0xffff800000000000))
+ { /* Simple, nothing to adjust */ }
+ else if (pState->u64FirstAddress <= UINT64_C(0x00007fffffffffff))
+ iLast = X86_PG_AMD64_ENTRIES / 2 - 1;
+ else if (pState->u64LastAddress >= UINT64_C(0xffff800000000000))
+ iFirst = X86_PG_AMD64_ENTRIES / 2;
+ else
+ iFirst = X86_PG_AMD64_ENTRIES; /* neither address is canonical */
+
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PML4E Pml4e = pPML4->a[i];
+ if (Pml4e.n.u1Present)
+ {
+ pState->u64Address = ((uint64_t)i << X86_PML4_SHIFT)
+ | (i >= RT_ELEMENTS(pPML4->a) / 2 ? UINT64_C(0xffff000000000000) : 0);
+ pState->pHlp->pfnPrintf(pState->pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
+ "%016llx 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx",
+ pState->u64Address,
+ Pml4e.n.u1Write ? 'W' : 'R',
+ Pml4e.n.u1User ? 'U' : 'S',
+ Pml4e.n.u1Accessed ? 'A' : '-',
+ Pml4e.n.u3Reserved & 1 ? '?' : '.', /* ignored */
+ Pml4e.n.u3Reserved & 4 ? '!' : '.', /* mbz */
+ Pml4e.n.u1WriteThru ? "WT" : "--",
+ Pml4e.n.u1CacheDisable ? "CD" : "--",
+ Pml4e.n.u3Reserved & 2 ? "!" : "..",/* mbz */
+ Pml4e.n.u1NoExecute ? "NX" : "--",
+ Pml4e.u & RT_BIT_64(9) ? '1' : '0',
+ Pml4e.u & RT_BIT_64(10) ? '1' : '0',
+ Pml4e.u & RT_BIT_64(11) ? '1' : '0',
+ Pml4e.u & X86_PML4E_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pml4e.u & X86_PML4E_PG_MASK, _4K);
+ pgmR3DumpHierarchyGstCheckReservedHighBits(pState, Pml4e.u);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGstPaePDPT(pState, Pml4e.u & X86_PML4E_PG_MASK, cMaxDepth);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Dumps a 32-bit guest page table.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the table.
+ */
+static int pgmR3DumpHierarchyGst32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS GCPhys)
+{
+ PCX86PT pPT;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page table", (void const **)&pPT, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ uint32_t iFirst, iLast;
+ uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PT_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PTE Pte = pPT->a[i];
+ if (Pte.n.u1Present)
+ {
+ pState->u64Address = u64BaseAddress + (i << X86_PT_SHIFT);
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d */
+ "%08llx 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x",
+ pState->u64Address,
+ Pte.n.u1Write ? 'W' : 'R',
+ Pte.n.u1User ? 'U' : 'S',
+ Pte.n.u1Accessed ? 'A' : '-',
+ Pte.n.u1Dirty ? 'D' : '-',
+ Pte.n.u1Global ? 'G' : '-',
+ Pte.n.u1WriteThru ? "WT" : "--",
+ Pte.n.u1CacheDisable ? "CD" : "--",
+ Pte.n.u1PAT ? "AT" : "--",
+ Pte.u & RT_BIT_32(9) ? '1' : '0',
+ Pte.u & RT_BIT_32(10) ? '1' : '0',
+ Pte.u & RT_BIT_32(11) ? '1' : '0',
+ Pte.u & X86_PDE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pte.u & X86_PDE_PG_MASK, _4K);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps a 32-bit guest page directory and page tables.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pState The dumper state.
+ * @param GCPhys The physical address of the table.
+ * @param cMaxDepth The maximum depth.
+ */
+static int pgmR3DumpHierarchyGst32BitPD(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS GCPhys, unsigned cMaxDepth)
+{
+ if (pState->u64Address >= _4G)
+ return VINF_SUCCESS;
+
+ PCX86PD pPD;
+ PGMPAGEMAPLOCK Lock;
+ int rc = pgmR3DumpHierarchyGstMapPage(pState, GCPhys, "Page directory", (void const **)&pPD, &Lock);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ Assert(cMaxDepth > 0);
+ cMaxDepth--;
+
+ uint32_t iFirst, iLast;
+ pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ for (uint32_t i = iFirst; i <= iLast; i++)
+ {
+ X86PDE Pde = pPD->a[i];
+ if (Pde.n.u1Present)
+ {
+ pState->u64Address = (uint32_t)i << X86_PD_SHIFT;
+ if (Pde.b.u1Size && pState->fPse)
+ {
+ uint64_t u64Phys = ((uint64_t)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT)
+ | (Pde.u & X86_PDE4M_PG_MASK);
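+ /* Note: PSE-36/PSE-40 style reconstruction - PDE bits 20:13 supply physical
+ * address bits 39:32 here, letting 4M pages reference memory above 4GB. */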
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d phys */
+ "%08llx 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08llx",
+ pState->u64Address,
+ Pde.b.u1Write ? 'W' : 'R',
+ Pde.b.u1User ? 'U' : 'S',
+ Pde.b.u1Accessed ? 'A' : '-',
+ Pde.b.u1Dirty ? 'D' : '-',
+ Pde.b.u1Global ? 'G' : '-',
+ Pde.b.u1WriteThru ? "WT" : "--",
+ Pde.b.u1CacheDisable ? "CD" : "--",
+ Pde.b.u1PAT ? "AT" : "--",
+ Pde.u & RT_BIT_32(9) ? '1' : '0',
+ Pde.u & RT_BIT_32(10) ? '1' : '0',
+ Pde.u & RT_BIT_32(11) ? '1' : '0',
+ u64Phys);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, u64Phys, _4M);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+ pState->cLeaves++;
+ }
+ else
+ {
+ pState->pHlp->pfnPrintf(pState->pHlp,/*P R S A D G WT CD AT NX 4M a m d phys */
+ "%08llx 0 | P %c %c %c %c %c %s %s .. .. .. %c%c%c %08x",
+ pState->u64Address,
+ Pde.n.u1Write ? 'W' : 'R',
+ Pde.n.u1User ? 'U' : 'S',
+ Pde.n.u1Accessed ? 'A' : '-',
+ Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
+ Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
+ Pde.n.u1WriteThru ? "WT" : "--",
+ Pde.n.u1CacheDisable ? "CD" : "--",
+ Pde.u & RT_BIT_32(9) ? '1' : '0',
+ Pde.u & RT_BIT_32(10) ? '1' : '0',
+ Pde.u & RT_BIT_32(11) ? '1' : '0',
+ Pde.u & X86_PDE_PG_MASK);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, Pde.u & X86_PDE_PG_MASK, _4K);
+ pState->pHlp->pfnPrintf(pState->pHlp, "\n");
+
+ if (cMaxDepth)
+ {
+ int rc2 = pgmR3DumpHierarchyGst32BitPT(pState, Pde.u & X86_PDE_PG_MASK);
+ if (rc2 < rc && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ else
+ pState->cLeaves++;
+ }
+ }
+ }
+
+ PGMPhysReleasePageMappingLock(pState->pVM, &Lock);
+ return rc;
+}
+
+
+/**
+ * Internal worker that initiates the actual dump.
+ *
+ * @returns VBox status code.
+ * @param pState The dumper state.
+ * @param cr3 The CR3 value.
+ * @param cMaxDepth The max depth.
+ */
+static int pgmR3DumpHierarchyGstDoIt(PPGMR3DUMPHIERARCHYSTATE pState, uint64_t cr3, unsigned cMaxDepth)
+{
+ int rc;
+ unsigned const cch = pState->cchAddress;
+ uint64_t const cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK /** @todo this should be X86_CR3_EPT_PAGE_MASK, but it is wrong */
+ : pState->fLme ? X86_CR3_AMD64_PAGE_MASK
+ : pState->fPae ? X86_CR3_PAE_PAGE_MASK
+ : X86_CR3_PAGE_MASK;
+ if (pState->fPrintCr3)
+ {
+ const char * const pszMode = pState->fEpt ? "Extended Page Tables"
+ : pState->fLme ? "Long Mode"
+ : pState->fPae ? "PAE Mode"
+ : pState->fPse ? "32-bit w/ PSE"
+ : "32-bit";
+ pState->pHlp->pfnPrintf(pState->pHlp, "cr3=%0*llx", cch, cr3);
+ if (pState->fDumpPageInfo)
+ pgmR3DumpHierarchyGstPageInfo(pState, cr3 & X86_CR3_AMD64_PAGE_MASK, _4K);
+ pState->pHlp->pfnPrintf(pState->pHlp, " %s%s%s\n",
+ pszMode,
+ pState->fNp ? " + Nested Paging" : "",
+ pState->fNxe ? " + NX" : "");
+ }
+
+
+ if (pState->fEpt)
+ {
+ if (pState->fPrintHeader)
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ "%-*s R - Readable\n"
+ "%-*s |W - Writeable\n"
+ "%-*s ||X - Executable\n"
+ "%-*s ||| EMT - EPT memory type\n"
+ "%-*s ||| | I - Ignored PAT?\n"
+ "%-*s ||| | | L - leaf\n"
+ "%-*s ||| | | | A - accessed\n"
+ "%-*s ||| | | | | D - dirty\n"
+ "%-*s ||| | | | | | U - user execute\n"
+ "%-*s ||| | | | | | | w - Paging writable\n"
+ "%-*s ||| | | | | | | | k - Supervisor shadow stack writable\n"
+ "%-*s ||| | | | | | | | | v - Suppress #VE\n"
+ "%-*s Level ||| | | | | | | | | | page\n"
+ /* xxxx n **** RWX MT I L A D U w k v 4K xxxxxxxxxxxxx
+ RWX 7 - - - - - - - - 0123456701234567 */
+ ,
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
+ cch, "", cch, "", cch, "", cch, "", cch, "Address");
+ /** @todo assumes 4-level EPT tables for now. */
+ rc = pgmR3DumpHierarchyGstEptPML4(pState, cr3 & cr3Mask, cMaxDepth);
+ }
+ else
+ {
+ if (pState->fPrintHeader)
+ pState->pHlp->pfnPrintf(pState->pHlp,
+ "%-*s P - Present\n"
+ "%-*s | R/W - Read (0) / Write (1)\n"
+ "%-*s | | U/S - User (1) / Supervisor (0)\n"
+ "%-*s | | | A - Accessed\n"
+ "%-*s | | | | D - Dirty\n"
+ "%-*s | | | | | G - Global\n"
+ "%-*s | | | | | | WT - Write thru\n"
+ "%-*s | | | | | | | CD - Cache disable\n"
+ "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
+ "%-*s | | | | | | | | | NX - No execute (K8)\n"
+ "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
+ "%-*s | | | | | | | | | | | AVL - 3 available bits.\n"
+ "%-*s Level | | | | | | | | | | | | Page\n"
+ /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
+ - W U - - - -- -- -- -- -- 010 */
+ ,
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
+ cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address");
+ if (pState->fLme)
+ rc = pgmR3DumpHierarchyGstPaePML4(pState, cr3 & cr3Mask, cMaxDepth);
+ else if (pState->fPae)
+ rc = pgmR3DumpHierarchyGstPaePDPT(pState, cr3 & cr3Mask, cMaxDepth);
+ else
+ rc = pgmR3DumpHierarchyGst32BitPD(pState, cr3 & cr3Mask, cMaxDepth);
+ }
+
+ if (!pState->cLeaves)
+ pState->pHlp->pfnPrintf(pState->pHlp, "not present\n");
+ return rc;
+}
+
+
+/**
+ * dbgfR3PagingDumpEx worker.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cr3 The CR3 register value.
+ * @param fFlags The flags, DBGFPGDMP_FLAGS_XXX.
+ * @param FirstAddr The start address.
+ * @param LastAddr The address to stop after.
+ * @param cMaxDepth The max depth.
+ * @param pHlp The output callbacks. Defaults to log if NULL.
+ *
+ * @internal
+ */
+VMMR3_INT_DECL(int) PGMR3DumpHierarchyGst(PVM pVM, uint64_t cr3, uint32_t fFlags, RTGCPTR FirstAddr, RTGCPTR LastAddr,
+ uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
+{
+ /* Minimal validation as we're only supposed to service DBGF. */
+ AssertReturn(~(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & (DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3)), VERR_INVALID_PARAMETER);
+ AssertReturn(fFlags & DBGFPGDMP_FLAGS_GUEST, VERR_INVALID_PARAMETER);
+
+ PGMR3DUMPHIERARCHYSTATE State;
+ pgmR3DumpHierarchyInitState(&State, pVM, fFlags, FirstAddr, LastAddr, pHlp);
+ return pgmR3DumpHierarchyGstDoIt(&State, cr3, cMaxDepth);
+}
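+
+/*
+ * Minimal usage sketch (illustration only, not part of the sources): dumping a
+ * long mode guest hierarchy to the default log. DBGFPGDMP_FLAGS_LME and
+ * DBGFPGDMP_FLAGS_HEADER are assumed members of the DBGFPGDMP_FLAGS_XXX family
+ * referenced above; uGuestCr3 is a caller-supplied example value.
+ */
+#if 0
+static void exampleDumpGuestPaging(PVM pVM, uint64_t uGuestCr3)
+{
+ uint32_t const fFlags = DBGFPGDMP_FLAGS_GUEST /* required by the API above */
+ | DBGFPGDMP_FLAGS_LME /* assume 4-level long mode paging */
+ | DBGFPGDMP_FLAGS_HEADER; /* print the column legend */
+ int rc = PGMR3DumpHierarchyGst(pVM, uGuestCr3, fFlags, 0 /*FirstAddr*/,
+ ~(RTGCPTR)0 /*LastAddr*/, 4 /*cMaxDepth*/,
+ NULL /*pHlp: defaults to the log*/);
+ AssertLogRelRC(rc);
+}
+#endif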
+
+
+/**
+ * Dumps the PGM state to the release log, for aiding with reset problems and similar.
+ *
+ * @param pVM The cross context VM handle.
+ */
+void pgmLogState(PVM pVM)
+{
+#if 0
+ RTLogRelPrintf("\npgmLogState pgmLogState pgmLogState pgmLogState pgmLogState\n");
+
+ /*
+ * Per CPU stuff.
+ */
+ for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ {
+ PPGMCPU pPgmCpu = &pVM->aCpus[iCpu].pgm.s;
+ RTLogRelPrintf("pgmLogState: CPU #%u\n", iCpu);
+# define LOG_PGMCPU_MEMBER(aFmt, aMember) RTLogRelPrintf(" %32s: %" aFmt "\n", #aMember, pPgmCpu->aMember)
+ LOG_PGMCPU_MEMBER("#RX32", offVM);
+ LOG_PGMCPU_MEMBER("#RX32", offVCpu);
+ LOG_PGMCPU_MEMBER("#RX32", offPGM);
+ LOG_PGMCPU_MEMBER("RGp", GCPhysA20Mask);
+ LOG_PGMCPU_MEMBER("RTbool", fA20Enabled);
+ LOG_PGMCPU_MEMBER("RTbool", fNoExecuteEnabled);
+ LOG_PGMCPU_MEMBER("#RX32", fSyncFlags);
+ LOG_PGMCPU_MEMBER("d", enmShadowMode);
+ LOG_PGMCPU_MEMBER("d", enmGuestMode);
+ LOG_PGMCPU_MEMBER("RGp", GCPhysCR3);
+
+ LOG_PGMCPU_MEMBER("p", pGst32BitPdR3);
+ LOG_PGMCPU_MEMBER("p", pGst32BitPdR0);
+ LOG_PGMCPU_MEMBER("RRv", pGst32BitPdRC);
+ LOG_PGMCPU_MEMBER("#RX32", fGst32BitMbzBigPdeMask);
+ LOG_PGMCPU_MEMBER("RTbool", fGst32BitPageSizeExtension);
+
+ LOG_PGMCPU_MEMBER("p", pGstPaePdptR3);
+ LOG_PGMCPU_MEMBER("p", pGstPaePdptR0);
+ LOG_PGMCPU_MEMBER("RRv", pGstPaePdptRC);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[0]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[1]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[2]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR3[3]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[0]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[1]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[2]);
+ LOG_PGMCPU_MEMBER("p", apGstPaePDsR0[3]);
+ LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[0]);
+ LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[1]);
+ LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[2]);
+ LOG_PGMCPU_MEMBER("RRv", apGstPaePDsR0[3]);
+ LOG_PGMCPU_MEMBER("RGp", aGCPhysGstPaePDs[0]);
+ LOG_PGMCPU_MEMBER("RGp", aGCPhysGstPaePDs[1]);
+ LOG_PGMCPU_MEMBER("RGp", aGCPhysGstPaePDs[2]);
+ LOG_PGMCPU_MEMBER("RGp", aGCPhysGstPaePDs[3]);
+ LOG_PGMCPU_MEMBER("#RX64", aGstPaePdpeRegs[0].u);
+ LOG_PGMCPU_MEMBER("#RX64", aGstPaePdpeRegs[1].u);
+ LOG_PGMCPU_MEMBER("#RX64", aGstPaePdpeRegs[2].u);
+ LOG_PGMCPU_MEMBER("#RX64", aGstPaePdpeRegs[3].u);
+ LOG_PGMCPU_MEMBER("#RX64", fGstPaeMbzPteMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstPaeMbzPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstPaeMbzBigPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstPaeMbzBigPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstPaeMbzPdpeMask);
+
+ LOG_PGMCPU_MEMBER("p", pGstAmd64Pml4R3);
+ LOG_PGMCPU_MEMBER("p", pGstAmd64Pml4R0);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPteMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzBigPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPdpeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzBigPdpeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64MbzPml4eMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64ShadowedPdpeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGstAmd64ShadowedPml4eMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGst64ShadowedPteMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGst64ShadowedPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGst64ShadowedBigPdeMask);
+ LOG_PGMCPU_MEMBER("#RX64", fGst64ShadowedBigPde4PteMask);
+
+ LOG_PGMCPU_MEMBER("p", pShwPageCR3R3);
+ LOG_PGMCPU_MEMBER("p", pShwPageCR3R0);
+ LOG_PGMCPU_MEMBER("RRv", pShwPageCR3RC);
+
+ LOG_PGMCPU_MEMBER("p", pfnR3ShwRelocate);
+ LOG_PGMCPU_MEMBER("p", pfnR3ShwExit);
+ LOG_PGMCPU_MEMBER("p", pfnR3ShwGetPage);
+ LOG_PGMCPU_MEMBER("p", pfnR3ShwModifyPage);
+ LOG_PGMCPU_MEMBER("p", pfnR0ShwGetPage);
+ LOG_PGMCPU_MEMBER("p", pfnR0ShwModifyPage);
+ LOG_PGMCPU_MEMBER("p", pfnR3GstRelocate);
+ LOG_PGMCPU_MEMBER("p", pfnR3GstExit);
+ LOG_PGMCPU_MEMBER("p", pfnR3GstGetPage);
+ LOG_PGMCPU_MEMBER("p", pfnR3GstModifyPage);
+ LOG_PGMCPU_MEMBER("p", pfnR0GstGetPage);
+ LOG_PGMCPU_MEMBER("p", pfnR0GstModifyPage);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthRelocate);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthInvalidatePage);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthSyncCR3);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthPrefetchPage);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthMapCR3);
+ LOG_PGMCPU_MEMBER("p", pfnR3BthUnmapCR3);
+ LOG_PGMCPU_MEMBER("p", pfnR0BthMapCR3);
+ LOG_PGMCPU_MEMBER("p", pfnR0BthUnmapCR3);
+ LOG_PGMCPU_MEMBER("#RX64", cNetwareWp0Hacks);
+ LOG_PGMCPU_MEMBER("#RX64", cPoolAccessHandler);
+
+ }
+
+ /*
+ * PGM globals.
+ */
+ RTLogRelPrintf("PGM globals\n");
+ PPGM pPgm = &pVM->pgm.s;
+# define LOG_PGM_MEMBER(aFmt, aMember) RTLogRelPrintf(" %32s: %" aFmt "\n", #aMember, pPgm->aMember)
+ LOG_PGM_MEMBER("#RX32", offVM);
+ LOG_PGM_MEMBER("#RX32", offVCpuPGM);
+ LOG_PGM_MEMBER("RTbool", fRamPreAlloc);
+ LOG_PGM_MEMBER("RTbool", fPhysWriteMonitoringEngaged);
+ LOG_PGM_MEMBER("RTbool", fLessThan52PhysicalAddressBits);
+ LOG_PGM_MEMBER("RTbool", fNestedPaging);
+ LOG_PGM_MEMBER("d", enmHostMode);
+ LOG_PGM_MEMBER("RTbool", fNoMorePhysWrites);
+ LOG_PGM_MEMBER("RTbool", fPageFusionAllowed);
+ LOG_PGM_MEMBER("RTbool", fPciPassthrough);
+ LOG_PGM_MEMBER("#x", cMmio2Regions);
+ LOG_PGM_MEMBER("RTbool", fRestoreRomPagesOnReset);
+ LOG_PGM_MEMBER("RTbool", fZeroRamPagesOnReset);
+ LOG_PGM_MEMBER("RTbool", fFinalizedMappings);
+ LOG_PGM_MEMBER("RTbool", fMappingsFixed);
+ LOG_PGM_MEMBER("RTbool", fMappingsFixedRestored);
+ LOG_PGM_MEMBER("%#x", cbMappingFixed);
+ LOG_PGM_MEMBER("%#x", idRamRangesGen);
+ LOG_PGM_MEMBER("#RGv", GCPtrMappingFixed);
+ LOG_PGM_MEMBER("#RGv", GCPtrPrevRamRangeMapping);
+ LOG_PGM_MEMBER("%#x", hRomPhysHandlerType);
+ LOG_PGM_MEMBER("#RGp", GCPhys4MBPSEMask);
+ LOG_PGM_MEMBER("#RGp", GCPhysInvAddrMask);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[0]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[1]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[2]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[3]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[4]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[5]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[6]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR3[7]);
+ LOG_PGM_MEMBER("p", pRamRangesXR3);
+ LOG_PGM_MEMBER("p", pRamRangeTreeR3);
+ LOG_PGM_MEMBER("p", pTreesR3);
+ LOG_PGM_MEMBER("p", pLastPhysHandlerR3);
+ LOG_PGM_MEMBER("p", pPoolR3);
+ LOG_PGM_MEMBER("p", pMappingsR3);
+ LOG_PGM_MEMBER("p", pRomRangesR3);
+ LOG_PGM_MEMBER("p", pRegMmioRangesR3);
+ LOG_PGM_MEMBER("p", paModeData);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[0]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[1]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[2]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[3]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[4]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR3[5]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[0]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[1]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[2]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[3]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[4]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[5]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[6]);
+ LOG_PGM_MEMBER("p", apRamRangesTlbR0[7]);
+ LOG_PGM_MEMBER("p", pRamRangesXR0);
+ LOG_PGM_MEMBER("p", pRamRangeTreeR0);
+ LOG_PGM_MEMBER("p", pTreesR0);
+ LOG_PGM_MEMBER("p", pLastPhysHandlerR0);
+ LOG_PGM_MEMBER("p", pPoolR0);
+ LOG_PGM_MEMBER("p", pMappingsR0);
+ LOG_PGM_MEMBER("p", pRomRangesR0);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[0]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[1]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[2]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[3]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[4]);
+ LOG_PGM_MEMBER("p", apMmio2RangesR0[5]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[0]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[1]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[2]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[3]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[4]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[5]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[6]);
+ LOG_PGM_MEMBER("RRv", apRamRangesTlbRC[7]);
+ LOG_PGM_MEMBER("RRv", pRamRangesXRC);
+ LOG_PGM_MEMBER("RRv", pRamRangeTreeRC);
+ LOG_PGM_MEMBER("RRv", pTreesRC);
+ LOG_PGM_MEMBER("RRv", pLastPhysHandlerRC);
+ LOG_PGM_MEMBER("RRv", pPoolRC);
+ LOG_PGM_MEMBER("RRv", pMappingsRC);
+ LOG_PGM_MEMBER("RRv", pRomRangesRC);
+ LOG_PGM_MEMBER("RRv", paDynPageMap32BitPTEsGC);
+ LOG_PGM_MEMBER("RRv", paDynPageMapPaePTEsGC);
+
+ LOG_PGM_MEMBER("#RGv", GCPtrCR3Mapping);
+ LOG_PGM_MEMBER("p", pInterPD);
+ LOG_PGM_MEMBER("p", apInterPTs[0]);
+ LOG_PGM_MEMBER("p", apInterPTs[1]);
+ LOG_PGM_MEMBER("p", apInterPaePTs[0]);
+ LOG_PGM_MEMBER("p", apInterPaePTs[1]);
+ LOG_PGM_MEMBER("p", apInterPaePDs[0]);
+ LOG_PGM_MEMBER("p", apInterPaePDs[1]);
+ LOG_PGM_MEMBER("p", apInterPaePDs[2]);
+ LOG_PGM_MEMBER("p", apInterPaePDs[3]);
+ LOG_PGM_MEMBER("p", pInterPaePDPT);
+ LOG_PGM_MEMBER("p", pInterPaePML4);
+ LOG_PGM_MEMBER("p", pInterPaePDPT64);
+ LOG_PGM_MEMBER("#RHp", HCPhysInterPD);
+ LOG_PGM_MEMBER("#RHp", HCPhysInterPaePDPT);
+ LOG_PGM_MEMBER("#RHp", HCPhysInterPaePML4);
+ LOG_PGM_MEMBER("RRv", pbDynPageMapBaseGC);
+ LOG_PGM_MEMBER("RRv", pRCDynMap);
+ LOG_PGM_MEMBER("p", pvR0DynMapUsed);
+ LOG_PGM_MEMBER("%#x", cDeprecatedPageLocks);
+
+ /*
+ * Data associated with managing the ring-3 mappings of the allocation chunks.
+ */
+ LOG_PGM_MEMBER("p", ChunkR3Map.pTree);
+ //LOG_PGM_MEMBER(PGMCHUNKR3MAPTLB ChunkR3Map.Tlb);
+ LOG_PGM_MEMBER("%#x", ChunkR3Map.c);
+ LOG_PGM_MEMBER("%#x", ChunkR3Map.cMax);
+ LOG_PGM_MEMBER("%#x", ChunkR3Map.iNow);
+ //LOG_PGM_MEMBER(PGMPAGER3MAPTLB PhysTlbHC);
+
+ LOG_PGM_MEMBER("#RHp", HCPhysZeroPg);
+ LOG_PGM_MEMBER("p", pvZeroPgR3);
+ LOG_PGM_MEMBER("p", pvZeroPgR0);
+ LOG_PGM_MEMBER("RRv", pvZeroPgRC);
+ LOG_PGM_MEMBER("#RHp", HCPhysMmioPg);
+ LOG_PGM_MEMBER("#RHp", HCPhysInvMmioPg);
+ LOG_PGM_MEMBER("p", pvMmioPgR3);
+ LOG_PGM_MEMBER("RTbool", fErrInjHandyPages);
+
+ /*
+ * PGM page pool.
+ */
+ PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
+ RTLogRelPrintf("PGM Page Pool\n");
+# define LOG_PGMPOOL_MEMBER(aFmt, aMember) RTLogRelPrintf(" %32s: %" aFmt "\n", #aMember, pPool->aMember)
+ LOG_PGMPOOL_MEMBER("p", pVMR3);
+ LOG_PGMPOOL_MEMBER("p", pVMR0);
+ LOG_PGMPOOL_MEMBER("RRv", pVMRC);
+ LOG_PGMPOOL_MEMBER("#x", cMaxPages);
+ LOG_PGMPOOL_MEMBER("#x", cCurPages);
+ LOG_PGMPOOL_MEMBER("#x", iFreeHead);
+ LOG_PGMPOOL_MEMBER("#x", u16Padding);
+ LOG_PGMPOOL_MEMBER("#x", iUserFreeHead);
+ LOG_PGMPOOL_MEMBER("#x", cMaxUsers);
+ LOG_PGMPOOL_MEMBER("#x", cPresent);
+ LOG_PGMPOOL_MEMBER("RRv", paUsersRC);
+ LOG_PGMPOOL_MEMBER("p", paUsersR3);
+ LOG_PGMPOOL_MEMBER("p", paUsersR0);
+ LOG_PGMPOOL_MEMBER("#x", iPhysExtFreeHead);
+ LOG_PGMPOOL_MEMBER("#x", cMaxPhysExts);
+ LOG_PGMPOOL_MEMBER("RRv", paPhysExtsRC);
+ LOG_PGMPOOL_MEMBER("p", paPhysExtsR3);
+ LOG_PGMPOOL_MEMBER("p", paPhysExtsR0);
+ for (uint32_t i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
+ RTLogRelPrintf(" aiHash[%u]: %#x\n", i, pPool->aiHash[i]);
+ LOG_PGMPOOL_MEMBER("#x", iAgeHead);
+ LOG_PGMPOOL_MEMBER("#x", iAgeTail);
+ LOG_PGMPOOL_MEMBER("RTbool", fCacheEnabled);
+ LOG_PGMPOOL_MEMBER("RTbool", afPadding1[0]);
+ LOG_PGMPOOL_MEMBER("RTbool", afPadding1[1]);
+ LOG_PGMPOOL_MEMBER("RTbool", afPadding1[2]);
+ LOG_PGMPOOL_MEMBER("#x", iModifiedHead);
+ LOG_PGMPOOL_MEMBER("#x", cModifiedPages);
+ LOG_PGMPOOL_MEMBER("#x", hAccessHandlerType);
+ LOG_PGMPOOL_MEMBER("#x", idxFreeDirtyPage);
+ LOG_PGMPOOL_MEMBER("#x", cDirtyPages);
+ for (uint32_t i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
+ RTLogRelPrintf(" aDirtyPages[%u].uIdx: %#x\n", i, pPool->aDirtyPages[i].uIdx);
+ LOG_PGMPOOL_MEMBER("#x", cUsedPages);
+ LOG_PGMPOOL_MEMBER("#x", HCPhysTree);
+ for (uint32_t i = 0; i < pPool->cCurPages; i++)
+ {
+ PPGMPOOLPAGE pPage = &pPool->aPages[i];
+# define LOG_PAGE_MEMBER(aFmt, aMember) RTLogRelPrintf(" %3u:%-32s: %" aFmt "\n", i, #aMember, pPage->aMember)
+ RTLogRelPrintf("%3u:%-32s: %p\n", i, "", pPage);
+ LOG_PAGE_MEMBER("RHp", Core.Key);
+ LOG_PAGE_MEMBER("p", pvPageR3);
+ LOG_PAGE_MEMBER("RGp", GCPhys);
+ LOG_PAGE_MEMBER("d", enmKind);
+ LOG_PAGE_MEMBER("d", enmAccess);
+ LOG_PAGE_MEMBER("RTbool", fA20Enabled);
+ LOG_PAGE_MEMBER("RTbool", fZeroed);
+ LOG_PAGE_MEMBER("RTbool", fSeenNonGlobal);
+ LOG_PAGE_MEMBER("RTbool", fMonitored);
+ LOG_PAGE_MEMBER("RTbool", fCached);
+ LOG_PAGE_MEMBER("RTbool", fReusedFlushPending);
+ LOG_PAGE_MEMBER("RTbool", fDirty);
+ LOG_PAGE_MEMBER("RTbool", fPadding1);
+ LOG_PAGE_MEMBER("RTbool", fPadding2);
+ LOG_PAGE_MEMBER("#x", idx);
+ LOG_PAGE_MEMBER("#x", iNext);
+ LOG_PAGE_MEMBER("#x", iUserHead);
+ LOG_PAGE_MEMBER("#x", cPresent);
+ LOG_PAGE_MEMBER("#x", iFirstPresent);
+ LOG_PAGE_MEMBER("#x", cModifications);
+ LOG_PAGE_MEMBER("#x", iModifiedNext);
+ LOG_PAGE_MEMBER("#x", iModifiedPrev);
+ LOG_PAGE_MEMBER("#x", iMonitoredNext);
+ LOG_PAGE_MEMBER("#x", iMonitoredPrev);
+ LOG_PAGE_MEMBER("#x", iAgeNext);
+ LOG_PAGE_MEMBER("#x", iAgePrev);
+ LOG_PAGE_MEMBER("#x", idxDirtyEntry);
+ LOG_PAGE_MEMBER("RGv", GCPtrLastAccessHandlerRip);
+ LOG_PAGE_MEMBER("RGv", GCPtrLastAccessHandlerFault);
+ LOG_PAGE_MEMBER("#RX64", cLastAccessHandler);
+ LOG_PAGE_MEMBER("#RX32", cLocked);
+# ifdef VBOX_STRICT
+ LOG_PAGE_MEMBER("RGv", GCPtrDirtyFault);
+# endif
+ if ( pPage->enmKind == PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
+ || pPage->enmKind == PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
+ || pPage->enmKind == PGMPOOLKIND_32BIT_PD
+ || pPage->enmKind == PGMPOOLKIND_32BIT_PD_PHYS)
+ {
+ uint32_t const *pu32Page = (uint32_t const *)pPage->pvPageR3;
+ for (uint32_t i = 0; i < 1024/2; i += 4)
+ RTLogRelPrintf(" %#05x: %RX32 %RX32 %RX32 %RX32\n", i, pu32Page[i], pu32Page[i+1], pu32Page[i+2], pu32Page[i+3]);
+ }
+ else if ( pPage->enmKind != PGMPOOLKIND_FREE
+ && pPage->enmKind != PGMPOOLKIND_INVALID)
+ {
+ uint64_t const *pu64Page = (uint64_t const *)pPage->pvPageR3;
+ for (uint32_t i = 0; i < 512/2; i += 2)
+ RTLogRelPrintf(" %#05x: %RX64 %RX64\n", i, pu64Page[i], pu64Page[i+1]);
+ }
+ }
+
+ RTLogRelPrintf("pgmLogState pgmLogState pgmLogState pgmLogState pgmLogState\n\n");
+#else
+ RT_NOREF(pVM);
+#endif
+}
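+
+/* Note: the body of pgmLogState is compiled out above; when chasing reset
+ * problems, flip the '#if 0' to '#if 1' and call pgmLogState(pVM) from the
+ * code path under investigation. */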
+
diff --git a/src/VBox/VMM/VMMR3/PGMHandler.cpp b/src/VBox/VMM/VMMR3/PGMHandler.cpp
new file mode 100644
index 00000000..5fb6ccb3
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMHandler.cpp
@@ -0,0 +1,345 @@
+/* $Id: PGMHandler.cpp $ */
+/** @file
+ * PGM - Page Manager / Monitor, Access Handlers.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/ssm.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include "PGMInline.h"
+#include <VBox/dbg.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+#include <iprt/thread.h>
+#include <iprt/string.h>
+#include <VBox/param.h>
+#include <VBox/vmm/hm.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) pgmR3HandlerPhysicalOneClear(PPGMPHYSHANDLER pHandler, void *pvUser);
+static DECLCALLBACK(int) pgmR3HandlerPhysicalOneSet(PPGMPHYSHANDLER pHandler, void *pvUser);
+static DECLCALLBACK(int) pgmR3InfoHandlersPhysicalOne(PPGMPHYSHANDLER pHandler, void *pvUser);
+
+
+
+/**
+ * @callback_method_impl{FNPGMPHYSHANDLER,
+ * Invalid callback entry triggering guru meditation}
+ */
+DECLCALLBACK(VBOXSTRICTRC) pgmR3HandlerPhysicalHandlerInvalid(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
+ void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
+ PGMACCESSORIGIN enmOrigin, uint64_t uUser)
+{
+ RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
+ LogRel(("GCPhys=%RGp cbBuf=%#zx enmAccessType=%d uUser=%#RX64\n", GCPhys, cbBuf, enmAccessType, uUser));
+ return VERR_PGM_HANDLER_IPE_1;
+}
+
+
+/**
+ * Register a physical page access handler type.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmKind The kind of access handler.
+ * @param fFlags PGMPHYSHANDLER_F_XXX
+ * @param pfnHandler Pointer to the ring-3 handler callback.
+ * @param pszDesc The type description.
+ * @param phType Where to return the type handle (cross context safe).
+ */
+VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
+ PFNPGMPHYSHANDLER pfnHandler, const char *pszDesc,
+ PPGMPHYSHANDLERTYPE phType)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(phType, VERR_INVALID_POINTER);
+ *phType = NIL_PGMPHYSHANDLERTYPE;
+
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn( enmKind == PGMPHYSHANDLERKIND_WRITE
+ || enmKind == PGMPHYSHANDLERKIND_ALL
+ || enmKind == PGMPHYSHANDLERKIND_MMIO,
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(fFlags & ~PGMPHYSHANDLER_F_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
+
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ /*
+ * Do the allocating.
+ */
+ uint32_t const idxType = pVM->pgm.s.cPhysHandlerTypes;
+ AssertLogRelReturn(idxType < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes), VERR_OUT_OF_RESOURCES);
+ PPGMPHYSHANDLERTYPEINTR3 const pType = &pVM->pgm.s.aPhysHandlerTypes[idxType];
+ AssertReturn(pType->enmKind == PGMPHYSHANDLERKIND_INVALID, VERR_PGM_HANDLER_IPE_1);
+ pVM->pgm.s.cPhysHandlerTypes = idxType + 1;
+
+ pType->enmKind = enmKind;
+ pType->uState = enmKind == PGMPHYSHANDLERKIND_WRITE
+ ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
+ pType->fKeepPgmLock = RT_BOOL(fFlags & PGMPHYSHANDLER_F_KEEP_PGM_LOCK);
+ pType->fRing0DevInsIdx = RT_BOOL(fFlags & PGMPHYSHANDLER_F_R0_DEVINS_IDX);
+ pType->fNotInHm = RT_BOOL(fFlags & PGMPHYSHANDLER_F_NOT_IN_HM);
+ pType->pfnHandler = pfnHandler;
+ pType->pszDesc = pszDesc;
+
+ *phType = pType->hType;
+ LogFlow(("PGMR3HandlerPhysicalTypeRegisterEx: hType=%#RX64/%#x: enmKind=%d fFlags=%#x pfnHandler=%p pszDesc=%s\n",
+ pType->hType, idxType, enmKind, fFlags, pfnHandler, pszDesc));
+ return VINF_SUCCESS;
+}
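+
+/*
+ * Usage sketch (illustration only, not part of the sources): registering a
+ * write handler type on EMT(0) during VM creation, as the assertions above
+ * require. The callback name, description string, and the use of
+ * VINF_PGM_HANDLER_DO_DEFAULT as the "carry on" status are example assumptions.
+ */
+#if 0
+static DECLCALLBACK(VBOXSTRICTRC) examplePhysWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
+ void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
+ PGMACCESSORIGIN enmOrigin, uint64_t uUser)
+{
+ RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
+ return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM perform the access as normal */
+}
+
+static int exampleRegisterHandlerType(PVM pVM, PPGMPHYSHANDLERTYPE phType)
+{
+ return PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/,
+ examplePhysWriteHandler, "Example write monitor", phType);
+}
+#endif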
+
+
+/**
+ * Updates the physical page access handlers.
+ *
+ * @param pVM The cross context VM structure.
+ * @remark Only used when restoring a saved state.
+ */
+void pgmR3HandlerPhysicalUpdateAll(PVM pVM)
+{
+ LogFlow(("pgmHandlerPhysicalUpdateAll:\n"));
+
+ /*
+ * Clear and set.
+ * (the right -> left on the setting pass is just bird speculating on cache hits)
+ */
+ PGM_LOCK_VOID(pVM);
+
+ int rc = pVM->pgm.s.pPhysHandlerTree->doWithAllFromLeft(&pVM->pgm.s.PhysHandlerAllocator, pgmR3HandlerPhysicalOneClear, pVM);
+ AssertRC(rc);
+ rc = pVM->pgm.s.pPhysHandlerTree->doWithAllFromRight(&pVM->pgm.s.PhysHandlerAllocator, pgmR3HandlerPhysicalOneSet, pVM);
+ AssertRC(rc);
+
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Clears all the page level flags for one physical handler range.
+ *
+ * @returns 0
+ * @param pHandler The physical access handler entry.
+ * @param pvUser Pointer to the VM.
+ */
+static DECLCALLBACK(int) pgmR3HandlerPhysicalOneClear(PPGMPHYSHANDLER pHandler, void *pvUser)
+{
+ PPGMRAMRANGE pRamHint = NULL;
+ RTGCPHYS GCPhys = pHandler->Key;
+ RTUINT cPages = pHandler->cPages;
+ PVM pVM = (PVM)pvUser;
+ for (;;)
+ {
+ PPGMPAGE pPage;
+ int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
+ if (RT_SUCCESS(rc))
+ {
+ PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Tell NEM about the protection change. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
+ PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
+ NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
+ PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
+ pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
+ PGM_PAGE_SET_NEM_STATE(pPage, u2State);
+ }
+#endif
+ }
+ else
+ AssertRC(rc);
+
+ if (--cPages == 0)
+ return 0;
+ GCPhys += GUEST_PAGE_SIZE;
+ }
+}
+
+
+/**
+ * Sets all the page level flags for one physical handler range.
+ *
+ * @returns 0
+ * @param pHandler The physical access handler entry.
+ * @param pvUser Pointer to the VM.
+ */
+static DECLCALLBACK(int) pgmR3HandlerPhysicalOneSet(PPGMPHYSHANDLER pHandler, void *pvUser)
+{
+ PVM pVM = (PVM)pvUser;
+ PCPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pHandler);
+ unsigned uState = pType->uState;
+ PPGMRAMRANGE pRamHint = NULL;
+ RTGCPHYS GCPhys = pHandler->Key;
+ RTUINT cPages = pHandler->cPages;
+ for (;;)
+ {
+ PPGMPAGE pPage;
+ int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
+ if (RT_SUCCESS(rc))
+ {
+ PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pType->fNotInHm);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Tell NEM about the protection change. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
+ PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
+ NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
+ PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
+ pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
+ PGM_PAGE_SET_NEM_STATE(pPage, u2State);
+ }
+#endif
+ }
+ else
+ AssertRC(rc);
+
+ if (--cPages == 0)
+ return 0;
+ GCPhys += GUEST_PAGE_SIZE;
+ }
+}
+
+
+/**
+ * Arguments for pgmR3InfoHandlersPhysicalOne.
+ */
+typedef struct PGMHANDLERINFOARG
+{
+ /** The output helpers.*/
+ PCDBGFINFOHLP pHlp;
+ /** Pointer to the cross context VM handle. */
+ PVM pVM;
+ /** Set if statistics should be dumped. */
+ bool fStats;
+} PGMHANDLERINFOARG, *PPGMHANDLERINFOARG;
+
+
+/**
+ * Info callback for 'pgmhandlers'.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The output helpers.
+ * @param pszArgs The arguments; any string containing "nost" (e.g. "nostats") suppresses the statistics.
+ */
+DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /*
+ * Parse options.
+ */
+ PGMHANDLERINFOARG Args = { pHlp, pVM, /* .fStats = */ true };
+ if (pszArgs)
+ Args.fStats = strstr(pszArgs, "nost") == NULL;
+
+ /*
+ * Dump the handlers.
+ */
+ pHlp->pfnPrintf(pHlp,
+ "Physical handlers: max %#x, %u allocator error%s, %u tree error%s\n"
+ "%*s %*s %*s uUser Type Description\n",
+ pVM->pgm.s.PhysHandlerAllocator.m_cNodes,
+ pVM->pgm.s.PhysHandlerAllocator.m_cErrors, pVM->pgm.s.PhysHandlerAllocator.m_cErrors != 0 ? "s" : "",
+ pVM->pgm.s.pPhysHandlerTree->m_cErrors, pVM->pgm.s.pPhysHandlerTree->m_cErrors != 0 ? "s" : "",
+ - (int)sizeof(RTGCPHYS) * 2, "From",
+ - (int)sizeof(RTGCPHYS) * 2 - 3, "- To (incl)",
+ - (int)sizeof(RTHCPTR) * 2 - 1, "Handler (R3)");
+ pVM->pgm.s.pPhysHandlerTree->doWithAllFromLeft(&pVM->pgm.s.PhysHandlerAllocator, pgmR3InfoHandlersPhysicalOne, &Args);
+}
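+
+/*
+ * Usage sketch (illustration only): this callback is registered as the
+ * 'pgmhandlers' info item, so it can also be triggered programmatically,
+ * e.g. dumping the handlers with statistics suppressed to the release log:
+ */
+#if 0
+static void exampleDumpHandlersToRelLog(PVM pVM)
+{
+ DBGFR3Info(pVM->pUVM, "pgmhandlers", "nostats", DBGFR3InfoLogRelHlp());
+}
+#endif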
+
+
+/**
+ * Displays one physical handler range.
+ *
+ * @returns 0
+ * @param pHandler The physical access handler entry.
+ * @param pvUser Pointer to command helper functions.
+ */
+static DECLCALLBACK(int) pgmR3InfoHandlersPhysicalOne(PPGMPHYSHANDLER pHandler, void *pvUser)
+{
+ PPGMHANDLERINFOARG pArgs = (PPGMHANDLERINFOARG)pvUser;
+ PCDBGFINFOHLP pHlp = pArgs->pHlp;
+ PCPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pArgs->pVM, pHandler);
+ const char *pszType;
+ switch (pType->enmKind)
+ {
+ case PGMPHYSHANDLERKIND_MMIO: pszType = "MMIO "; break;
+ case PGMPHYSHANDLERKIND_WRITE: pszType = "Write "; break;
+ case PGMPHYSHANDLERKIND_ALL: pszType = "All "; break;
+ default: pszType = "???????"; break;
+ }
+
+ char szFlags[80];
+ size_t cchFlags = 0;
+ if (pType->fKeepPgmLock)
+ cchFlags = RTStrPrintf(szFlags, sizeof(szFlags), "(keep-pgm-lock");
+ if (pType->fRing0DevInsIdx)
+ cchFlags += RTStrPrintf(&szFlags[cchFlags], sizeof(szFlags) - cchFlags, cchFlags ? ", r0-devins-idx" : "(r0-devins-idx");
+ if (pType->fRing0Enabled)
+ cchFlags += RTStrPrintf(&szFlags[cchFlags], sizeof(szFlags) - cchFlags, cchFlags ? ", r0-enabled)" : "(r0-enabled)");
+ else
+ cchFlags += RTStrPrintf(&szFlags[cchFlags], sizeof(szFlags) - cchFlags, cchFlags ? ", r3-only)" : "(r3-only)");
+
+ pHlp->pfnPrintf(pHlp,
+ "%RGp - %RGp %p %016RX64 %s %s %s\n",
+ pHandler->Key, pHandler->KeyLast, pType->pfnHandler, pHandler->uUser, pszType, pHandler->pszDesc, szFlags);
+#ifdef VBOX_WITH_STATISTICS
+ if (pArgs->fStats)
+ pHlp->pfnPrintf(pHlp, " cPeriods: %9RU64 cTicks: %11RU64 Min: %11RU64 Avg: %11RU64 Max: %11RU64\n",
+ pHandler->Stat.cPeriods, pHandler->Stat.cTicks, pHandler->Stat.cTicksMin,
+ pHandler->Stat.cPeriods ? pHandler->Stat.cTicks / pHandler->Stat.cPeriods : 0, pHandler->Stat.cTicksMax);
+#endif
+ return 0;
+}
+
diff --git a/src/VBox/VMM/VMMR3/PGMPhys.cpp b/src/VBox/VMM/VMMR3/PGMPhys.cpp
new file mode 100644
index 00000000..fb9fd668
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMPhys.cpp
@@ -0,0 +1,6000 @@
+/* $Id: PGMPhys.cpp $ */
+/** @file
+ * PGM - Page Manager and Monitor, Physical Memory Addressing.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM_PHYS
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pdmdev.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include "PGMInline.h"
+
+#include <VBox/sup.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#ifdef VBOX_STRICT
+# include <iprt/crc.h>
+#endif
+#include <iprt/thread.h>
+#include <iprt/string.h>
+#include <iprt/system.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The number of pages to free in one batch. */
+#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
+
+
+
+/*********************************************************************************************************************************
+* Reading and Writing Guest Physical Memory *
+*********************************************************************************************************************************/
+
+/*
+ * PGMR3PhysReadU8-64
+ * PGMR3PhysWriteU8-64
+ */
+#define PGMPHYSFN_READNAME PGMR3PhysReadU8
+#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
+#define PGMPHYS_DATASIZE 1
+#define PGMPHYS_DATATYPE uint8_t
+#include "PGMPhysRWTmpl.h"
+
+#define PGMPHYSFN_READNAME PGMR3PhysReadU16
+#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
+#define PGMPHYS_DATASIZE 2
+#define PGMPHYS_DATATYPE uint16_t
+#include "PGMPhysRWTmpl.h"
+
+#define PGMPHYSFN_READNAME PGMR3PhysReadU32
+#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
+#define PGMPHYS_DATASIZE 4
+#define PGMPHYS_DATATYPE uint32_t
+#include "PGMPhysRWTmpl.h"
+
+#define PGMPHYSFN_READNAME PGMR3PhysReadU64
+#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
+#define PGMPHYS_DATASIZE 8
+#define PGMPHYS_DATATYPE uint64_t
+#include "PGMPhysRWTmpl.h"
+
+
+/**
+ * EMT worker for PGMR3PhysReadExternal.
+ */
+static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
+ PGMACCESSORIGIN enmOrigin)
+{
+ VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Read from physical memory, external users.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Physical address to read from.
+ * @param pvBuf Where to read into.
+ * @param cbRead How many bytes to read.
+ * @param enmOrigin Who is calling.
+ *
+ * @thread Any but EMTs.
+ */
+VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
+{
+ VM_ASSERT_OTHER_THREAD(pVM);
+
+ AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
+ LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
+
+ PGM_LOCK_VOID(pVM);
+
+ /*
+ * Copy loop on ram ranges.
+ */
+ PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
+ for (;;)
+ {
+ /* Inside range or not? */
+ if (pRam && GCPhys >= pRam->GCPhys)
+ {
+ /*
+ * Must work our way thru this page by page.
+ */
+ RTGCPHYS off = GCPhys - pRam->GCPhys;
+ while (off < pRam->cb)
+ {
+ unsigned iPage = off >> GUEST_PAGE_SHIFT;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+
+ /*
+ * If the page has an ALL access handler, we'll have to
+ * delegate the job to EMT.
+ */
+ if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
+ || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
+ {
+ PGM_UNLOCK(pVM);
+
+ return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
+ pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
+ }
+ Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
+
+ /*
+ * Simple stuff, go ahead.
+ */
+ size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
+ if (cb > cbRead)
+ cb = cbRead;
+ PGMPAGEMAPLOCK PgMpLck;
+ const void *pvSrc;
+ int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ memcpy(pvBuf, pvSrc, cb);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ else
+ {
+ AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
+ pRam->GCPhys + off, pPage, rc));
+ memset(pvBuf, 0xff, cb);
+ }
+
+ /* next page */
+ if (cb >= cbRead)
+ {
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+ }
+ cbRead -= cb;
+ off += cb;
+ GCPhys += cb;
+ pvBuf = (char *)pvBuf + cb;
+ } /* walk pages in ram range. */
+ }
+ else
+ {
+ LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
+
+ /*
+ * Unassigned address space.
+ */
+ size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
+ if (cb >= cbRead)
+ {
+ memset(pvBuf, 0xff, cbRead);
+ break;
+ }
+ memset(pvBuf, 0xff, cb);
+
+ cbRead -= cb;
+ pvBuf = (char *)pvBuf + cb;
+ GCPhys += cb;
+ }
+
+ /* Advance range if necessary. */
+ while (pRam && GCPhys > pRam->GCPhysLast)
+ pRam = pRam->CTX_SUFF(pNext);
+ } /* Ram range walk */
+
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
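+
+/*
+ * Usage sketch (illustration only): a non-EMT worker thread reading a guest
+ * physical dword; PGMACCESSORIGIN_DEVICE is an assumed origin for the example.
+ */
+#if 0
+static uint32_t exampleReadU32FromWorkerThread(PVM pVM, RTGCPHYS GCPhys)
+{
+ uint32_t u32 = 0;
+ int rc = PGMR3PhysReadExternal(pVM, GCPhys, &u32, sizeof(u32), PGMACCESSORIGIN_DEVICE);
+ AssertLogRelRC(rc); /* unassigned space reads as 0xff bytes and still succeeds */
+ return u32;
+}
+#endif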
+
+
+/**
+ * EMT worker for PGMR3PhysWriteExternal.
+ */
+static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
+ PGMACCESSORIGIN enmOrigin)
+{
+ /** @todo VERR_EM_NO_MEMORY */
+ VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Write to physical memory, external users.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS.
+ * @retval VERR_EM_NO_MEMORY.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Physical address to write to.
+ * @param pvBuf What to write.
+ * @param cbWrite How many bytes to write.
+ * @param enmOrigin Who is calling.
+ *
+ * @thread Any but EMTs.
+ */
+VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
+{
+ VM_ASSERT_OTHER_THREAD(pVM);
+
+ AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
+ ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
+ GCPhys, cbWrite, enmOrigin));
+ AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
+ LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
+
+ PGM_LOCK_VOID(pVM);
+
+ /*
+ * Copy loop on ram ranges, stop when we hit something difficult.
+ */
+ PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
+ for (;;)
+ {
+ /* Inside range or not? */
+ if (pRam && GCPhys >= pRam->GCPhys)
+ {
+ /*
+ * Must work our way thru this page by page.
+ */
+ RTGCPTR off = GCPhys - pRam->GCPhys;
+ while (off < pRam->cb)
+ {
+ RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+
+ /*
+ * If the page is problematic, we have to do the work on the EMT.
+ *
+ * Allocating writable pages and access handlers are
+ * problematic, write monitored pages are simple and can be
+ * dealt with here.
+ */
+ if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
+ || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
+ || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
+ {
+ if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
+ && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+ pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
+ else
+ {
+ PGM_UNLOCK(pVM);
+
+ return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
+ pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
+ }
+ }
+ Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
+
+ /*
+ * Simple stuff, go ahead.
+ */
+ size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
+ if (cb > cbWrite)
+ cb = cbWrite;
+ PGMPAGEMAPLOCK PgMpLck;
+ void *pvDst;
+ int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ memcpy(pvDst, pvBuf, cb);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ else
+ AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
+ pRam->GCPhys + off, pPage, rc));
+
+ /* next page */
+ if (cb >= cbWrite)
+ {
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+ }
+
+ cbWrite -= cb;
+ off += cb;
+ GCPhys += cb;
+ pvBuf = (const char *)pvBuf + cb;
+ } /* walk pages in ram range */
+ }
+ else
+ {
+ /*
+ * Unassigned address space, skip it.
+ */
+ if (!pRam)
+ break;
+ size_t cb = pRam->GCPhys - GCPhys;
+ if (cb >= cbWrite)
+ break;
+ cbWrite -= cb;
+ pvBuf = (const char *)pvBuf + cb;
+ GCPhys += cb;
+ }
+
+ /* Advance range if necessary. */
+ while (pRam && GCPhys > pRam->GCPhysLast)
+ pRam = pRam->CTX_SUFF(pNext);
+ } /* Ram range walk */
+
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/*********************************************************************************************************************************
+* Mapping Guest Physical Memory *
+*********************************************************************************************************************************/
+
+/**
+ * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
+ *
+ * @returns see PGMR3PhysGCPhys2CCPtrExternal
+ * @param pVM The cross context VM structure.
+ * @param pGCPhys Pointer to the guest physical address.
+ * @param ppv Where to store the mapping address.
+ * @param pLock Where to store the lock.
+ */
+static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ /*
+ * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
+ * an access handler after it succeeds.
+ */
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
+ if (RT_SUCCESS(rc))
+ {
+ PPGMPAGEMAPTLBE pTlbe;
+ int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
+ AssertFatalRC(rc2);
+ PPGMPAGE pPage = pTlbe->pPage;
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
+ {
+ PGMPhysReleasePageMappingLock(pVM, pLock);
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+ }
+ else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ || pgmPoolIsDirtyPage(pVM, *pGCPhys)
+#endif
+ )
+ {
+ /* We *must* flush any corresponding pgm pool page here, otherwise we'll
+ * not be informed about writes and keep bogus gst->shw mappings around.
+ */
+ pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
+ Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
+ /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
+ * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
+ }
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * Requests the mapping of a guest page into ring-3, external threads.
+ *
+ * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
+ * release it.
+ *
+ * This API will assume your intention is to write to the page, and will
+ * therefore replace shared and zero pages. If you do not intend to modify the
+ * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
+ * backing or if the page has any active access handlers. The caller
+ * must fall back on using PGMR3PhysWriteExternal.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The guest physical address of the page that should be mapped.
+ * @param ppv Where to store the address corresponding to GCPhys.
+ * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
+ *
+ * @remark Avoid calling this API from within critical sections (other than the
+ * PGM one) because of the deadlock risk when we have to delegate the
+ * task to an EMT.
+ * @thread Any.
+ */
+VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ AssertPtr(ppv);
+ AssertPtr(pLock);
+
+ Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Query the Physical TLB entry for the page (may fail).
+ */
+ PPGMPAGEMAPTLBE pTlbe;
+ rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
+ if (RT_SUCCESS(rc))
+ {
+ PPGMPAGE pPage = pTlbe->pPage;
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+ else
+ {
+ /*
+ * If the page is shared, the zero page, or being write monitored
+ * it must be converted to a page that's writable if possible.
+ * We can only deal with write monitored pages here, the rest have
+ * to be on an EMT.
+ */
+ if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
+ || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ || pgmPoolIsDirtyPage(pVM, GCPhys)
+#endif
+ )
+ {
+ if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
+ && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ && !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
+#endif
+ )
+ pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
+ else
+ {
+ PGM_UNLOCK(pVM);
+
+ return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
+ pVM, &GCPhys, ppv, pLock);
+ }
+ }
+
+ /*
+ * Now, just perform the locking and calculate the return address.
+ */
+ PPGMPAGEMAP pMap = pTlbe->pMap;
+ if (pMap)
+ pMap->cRefs++;
+
+ unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
+ if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+ {
+ if (cLocks == 0)
+ pVM->pgm.s.cWriteLockedPages++;
+ PGM_PAGE_INC_WRITE_LOCKS(pPage);
+ }
+ else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
+ {
+ PGM_PAGE_INC_WRITE_LOCKS(pPage);
+ AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
+ if (pMap)
+ pMap->cRefs++; /* Extra ref to prevent it from going away. */
+ }
+
+ *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
+ pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
+ pLock->pvMap = pMap;
+ }
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
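+
+/*
+ * Usage sketch (illustration only): writing a byte thru a mapped page and
+ * falling back on PGMR3PhysWriteExternal for MMIO/handler-guarded pages, as
+ * the docs above prescribe. PGMACCESSORIGIN_DEVICE is an assumed origin.
+ */
+#if 0
+static int exampleWriteByteExternal(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
+{
+ void *pv;
+ PGMPAGEMAPLOCK Lock;
+ int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
+ if (RT_SUCCESS(rc))
+ {
+ *(uint8_t *)pv = bValue;
+ PGMPhysReleasePageMappingLock(pVM, &Lock);
+ }
+ else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
+ rc = PGMR3PhysWriteExternal(pVM, GCPhys, &bValue, sizeof(bValue), PGMACCESSORIGIN_DEVICE);
+ return rc;
+}
+#endif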
+
+
+/**
+ * Requests the mapping of a guest page into ring-3, external threads.
+ *
+ * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
+ * release it.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
+ * backing or if the page has an active ALL access handler. The caller
+ * must fall back on using PGMPhysRead.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The guest physical address of the page that should be mapped.
+ * @param ppv Where to store the address corresponding to GCPhys.
+ * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
+ *
+ * @remark Avoid calling this API from within critical sections (other than
+ * the PGM one) because of the deadlock risk.
+ * @thread Any.
+ */
+VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
+{
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Query the Physical TLB entry for the page (may fail).
+ */
+ PPGMPAGEMAPTLBE pTlbe;
+ rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
+ if (RT_SUCCESS(rc))
+ {
+ PPGMPAGE pPage = pTlbe->pPage;
+#if 1
+ /* MMIO pages don't have any readable backing. */
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+#else
+ if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+#endif
+ else
+ {
+ /*
+ * Now, just perform the locking and calculate the return address.
+ */
+ PPGMPAGEMAP pMap = pTlbe->pMap;
+ if (pMap)
+ pMap->cRefs++;
+
+ unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
+ if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+ {
+ if (cLocks == 0)
+ pVM->pgm.s.cReadLockedPages++;
+ PGM_PAGE_INC_READ_LOCKS(pPage);
+ }
+ else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
+ {
+ PGM_PAGE_INC_READ_LOCKS(pPage);
+ AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
+ if (pMap)
+ pMap->cRefs++; /* Extra ref to prevent it from going away. */
+ }
+
+ *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
+ pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
+ pLock->pvMap = pMap;
+ }
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * Requests the mapping of multiple guest pages into ring-3, external threads.
+ *
+ * When you're done with the pages, call PGMPhysBulkReleasePageMappingLocks()
+ * ASAP to release them.
+ *
+ * This API will assume your intention is to write to the pages, and will
+ * therefore replace shared and zero pages. If you do not intend to modify the
+ * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
+ * backing or if any of the pages has any active access
+ * handlers. The caller must fall back on using PGMR3PhysWriteExternal.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
+ * an invalid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param cPages Number of pages to lock.
+ * @param paGCPhysPages The guest physical address of the pages that
+ * should be mapped (@a cPages entries).
+ * @param papvPages Where to store the ring-3 mapping addresses
+ * corresponding to @a paGCPhysPages.
+ * @param paLocks Where to store the locking information that
+ * pfnPhysBulkReleasePageMappingLock needs (@a cPages
+ * in length).
+ *
+ * @remark Avoid calling this API from within critical sections (other than the
+ * PGM one) because of the deadlock risk when we have to delegate the
+ * task to an EMT.
+ * @thread Any.
+ */
+VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
+ void **papvPages, PPGMPAGEMAPLOCK paLocks)
+{
+ Assert(cPages > 0);
+ AssertPtr(papvPages);
+ AssertPtr(paLocks);
+
+ Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Lock the pages one by one.
+ * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
+ */
+ int32_t cNextYield = 128;
+ uint32_t iPage;
+ for (iPage = 0; iPage < cPages; iPage++)
+ {
+ if (--cNextYield > 0)
+ { /* likely */ }
+ else
+ {
+ PGM_UNLOCK(pVM);
+ ASMNopPause();
+ PGM_LOCK_VOID(pVM);
+ cNextYield = 128;
+ }
+
+ /*
+ * Query the Physical TLB entry for the page (may fail).
+ */
+ PPGMPAGEMAPTLBE pTlbe;
+ rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
+ if (RT_SUCCESS(rc))
+ { }
+ else
+ break;
+ PPGMPAGE pPage = pTlbe->pPage;
+
+ /*
+ * No MMIO or active access handlers.
+ */
+ if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
+ && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+ { }
+ else
+ {
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+ break;
+ }
+
+ /*
+ * The page must be in the allocated state and not be a dirty pool page.
+ * We can handle converting a write monitored page to an allocated one, but
+ * anything more complicated must be delegated to an EMT.
+ */
+ bool fDelegateToEmt = false;
+ if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
+#else
+ fDelegateToEmt = false;
+#endif
+ else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
+ {
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
+ pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
+ else
+ fDelegateToEmt = true;
+#endif
+ }
+ else
+ fDelegateToEmt = true;
+ if (!fDelegateToEmt)
+ { }
+ else
+ {
+ /* We could do this delegation in bulk, but considered too much work vs gain. */
+ PGM_UNLOCK(pVM);
+ rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
+ pVM, &paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
+ PGM_LOCK_VOID(pVM);
+ if (RT_FAILURE(rc))
+ break;
+ cNextYield = 128;
+ }
+
+ /*
+ * Now, just perform the locking and address calculation.
+ */
+ PPGMPAGEMAP pMap = pTlbe->pMap;
+ if (pMap)
+ pMap->cRefs++;
+
+ unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
+ if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+ {
+ if (cLocks == 0)
+ pVM->pgm.s.cWriteLockedPages++;
+ PGM_PAGE_INC_WRITE_LOCKS(pPage);
+ }
+ else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
+ {
+ PGM_PAGE_INC_WRITE_LOCKS(pPage);
+ AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
+ if (pMap)
+ pMap->cRefs++; /* Extra ref to prevent it from going away. */
+ }
+
+ papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
+ paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
+ paLocks[iPage].pvMap = pMap;
+ }
+
+ PGM_UNLOCK(pVM);
+
+ /*
+ * On failure we must unlock any pages we managed to get already.
+ */
+ if (RT_FAILURE(rc) && iPage > 0)
+ PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
+
+ return rc;
+}
+
+
+/**
+ * Requests the mapping of multiple guest pages into ring-3, for reading only,
+ * external threads.
+ *
+ * When you're done with the pages, call PGMPhysBulkReleasePageMappingLocks()
+ * ASAP to release them.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
+ * backing or if any of the pages has an active ALL access handler.
+ * The caller must fall back on using PGMR3PhysReadExternal.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
+ * an invalid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param cPages Number of pages to lock.
+ * @param paGCPhysPages The guest physical address of the pages that
+ * should be mapped (@a cPages entries).
+ * @param papvPages Where to store the ring-3 mapping addresses
+ * corresponding to @a paGCPhysPages.
+ * @param paLocks Where to store the lock information that
+ * pfnPhysBulkReleasePageMappingLock needs (@a cPages
+ * in length).
+ *
+ * @remark Avoid calling this API from within critical sections (other than
+ * the PGM one) because of the deadlock risk.
+ * @thread Any.
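+ *
+ * @par Example
+ *  A minimal read-only sketch; GCPhysPage and pvDst are hypothetical:
+ *  @code
+ *      void const     *apvPages[1];
+ *      PGMPAGEMAPLOCK  aLocks[1];
+ *      int rc = PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(pVM, 1, &GCPhysPage, apvPages, aLocks);
+ *      if (RT_SUCCESS(rc))
+ *      {
+ *          memcpy(pvDst, apvPages[0], GUEST_PAGE_SIZE);
+ *          PGMPhysBulkReleasePageMappingLocks(pVM, 1, aLocks);
+ *      }
+ *  @endcode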
+ */
+VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
+ void const **papvPages, PPGMPAGEMAPLOCK paLocks)
+{
+ Assert(cPages > 0);
+ AssertPtr(papvPages);
+ AssertPtr(paLocks);
+
+ Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Lock the pages one by one.
+ * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
+ */
+ int32_t cNextYield = 256;
+ uint32_t iPage;
+ for (iPage = 0; iPage < cPages; iPage++)
+ {
+ if (--cNextYield > 0)
+ { /* likely */ }
+ else
+ {
+ PGM_UNLOCK(pVM);
+ ASMNopPause();
+ PGM_LOCK_VOID(pVM);
+ cNextYield = 256;
+ }
+
+ /*
+ * Query the Physical TLB entry for the page (may fail).
+ */
+ PPGMPAGEMAPTLBE pTlbe;
+ rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
+ if (RT_SUCCESS(rc))
+ { }
+ else
+ break;
+ PPGMPAGE pPage = pTlbe->pPage;
+
+ /*
+ * No MMIO or active all access handlers, everything else can be accessed.
+ */
+ if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
+ && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+ { }
+ else
+ {
+ rc = VERR_PGM_PHYS_PAGE_RESERVED;
+ break;
+ }
+
+ /*
+ * Now, just perform the locking and address calculation.
+ */
+ PPGMPAGEMAP pMap = pTlbe->pMap;
+ if (pMap)
+ pMap->cRefs++;
+
+ unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
+ if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+ {
+ if (cLocks == 0)
+ pVM->pgm.s.cReadLockedPages++;
+ PGM_PAGE_INC_READ_LOCKS(pPage);
+ }
+ else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
+ {
+ PGM_PAGE_INC_READ_LOCKS(pPage);
+ AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
+ if (pMap)
+ pMap->cRefs++; /* Extra ref to prevent it from going away. */
+ }
+
+ papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
+ paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
+ paLocks[iPage].pvMap = pMap;
+ }
+
+ PGM_UNLOCK(pVM);
+
+ /*
+ * On failure we must unlock any pages we managed to get already.
+ */
+ if (RT_FAILURE(rc) && iPage > 0)
+ PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
+
+ return rc;
+}
+
+
+/**
+ * Converts a GC physical address to a HC ring-3 pointer, with some
+ * additional checks.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
+ * access handler of some kind.
+ * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
+ * accesses or is odd in any way.
+ * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address to convert. Since this is only
+ * used for filling the REM TLB, the A20 mask must be
+ * applied before calling this API.
+ * @param fWritable Whether write access is required.
+ * @param ppv Where to store the pointer corresponding to GCPhys on
+ * success.
+ */
+VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
+{
+ PGM_LOCK_VOID(pVM);
+ PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
+
+ PPGMRAMRANGE pRam;
+ PPGMPAGE pPage;
+ int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
+ if (RT_SUCCESS(rc))
+ {
+ if (PGM_PAGE_IS_BALLOONED(pPage))
+ rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
+ else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
+ rc = VINF_SUCCESS;
+ else
+ {
+ if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
+ rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
+ else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+ {
+ /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
+ * in -norawr0 mode. */
+ if (fWritable)
+ rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
+ }
+ else
+ {
+ /* Temporarily disabled physical handler(s), since the recompiler
+ doesn't get notified when it's reset we'll have to pretend it's
+ operating normally. */
+ if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
+ rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
+ else
+ rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ int rc2;
+
+ /* Make sure what we return is writable. */
+ if (fWritable)
+ switch (PGM_PAGE_GET_STATE(pPage))
+ {
+ case PGM_PAGE_STATE_ALLOCATED:
+ break;
+ case PGM_PAGE_STATE_BALLOONED:
+ AssertFailed();
+ break;
+ case PGM_PAGE_STATE_ZERO:
+ case PGM_PAGE_STATE_SHARED:
+ if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
+ break;
+ RT_FALL_THRU();
+ case PGM_PAGE_STATE_WRITE_MONITORED:
+ rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
+ AssertLogRelRCReturn(rc2, rc2);
+ break;
+ }
+
+ /* Get a ring-3 mapping of the address. */
+ PPGMPAGER3MAPTLBE pTlbe;
+ rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
+ AssertLogRelRCReturn(rc2, rc2);
+ *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
+ /** @todo mapping/locking hell; this isn't horribly efficient since
+ * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
+
+ Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
+ }
+ else
+ Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
+
+ /* else: handler catching all access, no pointer returned. */
+ }
+ else
+ rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+
+/*********************************************************************************************************************************
+* RAM Range Management *
+*********************************************************************************************************************************/
+
+#define MAKE_LEAF(a_pNode) \
+ do { \
+ (a_pNode)->pLeftR3 = NIL_RTR3PTR; \
+ (a_pNode)->pRightR3 = NIL_RTR3PTR; \
+ (a_pNode)->pLeftR0 = NIL_RTR0PTR; \
+ (a_pNode)->pRightR0 = NIL_RTR0PTR; \
+ } while (0)
+
+#define INSERT_LEFT(a_pParent, a_pNode) \
+ do { \
+ (a_pParent)->pLeftR3 = (a_pNode); \
+ (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
+ } while (0)
+#define INSERT_RIGHT(a_pParent, a_pNode) \
+ do { \
+ (a_pParent)->pRightR3 = (a_pNode); \
+ (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
+ } while (0)
+
+
+/**
+ * Recursive tree builder.
+ *
+ * @returns Pointer to the root of the (sub-)tree built, NULL when the
+ * range list is exhausted.
+ * @param ppRam Pointer to the iterator variable.
+ * @param iDepth The current depth. Inserts a leaf node if 0.
+ */
+static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
+{
+ PPGMRAMRANGE pRam;
+ if (iDepth <= 0)
+ {
+ /*
+ * Leaf node.
+ */
+ pRam = *ppRam;
+ if (pRam)
+ {
+ *ppRam = pRam->pNextR3;
+ MAKE_LEAF(pRam);
+ }
+ }
+ else
+ {
+ /*
+ * Intermediate node.
+ */
+ PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
+
+ pRam = *ppRam;
+ if (!pRam)
+ return pLeft;
+ *ppRam = pRam->pNextR3;
+ MAKE_LEAF(pRam);
+ INSERT_LEFT(pRam, pLeft);
+
+ PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
+ if (pRight)
+ INSERT_RIGHT(pRam, pRight);
+ }
+ return pRam;
+}
+
+
+/**
+ * Rebuilds the RAM range search trees.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
+{
+ /*
+ * Create the reasonably balanced tree in a sequential fashion.
+ * For simplicity (laziness) we use standard recursion here.
+ */
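+ /* Illustrative sketch (not executed): the first call below roots a single
+ leaf; each loop iteration then makes the current list element the new
+ root, hangs the previous tree on its left and a freshly built complete
+ subtree of the same depth on its right. So after the iDepth = 0 round
+ the tree holds up to 3 nodes, after iDepth = 1 up to 7, after
+ iDepth = 2 up to 15, and so on, consuming the list in ascending
+ GCPhys order. */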
+ int iDepth = 0;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ PPGMRAMRANGE pRoot = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
+ while (pRam)
+ {
+ PPGMRAMRANGE pLeft = pRoot;
+
+ pRoot = pRam;
+ pRam = pRam->pNextR3;
+ MAKE_LEAF(pRoot);
+ INSERT_LEFT(pRoot, pLeft);
+
+ PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
+ if (pRight)
+ INSERT_RIGHT(pRoot, pRight);
+ /** @todo else: rotate the tree. */
+
+ iDepth++;
+ }
+
+ pVM->pgm.s.pRamRangeTreeR3 = pRoot;
+ pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;
+
+#ifdef VBOX_STRICT
+ /*
+ * Verify that the above code works.
+ */
+ unsigned cRanges = 0;
+ for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ cRanges++;
+ Assert(cRanges > 0);
+
+ unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
+ if ((1U << cMaxDepth) < cRanges)
+ cMaxDepth++;
+
+ for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ unsigned cDepth = 0;
+ PPGMRAMRANGE pRam2 = pVM->pgm.s.pRamRangeTreeR3;
+ for (;;)
+ {
+ if (pRam == pRam2)
+ break;
+ Assert(pRam2);
+ if (pRam->GCPhys < pRam2->GCPhys)
+ pRam2 = pRam2->pLeftR3;
+ else
+ pRam2 = pRam2->pRightR3;
+ cDepth++; /* Count the descent, otherwise the depth assertion below is vacuous. */
+ }
+ AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
+ }
+#endif /* VBOX_STRICT */
+}
+
+#undef MAKE_LEAF
+#undef INSERT_LEFT
+#undef INSERT_RIGHT
+
+/**
+ * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
+ *
+ * Called when anything was relocated.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pgmR3PhysRelinkRamRanges(PVM pVM)
+{
+ PPGMRAMRANGE pCur;
+
+#ifdef VBOX_STRICT
+ for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ Assert((pCur->GCPhys & GUEST_PAGE_OFFSET_MASK) == 0);
+ Assert((pCur->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
+ Assert((pCur->cb & GUEST_PAGE_OFFSET_MASK) == 0);
+ Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
+ for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
+ Assert( pCur2 == pCur
+ || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
+ }
+#endif
+
+ pCur = pVM->pgm.s.pRamRangesXR3;
+ if (pCur)
+ {
+ pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;
+
+ for (; pCur->pNextR3; pCur = pCur->pNextR3)
+ pCur->pNextR0 = pCur->pNextR3->pSelfR0;
+
+ Assert(pCur->pNextR0 == NIL_RTR0PTR);
+ }
+ else
+ {
+ Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
+ }
+ ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
+
+ pgmR3PhysRebuildRamRangeSearchTrees(pVM);
+}
+
+
+/**
+ * Links a new RAM range into the list.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pNew Pointer to the new list entry.
+ * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
+ */
+static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
+{
+ AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
+
+ PGM_LOCK_VOID(pVM);
+
+ PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
+ pNew->pNextR3 = pRam;
+ pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
+
+ if (pPrev)
+ {
+ pPrev->pNextR3 = pNew;
+ pPrev->pNextR0 = pNew->pSelfR0;
+ }
+ else
+ {
+ pVM->pgm.s.pRamRangesXR3 = pNew;
+ pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
+ }
+ ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
+
+ pgmR3PhysRebuildRamRangeSearchTrees(pVM);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Unlink an existing RAM range from the list.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pRam Pointer to the list entry to unlink.
+ * @param pPrev Pointer to the previous list entry. NULL if @a pRam is
+ * the list head.
+ */
+static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
+{
+ Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);
+
+ PGM_LOCK_VOID(pVM);
+
+ PPGMRAMRANGE pNext = pRam->pNextR3;
+ if (pPrev)
+ {
+ pPrev->pNextR3 = pNext;
+ pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
+ }
+ else
+ {
+ Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
+ pVM->pgm.s.pRamRangesXR3 = pNext;
+ pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
+ }
+ ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
+
+ pgmR3PhysRebuildRamRangeSearchTrees(pVM);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Unlink an existing RAM range from the list.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pRam Pointer to the list entry to unlink.
+ */
+static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
+{
+ PGM_LOCK_VOID(pVM);
+
+ /* find prev. */
+ PPGMRAMRANGE pPrev = NULL;
+ PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3;
+ while (pCur != pRam)
+ {
+ pPrev = pCur;
+ pCur = pCur->pNextR3;
+ }
+ AssertFatal(pCur);
+
+ pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Gets the number of ram ranges.
+ *
+ * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+
+ PGM_LOCK_VOID(pVM);
+ uint32_t cRamRanges = 0;
+ for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
+ cRamRanges++;
+ PGM_UNLOCK(pVM);
+ return cRamRanges;
+}
+
+
+/**
+ * Get information about a range.
+ *
+ * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
+ * @param pVM The cross context VM structure.
+ * @param iRange The ordinal of the range.
+ * @param pGCPhysStart Where to return the start of the range. Optional.
+ * @param pGCPhysLast Where to return the address of the last byte in the
+ * range. Optional.
+ * @param ppszDesc Where to return the range description. Optional.
+ * @param pfIsMmio Where to indicate that this is a pure MMIO range.
+ * Optional.
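+ *
+ * @par Example
+ *  A minimal enumeration sketch (illustrative only):
+ *  @code
+ *      uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
+ *      for (uint32_t i = 0; i < cRanges; i++)
+ *      {
+ *          RTGCPHYS GCPhysStart, GCPhysLast;
+ *          const char *pszDesc;
+ *          bool fIsMmio;
+ *          if (RT_SUCCESS(PGMR3PhysGetRange(pVM, i, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio)))
+ *              LogRel(("%RGp-%RGp %s%s\n", GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
+ *      }
+ *  @endcode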
+ */
+VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
+ const char **ppszDesc, bool *pfIsMmio)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PGM_LOCK_VOID(pVM);
+ uint32_t iCurRange = 0;
+ for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
+ if (iCurRange == iRange)
+ {
+ if (pGCPhysStart)
+ *pGCPhysStart = pCur->GCPhys;
+ if (pGCPhysLast)
+ *pGCPhysLast = pCur->GCPhysLast;
+ if (ppszDesc)
+ *ppszDesc = pCur->pszDesc;
+ if (pfIsMmio)
+ *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);
+
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+ }
+ PGM_UNLOCK(pVM);
+ return VERR_OUT_OF_RANGE;
+}
+
+
+/*********************************************************************************************************************************
+* RAM *
+*********************************************************************************************************************************/
+
+/**
+ * Frees the specified RAM page and replaces it with the ZERO page.
+ *
+ * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pReq Pointer to the request. This is NULL when doing a
+ * bulk free in NEM memory mode.
+ * @param pcPendingPages Where the number of pages waiting to be freed are
+ * kept. This will normally be incremented. This is
+ * NULL when doing a bulk free in NEM memory mode.
+ * @param pPage Pointer to the page structure.
+ * @param GCPhys The guest physical address of the page, if applicable.
+ * @param enmNewType New page type for NEM notification, since several
+ * callers will change the type upon successful return.
+ *
+ * @remarks The caller must own the PGM lock.
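+ *
+ * @par Typical call pattern
+ *  A sketch of the prepare/free/perform/cleanup batching protocol this
+ *  function participates in (see pgmR3PhysRamZeroAll for a real instance):
+ *  @code
+ *      uint32_t cPendingPages = 0;
+ *      PGMMFREEPAGESREQ pReq;
+ *      rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ *      // ... for each page to free:
+ *      rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, PGMPAGETYPE_RAM);
+ *      // ... when done:
+ *      if (cPendingPages)
+ *          rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ *      GMMR3FreePagesCleanup(pReq);
+ *  @endcode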
+ */
+int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
+ PGMPAGETYPE enmNewType)
+{
+ /*
+ * Assert sanity.
+ */
+ PGM_LOCK_ASSERT_OWNER(pVM);
+ if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
+ && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
+ {
+ AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
+ return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
+ }
+
+ /** @todo What about ballooning of large pages??! */
+ Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
+ && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+
+ if ( PGM_PAGE_IS_ZERO(pPage)
+ || PGM_PAGE_IS_BALLOONED(pPage))
+ return VINF_SUCCESS;
+
+ const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
+ Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
+ if (RT_UNLIKELY(!PGM_IS_IN_NEM_MODE(pVM)
+ ? idPage == NIL_GMM_PAGEID
+ || idPage > GMM_PAGEID_LAST
+ || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID
+ : idPage != NIL_GMM_PAGEID))
+ {
+ AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
+ return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
+ }
+#ifdef VBOX_WITH_NATIVE_NEM
+ const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
+#endif
+
+ /* update page count stats. */
+ if (PGM_PAGE_IS_SHARED(pPage))
+ pVM->pgm.s.cSharedPages--;
+ else
+ pVM->pgm.s.cPrivatePages--;
+ pVM->pgm.s.cZeroPages++;
+
+ /* Deal with write monitored pages. */
+ if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
+ {
+ PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
+ pVM->pgm.s.cWrittenToPages++;
+ }
+
+ /*
+ * pPage = ZERO page.
+ */
+ PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+ PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
+ PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
+ PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
+ PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
+
+ /* Flush physical page map TLB entry. */
+ pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
+ IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID); /// @todo move to the perform step.
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /*
+ * Skip the rest if we're doing a bulk free in NEM memory mode.
+ */
+ if (!pReq)
+ return VINF_SUCCESS;
+ AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#endif
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Notify NEM. */
+ /** @todo Remove this one? */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
+ NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg, pVM->pgm.s.abZeroPg,
+ pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
+ PGM_PAGE_SET_NEM_STATE(pPage, u2State);
+ }
+#else
+ RT_NOREF(enmNewType);
+#endif
+
+ /*
+ * Make sure it's not in the handy page array.
+ */
+ for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+ {
+ if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
+ {
+ pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
+ pVM->pgm.s.aHandyPages[i].fZeroed = false;
+ pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
+ break;
+ }
+ if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
+ {
+ pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
+ break;
+ }
+ }
+
+ /*
+ * Push it onto the page array.
+ */
+ uint32_t iPage = *pcPendingPages;
+ Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
+ *pcPendingPages += 1;
+
+ pReq->aPages[iPage].idPage = idPage;
+
+ if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
+ return VINF_SUCCESS;
+
+ /*
+ * Flush the pages.
+ */
+ int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ *pcPendingPages = 0;
+ }
+ return rc;
+}
+
+
+/**
+ * Frees a range of pages, replacing them with ZERO pages of the specified type.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRam The RAM range in which the pages resides.
+ * @param GCPhys The address of the first page.
+ * @param GCPhysLast The address of the last page.
+ * @param pvMmio2 Pointer to the ring-3 mapping of any MMIO2 memory that
+ * will replace the pages we're freeing up.
+ */
+static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, void *pvMmio2)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /*
+ * In simplified memory mode we don't actually free the memory,
+ * we just unmap it and let NEM do any unlocking of it.
+ */
+ if (pVM->pgm.s.fNemMode)
+ {
+ Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
+ uint8_t u2State = 0; /* (We don't support UINT8_MAX here.) */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
+ int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
+ pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
+ pvMmio2, &u2State, NULL /*puNemRange*/);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /* Iterate the pages. */
+ PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
+ while (cPagesLeft-- > 0)
+ {
+ int rc = pgmPhysFreePage(pVM, NULL, NULL, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
+ AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
+
+ PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
+ PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
+
+ GCPhys += GUEST_PAGE_SIZE;
+ pPageDst++;
+ }
+ return VINF_SUCCESS;
+ }
+#else /* !VBOX_WITH_PGM_NEM_MODE */
+ RT_NOREF(pvMmio2);
+#endif /* !VBOX_WITH_PGM_NEM_MODE */
+
+ /*
+ * Regular mode.
+ */
+ /* Prepare. */
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ AssertLogRelRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Tell NEM up-front. */
+ uint8_t u2State = UINT8_MAX;
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
+ rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, NULL, pvMmio2,
+ &u2State, NULL /*puNemRange*/);
+ AssertLogRelRCReturnStmt(rc, GMMR3FreePagesCleanup(pReq), rc);
+ }
+#endif
+
+ /* Iterate the pages. */
+ PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
+ while (cPagesLeft-- > 0)
+ {
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
+ AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
+
+ PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (u2State != UINT8_MAX)
+ PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
+#endif
+
+ GCPhys += GUEST_PAGE_SIZE;
+ pPageDst++;
+ }
+
+ /* Finish pending and cleanup. */
+ if (cPendingPages)
+ {
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ GMMR3FreePagesCleanup(pReq);
+
+ return rc;
+}
+
+
+/**
+ * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
+ *
+ * In NEM mode, this will allocate the pages backing the RAM range and this may
+ * fail. NEM registration may also fail. (In regular HM mode it won't fail.)
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pNew The new RAM range.
+ * @param GCPhys The address of the RAM range.
+ * @param GCPhysLast The last address of the RAM range.
+ * @param R0PtrNew The ring-0 address of the new RAM range.
+ * @param fFlags PGM_RAM_RANGE_FLAGS_FLOATING or zero.
+ * @param pszDesc The description.
+ * @param pPrev The previous RAM range (for linking).
+ */
+static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
+ RTR0PTR R0PtrNew, uint32_t fFlags, const char *pszDesc, PPGMRAMRANGE pPrev)
+{
+ /*
+ * Initialize the range.
+ */
+ pNew->pSelfR0 = R0PtrNew;
+ pNew->GCPhys = GCPhys;
+ pNew->GCPhysLast = GCPhysLast;
+ pNew->cb = GCPhysLast - GCPhys + 1;
+ pNew->pszDesc = pszDesc;
+ pNew->fFlags = fFlags;
+ pNew->uNemRange = UINT32_MAX;
+ pNew->pvR3 = NULL;
+ pNew->paLSPages = NULL;
+
+ uint32_t const cPages = pNew->cb >> GUEST_PAGE_SHIFT;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!pVM->pgm.s.fNemMode)
+#endif
+ {
+ RTGCPHYS iPage = cPages;
+ while (iPage-- > 0)
+ PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
+
+ /* Update the page count stats. */
+ pVM->pgm.s.cZeroPages += cPages;
+ pVM->pgm.s.cAllPages += cPages;
+ }
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ else
+ {
+ int rc = SUPR3PageAlloc(RT_ALIGN_Z(pNew->cb, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT,
+ pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pNew->pvR3);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ RTGCPHYS iPage = cPages;
+ while (iPage-- > 0)
+ PGM_PAGE_INIT(&pNew->aPages[iPage], UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
+ PGMPAGETYPE_RAM, PGM_PAGE_STATE_ALLOCATED);
+
+ /* Update the page count stats. */
+ pVM->pgm.s.cPrivatePages += cPages;
+ pVM->pgm.s.cAllPages += cPages;
+ }
+#endif
+
+ /*
+ * Link it.
+ */
+ pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /*
+ * Notify NEM now that it has been linked.
+ */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = UINT8_MAX;
+ int rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, pNew->cb, pNew->pvR3, &u2State, &pNew->uNemRange);
+ if (RT_SUCCESS(rc))
+ {
+ if (u2State != UINT8_MAX)
+ pgmPhysSetNemStateForPages(&pNew->aPages[0], cPages, u2State);
+ }
+ else
+ pgmR3PhysUnlinkRamRange2(pVM, pNew, pPrev);
+ return rc;
+ }
+#endif
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * PGMR3PhysRegisterRam worker that registers a high chunk.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The address of the RAM.
+ * @param cRamPages The number of RAM pages to register.
+ * @param iChunk The chunk number.
+ * @param pszDesc The RAM range description.
+ * @param ppPrev Previous RAM range pointer. In/Out.
+ */
+static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, uint32_t iChunk,
+ const char *pszDesc, PPGMRAMRANGE *ppPrev)
+{
+ const char *pszDescChunk = iChunk == 0
+ ? pszDesc
+ : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
+ AssertReturn(pszDescChunk, VERR_NO_MEMORY);
+
+ /*
+ * Allocate memory for the new chunk.
+ */
+ size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cRamPages]), HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
+ AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
+ RTR0PTR R0PtrChunk = NIL_RTR0PTR;
+ void *pvChunk = NULL;
+ int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
+ if (RT_SUCCESS(rc))
+ {
+ Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM));
+ memset(pvChunk, 0, cChunkPages << HOST_PAGE_SHIFT);
+
+ PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
+
+ /*
+ * Ok, init and link the range.
+ */
+ rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << GUEST_PAGE_SHIFT) - 1,
+ R0PtrChunk, PGM_RAM_RANGE_FLAGS_FLOATING, pszDescChunk, *ppPrev);
+ if (RT_SUCCESS(rc))
+ *ppPrev = pNew;
+
+ if (RT_FAILURE(rc))
+ SUPR3PageFreeEx(pvChunk, cChunkPages);
+ }
+
+ RTMemTmpFree(paChunkPages);
+ return rc;
+}
+
+
+/**
+ * Sets up a RAM range.
+ *
+ * This will check for conflicting registrations, make a resource
+ * reservation for the memory (with GMM), and setup the per-page
+ * tracking structures (PGMPAGE).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The physical address of the RAM.
+ * @param cb The size of the RAM.
+ * @param pszDesc The description - not copied, so don't free or change it.
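+ *
+ * @par Example
+ *  Illustrative only; real callers live in the VM construction code:
+ *  @code
+ *      rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 640 * _1K, "Conventional RAM");
+ *  @endcode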
+ */
+VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
+{
+ /*
+ * Validate input.
+ */
+ Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
+ AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
+ AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
+ AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+ RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
+ AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ PGM_LOCK_VOID(pVM);
+
+ /*
+ * Find range location and check for conflicts.
+ */
+ PPGMRAMRANGE pPrev = NULL;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam && GCPhysLast >= pRam->GCPhys)
+ {
+ AssertLogRelMsgReturnStmt( GCPhysLast < pRam->GCPhys
+ || GCPhys > pRam->GCPhysLast,
+ ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
+ GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
+ PGM_UNLOCK(pVM), VERR_PGM_RAM_CONFLICT);
+
+ /* next */
+ pPrev = pRam;
+ pRam = pRam->pNextR3;
+ }
+
+ /*
+ * Register it with GMM (the API bitches).
+ */
+ const RTGCPHYS cPages = cb >> GUEST_PAGE_SHIFT;
+ int rc = MMR3IncreaseBaseReservation(pVM, cPages);
+ if (RT_FAILURE(rc))
+ {
+ PGM_UNLOCK(pVM);
+ return rc;
+ }
+
+ if ( GCPhys >= _4G
+ && cPages > 256)
+ {
+ /*
+ * The PGMRAMRANGE structures for the high memory can get very big.
+ * There used to be some limitations on SUPR3PageAllocEx allocation
+ * sizes, so traditionally we limited this to 16MB chunks. These days
+ * we do ~64 MB chunks each covering 16GB of guest RAM, making sure
+ * each range is a multiple of 1GB to enable eager hosts to use 1GB
+ * pages in NEM mode.
+ *
+ * See also pgmR3PhysMmio2CalcChunkCount.
+ */
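+ /* Worked example (illustrative): a 48 GiB range above 4G is 12582912
+ guest pages; with cPagesPerChunk = _4M (4194304 pages, i.e. 16 GiB of
+ guest space and a ~64 MiB PGMRAMRANGE per chunk) this splits into
+ exactly 3 chunks. */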
+ uint32_t const cPagesPerChunk = _4M;
+ Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */
+
+ RTGCPHYS cPagesLeft = cPages;
+ RTGCPHYS GCPhysChunk = GCPhys;
+ uint32_t iChunk = 0;
+ while (cPagesLeft > 0)
+ {
+ uint32_t cPagesInChunk = cPagesLeft;
+ if (cPagesInChunk > cPagesPerChunk)
+ cPagesInChunk = cPagesPerChunk;
+
+ rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, iChunk, pszDesc, &pPrev);
+ AssertRCReturn(rc, rc);
+
+ /* advance */
+ GCPhysChunk += (RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT;
+ cPagesLeft -= cPagesInChunk;
+ iChunk++;
+ }
+ }
+ else
+ {
+ /*
+ * Allocate, initialize and link the new RAM range.
+ */
+ const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
+ PPGMRAMRANGE pNew = NULL;
+ RTR0PTR pNewR0 = NIL_RTR0PTR;
+ rc = SUPR3PageAllocEx(RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT, 0 /*fFlags*/,
+ (void **)&pNew, &pNewR0, NULL /*paPages*/);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);
+
+ rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, pNewR0, 0 /*fFlags*/, pszDesc, pPrev);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);
+ }
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
+ *
+ * We do this late in the init process so that all the ROM and MMIO ranges have
+ * been registered already and we don't go wasting memory on them.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PhysRamPreAllocate(PVM pVM)
+{
+ Assert(pVM->pgm.s.fRamPreAlloc);
+ Log(("pgmR3PhysRamPreAllocate: enter\n"));
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#endif
+
+ /*
+ * Walk the RAM ranges and allocate all RAM pages, halt at
+ * the first allocation error.
+ */
+ uint64_t cPages = 0;
+ uint64_t NanoTS = RTTimeNanoTS();
+ PGM_LOCK_VOID(pVM);
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ PPGMPAGE pPage = &pRam->aPages[0];
+ RTGCPHYS GCPhys = pRam->GCPhys;
+ uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
+ while (cLeft-- > 0)
+ {
+ if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
+ {
+ switch (PGM_PAGE_GET_STATE(pPage))
+ {
+ case PGM_PAGE_STATE_ZERO:
+ {
+ int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
+ PGM_UNLOCK(pVM);
+ return rc;
+ }
+ cPages++;
+ break;
+ }
+
+ case PGM_PAGE_STATE_BALLOONED:
+ case PGM_PAGE_STATE_ALLOCATED:
+ case PGM_PAGE_STATE_WRITE_MONITORED:
+ case PGM_PAGE_STATE_SHARED:
+ /* nothing to do here. */
+ break;
+ }
+ }
+
+ /* next */
+ pPage++;
+ GCPhys += GUEST_PAGE_SIZE;
+ }
+ }
+ PGM_UNLOCK(pVM);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+
+ LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
+ Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks shared page checksums.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
+{
+#ifdef VBOX_STRICT
+ PGM_LOCK_VOID(pVM);
+
+ if (pVM->pgm.s.cSharedPages > 0)
+ {
+ /*
+ * Walk the ram ranges.
+ */
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
+ AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb,
+ ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
+
+ while (iPage-- > 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ if (PGM_PAGE_IS_SHARED(pPage))
+ {
+ uint32_t u32Checksum = pPage->s.u2Unused0/* | ((uint32_t)pPage->s.u2Unused1 << 8)*/;
+ if (!u32Checksum)
+ {
+ RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ void const *pvPage;
+ int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t u32Checksum2 = RTCrc32(pvPage, GUEST_PAGE_SIZE);
+# if 0
+ AssertMsg((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
+# else
+ if ((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum)
+ LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
+ else
+ AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
+# endif
+ }
+ else
+ AssertRC(rc);
+ }
+ }
+
+ } /* for each page */
+
+ } /* for each ram range */
+ }
+
+ PGM_UNLOCK(pVM);
+#endif /* VBOX_STRICT */
+ NOREF(pVM);
+}
+
+
+/**
+ * Resets the physical memory state.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PhysRamReset(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ /* Reset the memory balloon. */
+ int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
+ AssertRC(rc);
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ /* Clear all registered shared modules. */
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ rc = GMMR3ResetSharedModules(pVM);
+ AssertRC(rc);
+#endif
+ /* Reset counters. */
+ pVM->pgm.s.cReusedSharedPages = 0;
+ pVM->pgm.s.cBalloonedPages = 0;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets (zeros) the RAM after all devices and components have been reset.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PhysRamZeroAll(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ /*
+ * We batch up pages that should be freed instead of calling GMM for
+ * each and every one of them.
+ */
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Walk the ram ranges.
+ */
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
+ AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
+
+ if ( !pVM->pgm.s.fRamPreAlloc
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ && !pVM->pgm.s.fNemMode
+#endif
+ && pVM->pgm.s.fZeroRamPagesOnReset)
+ {
+ /* Replace all RAM pages by ZERO pages. */
+ while (iPage-- > 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_RAM:
+ /* Do not replace pages that are part of a 2 MB contiguous
+ range with zero pages, but zero them instead. */
+ if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
+ || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+ {
+ void *pvPage;
+ rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
+ AssertLogRelRCReturn(rc, rc);
+ RT_BZERO(pvPage, GUEST_PAGE_SIZE);
+ }
+ else if (PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ /* Turn into a zero page; the balloon status is lost when the VM reboots. */
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+ }
+ else if (!PGM_PAGE_IS_ZERO(pPage))
+ {
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
+ pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ break;
+
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
+ pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
+ pRam, true /*fDoAccounting*/, false /*fFlushIemTlbs*/);
+ break;
+
+ case PGMPAGETYPE_MMIO2:
+ case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
+ case PGMPAGETYPE_ROM:
+ case PGMPAGETYPE_MMIO:
+ break;
+ default:
+ AssertFailed();
+ }
+ } /* for each page */
+ }
+ else
+ {
+ /* Zero the memory. */
+ while (iPage-- > 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_RAM:
+ switch (PGM_PAGE_GET_STATE(pPage))
+ {
+ case PGM_PAGE_STATE_ZERO:
+ break;
+
+ case PGM_PAGE_STATE_BALLOONED:
+ /* Turn into a zero page; the balloon status is lost when the VM reboots. */
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+ break;
+
+ case PGM_PAGE_STATE_SHARED:
+ case PGM_PAGE_STATE_WRITE_MONITORED:
+ rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
+ AssertLogRelRCReturn(rc, rc);
+ RT_FALL_THRU();
+
+ case PGM_PAGE_STATE_ALLOCATED:
+ if (pVM->pgm.s.fZeroRamPagesOnReset)
+ {
+ void *pvPage;
+ rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
+ AssertLogRelRCReturn(rc, rc);
+ RT_BZERO(pvPage, GUEST_PAGE_SIZE);
+ }
+ break;
+ }
+ break;
+
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
+ pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
+ pRam, true /*fDoAccounting*/, false /*fFlushIemTlbs*/);
+ break;
+
+ case PGMPAGETYPE_MMIO2:
+ case PGMPAGETYPE_ROM_SHADOW:
+ case PGMPAGETYPE_ROM:
+ case PGMPAGETYPE_MMIO:
+ break;
+ default:
+ AssertFailed();
+ }
+ } /* for each page */
+ }
+
+ }
+
+ /*
+ * Finish off any pages pending freeing.
+ */
+ if (cPendingPages)
+ {
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ GMMR3FreePagesCleanup(pReq);
+
+ /*
+ * Flush the IEM TLB, just to be sure it really is done.
+ */
+ IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees all RAM during VM termination.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PhysRamTerm(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ /* Reset the memory balloon. */
+ int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
+ AssertRC(rc);
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ /*
+ * Clear all registered shared modules.
+ */
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ rc = GMMR3ResetSharedModules(pVM);
+ AssertRC(rc);
+
+ /*
+ * Flush the handy pages updates to make sure no shared pages are hiding
+ * in there. (Not unlikely if the VM shuts down, apparently.)
+ */
+# ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!pVM->pgm.s.fNemMode)
+# endif
+ rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
+#endif
+
+ /*
+ * We batch up pages that should be freed instead of calling GMM for
+ * each and every one of them.
+ */
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Walk the ram ranges.
+ */
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
+ AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
+
+ while (iPage-- > 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_RAM:
+ /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
+ /** @todo change this to explicitly free private pages here. */
+ if (PGM_PAGE_IS_SHARED(pPage))
+ {
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
+ pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ break;
+
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
+ case PGMPAGETYPE_MMIO2:
+ case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
+ case PGMPAGETYPE_ROM:
+ case PGMPAGETYPE_MMIO:
+ break;
+ default:
+ AssertFailed();
+ }
+ } /* for each page */
+ }
+
+ /*
+ * Finish off any pages pending freeing.
+ */
+ if (cPendingPages)
+ {
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ GMMR3FreePagesCleanup(pReq);
+ return VINF_SUCCESS;
+}
+
+
+
+/*********************************************************************************************************************************
+* MMIO *
+*********************************************************************************************************************************/
+
+/**
+ * This is the interface IOM is using to register an MMIO region.
+ *
+ * It will check for conflicts and ensure that a RAM range structure
+ * is present before calling the PGMHandlerPhysicalRegister API to
+ * register the callbacks.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The start of the MMIO region.
+ * @param cb The size of the MMIO region.
+ * @param hType The physical access handler type registration.
+ * @param uUser The user argument.
+ * @param pszDesc The description of the MMIO region.
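+ *
+ * @par Example
+ *  A sketch of the pairing with PGMR3PhysMMIODeregister below; hType is
+ *  assumed to be a PGMPHYSHANDLERKIND_MMIO handler type registered
+ *  elsewhere and GCPhysMmio is hypothetical:
+ *  @code
+ *      rc = PGMR3PhysMMIORegister(pVM, GCPhysMmio, _4K, hType, uUser, "My device MMIO");
+ *      ...
+ *      rc = PGMR3PhysMMIODeregister(pVM, GCPhysMmio, _4K);
+ *  @endcode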
+ */
+VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
+ uint64_t uUser, const char *pszDesc)
+{
+ /*
+ * Assert on some assumptions.
+ */
+ VM_ASSERT_EMT(pVM);
+ AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
+#ifdef VBOX_STRICT
+ PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
+ Assert(pType);
+ Assert(pType->enmKind == PGMPHYSHANDLERKIND_MMIO);
+#endif
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Make sure there's a RAM range structure for the region.
+ */
+ RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
+ bool fRamExists = false;
+ PPGMRAMRANGE pRamPrev = NULL;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam && GCPhysLast >= pRam->GCPhys)
+ {
+ if ( GCPhysLast >= pRam->GCPhys
+ && GCPhys <= pRam->GCPhysLast)
+ {
+ /* Simplification: all within the same range. */
+ AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
+ && GCPhysLast <= pRam->GCPhysLast,
+ ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
+ GCPhys, GCPhysLast, pszDesc,
+ pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
+ PGM_UNLOCK(pVM),
+ VERR_PGM_RAM_CONFLICT);
+
+ /* Check that it's all RAM or MMIO pages. */
+ PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cLeft = cb >> GUEST_PAGE_SHIFT;
+ while (cLeft-- > 0)
+ {
+ AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
+ ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
+ GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
+ PGM_UNLOCK(pVM),
+ VERR_PGM_RAM_CONFLICT);
+ pPage++;
+ }
+
+ /* Looks good. */
+ fRamExists = true;
+ break;
+ }
+
+ /* next */
+ pRamPrev = pRam;
+ pRam = pRam->pNextR3;
+ }
+ PPGMRAMRANGE pNew;
+ if (fRamExists)
+ {
+ pNew = NULL;
+
+ /*
+ * Make all the pages in the range MMIO/ZERO pages, freeing any
+ * RAM pages currently mapped here. This might not be 100% correct
+ * for PCI memory, but we're doing the same thing for MMIO2 pages.
+ */
+ rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, NULL);
+ AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
+
+ /* Force a PGM pool flush as guest ram references have been changed. */
+ /** @todo not entirely SMP safe; assuming for now the guest takes
+ * care of this internally (not touch mapped mmio while changing the
+ * mapping). */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ }
+ else
+ {
+ /*
+ * No RAM range, insert an ad hoc one.
+ *
+ * Note that we don't have to tell REM about this range because
+ * PGMHandlerPhysicalRegisterEx will do that for us.
+ */
+ Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
+
+ /* Alloc. */
+ const uint32_t cPages = cb >> GUEST_PAGE_SHIFT;
+ const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
+ const size_t cRangePages = RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ RTR0PTR pNewR0 = NIL_RTR0PTR;
+ rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pNew, &pNewR0, NULL /*paPages*/);
+ AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), PGM_UNLOCK(pVM), rc);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Notify NEM. */
+ uint8_t u2State = 0; /* (must have valid state as there can't be anything to preserve) */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, cPages << GUEST_PAGE_SHIFT, 0 /*fFlags*/, NULL, NULL,
+ &u2State, &pNew->uNemRange);
+ AssertLogRelRCReturnStmt(rc, SUPR3PageFreeEx(pNew, cRangePages), rc);
+ }
+#endif
+
+ /* Initialize the range. */
+ pNew->pSelfR0 = pNewR0;
+ pNew->GCPhys = GCPhys;
+ pNew->GCPhysLast = GCPhysLast;
+ pNew->cb = cb;
+ pNew->pszDesc = pszDesc;
+ pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
+ pNew->pvR3 = NULL;
+ pNew->paLSPages = NULL;
+
+ uint32_t iPage = cPages;
+ while (iPage-- > 0)
+ {
+ PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
+#ifdef VBOX_WITH_NATIVE_NEM
+ PGM_PAGE_SET_NEM_STATE(&pNew->aPages[iPage], u2State);
+#endif
+ }
+ Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
+
+ /* update the page count stats. */
+ pVM->pgm.s.cPureMmioPages += cPages;
+ pVM->pgm.s.cAllPages += cPages;
+
+ /* link it */
+ pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
+ }
+
+ /*
+ * Register the access handler.
+ */
+ rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, uUser, pszDesc);
+ if (RT_SUCCESS(rc))
+ {
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Late NEM notification. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
+ rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
+ fRamExists ? (uint8_t *)pRam->pvR3 + (uintptr_t)(GCPhys - pRam->GCPhys) : NULL,
+ NULL, !fRamExists ? &pRam->uNemRange : NULL);
+ AssertLogRelRCReturn(rc, rc);
+ }
+#endif
+ }
+ /** @todo the phys handler failure handling isn't complete, esp. wrt NEM. */
+ else if (!fRamExists)
+ {
+ pVM->pgm.s.cPureMmioPages -= cb >> GUEST_PAGE_SHIFT;
+ pVM->pgm.s.cAllPages -= cb >> GUEST_PAGE_SHIFT;
+
+ /* remove the ad hoc range. */
+ pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
+ pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
+ SUPR3PageFreeEx(pNew, RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cb >> GUEST_PAGE_SHIFT]),
+ HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT);
+ }
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * This is the interface IOM is using to deregister an MMIO region.
+ *
+ * It will take care of calling PGMHandlerPhysicalDeregister and clean up
+ * any ad hoc PGMRAMRANGE left behind.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The start of the MMIO region.
+ * @param cb The size of the MMIO region.
+ */
+VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
+{
+ VM_ASSERT_EMT(pVM);
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * First deregister the handler, then check if we should remove the ram range.
+ */
+ rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
+ if (RT_SUCCESS(rc))
+ {
+ RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
+ PPGMRAMRANGE pRamPrev = NULL;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam && GCPhysLast >= pRam->GCPhys)
+ {
+ /** @todo We're being a bit too careful here. rewrite. */
+ if ( GCPhysLast == pRam->GCPhysLast
+ && GCPhys == pRam->GCPhys)
+ {
+ Assert(pRam->cb == cb);
+
+ /*
+ * See if all the pages are dead MMIO pages.
+ */
+ uint32_t const cGuestPages = cb >> GUEST_PAGE_SHIFT;
+ bool fAllMMIO = true;
+ uint32_t iPage = 0;
+ uint32_t cLeft = cGuestPages;
+ while (cLeft-- > 0)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+ /*|| not-out-of-action later */)
+ {
+ fAllMMIO = false;
+ AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage));
+ break;
+ }
+ Assert( PGM_PAGE_IS_ZERO(pPage)
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
+ pPage++;
+ }
+ if (fAllMMIO)
+ {
+ /*
+ * Ad-hoc range, unlink and free it.
+ */
+ Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
+ GCPhys, GCPhysLast, pRam->pszDesc));
+ /** @todo check the ad-hoc flags? */
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM before we unlink the range. */
+ {
+ rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/,
+ NULL, NULL, NULL, &pRam->uNemRange);
+ AssertLogRelRCReturn(rc, rc);
+ }
+#endif
+
+ pVM->pgm.s.cAllPages -= cGuestPages;
+ pVM->pgm.s.cPureMmioPages -= cGuestPages;
+
+ pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
+ const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
+ const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
+ pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
+ SUPR3PageFreeEx(pRam, RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT);
+ break;
+ }
+ }
+
+ /*
+ * Range match? It will all be within one range (see PGMAllHandler.cpp).
+ */
+ if ( GCPhysLast >= pRam->GCPhys
+ && GCPhys <= pRam->GCPhysLast)
+ {
+ Assert(GCPhys >= pRam->GCPhys);
+ Assert(GCPhysLast <= pRam->GCPhysLast);
+
+ /*
+ * Turn the pages back into RAM pages.
+ */
+ uint32_t iPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
+ uint32_t cLeft = cb >> GUEST_PAGE_SHIFT;
+ while (cLeft--)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
+ ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage));
+ if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
+ PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
+ iPage++;
+ }
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Notify NEM (failure will probably leave things in a non-working state). */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = UINT8_MAX;
+ rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
+ pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
+ NULL, &u2State, &pRam->uNemRange);
+ AssertLogRelRCReturn(rc, rc);
+ if (u2State != UINT8_MAX)
+ pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
+ cb >> GUEST_PAGE_SHIFT, u2State);
+ }
+#endif
+ break;
+ }
+
+ /* next */
+ pRamPrev = pRam;
+ pRam = pRam->pNextR3;
+ }
+ }
+
+ /* Force a PGM pool flush as guest ram references have been changed. */
+ /** @todo Not entirely SMP safe; assuming for now the guest takes care of
+ * this internally (not touch mapped mmio while changing the mapping). */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+
+ pgmPhysInvalidatePageMapTLB(pVM);
+ pgmPhysInvalidRamRangeTlbs(pVM);
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+
+/*********************************************************************************************************************************
+* MMIO2 *
+*********************************************************************************************************************************/
+
+/**
+ * Locate a MMIO2 range.
+ *
+ * @returns Pointer to the MMIO2 range.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param iSubDev The sub-device number.
+ * @param iRegion The region.
+ * @param hMmio2 Handle to look up. If NIL, use the @a iSubDev and
+ * @a iRegion.
+ */
+DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMmio2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev,
+ uint32_t iRegion, PGMMMIO2HANDLE hMmio2)
+{
+ if (hMmio2 != NIL_PGMMMIO2HANDLE)
+ {
+ if (hMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3) && hMmio2 != 0)
+ {
+ PPGMREGMMIO2RANGE pCur = pVM->pgm.s.apMmio2RangesR3[hMmio2 - 1];
+ if (pCur && pCur->pDevInsR3 == pDevIns)
+ {
+ Assert(pCur->idMmio2 == hMmio2);
+ AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
+ return pCur;
+ }
+ Assert(!pCur);
+ }
+ for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
+ if (pCur->idMmio2 == hMmio2)
+ {
+ AssertBreak(pCur->pDevInsR3 == pDevIns);
+ AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
+ return pCur;
+ }
+ }
+ else
+ {
+ /*
+ * Search the list. There shouldn't be many entries.
+ */
+ /** @todo Optimize this lookup! There may now be many entries and it'll
+ * become really slow when doing MMR3HyperMapMMIO2 and similar. */
+ for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
+ if ( pCur->pDevInsR3 == pDevIns
+ && pCur->iRegion == iRegion
+ && pCur->iSubDev == iSubDev)
+ return pCur;
+ }
+ return NULL;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
+ */
+static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+ int rc = VINF_SUCCESS;
+ for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+ {
+ Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
+ int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,
+ pCurMmio2->RamRange.GCPhysLast);
+ AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+ pCurMmio2->RamRange.pszDesc, rc2));
+ if (RT_SUCCESS(rc2))
+ pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
+ else if (RT_SUCCESS(rc))
+ rc = rc2;
+ if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ return rc;
+ }
+ AssertFailed();
+ return rc;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
+ */
+static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
+{
+ for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
+ {
+ if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
+ {
+ int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3);
+ AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
+ pCurMmio2->RamRange.pszDesc, rc2));
+ pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
+ }
+ if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ return VINF_SUCCESS;
+ }
+ AssertFailed();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Calculates the number of registration chunks required for a region.
+ *
+ * @returns Number of registration chunks needed.
+ * @param pVM The cross context VM structure.
+ * @param cb The size of the MMIO/MMIO2 range.
+ * @param pcPagesPerChunk Where to return the number of pages tracked by each
+ * chunk. Optional.
+ * @param pcbChunk Where to return the size of the per-chunk registration structure. Optional.
+ */
+static uint16_t pgmR3PhysMmio2CalcChunkCount(PVM pVM, RTGCPHYS cb, uint32_t *pcPagesPerChunk, uint32_t *pcbChunk)
+{
+ RT_NOREF_PV(pVM); /* without raw mode */
+
+ /*
+ * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
+ * needing a few extra bytes for the PGMREGMMIO2RANGE structure.
+ *
+ * Note! In addition, we've got a 24-bit page index within MMIO2 ranges, leaving
+ * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
+ */
+ uint32_t const cPagesPerChunk = _4M;
+ Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */
+ uint32_t const cbChunk = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]);
+ AssertRelease(cPagesPerChunk < _16M);
+
+ if (pcbChunk)
+ *pcbChunk = cbChunk;
+ if (pcPagesPerChunk)
+ *pcPagesPerChunk = cPagesPerChunk;
+
+ /* Calc the number of chunks we need. */
+ RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT;
+ uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk);
+ AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages);
+ return cChunks;
+}
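+
+/* Worked example (illustrative, assuming 4 KiB guest pages): with
+ * cPagesPerChunk = _4M (4194304 pages, i.e. 16 GiB of guest address space per
+ * chunk), a 256 MiB MMIO2 region has cGuestPages = 65536 and needs a single
+ * chunk, while a hypothetical 40 GiB region (10485760 pages) needs 3 chunks. */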
+
+
+/**
+ * Worker for PGMR3PhysMMIO2Register that allocates the PGMREGMMIO2RANGE
+ * structures and does basic initialization.
+ *
+ * Caller must set type specific members and initialize the PGMPAGE structures.
+ *
+ * This was previously also used by PGMR3PhysMmio2PreRegister, a function for
+ * pre-registering MMIO that was later (6.1) replaced by a new handle based IOM
+ * interface. The reference to caller and type above is purely historical.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param iSubDev The sub-device number (internal PCI config number).
+ * @param iRegion The region number. If the MMIO2 memory is a PCI
+ * I/O region this number has to be the number of that
+ * region. Otherwise it can be any number except
+ * UINT8_MAX.
+ * @param cb The size of the region. Must be page aligned.
+ * @param fFlags PGMPHYS_MMIO2_FLAGS_XXX.
+ * @param idMmio2 The MMIO2 ID for the first chunk.
+ * @param pszDesc The description.
+ * @param ppHeadRet Where to return the pointer to the first
+ * registration chunk.
+ *
+ * @thread EMT
+ */
+static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
+ uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
+{
+ /*
+ * Figure out how many chunks we need and of which size.
+ */
+ uint32_t cPagesPerChunk;
+ uint16_t cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, &cPagesPerChunk, NULL);
+ AssertReturn(cChunks, VERR_PGM_PHYS_MMIO_EX_IPE);
+
+ /*
+ * Allocate the chunks.
+ */
+ PPGMREGMMIO2RANGE *ppNext = ppHeadRet;
+ *ppNext = NULL;
+
+ int rc = VINF_SUCCESS;
+ uint32_t cPagesLeft = cb >> GUEST_PAGE_SHIFT;
+ for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++)
+ {
+ /*
+ * We currently do a single RAM range for the whole thing. This will
+ * probably have to change once someone needs really large MMIO regions,
+ * as we will be running into SUPR3PageAllocEx limitations and such.
+ */
+ const uint32_t cPagesTrackedByChunk = RT_MIN(cPagesLeft, cPagesPerChunk);
+ const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesTrackedByChunk]);
+ PPGMREGMMIO2RANGE pNew = NULL;
+
+ /*
+ * Allocate memory for the registration structure.
+ */
+ size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ size_t const cbChunk = (1 + cChunkPages + 1) << HOST_PAGE_SHIFT;
+ AssertLogRelBreakStmt(cbChunk == (uint32_t)cbChunk, rc = VERR_OUT_OF_RANGE);
+ RTR0PTR R0PtrChunk = NIL_RTR0PTR;
+ void *pvChunk = NULL;
+ rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, NULL /*paPages*/);
+ AssertLogRelMsgRCBreak(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages));
+
+ Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM));
+ RT_BZERO(pvChunk, cChunkPages << HOST_PAGE_SHIFT);
+
+ pNew = (PPGMREGMMIO2RANGE)pvChunk;
+ pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_FLOATING;
+ pNew->RamRange.pSelfR0 = R0PtrChunk + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
+
+ /*
+ * Initialize the registration structure (caller does specific bits).
+ */
+ pNew->pDevInsR3 = pDevIns;
+ //pNew->pvR3 = NULL;
+ //pNew->pNext = NULL;
+ if (iChunk == 0)
+ pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
+ if (iChunk + 1 == cChunks)
+ pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
+ if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+ pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
+ pNew->iSubDev = iSubDev;
+ pNew->iRegion = iRegion;
+ pNew->idSavedState = UINT8_MAX;
+ pNew->idMmio2 = idMmio2;
+ //pNew->pPhysHandlerR3 = NULL;
+ //pNew->paLSPages = NULL;
+ pNew->RamRange.GCPhys = NIL_RTGCPHYS;
+ pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
+ pNew->RamRange.pszDesc = pszDesc;
+ pNew->RamRange.cb = pNew->cbReal = (RTGCPHYS)cPagesTrackedByChunk << X86_PAGE_SHIFT;
+ pNew->RamRange.fFlags |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
+ pNew->RamRange.uNemRange = UINT32_MAX;
+ //pNew->RamRange.pvR3 = NULL;
+ //pNew->RamRange.paLSPages = NULL;
+
+ *ppNext = pNew;
+ ASMCompilerBarrier();
+ cPagesLeft -= cPagesTrackedByChunk;
+ ppNext = &pNew->pNextR3;
+
+ /*
+ * Pre-allocate a handler if we're tracking dirty pages, unless NEM takes care of this.
+ */
+ if ( (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ && (!VM_IS_NEM_ENABLED(pVM) || !NEMR3IsMmio2DirtyPageTrackingSupported(pVM))
+#endif
+ )
+ {
+ rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType, idMmio2, pszDesc, &pNew->pPhysHandlerR3);
+ AssertLogRelMsgRCBreak(rc, ("idMmio2=%zu\n", idMmio2));
+ }
+ }
+ Assert(cPagesLeft == 0);
+
+ if (RT_SUCCESS(rc))
+ {
+ Assert((*ppHeadRet)->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Free floating ranges.
+ */
+ while (*ppHeadRet)
+ {
+ PPGMREGMMIO2RANGE pFree = *ppHeadRet;
+ *ppHeadRet = pFree->pNextR3;
+
+ if (pFree->pPhysHandlerR3)
+ {
+ pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
+ pFree->pPhysHandlerR3 = NULL;
+ }
+
+ if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
+ {
+ const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE,
+ RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]);
+ size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ SUPR3PageFreeEx(pFree, cChunkPages);
+ }
+ }
+
+ return rc;
+}
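+
+/* Layout note (an assumption based on the code above): each chunk is a single
+ * SUPR3PageAllocEx allocation holding the PGMREGMMIO2RANGE header with the
+ * embedded RamRange and its trailing aPages array, which is why the error
+ * path can free a whole chunk with one SUPR3PageFreeEx call. */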
+
+
+/**
+ * Common worker for PGMR3PhysMmio2PreRegister & PGMR3PhysMMIO2Register that
+ * links a complete registration entry into the lists and lookup tables.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pNew The new MMIO / MMIO2 registration to link.
+ */
+static void pgmR3PhysMmio2Link(PVM pVM, PPGMREGMMIO2RANGE pNew)
+{
+ Assert(pNew->idMmio2 != UINT8_MAX);
+
+ /*
+ * Link it into the list (order doesn't matter, so insert it at the head).
+ *
+ * Note! The range we're linking may consist of multiple chunks, so we
+ * have to find the last one.
+ */
+ PPGMREGMMIO2RANGE pLast = pNew;
+ for (pLast = pNew; ; pLast = pLast->pNextR3)
+ {
+ if (pLast->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ Assert(pLast->pNextR3);
+ Assert(pLast->pNextR3->pDevInsR3 == pNew->pDevInsR3);
+ Assert(pLast->pNextR3->iSubDev == pNew->iSubDev);
+ Assert(pLast->pNextR3->iRegion == pNew->iRegion);
+ Assert(pLast->pNextR3->idMmio2 == pLast->idMmio2 + 1);
+ }
+
+ PGM_LOCK_VOID(pVM);
+
+ /* Link in the chain of ranges at the head of the list. */
+ pLast->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
+ pVM->pgm.s.pRegMmioRangesR3 = pNew;
+
+ /* Insert the MMIO2 range/page IDs. */
+ uint8_t idMmio2 = pNew->idMmio2;
+ for (;;)
+ {
+ Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
+ Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
+ pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
+ pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = pNew->RamRange.pSelfR0 - RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
+ if (pNew->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pNew = pNew->pNextR3;
+ idMmio2++;
+ }
+
+ pgmPhysInvalidatePageMapTLB(pVM);
+ PGM_UNLOCK(pVM);
+}
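+
+/* Illustrative note (not from the original source): the MMIO2 IDs are 1-based
+ * while the lookup arrays are 0-based, so a two-chunk registration with
+ * idMmio2 = 3 occupies apMmio2RangesR3[2] and apMmio2RangesR3[3], one slot
+ * per chunk. */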
+
+
+/**
+ * Allocate and register an MMIO2 region.
+ *
+ * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
+ * associated with a device. It is also non-shared memory with a permanent
+ * ring-3 mapping and page backing (presently).
+ *
+ * An MMIO2 range may overlap with base memory if a lot of RAM is configured for
+ * the VM, in which case we'll drop the base memory pages. Presently we make
+ * no attempt to preserve anything that happens to be present in the base
+ * memory that is replaced; this is of course incorrect, but it's too much
+ * effort.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
+ * memory.
+ * @retval VERR_ALREADY_EXISTS if the region already exists.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param iSubDev The sub-device number.
+ * @param iRegion The region number. If the MMIO2 memory is a PCI
+ * I/O region this number has to be the number of that
+ * region. Otherwise it can be any number save
+ * UINT8_MAX.
+ * @param cb The size of the region. Must be page aligned.
+ * @param fFlags Reserved for future use, must be zero.
+ * @param pszDesc The description.
+ * @param ppv Where to store the pointer to the ring-3 mapping of
+ * the memory.
+ * @param phRegion Where to return the MMIO2 region handle. Optional.
+ * @thread EMT
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
+ uint32_t fFlags, const char *pszDesc, void **ppv, PGMMMIO2HANDLE *phRegion)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(ppv, VERR_INVALID_POINTER);
+ *ppv = NULL;
+ if (phRegion)
+ {
+ AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
+ *phRegion = NIL_PGMMMIO2HANDLE;
+ }
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
+ AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
+ AssertReturn(pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE) == NULL, VERR_ALREADY_EXISTS);
+ AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(cb, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
+
+ const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
+ AssertLogRelReturn(((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
+ AssertLogRelReturn(cGuestPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
+ AssertLogRelReturn(cGuestPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE);
+
+ /*
+ * For the 2nd+ instance, mangle the description string so it's unique.
+ */
+ if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
+ {
+ pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
+ if (!pszDesc)
+ return VERR_NO_MEMORY;
+ }
+
+ /*
+ * Allocate an MMIO2 range ID (not freed on failure).
+ *
+ * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so
+ * the IDs go from 1 thru PGM_MMIO2_MAX_RANGES.
+ */
+ unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL);
+
+ PGM_LOCK_VOID(pVM);
+ AssertCompile(PGM_MMIO2_MAX_RANGES < 255);
+ uint8_t const idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
+ unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
+ if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
+ {
+ PGM_UNLOCK(pVM);
+ AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
+ }
+ pVM->pgm.s.cMmio2Regions = cNewMmio2Regions;
+ PGM_UNLOCK(pVM);
+
+ /*
+ * Try reserve and allocate the backing memory first as this is what is
+ * most likely to fail.
+ */
+ int rc = MMR3AdjustFixedReservation(pVM, cGuestPages, pszDesc);
+ if (RT_SUCCESS(rc))
+ {
+ const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
+ PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cHostPages * sizeof(SUPPAGE));
+ if (paPages)
+ {
+ void *pvPages = NULL;
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+ RTR0PTR pvPagesR0 = NIL_RTR0PTR;
+#endif
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (PGM_IS_IN_NEM_MODE(pVM))
+ rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pvPages);
+ else
+#endif
+ {
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+ rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, &pvPagesR0, paPages);
+#else
+ rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
+#endif
+ }
+ if (RT_SUCCESS(rc))
+ {
+ memset(pvPages, 0, cGuestPages * GUEST_PAGE_SIZE);
+
+ /*
+ * Create the registered MMIO range record for it.
+ */
+ PPGMREGMMIO2RANGE pNew;
+ rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew);
+ if (RT_SUCCESS(rc))
+ {
+ if (phRegion)
+ *phRegion = idMmio2; /* The ID of the first chunk. */
+
+ uint32_t iSrcPage = 0;
+ uint8_t *pbCurPages = (uint8_t *)pvPages;
+ for (PPGMREGMMIO2RANGE pCur = pNew; pCur; pCur = pCur->pNextR3)
+ {
+ pCur->pvR3 = pbCurPages;
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+ pCur->pvR0 = pvPagesR0 + (iSrcPage << GUEST_PAGE_SHIFT);
+#endif
+ pCur->RamRange.pvR3 = pbCurPages;
+
+ uint32_t iDstPage = pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (PGM_IS_IN_NEM_MODE(pVM))
+ while (iDstPage-- > 0)
+ PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage], UINT64_C(0x0000ffffffff0000),
+ PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
+ PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
+ else
+#endif
+ {
+ AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT);
+ while (iDstPage-- > 0)
+ PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage], paPages[iDstPage + iSrcPage].Phys,
+ PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
+ PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
+ }
+
+ /* advance. */
+ iSrcPage += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
+ pbCurPages += pCur->RamRange.cb;
+ }
+
+ RTMemTmpFree(paPages);
+
+ /*
+ * Update the page count stats, link the registration and we're done.
+ */
+ pVM->pgm.s.cAllPages += cGuestPages;
+ pVM->pgm.s.cPrivatePages += cGuestPages;
+
+ pgmR3PhysMmio2Link(pVM, pNew);
+
+ *ppv = pvPages;
+ return VINF_SUCCESS;
+ }
+
+ SUPR3PageFreeEx(pvPages, cHostPages);
+ }
+ }
+ else
+ rc = VERR_NO_TMP_MEMORY;
+ RTMemTmpFree(paPages); /* RTMemTmpFree(NULL) is harmless. */
+ MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc);
+ }
+ if (pDevIns->iInstance > 0)
+ MMR3HeapFree((void *)pszDesc);
+ return rc;
+}
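+
+/* Usage sketch (hypothetical; a device would normally reach this via the
+ * PDMDevHlpMmio2Create device helper rather than calling PGM directly):
+ *
+ * @code
+ * uint32_t const iSubDev = 0;
+ * uint32_t const iRegion = 2;
+ * void *pvMmio2 = NULL;
+ * PGMMMIO2HANDLE hMmio2 = NIL_PGMMMIO2HANDLE;
+ * int rc = PGMR3PhysMmio2Register(pVM, pDevIns, iSubDev, iRegion, _16M,
+ * PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
+ * "VRAM", &pvMmio2, &hMmio2);
+ * @endcode
+ */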
+
+
+/**
+ * Deregisters and frees an MMIO2 region.
+ *
+ * Any physical access handlers registered for the region must be deregistered
+ * before calling this function.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param hMmio2 The MMIO2 handle to deregister, or NIL if all
+ * regions for the given device are to be deregistered.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2Deregister(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
+{
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+
+ /*
+ * The loop here scanning all registrations will make sure that multi-chunk ranges
+ * get properly deregistered, though its original purpose was the wildcard iRegion.
+ */
+ PGM_LOCK_VOID(pVM);
+ int rc = VINF_SUCCESS;
+ unsigned cFound = 0;
+ PPGMREGMMIO2RANGE pPrev = NULL;
+ PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3;
+ while (pCur)
+ {
+ uint32_t const fFlags = pCur->fFlags;
+ if ( pCur->pDevInsR3 == pDevIns
+ && ( hMmio2 == NIL_PGMMMIO2HANDLE
+ || pCur->idMmio2 == hMmio2))
+ {
+ cFound++;
+
+ /*
+ * Unmap it if it's mapped.
+ */
+ if (fFlags & PGMREGMMIO2RANGE_F_MAPPED)
+ {
+ int rc2 = PGMR3PhysMmio2Unmap(pVM, pCur->pDevInsR3, pCur->idMmio2, pCur->RamRange.GCPhys);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ /*
+ * Unlink it
+ */
+ PPGMREGMMIO2RANGE pNext = pCur->pNextR3;
+ if (pPrev)
+ pPrev->pNextR3 = pNext;
+ else
+ pVM->pgm.s.pRegMmioRangesR3 = pNext;
+ pCur->pNextR3 = NULL;
+
+ uint8_t idMmio2 = pCur->idMmio2;
+ Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3));
+ if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3))
+ {
+ Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
+ pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
+ pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
+ }
+
+ /*
+ * Free the memory.
+ */
+ uint32_t const cGuestPages = pCur->cbReal >> GUEST_PAGE_SHIFT;
+ uint32_t const cHostPages = RT_ALIGN_T(pCur->cbReal, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!pVM->pgm.s.fNemMode)
+#endif
+ {
+ int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+
+ rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pCur->RamRange.pszDesc);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ else
+ {
+ int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+#endif
+
+ if (pCur->pPhysHandlerR3)
+ {
+ pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3);
+ pCur->pPhysHandlerR3 = NULL;
+ }
+
+ /* we're leaking hyper memory here if done at runtime. */
+#ifdef VBOX_STRICT
+ VMSTATE const enmState = VMR3GetState(pVM);
+ AssertMsg( enmState == VMSTATE_POWERING_OFF
+ || enmState == VMSTATE_POWERING_OFF_LS
+ || enmState == VMSTATE_OFF
+ || enmState == VMSTATE_OFF_LS
+ || enmState == VMSTATE_DESTROYING
+ || enmState == VMSTATE_TERMINATED
+ || enmState == VMSTATE_CREATING
+ , ("%s\n", VMR3GetStateName(enmState)));
+#endif
+
+ if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
+ {
+ const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cGuestPages]);
+ size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ SUPR3PageFreeEx(pCur, cChunkPages);
+ }
+ /*else
+ {
+ rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call.
+ AssertRCReturn(rc, rc);
+ } */
+
+ /* update page count stats */
+ pVM->pgm.s.cAllPages -= cGuestPages;
+ pVM->pgm.s.cPrivatePages -= cGuestPages;
+
+ /* next */
+ pCur = pNext;
+ if (hMmio2 != NIL_PGMMMIO2HANDLE)
+ {
+ if (fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ hMmio2++;
+ Assert(pCur->idMmio2 == hMmio2);
+ Assert(pCur->pDevInsR3 == pDevIns);
+ Assert(!(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK));
+ }
+ }
+ else
+ {
+ pPrev = pCur;
+ pCur = pCur->pNextR3;
+ }
+ }
+ pgmPhysInvalidatePageMapTLB(pVM);
+ PGM_UNLOCK(pVM);
+ return !cFound && hMmio2 != NIL_PGMMMIO2HANDLE ? VERR_NOT_FOUND : rc;
+}
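+
+/* Teardown sketch (hypothetical): passing NIL_PGMMMIO2HANDLE deregisters every
+ * MMIO2 region owned by the device, e.g. during device destruction:
+ *
+ * @code
+ * int rc = PGMR3PhysMmio2Deregister(pVM, pDevIns, NIL_PGMMMIO2HANDLE);
+ * @endcode
+ */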
+
+
+/**
+ * Maps an MMIO2 region.
+ *
+ * This is typically done when a guest / the BIOS / state loading changes the
+ * PCI config. The replacing of base memory has the same restrictions as during
+ * registration, of course.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param hMmio2 The handle of the region to map.
+ * @param GCPhys The guest-physical address to be remapped.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
+{
+ /*
+ * Validate input.
+ *
+ * Note! It's safe to walk the MMIO/MMIO2 list since registrations only
+ * happen during VM construction.
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
+ AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
+ AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
+
+ PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturn(pFirstMmio, VERR_NOT_FOUND);
+ Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
+
+ PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
+ RTGCPHYS cbRange = 0;
+ for (;;)
+ {
+ AssertReturn(!(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_WRONG_ORDER);
+ Assert(pLastMmio->RamRange.GCPhys == NIL_RTGCPHYS);
+ Assert(pLastMmio->RamRange.GCPhysLast == NIL_RTGCPHYS);
+ Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
+ Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
+ Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
+ cbRange += pLastMmio->RamRange.cb;
+ if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pLastMmio = pLastMmio->pNextR3;
+ }
+
+ RTGCPHYS GCPhysLast = GCPhys + cbRange - 1;
+ AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
+
+ /*
+ * Find our location in the RAM range list, checking for restrictions
+ * we don't bother implementing yet (partial overlaps, spanning multiple
+ * RAM ranges).
+ */
+ PGM_LOCK_VOID(pVM);
+
+ AssertReturnStmt(!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
+
+ bool fRamExists = false;
+ PPGMRAMRANGE pRamPrev = NULL;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam && GCPhysLast >= pRam->GCPhys)
+ {
+ if ( GCPhys <= pRam->GCPhysLast
+ && GCPhysLast >= pRam->GCPhys)
+ {
+ /* Completely within? */
+ AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
+ && GCPhysLast <= pRam->GCPhysLast,
+ ("%RGp-%RGp (MMIOEx/%s) falls partly outside %RGp-%RGp (%s)\n",
+ GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc,
+ pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
+ PGM_UNLOCK(pVM),
+ VERR_PGM_RAM_CONFLICT);
+
+ /* Check that all the pages are RAM pages. */
+ PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = cbRange >> GUEST_PAGE_SHIFT;
+ while (cPagesLeft-- > 0)
+ {
+ AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
+ ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
+ GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc),
+ PGM_UNLOCK(pVM),
+ VERR_PGM_RAM_CONFLICT);
+ pPage++;
+ }
+
+ /* There can only be one MMIO/MMIO2 chunk matching here! */
+ AssertLogRelMsgReturnStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
+ ("%RGp-%RGp (MMIOEx/%s, flags %#X) consists of multiple chunks whereas the RAM somehow doesn't!\n",
+ GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
+ PGM_UNLOCK(pVM),
+ VERR_PGM_PHYS_MMIO_EX_IPE);
+
+ fRamExists = true;
+ break;
+ }
+
+ /* next */
+ pRamPrev = pRam;
+ pRam = pRam->pNextR3;
+ }
+ Log(("PGMR3PhysMmio2Map: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pFirstMmio->RamRange.pszDesc));
+
+ /*
+ * Make the changes.
+ */
+ RTGCPHYS GCPhysCur = GCPhys;
+ for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
+ {
+ pCurMmio->RamRange.GCPhys = GCPhysCur;
+ pCurMmio->RamRange.GCPhysLast = GCPhysCur + pCurMmio->RamRange.cb - 1;
+ if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ {
+ Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
+ break;
+ }
+ GCPhysCur += pCurMmio->RamRange.cb;
+ }
+
+ if (fRamExists)
+ {
+ /*
+ * Make all the pages in the range MMIO/ZERO pages, freeing any
+ * RAM pages currently mapped here. This might not be 100% correct
+ * for PCI memory, but we're doing the same thing for MMIO2 pages.
+ *
+ * We replace these MMIO/ZERO pages with real pages in the MMIO2 case.
+ */
+ Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
+ Assert(pFirstMmio->pvR3 == pFirstMmio->RamRange.pvR3);
+ Assert(pFirstMmio->RamRange.pvR3 != NULL);
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /* We cannot mix MMIO2 into a RAM range in simplified memory mode because pRam->pvR3 can't point
+ at both the RAM and the MMIO2 backing, so reads & writes would never hit the actual MMIO2 memory. */
+ AssertLogRelMsgReturn(!pVM->pgm.s.fNemMode, ("%s at %RGp-%RGp\n", pFirstMmio->RamRange.pszDesc, GCPhys, GCPhysLast),
+ VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#endif
+
+ int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, pFirstMmio->RamRange.pvR3);
+ AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
+
+ /* Replace the pages, freeing all present RAM pages. */
+ PPGMPAGE pPageSrc = &pFirstMmio->RamRange.aPages[0];
+ PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ while (cPagesLeft-- > 0)
+ {
+ Assert(PGM_PAGE_IS_MMIO(pPageDst));
+
+ RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
+ uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
+ PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
+ PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
+ PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
+ PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
+ PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
+ PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
+ PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
+ /* NEM state is set by pgmR3PhysFreePageRange. */
+
+ pVM->pgm.s.cZeroPages--;
+ GCPhys += GUEST_PAGE_SIZE;
+ pPageSrc++;
+ pPageDst++;
+ }
+
+ /* Flush physical page map TLB. */
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ /* Force a PGM pool flush as guest ram references have been changed. */
+ /** @todo not entirely SMP safe; assuming for now the guest takes care of
+ * this internally (not touch mapped mmio while changing the mapping). */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ }
+ else
+ {
+ /*
+ * No RAM range, insert the ones prepared during registration.
+ */
+ for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
+ {
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Tell NEM and get the new NEM state for the pages. */
+ uint8_t u2NemState = 0;
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, pCurMmio->RamRange.GCPhys,
+ pCurMmio->RamRange.GCPhysLast - pCurMmio->RamRange.GCPhys + 1,
+ NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
+ | (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
+ ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0),
+ NULL /*pvRam*/, pCurMmio->RamRange.pvR3,
+ &u2NemState, &pCurMmio->RamRange.uNemRange);
+ AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
+ }
+#endif
+
+ /* Clear the tracking data of pages we're going to reactivate. */
+ PPGMPAGE pPageSrc = &pCurMmio->RamRange.aPages[0];
+ uint32_t cPagesLeft = pCurMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ while (cPagesLeft-- > 0)
+ {
+ PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
+ PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
+#ifdef VBOX_WITH_NATIVE_NEM
+ PGM_PAGE_SET_NEM_STATE(pPageSrc, u2NemState);
+#endif
+ pPageSrc++;
+ }
+
+ /* link in the ram range */
+ pgmR3PhysLinkRamRange(pVM, &pCurMmio->RamRange, pRamPrev);
+
+ if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ {
+ Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
+ break;
+ }
+ pRamPrev = &pCurMmio->RamRange;
+ }
+ }
+
+ /*
+ * If the range has dirty page monitoring enabled, enable that.
+ *
+ * We ignore failures here for now because if we fail, the whole mapping
+ * will have to be reversed and we'll end up with nothing at all on the
+ * screen and a grumpy guest, whereas if we just go on, we'll only have
+ * visual distortions to gripe about. There will be something in the
+ * release log.
+ */
+ if ( pFirstMmio->pPhysHandlerR3
+ && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio);
+
+ /*
+ * We're good, set the flags and invalidate the mapping TLB.
+ */
+ for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
+ {
+ pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED;
+ if (fRamExists)
+ pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING;
+ else
+ pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING;
+ if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ }
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /*
+ * Late NEM notification.
+ */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ int rc;
+ uint32_t fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2;
+ if (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES)
+ fNemFlags |= NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES;
+ if (fRamExists)
+ rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
+ pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL, pFirstMmio->pvR3,
+ NULL /*puNemRange*/);
+ else
+ {
+ rc = VINF_SUCCESS;
+ for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
+ {
+ rc = NEMR3NotifyPhysMmioExMapLate(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags,
+ NULL, pCurMmio->RamRange.pvR3, &pCurMmio->RamRange.uNemRange);
+ if ((pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) || RT_FAILURE(rc))
+ break;
+ }
+ }
+ AssertLogRelRCReturnStmt(rc, PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhys); PGM_UNLOCK(pVM), rc);
+ }
+#endif
+
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
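+
+/* Remapping sketch (hypothetical, GCPhysOld/GCPhysNew are illustrative): a
+ * device reacting to a PCI BAR change would unmap the region from the old
+ * address and map it at the new one:
+ *
+ * @code
+ * if (GCPhysOld != NIL_RTGCPHYS)
+ * rc = PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhysOld);
+ * if (RT_SUCCESS(rc) && GCPhysNew != NIL_RTGCPHYS)
+ * rc = PGMR3PhysMmio2Map(pVM, pDevIns, hMmio2, GCPhysNew);
+ * @endcode
+ */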
+
+
+/**
+ * Unmaps an MMIO2 region.
+ *
+ * This is typically done when a guest / the BIOS / state loading changes the
+ * PCI config. The replacing of base memory has the same restrictions as during
+ * registration, of course.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param hMmio2 The handle of the region to unmap.
+ * @param GCPhys The guest-physical address the region is currently
+ * mapped at, or NIL_RTGCPHYS to skip the address check.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
+{
+ /*
+ * Validate input
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
+ AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ }
+
+ PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturn(pFirstMmio, VERR_NOT_FOUND);
+ Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
+ RTGCPHYS cbRange = 0;
+ for (;;)
+ {
+ AssertReturnStmt(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
+ AssertReturnStmt(pLastMmio->RamRange.GCPhys == GCPhys + cbRange || GCPhys == NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
+ Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
+ Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
+ Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
+ cbRange += pLastMmio->RamRange.cb;
+ if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pLastMmio = pLastMmio->pNextR3;
+ }
+
+ Log(("PGMR3PhysMmio2Unmap: %RGp-%RGp %s\n",
+ pFirstMmio->RamRange.GCPhys, pLastMmio->RamRange.GCPhysLast, pFirstMmio->RamRange.pszDesc));
+
+ uint16_t const fOldFlags = pFirstMmio->fFlags;
+ AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
+
+ /*
+ * If monitoring dirty pages, we must deregister the handlers first.
+ */
+ if ( pFirstMmio->pPhysHandlerR3
+ && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);
+
+ /*
+ * Unmap it.
+ */
+ int rcRet = VINF_SUCCESS;
+#ifdef VBOX_WITH_NATIVE_NEM
+ uint32_t const fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
+ | (fOldFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
+ ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0);
+#endif
+ if (fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING)
+ {
+ /*
+ * We've replaced RAM, replace with zero pages.
+ *
+ * Note! This is where we might differ a little from a real system, because
+ * it's likely to just show the RAM pages as they were before the
+ * MMIO/MMIO2 region was mapped here.
+ */
+ /* Only one chunk allowed when overlapping! */
+ Assert(fOldFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK);
+
+ /* Restore the RAM pages we've replaced. */
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam->GCPhys > pFirstMmio->RamRange.GCPhysLast)
+ pRam = pRam->pNextR3;
+
+ PPGMPAGE pPageDst = &pRam->aPages[(pFirstMmio->RamRange.GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ pVM->pgm.s.cZeroPages += cPagesLeft; /** @todo not correct for NEM mode */
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. Note! we cannot be here in simple memory mode, see mapping function. */
+ {
+ uint8_t u2State = UINT8_MAX;
+ rc = NEMR3NotifyPhysMmioExUnmap(pVM, pFirstMmio->RamRange.GCPhys, pFirstMmio->RamRange.cb,
+ fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
+ pRam->pvR3
+ ? (uint8_t *)pRam->pvR3 + pFirstMmio->RamRange.GCPhys - pRam->GCPhys : NULL,
+ pFirstMmio->pvR3, &u2State, &pRam->uNemRange);
+ AssertRCStmt(rc, rcRet = rc);
+ if (u2State != UINT8_MAX)
+ pgmPhysSetNemStateForPages(pPageDst, cPagesLeft, u2State);
+ }
+#endif
+
+ while (cPagesLeft-- > 0)
+ {
+ PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
+ pPageDst++;
+ }
+
+ /* Flush physical page map TLB. */
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ /* Update range state. */
+ pFirstMmio->RamRange.GCPhys = NIL_RTGCPHYS;
+ pFirstMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
+ pFirstMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
+ }
+ else
+ {
+ /*
+ * Unlink the chunks related to the MMIO/MMIO2 region.
+ */
+ for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
+ {
+#ifdef VBOX_WITH_NATIVE_NEM
+ if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. */
+ {
+ uint8_t u2State = UINT8_MAX;
+ rc = NEMR3NotifyPhysMmioExUnmap(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags,
+ NULL, pCurMmio->pvR3, &u2State, &pCurMmio->RamRange.uNemRange);
+ AssertRCStmt(rc, rcRet = rc);
+ if (u2State != UINT8_MAX)
+ pgmPhysSetNemStateForPages(pCurMmio->RamRange.aPages, pCurMmio->RamRange.cb >> GUEST_PAGE_SHIFT, u2State);
+ }
+#endif
+ pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
+ pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
+ pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
+ pCurMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
+ if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ }
+ }
+
+ /* Force a PGM pool flush as guest ram references have been changed. */
+ /** @todo not entirely SMP safe; assuming for now the guest takes care
+ * of this internally (not touch mapped mmio while changing the
+ * mapping). */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+
+ pgmPhysInvalidatePageMapTLB(pVM);
+ pgmPhysInvalidRamRangeTlbs(pVM);
+
+ PGM_UNLOCK(pVM);
+ return rcRet;
+}
+
+
+/**
+ * Reduces the mapping size of an MMIO2 region.
+ *
+ * This is mainly for dealing with old saved states after changing the default
+ * size of a mapping region. See PGMDevHlpMMIOExReduce and
+ * PDMPCIDEV::pfnRegionLoadChangeHookR3.
+ *
+ * The region must not currently be mapped when making this call. The VM state
+ * must be loading (state restore) or creating (VM construction).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the region.
+ * @param hMmio2 The handle of the region to reduce.
+ * @param cbRegion The new mapping size.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2Reduce(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion)
+{
+ /*
+ * Validate input
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
+ AssertReturn(cbRegion >= X86_PAGE_SIZE, VERR_INVALID_PARAMETER);
+ AssertReturn(!(cbRegion & X86_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
+ VMSTATE enmVmState = VMR3GetState(pVM);
+ AssertLogRelMsgReturn( enmVmState == VMSTATE_CREATING
+ || enmVmState == VMSTATE_LOADING,
+ ("enmVmState=%d (%s)\n", enmVmState, VMR3GetStateName(enmVmState)),
+ VERR_VM_INVALID_VM_STATE);
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ if (pFirstMmio)
+ {
+ Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
+ if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED))
+ {
+ /*
+ * NOTE! Current implementation does not support multiple ranges.
+ * Implement when there is a real world need and thus a testcase.
+ */
+ AssertLogRelMsgStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
+ ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
+ rc = VERR_NOT_SUPPORTED);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Make the change.
+ */
+ Log(("PGMR3PhysMmio2Reduce: %s changes from %RGp bytes (%RGp) to %RGp bytes.\n",
+ pFirstMmio->RamRange.pszDesc, pFirstMmio->RamRange.cb, pFirstMmio->cbReal, cbRegion));
+
+ AssertLogRelMsgStmt(cbRegion <= pFirstMmio->cbReal,
+ ("%s: cbRegion=%#RGp cbReal=%#RGp\n", pFirstMmio->RamRange.pszDesc, cbRegion, pFirstMmio->cbReal),
+ rc = VERR_OUT_OF_RANGE);
+ if (RT_SUCCESS(rc))
+ {
+ pFirstMmio->RamRange.cb = cbRegion;
+ }
+ }
+ }
+ else
+ rc = VERR_WRONG_ORDER;
+ }
+ else
+ rc = VERR_NOT_FOUND;
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * Validates @a hMmio2, making sure it belongs to @a pDevIns.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device which allegedly owns @a hMmio2.
+ * @param hMmio2 The handle to validate.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2ValidateHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
+{
+ /*
+ * Validate input
+ */
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+
+ /*
+ * Just do this the simple way: look the handle up while holding the PGM lock.
+ */
+ PGM_LOCK_VOID(pVM);
+ PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ PGM_UNLOCK(pVM);
+ AssertReturn(pFirstMmio, VERR_INVALID_HANDLE);
+ AssertReturn(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_HANDLE);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the mapping address of an MMIO2 region.
+ *
+ * @returns Mapping address, NIL_RTGCPHYS if not mapped or invalid handle.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device owning the MMIO2 handle.
+ * @param hMmio2 The region handle.
+ */
+VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
+{
+ AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
+
+ PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturn(pFirstRegMmio, NIL_RTGCPHYS);
+
+ if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
+ return pFirstRegMmio->RamRange.GCPhys;
+ return NIL_RTGCPHYS;
+}
+
+
+/**
+ * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
+ *
+ * Called holding the PGM lock.
+ */
+static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+ void *pvBitmap, size_t cbBitmap)
+{
+ /*
+ * Continue validation.
+ */
+ PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+ AssertReturn( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+ == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK),
+ VERR_INVALID_FUNCTION);
+ AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+ RTGCPHYS cbTotal = 0;
+ uint16_t fTotalDirty = 0;
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+ {
+ cbTotal += pCur->RamRange.cb; /* Not using cbReal here, because NEM is not involved in the creation, only the mapping. */
+ fTotalDirty |= pCur->fFlags;
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pCur = pCur->pNextR3;
+ AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+ AssertReturn( (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+ == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
+ VERR_INTERNAL_ERROR_4);
+ }
+ size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8;
+
+ if (cbBitmap)
+ {
+ AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
+ AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
+ AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Do the work.
+ */
+ int rc = VINF_SUCCESS;
+ if (pvBitmap)
+ {
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pFirstRegMmio->pPhysHandlerR3 == NULL)
+ {
+/** @todo This does not integrate at all with --execute-all-in-iem, leaving the
+ * screen blank when using it together with --driverless. Fixing this won't be
+ * entirely easy as we take the PGM_PAGE_HNDL_PHYS_STATE_DISABLED page status to
+ * mean a dirty page. */
+ AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
+ uint8_t *pbBitmap = (uint8_t *)pvBitmap;
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+ {
+ size_t const cbBitmapChunk = pCur->RamRange.cb / GUEST_PAGE_SIZE / 8;
+ Assert((RTGCPHYS)cbBitmapChunk * GUEST_PAGE_SIZE * 8 == pCur->RamRange.cb);
+ int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb,
+ pCur->RamRange.uNemRange, pbBitmap, cbBitmapChunk);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pbBitmap += pCur->RamRange.cb / GUEST_PAGE_SIZE / 8;
+ }
+ }
+ else
+#endif
+ if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
+ {
+ if ( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ {
+ /*
+ * Reset each chunk, gathering dirty bits.
+ */
+ RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
+ uint32_t iPageNo = 0;
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+ {
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+ {
+ int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pCur->RamRange.GCPhys, pvBitmap, iPageNo);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+ }
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
+ }
+ }
+ else
+ {
+ /*
+ * If not mapped or tracking is disabled, we return the
+ * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages. We cannot
+ * get more accurate data than that after unmapping or disabling.
+ */
+ RT_BZERO(pvBitmap, cbBitmap);
+ uint32_t iPageNo = 0;
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+ {
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
+ {
+ ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pCur->RamRange.cb >> GUEST_PAGE_SHIFT));
+ pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+ }
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
+ }
+ }
+ }
+ /*
+ * No dirty chunks.
+ */
+ else
+ RT_BZERO(pvBitmap, cbBitmap);
+ }
+ /*
+ * No bitmap. Reset the region if tracking is currently enabled.
+ */
+ else if ( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ {
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pFirstRegMmio->pPhysHandlerR3 == NULL)
+ {
+ AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+ {
+ int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb,
+ pCur->RamRange.uNemRange, NULL, 0);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ }
+ }
+ else
+#endif
+ {
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
+ {
+ pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
+ int rc2 = PGMHandlerPhysicalReset(pVM, pCur->RamRange.GCPhys);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Queries the dirty page bitmap and resets the monitoring.
+ *
+ * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
+ * creating the range for this to work.
+ *
+ * @returns VBox status code.
+ * @retval VERR_INVALID_FUNCTION if not created using
+ * PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device owning the MMIO2 handle.
+ * @param hMmio2 The region handle.
+ * @param pvBitmap The output bitmap. Must be 8-byte aligned. Ignored
+ * when @a cbBitmap is zero.
+ * @param cbBitmap The size of the bitmap. Must be the size of the whole
+ * MMIO2 range, rounded up to the nearest 8 bytes.
+ * When zero only a reset is done.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+ void *pvBitmap, size_t cbBitmap)
+{
+ /*
+ * Do some basic validation before grabbing the PGM lock and continuing.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+ AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
+ int rc = PGM_LOCK(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ STAM_PROFILE_START(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
+ rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
+ STAM_PROFILE_STOP(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
+ PGM_UNLOCK(pVM);
+ }
+ return rc;
+}
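+
+/* Bitmap sizing sketch (illustrative; cbMmio2 is hypothetical and guest pages
+ * are assumed to be 4 KiB): one bit per page, rounded up to whole 8-byte
+ * words, mirroring the cbTotalBitmap computation in the locked worker above.
+ * A 32 MiB region covers 8192 pages and thus needs a 1 KiB bitmap:
+ *
+ * @code
+ * size_t const cbBitmap = RT_ALIGN_T(cbMmio2, GUEST_PAGE_SIZE * 64, RTGCPHYS)
+ * / GUEST_PAGE_SIZE / 8;
+ * uint64_t *pau64Bitmap = (uint64_t *)RTMemAllocZ(cbBitmap);
+ * if (pau64Bitmap)
+ * rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2,
+ * pau64Bitmap, cbBitmap);
+ * @endcode
+ */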
+
+
+/**
+ * Worker for PGMR3PhysMmio2ControlDirtyPageTracking.
+ *
+ * Called owning the PGM lock.
+ */
+static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+ /*
+ * Continue validation.
+ */
+ PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
+ AssertReturn( (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+ == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)
+ , VERR_INVALID_FUNCTION);
+ AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);
+
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /*
+ * This is a nop if NEM is responsible for doing the tracking; we simply
+ * leave the tracking on all the time there.
+ */
+ if (pFirstRegMmio->pPhysHandlerR3 == NULL)
+ {
+ AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
+ return VINF_SUCCESS;
+ }
+#endif
+
+ /*
+ * Anything need doing?
+ */
+ if (fEnabled != RT_BOOL(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
+ {
+ LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+ /*
+ * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
+ */
+ for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
+ {
+ if (fEnabled)
+ pCur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+ else
+ pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
+ if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
+ break;
+ pCur = pCur->pNextR3;
+ AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
+ AssertReturn( (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
+ == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
+ , VERR_INTERNAL_ERROR_4);
+ }
+
+ /*
+ * Enable/disable handlers if currently mapped.
+ *
+ * We ignore status codes here as we've already changed the flags and
+ * returning a failure status now would be confusing. Besides, the two
+ * functions will continue past failures. As argued in the mapping code,
+ * it's in the release log.
+ */
+ if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
+ {
+ if (fEnabled)
+ pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);
+ else
+ pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);
+ }
+ }
+ else
+ LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Controls the dirty page tracking for an MMIO2 range.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device owning the MMIO2 memory.
+ * @param hMmio2 The handle of the region.
+ * @param fEnabled The new tracking state.
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
+{
+ /*
+ * Do some basic validation before grabbing the PGM lock and continuing.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+ int rc = PGM_LOCK(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
+ PGM_UNLOCK(pVM);
+ }
+ return rc;
+}
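+
+/* Usage sketch (hypothetical; fClientConnected is illustrative): a display
+ * device could leave tracking off and enable it only while something consumes
+ * the dirty information, e.g. while a remote display client is connected:
+ *
+ * @code
+ * int rc = PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2,
+ * fClientConnected);
+ * @endcode
+ */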
+
+
+/**
+ * Changes the region number of an MMIO2 region.
+ *
+ * This is only for dealing with save state issues, nothing else.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device owning the MMIO2 memory.
+ * @param hMmio2 The handle of the region.
+ * @param iNewRegion The new region index.
+ *
+ * @thread EMT(0)
+ * @sa @bugref{9359}
+ */
+VMMR3_INT_DECL(int) PGMR3PhysMmio2ChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion)
+{
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE);
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
+ AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
+
+ AssertReturn(pVM->enmVMState == VMSTATE_LOADING, VERR_INVALID_STATE);
+
+ int rc = PGM_LOCK(pVM);
+ AssertRCReturn(rc, rc);
+
+ PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
+ AssertReturnStmt(pFirstRegMmio, PGM_UNLOCK(pVM), VERR_NOT_FOUND);
+ AssertReturnStmt(pgmR3PhysMmio2Find(pVM, pDevIns, pFirstRegMmio->iSubDev, iNewRegion, NIL_PGMMMIO2HANDLE) == NULL,
+ PGM_UNLOCK(pVM), VERR_RESOURCE_IN_USE);
+
+ /*
+ * Make the change.
+ */
+ pFirstRegMmio->iRegion = (uint8_t)iNewRegion;
+
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+}
+
+
+
+/*********************************************************************************************************************************
+* ROM *
+*********************************************************************************************************************************/
+
+/**
+ * Worker for PGMR3PhysRomRegister.
+ *
+ * This is here to simplify lock management, i.e. the caller does all the
+ * locking and we can simply return without needing to remember to unlock
+ * anything first.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the ROM.
+ * @param GCPhys First physical address in the range.
+ * Must be page aligned!
+ * @param cb The size of the range (in bytes).
+ * Must be page aligned!
+ * @param pvBinary Pointer to the binary data backing the ROM image.
+ * @param cbBinary The size of the binary data pvBinary points to.
+ * This must be less than or equal to @a cb.
+ * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
+ * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
+ * @param pszDesc Pointer to description string. This must not be freed.
+ */
+static int pgmR3PhysRomRegisterLocked(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
+ const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
+ AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
+ AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
+ RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
+ AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
+ AssertReturn(!(fFlags & ~PGMPHYS_ROM_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
+
+ const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
+#endif
+
+ /*
+ * Find the ROM location in the ROM list first.
+ */
+ PPGMROMRANGE pRomPrev = NULL;
+ PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
+ while (pRom && GCPhysLast >= pRom->GCPhys)
+ {
+ if ( GCPhys <= pRom->GCPhysLast
+ && GCPhysLast >= pRom->GCPhys)
+ AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
+ GCPhys, GCPhysLast, pszDesc,
+ pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
+ VERR_PGM_RAM_CONFLICT);
+ /* next */
+ pRomPrev = pRom;
+ pRom = pRom->pNextR3;
+ }
+
+ /*
+ * Find the RAM location and check for conflicts.
+ *
+ * Conflict detection is a bit different than for RAM registration since a
+ * ROM can be located within a RAM range. So, what we have to check for is
+ * other memory types (other than RAM that is) and that we don't span more
+ * than one RAM range (lazy).
+ */
+ bool fRamExists = false;
+ PPGMRAMRANGE pRamPrev = NULL;
+ PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ while (pRam && GCPhysLast >= pRam->GCPhys)
+ {
+ if ( GCPhys <= pRam->GCPhysLast
+ && GCPhysLast >= pRam->GCPhys)
+ {
+ /* completely within? */
+ AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
+ && GCPhysLast <= pRam->GCPhysLast,
+ ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
+ GCPhys, GCPhysLast, pszDesc,
+ pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
+ VERR_PGM_RAM_CONFLICT);
+ fRamExists = true;
+ break;
+ }
+
+ /* next */
+ pRamPrev = pRam;
+ pRam = pRam->pNextR3;
+ }
+ if (fRamExists)
+ {
+ PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
+ uint32_t cPagesLeft = cGuestPages;
+ while (cPagesLeft-- > 0)
+ {
+ AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
+ ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
+ pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT),
+ pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
+ Assert(PGM_PAGE_IS_ZERO(pPage) || PGM_IS_IN_NEM_MODE(pVM));
+ pPage++;
+ }
+ }
+
+ /*
+ * Update the base memory reservation if necessary.
+ */
+ uint32_t cExtraBaseCost = fRamExists ? 0 : cGuestPages;
+ if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ cExtraBaseCost += cGuestPages;
+ if (cExtraBaseCost)
+ {
+ int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /*
+ * Early NEM notification before we've made any changes or anything.
+ */
+ uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)
+ | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
+ uint8_t u2NemState = UINT8_MAX;
+ uint32_t uNemRange = 0;
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ int rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cGuestPages << GUEST_PAGE_SHIFT,
+ fRamExists ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
+ fNemNotify, &u2NemState, fRamExists ? &pRam->uNemRange : &uNemRange);
+ AssertLogRelRCReturn(rc, rc);
+ }
+#endif
+
+ /*
+ * Allocate memory for the virgin copy of the RAM. In simplified memory mode,
+ * we allocate memory for any ad-hoc RAM range and for shadow pages.
+ */
+ PGMMALLOCATEPAGESREQ pReq = NULL;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ void *pvRam = NULL;
+ void *pvAlt = NULL;
+ if (pVM->pgm.s.fNemMode)
+ {
+ if (!fRamExists)
+ {
+ int rc = SUPR3PageAlloc(cHostPages, 0, &pvRam);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+ int rc = SUPR3PageAlloc(cHostPages, 0, &pvAlt);
+ if (RT_FAILURE(rc))
+ {
+ if (pvRam)
+ SUPR3PageFree(pvRam, cHostPages);
+ return rc;
+ }
+ }
+ }
+ else
+#endif
+ {
+ int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cGuestPages, GMMACCOUNT_BASE);
+ AssertRCReturn(rc, rc);
+
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
+ {
+ pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << GUEST_PAGE_SHIFT);
+ pReq->aPages[iPage].fZeroed = false;
+ pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
+ pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
+ }
+
+ rc = GMMR3AllocatePagesPerform(pVM, pReq);
+ if (RT_FAILURE(rc))
+ {
+ GMMR3AllocatePagesCleanup(pReq);
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the new ROM range and RAM range (if necessary).
+ */
+ PPGMROMRANGE pRomNew = NULL;
+ RTR0PTR pRomNewR0 = NIL_RTR0PTR;
+ size_t const cbRomRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cGuestPages]), 128);
+ size_t const cbRamRange = fRamExists ? 0 : RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cGuestPages]);
+ size_t const cRangePages = RT_ALIGN_Z(cbRomRange + cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
+ int rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pRomNew, &pRomNewR0, NULL /*paPages*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize and insert the RAM range (if required).
+ */
+ PPGMRAMRANGE pRamNew;
+ uint32_t const idxFirstRamPage = fRamExists ? (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT : 0;
+ PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
+ if (!fRamExists)
+ {
+ /* New RAM range. */
+ pRamNew = (PPGMRAMRANGE)((uintptr_t)pRomNew + cbRomRange);
+ pRamNew->pSelfR0 = !pRomNewR0 ? NIL_RTR0PTR : pRomNewR0 + cbRomRange;
+ pRamNew->GCPhys = GCPhys;
+ pRamNew->GCPhysLast = GCPhysLast;
+ pRamNew->cb = cb;
+ pRamNew->pszDesc = pszDesc;
+ pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
+ pRamNew->pvR3 = NULL;
+ pRamNew->paLSPages = NULL;
+#ifdef VBOX_WITH_NATIVE_NEM
+ pRamNew->uNemRange = uNemRange;
+#endif
+
+ PPGMPAGE pRamPage = &pRamNew->aPages[idxFirstRamPage];
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ {
+ AssertPtr(pvRam); Assert(pReq == NULL);
+ pRamNew->pvR3 = pvRam;
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
+ {
+ PGM_PAGE_INIT(pRamPage, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
+ PGMPAGETYPE_ROM, PGM_PAGE_STATE_ALLOCATED);
+ pRomPage->Virgin = *pRamPage;
+ }
+ }
+ else
+#endif
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
+ {
+ PGM_PAGE_INIT(pRamPage,
+ pReq->aPages[iPage].HCPhysGCPhys,
+ pReq->aPages[iPage].idPage,
+ PGMPAGETYPE_ROM,
+ PGM_PAGE_STATE_ALLOCATED);
+
+ pRomPage->Virgin = *pRamPage;
+ }
+
+ pVM->pgm.s.cAllPages += cGuestPages;
+ pVM->pgm.s.cPrivatePages += cGuestPages;
+ pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
+ }
+ else
+ {
+ /* Existing RAM range. */
+ PPGMPAGE pRamPage = &pRam->aPages[idxFirstRamPage];
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ {
+ Assert(pvRam == NULL); Assert(pReq == NULL);
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
+ {
+ Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
+ Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
+ Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
+ PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
+ PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
+ PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
+ PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
+ PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);
+
+ pRomPage->Virgin = *pRamPage;
+ }
+ }
+ else
+#endif
+ {
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
+ {
+ PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
+ PGM_PAGE_SET_HCPHYS(pVM, pRamPage, pReq->aPages[iPage].HCPhysGCPhys);
+ PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
+ PGM_PAGE_SET_PAGEID(pVM, pRamPage, pReq->aPages[iPage].idPage);
+ PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
+ PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
+ PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);
+
+ pRomPage->Virgin = *pRamPage;
+ }
+ pVM->pgm.s.cZeroPages -= cGuestPages;
+ pVM->pgm.s.cPrivatePages += cGuestPages;
+ }
+ pRamNew = pRam;
+ }
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Set the NEM state of the pages if needed. */
+ if (u2NemState != UINT8_MAX)
+ pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState);
+#endif
+
+ /* Flush physical page map TLB. */
+ pgmPhysInvalidatePageMapTLB(pVM);
+
+ /*
+ * Register the ROM access handler.
+ */
+ rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType, GCPhys, pszDesc);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Copy the image over to the virgin pages.
+ * This must be done after linking in the RAM range.
+ */
+ size_t cbBinaryLeft = cbBinary;
+ PPGMPAGE pRamPage = &pRamNew->aPages[idxFirstRamPage];
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
+ {
+ void *pvDstPage;
+ rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << GUEST_PAGE_SHIFT), &pvDstPage);
+ if (RT_FAILURE(rc))
+ {
+ VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp",
+ GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
+ break;
+ }
+ if (cbBinaryLeft >= GUEST_PAGE_SIZE)
+ {
+ memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), GUEST_PAGE_SIZE);
+ cbBinaryLeft -= GUEST_PAGE_SIZE;
+ }
+ else
+ {
+ RT_BZERO(pvDstPage, GUEST_PAGE_SIZE); /* (shouldn't be necessary, but can't hurt either) */
+ if (cbBinaryLeft > 0)
+ {
+ memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), cbBinaryLeft);
+ cbBinaryLeft = 0;
+ }
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the ROM range.
+ * Note that the Virgin member of the pages has already been initialized above.
+ */
+ pRomNew->pSelfR0 = pRomNewR0;
+ pRomNew->GCPhys = GCPhys;
+ pRomNew->GCPhysLast = GCPhysLast;
+ pRomNew->cb = cb;
+ pRomNew->fFlags = fFlags;
+ pRomNew->idSavedState = UINT8_MAX;
+ pRomNew->cbOriginal = cbBinary;
+ pRomNew->pszDesc = pszDesc;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ pRomNew->pbR3Alternate = (uint8_t *)pvAlt;
+#endif
+ pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
+ ? pvBinary : RTMemDup(pvBinary, cbBinary);
+ if (pRomNew->pvOriginal)
+ {
+ for (unsigned iPage = 0; iPage < cGuestPages; iPage++)
+ {
+ PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
+ pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ PGM_PAGE_INIT(&pPage->Shadow, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
+ PGMPAGETYPE_ROM_SHADOW, PGM_PAGE_STATE_ALLOCATED);
+ else
+#endif
+ PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
+ }
+
+ /* update the page count stats for the shadow pages. */
+ if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ pVM->pgm.s.cPrivatePages += cGuestPages;
+ else
+#endif
+ pVM->pgm.s.cZeroPages += cGuestPages;
+ pVM->pgm.s.cAllPages += cGuestPages;
+ }
+
+ /*
+ * Insert the ROM range, tell REM and return successfully.
+ */
+ pRomNew->pNextR3 = pRom;
+ pRomNew->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;
+
+ if (pRomPrev)
+ {
+ pRomPrev->pNextR3 = pRomNew;
+ pRomPrev->pNextR0 = pRomNew->pSelfR0;
+ }
+ else
+ {
+ pVM->pgm.s.pRomRangesR3 = pRomNew;
+ pVM->pgm.s.pRomRangesR0 = pRomNew->pSelfR0;
+ }
+
+ pgmPhysInvalidatePageMapTLB(pVM);
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!pVM->pgm.s.fNemMode)
+#endif
+ GMMR3AllocatePagesCleanup(pReq);
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /*
+ * Notify NEM again.
+ */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ u2NemState = UINT8_MAX;
+ rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamNew, GCPhys),
+ fNemNotify, &u2NemState,
+ fRamExists ? &pRam->uNemRange : &pRamNew->uNemRange);
+ if (u2NemState != UINT8_MAX)
+ pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState);
+ if (RT_SUCCESS(rc))
+ return rc;
+ }
+ else
+#endif
+ return rc;
+
+ /*
+ * bail out
+ */
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* unlink */
+ if (pRomPrev)
+ {
+ pRomPrev->pNextR3 = pRom;
+ pRomPrev->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;
+ }
+ else
+ {
+ pVM->pgm.s.pRomRangesR3 = pRom;
+ pVM->pgm.s.pRomRangesR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;
+ }
+
+ if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+# ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ pVM->pgm.s.cPrivatePages -= cGuestPages;
+ else
+# endif
+ pVM->pgm.s.cZeroPages -= cGuestPages;
+ pVM->pgm.s.cAllPages -= cGuestPages;
+ }
+#endif
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+ int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
+ AssertRC(rc2);
+ }
+
+ if (!fRamExists)
+ pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
+ else
+ {
+ PPGMPAGE pRamPage = &pRam->aPages[idxFirstRamPage];
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ {
+ Assert(pvRam == NULL); Assert(pReq == NULL);
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
+ {
+ Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
+ Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
+ Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
+ PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_RAM);
+ PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
+ }
+ }
+ else
+#endif
+ {
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
+ PGM_PAGE_INIT_ZERO(pRamPage, pVM, PGMPAGETYPE_RAM);
+ pVM->pgm.s.cZeroPages += cGuestPages;
+ pVM->pgm.s.cPrivatePages -= cGuestPages;
+ }
+ }
+
+ SUPR3PageFreeEx(pRomNew, cRangePages);
+ }
+
+ /** @todo Purge the mapping cache or something... */
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ {
+ Assert(!pReq);
+ if (pvRam)
+ SUPR3PageFree(pvRam, cHostPages);
+ if (pvAlt)
+ SUPR3PageFree(pvAlt, cHostPages);
+ }
+ else
+#endif
+ {
+ GMMR3FreeAllocatedPages(pVM, pReq);
+ GMMR3AllocatePagesCleanup(pReq);
+ }
+ return rc;
+}
+
+
+/**
+ * Registers a ROM image.
+ *
+ * Shadowed ROM images require double the amount of backing memory, so
+ * don't use that feature unless you have to. Shadowing of ROM images is a
+ * process where we can select where the reads go and where the writes go.
+ * On real hardware the chipset provides means to configure this. We provide
+ * PGMR3PhysRomProtect() for this purpose.
+ *
+ * A read-only copy of the ROM image will always be kept around while we
+ * will allocate RAM pages for the changes on demand (unless all memory
+ * is configured to be preallocated).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device instance owning the ROM.
+ * @param GCPhys First physical address in the range.
+ * Must be page aligned!
+ * @param cb The size of the range (in bytes).
+ * Must be page aligned!
+ * @param pvBinary Pointer to the binary data backing the ROM image.
+ * @param cbBinary The size of the binary data pvBinary points to.
+ * This must be less than or equal to @a cb.
+ * @param fFlags Mask of flags, PGMPHYS_ROM_FLAGS_XXX.
+ * @param pszDesc Pointer to description string. This must not be freed.
+ *
+ * @remark There is no way to remove the ROM, automatically on device cleanup or
+ * manually from the device yet. This isn't difficult in any way, it's
+ * just not something we expect to be necessary for a while.
+ */
+VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
+ const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
+{
+ Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
+ pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
+ PGM_LOCK_VOID(pVM);
+ int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
+ PGM_UNLOCK(pVM);
+ return rc;
+}
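+
+
+/* A minimal usage sketch for the above (editorial illustration; the device
+ * instance, the load address, the image buffer and the flag choice are all
+ * hypothetical, not taken from any particular device):
+ *
+ *      rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000), _128K,
+ *                                pvBiosImage, cbBiosImage,
+ *                                PGMPHYS_ROM_FLAGS_SHADOWED, "Example BIOS");
+ */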
+
+
+/**
+ * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
+ * that the virgin part is untouched.
+ *
+ * This is done after the normal memory has been cleared.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PhysRomReset(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ const uint32_t cGuestPages = pRom->cb >> GUEST_PAGE_SHIFT;
+
+ if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+ /*
+ * Reset the physical handler.
+ */
+ int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * What we do with the shadow pages depends on the memory
+ * preallocation option. If not enabled, we'll just throw
+ * out all the dirty pages and replace them by the zero page.
+ */
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (pVM->pgm.s.fNemMode)
+ {
+ /* Clear all the shadow pages (currently using alternate backing). */
+ RT_BZERO(pRom->pbR3Alternate, pRom->cb);
+ }
+ else
+#endif
+ if (!pVM->pgm.s.fRamPreAlloc)
+ {
+ /* Free the dirty pages. */
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ AssertRCReturn(rc, rc);
+
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
+ if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
+ && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
+ {
+ Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
+ pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT),
+ (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pRom->aPages[iPage].Shadow));
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ if (cPendingPages)
+ {
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ GMMR3FreePagesCleanup(pReq);
+ }
+ else
+ {
+ /* clear all the shadow pages. */
+ for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
+ {
+ if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
+ continue;
+ Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
+ void *pvDstPage;
+ const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
+ rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
+ if (RT_FAILURE(rc))
+ break;
+ RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
+ }
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ /*
+ * Restore the original ROM pages after a saved state load.
+ * Also, in strict builds check that ROM pages remain unmodified.
+ */
+#ifndef VBOX_STRICT
+ if (pVM->pgm.s.fRestoreRomPagesOnReset)
+#endif
+ {
+ size_t cbSrcLeft = pRom->cbOriginal;
+ uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
+ uint32_t cRestored = 0;
+ for (uint32_t iPage = 0; iPage < cGuestPages && cbSrcLeft > 0; iPage++, pbSrcPage += GUEST_PAGE_SIZE)
+ {
+ const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
+ PPGMPAGE const pPage = pgmPhysGetPage(pVM, GCPhys);
+ void const *pvDstPage = NULL;
+ int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvDstPage);
+ if (RT_FAILURE(rc))
+ break;
+
+ if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE)))
+ {
+ if (pVM->pgm.s.fRestoreRomPagesOnReset)
+ {
+ void *pvDstPageW = NULL;
+ rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvDstPageW);
+ AssertLogRelRCReturn(rc, rc);
+ memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE));
+ cRestored++;
+ }
+ else
+ LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
+ }
+ cbSrcLeft -= RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE);
+ }
+ if (cRestored > 0)
+ LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cGuestPages));
+ }
+ }
+
+ /* Clear the ROM restore flag now as we only need to do this once after
+ loading saved state. */
+ pVM->pgm.s.fRestoreRomPagesOnReset = false;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called by PGMR3Term to free resources.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pgmR3PhysRomTerm(PVM pVM)
+{
+ /*
+ * Free the heap copy of the original bits.
+ */
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ if ( pRom->pvOriginal
+ && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
+ {
+ RTMemFree((void *)pRom->pvOriginal);
+ pRom->pvOriginal = NULL;
+ }
+ }
+}
+
+
+/**
+ * Change the shadowing of a range of ROM pages.
+ *
+ * This is intended for implementing chipset specific memory registers
+ * and will not be very strict about the input. It will silently ignore
+ * any pages that are not part of a shadowed ROM.
+ *
+ * @returns VBox status code.
+ * @retval VINF_PGM_SYNC_CR3
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Where to start. Page aligned.
+ * @param cb How much to change. Page aligned.
+ * @param enmProt The new ROM protection.
+ */
+VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
+{
+ /*
+ * Check input
+ */
+ if (!cb)
+ return VINF_SUCCESS;
+ AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+ RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
+ AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
+ AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
+
+ /*
+ * Process the request.
+ */
+ PGM_LOCK_VOID(pVM);
+ int rc = VINF_SUCCESS;
+ bool fFlushTLB = false;
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ if ( GCPhys <= pRom->GCPhysLast
+ && GCPhysLast >= pRom->GCPhys
+ && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
+ {
+ /*
+ * Iterate the relevant pages and make the necessary changes.
+ */
+#ifdef VBOX_WITH_NATIVE_NEM
+ PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
+ AssertPtrReturn(pRam, VERR_INTERNAL_ERROR_3);
+#endif
+ bool fChanges = false;
+ uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
+ ? pRom->cb >> GUEST_PAGE_SHIFT
+ : (GCPhysLast - pRom->GCPhys + 1) >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
+ iPage < cPages;
+ iPage++)
+ {
+ PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
+ if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
+ {
+ fChanges = true;
+
+ /* flush references to the page. */
+ RTGCPHYS const GCPhysPage = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
+ PPGMPAGE pRamPage = pgmPhysGetPage(pVM, GCPhysPage);
+ int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pRamPage, true /*fFlushPTEs*/, &fFlushTLB);
+ if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
+ rc = rc2;
+#ifdef VBOX_WITH_NATIVE_NEM
+ uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pRamPage);
+#endif
+
+ PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
+ PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
+
+ *pOld = *pRamPage;
+ *pRamPage = *pNew;
+ /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
+
+#ifdef VBOX_WITH_NATIVE_NEM
+# ifdef VBOX_WITH_PGM_NEM_MODE
+ /* In simplified mode we have to switch the page data around too. */
+ if (pVM->pgm.s.fNemMode)
+ {
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ uint8_t * const pbRamPage = PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage);
+ memcpy(abPage, &pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], sizeof(abPage));
+ memcpy(&pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], pbRamPage, sizeof(abPage));
+ memcpy(pbRamPage, abPage, sizeof(abPage));
+ }
+# endif
+ /* Tell NEM about the backing and protection change. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pNew);
+ NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pOld), PGM_PAGE_GET_HCPHYS(pNew),
+ PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
+ pgmPhysPageCalcNemProtection(pRamPage, enmType), enmType, &u2State);
+ PGM_PAGE_SET_NEM_STATE(pRamPage, u2State);
+ }
+#endif
+ }
+ pRomPage->enmProt = enmProt;
+ }
+
+ /*
+ * Reset the access handler if we made changes, no need
+ * to optimize this.
+ */
+ if (fChanges)
+ {
+ int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
+ if (RT_FAILURE(rc2))
+ {
+ PGM_UNLOCK(pVM);
+ AssertRC(rc);
+ return rc2;
+ }
+ }
+
+ /* Advance - cb isn't updated. */
+ GCPhys = pRom->GCPhys + (cPages << GUEST_PAGE_SHIFT);
+ }
+ }
+ PGM_UNLOCK(pVM);
+ if (fFlushTLB)
+ PGM_INVL_ALL_VCPU_TLBS(pVM);
+
+ return rc;
+}
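+
+
+/* Sketch of how a chipset device might flip a shadowed legacy BIOS region
+ * between ROM and RAM backing (editorial illustration; the address, the size
+ * and the chosen protections are hypothetical for this example):
+ *
+ *      // Route reads and writes to the shadow (RAM) copy while POST unpacks:
+ *      rc = PGMR3PhysRomProtect(pVM, UINT32_C(0x000e0000), _128K,
+ *                               PGMROMPROT_READ_RAM_WRITE_RAM);
+ *      // Lock it down again: reads from the shadow copy, writes ignored.
+ *      rc = PGMR3PhysRomProtect(pVM, UINT32_C(0x000e0000), _128K,
+ *                               PGMROMPROT_READ_RAM_WRITE_IGNORE);
+ */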
+
+
+
+/*********************************************************************************************************************************
+* Ballooning *
+*********************************************************************************************************************************/
+
+#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
+
+/**
+ * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size.
+ *
+ * This is only called on one of the EMTs while the other ones are waiting for
+ * it to complete this function.
+ *
+ * @returns VINF_SUCCESS (VBox strict status code).
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
+ * @param pvUser User parameter
+ */
+static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ uintptr_t *paUser = (uintptr_t *)pvUser;
+ bool fInflate = !!paUser[0];
+ unsigned cPages = paUser[1];
+ RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ int rc;
+
+ Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
+ PGM_LOCK_VOID(pVM);
+
+ if (fInflate)
+ {
+ /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
+ pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
+
+ /* Replace pages with ZERO pages. */
+ rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ if (RT_FAILURE(rc))
+ {
+ PGM_UNLOCK(pVM);
+ AssertLogRelRC(rc);
+ return rc;
+ }
+
+ /* Iterate the pages. */
+ for (unsigned i = 0; i < cPages; i++)
+ {
+ PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
+ if ( pPage == NULL
+ || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
+ {
+ Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
+ break;
+ }
+
+ LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
+
+ /* Flush the shadow PT if this page was previously used as a guest page table. */
+ pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
+
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
+ if (RT_FAILURE(rc))
+ {
+ PGM_UNLOCK(pVM);
+ AssertLogRelRC(rc);
+ return rc;
+ }
+ Assert(PGM_PAGE_IS_ZERO(pPage));
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
+ }
+
+ if (cPendingPages)
+ {
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ if (RT_FAILURE(rc))
+ {
+ PGM_UNLOCK(pVM);
+ AssertLogRelRC(rc);
+ return rc;
+ }
+ }
+ GMMR3FreePagesCleanup(pReq);
+ }
+ else
+ {
+ /* Iterate the pages. */
+ for (unsigned i = 0; i < cPages; i++)
+ {
+ PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
+ AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
+
+ LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
+
+ Assert(PGM_PAGE_IS_BALLOONED(pPage));
+
+ /* Change back to zero page. (NEM does not need to be informed.) */
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+ }
+
+ /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
+ }
+
+ /* Notify GMM about the balloon change. */
+ rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
+ if (RT_SUCCESS(rc))
+ {
+ if (!fInflate)
+ {
+ Assert(pVM->pgm.s.cBalloonedPages >= cPages);
+ pVM->pgm.s.cBalloonedPages -= cPages;
+ }
+ else
+ pVM->pgm.s.cBalloonedPages += cPages;
+ }
+
+ PGM_UNLOCK(pVM);
+
+ /* Flush the recompiler's TLB as well. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+
+ AssertLogRelRC(rc);
+ return rc;
+}
+
+
+/**
+ * Inflates or deflates the memory balloon; request handler helper for PGMR3PhysChangeMemBalloon.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fInflate Inflate or deflate memory balloon
+ * @param cPages Number of pages to free
+ * @param paPhysPage Array of guest physical addresses
+ */
+static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
+{
+ uintptr_t paUser[3];
+
+ paUser[0] = fInflate;
+ paUser[1] = cPages;
+ paUser[2] = (uintptr_t)paPhysPage;
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
+ AssertRC(rc);
+
+ /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
+ RTMemFree(paPhysPage);
+}
+
+#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
+
+/**
+ * Inflate or deflate a memory balloon
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fInflate Inflate or deflate memory balloon
+ * @param cPages Number of pages to free
+ * @param paPhysPage Array of guest physical addresses
+ */
+VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
+{
+ /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
+#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
+ int rc;
+
+ /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
+ AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
+
+ /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
+ * In the SMP case we post a request packet to postpone the job.
+ */
+ if (pVM->cCpus > 1)
+ {
+ unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
+ RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
+ AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
+
+ memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
+
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
+ AssertRC(rc);
+ }
+ else
+ {
+ uintptr_t paUser[3];
+
+ paUser[0] = fInflate;
+ paUser[1] = cPages;
+ paUser[2] = (uintptr_t)paPhysPage;
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
+ AssertRC(rc);
+ }
+ return rc;
+
+#else
+ NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
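+
+
+/* Usage sketch (editorial illustration; in practice the page array comes from
+ * the guest ballooning service, and the array size below is hypothetical):
+ *
+ *      RTGCPHYS aPages[32];
+ *      // ... fill aPages[] with the guest page addresses to surrender ...
+ *      rc = PGMR3PhysChangeMemBalloon(pVM, true, RT_ELEMENTS(aPages), &aPages[0]);
+ *      // Deflating later hands the same pages back as zero pages:
+ *      rc = PGMR3PhysChangeMemBalloon(pVM, false, RT_ELEMENTS(aPages), &aPages[0]);
+ */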
+
+
+/*********************************************************************************************************************************
+* Write Monitoring *
+*********************************************************************************************************************************/
+
+/**
+ * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all
+ * physical RAM.
+ *
+ * This is only called on one of the EMTs while the other ones are waiting for
+ * it to complete this function.
+ *
+ * @returns VINF_SUCCESS (VBox strict status code).
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
+ * @param pvUser User parameter, unused.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ NOREF(pvUser); NOREF(pVCpu);
+
+ PGM_LOCK_VOID(pVM);
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ pgmPoolResetDirtyPages(pVM);
+#endif
+
+ /** @todo pointless to write protect the physical page pointed to by RSP. */
+
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
+ pRam;
+ pRam = pRam->CTX_SUFF(pNext))
+ {
+ uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
+
+ if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
+ || enmPageType == PGMPAGETYPE_MMIO2)
+ {
+ /*
+ * A RAM page.
+ */
+ switch (PGM_PAGE_GET_STATE(pPage))
+ {
+ case PGM_PAGE_STATE_ALLOCATED:
+ /** @todo Optimize this: Don't always re-enable write
+ * monitoring if the page is known to be very busy. */
+ if (PGM_PAGE_IS_WRITTEN_TO(pPage))
+ PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
+
+ pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
+ break;
+
+ case PGM_PAGE_STATE_SHARED:
+ AssertFailed();
+ break;
+
+ case PGM_PAGE_STATE_WRITE_MONITORED: /* nothing to change. */
+ default:
+ break;
+ }
+ }
+ }
+ }
+ pgmR3PoolWriteProtectPages(pVM);
+ PGM_INVL_ALL_VCPU_TLBS(pVM);
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+/**
+ * Protect all physical RAM to monitor writes
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
+{
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
+ AssertRC(rc);
+ return rc;
+}
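+
+
+/* Usage sketch (editorial illustration): a dirty-page tracker, e.g. for live
+ * snapshots, would arm write monitoring on the EMT and then let the guest run,
+ * harvesting written-to pages afterwards:
+ *
+ *      rc = PGMR3PhysWriteProtectRAM(pVM);  // must be called on an EMT
+ */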
+
+
+/*********************************************************************************************************************************
+* Stats. *
+*********************************************************************************************************************************/
+
+/**
+ * Query the amount of free memory inside VMMR0
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pcbAllocMem Where to return the amount of memory allocated
+ * by VMs.
+ * @param pcbFreeMem Where to return the amount of memory that is
+ * allocated from the host but not currently used
+ * by any VMs.
+ * @param pcbBallonedMem Where to return the sum of memory that is
+ * currently ballooned by the VMs.
+ * @param pcbSharedMem Where to return the amount of memory that is
+ * currently shared.
+ */
+VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
+ uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ uint64_t cAllocPages = 0;
+ uint64_t cFreePages = 0;
+ uint64_t cBalloonPages = 0;
+ uint64_t cSharedPages = 0;
+ if (!SUPR3IsDriverless())
+ {
+ int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (pcbAllocMem)
+ *pcbAllocMem = cAllocPages * _4K;
+
+ if (pcbFreeMem)
+ *pcbFreeMem = cFreePages * _4K;
+
+ if (pcbBallonedMem)
+ *pcbBallonedMem = cBalloonPages * _4K;
+
+ if (pcbSharedMem)
+ *pcbSharedMem = cSharedPages * _4K;
+
+ Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
+ cAllocPages, cFreePages, cBalloonPages, cSharedPages));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Query memory stats for the VM.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pcbTotalMem Where to return the total amount of memory the VM may
+ * possibly use.
+ * @param pcbPrivateMem Where to return the amount of private memory
+ * currently allocated.
+ * @param pcbSharedMem Where to return the amount of actually shared
+ * memory currently used by the VM.
+ * @param pcbZeroMem Where to return the amount of memory backed by
+ * zero pages.
+ *
+ * @remarks The total mem is normally larger than the sum of the three
+ * components. There are two reasons for this: first, the amount of
+ * shared memory is what we're sure is shared instead of what could
+ * possibly be shared with someone. Secondly, the total may
+ * include some pure MMIO pages that don't go into any of the three
+ * sub-counts.
+ *
+ * @todo Why do we return reused shared pages instead of anything that could
+ * potentially be shared? Doesn't this mean the first VM gets a much
+ * lower number of shared pages?
+ */
+VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
+ uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ if (pcbTotalMem)
+ *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * GUEST_PAGE_SIZE;
+
+ if (pcbPrivateMem)
+ *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * GUEST_PAGE_SIZE;
+
+ if (pcbSharedMem)
+ *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * GUEST_PAGE_SIZE;
+
+ if (pcbZeroMem)
+ *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * GUEST_PAGE_SIZE;
+
+ Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
+ return VINF_SUCCESS;
+}
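+
+
+/* Query sketch (editorial illustration):
+ *
+ *      uint64_t cbTotal, cbPrivate, cbShared, cbZero;
+ *      int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
+ *      if (RT_SUCCESS(rc))
+ *          LogRel(("guest mem: total=%RU64 private=%RU64 shared=%RU64 zero=%RU64\n",
+ *                  cbTotal, cbPrivate, cbShared, cbZero));
+ */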
+
+
+
+/*********************************************************************************************************************************
+* Chunk Mappings and Page Allocation *
+*********************************************************************************************************************************/
+
+/**
+ * Tree enumeration callback for dealing with age rollover.
+ * It will perform a simple compression of the current age.
+ */
+static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
+{
+ /* Age compression - ASSUMES iNow == 4. */
+ PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
+ if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
+ pChunk->iLastUsed = 3;
+ else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
+ pChunk->iLastUsed = 2;
+ else if (pChunk->iLastUsed)
+ pChunk->iLastUsed = 1;
+ else /* iLastUsed = 0 */
+ pChunk->iLastUsed = 4;
+
+ NOREF(pvUser);
+ return 0;
+}
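+
+
+/* Editorial note on the compression above (assuming iNow == 4, as the comment
+ * states): the relative ordering of the ages is preserved across the wrap:
+ *
+ *      iLastUsed >= 0xffffff00  -> 3  (most recently used before the wrap)
+ *      iLastUsed >= 0xfffff000  -> 2
+ *      other non-zero values    -> 1  (oldest)
+ *      iLastUsed == 0           -> 4  (treated as current, i.e. newest)
+ */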
+
+
+/**
+ * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
+ */
+typedef struct PGMR3PHYSCHUNKUNMAPCB
+{
+ PVM pVM; /**< Pointer to the VM. */
+ PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
+} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
+
+
+/**
+ * Callback used to find the mapping that's been unused for
+ * the longest time.
+ */
+static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
+{
+ PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
+ PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
+
+ /*
+ * Check for locks and compare when last used.
+ */
+ if (pChunk->cRefs)
+ return 0;
+ if (pChunk->cPermRefs)
+ return 0;
+ if ( pArg->pChunk
+ && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
+ return 0;
+
+ /*
+ * Check that it's not in any of the TLBs.
+ */
+ PVM pVM = pArg->pVM;
+ if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
+ == pChunk->Core.Key)
+ return 0; /* still cached in the chunk map TLB, skip it */
+#ifdef VBOX_STRICT
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
+ {
+ Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
+ Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
+ }
+#endif
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
+ if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
+ return 0;
+
+ pArg->pChunk = pChunk;
+ return 0;
+}
+
+
+/**
+ * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
+ *
+ * The candidate will not be part of any TLBs, so no need to flush
+ * anything afterwards.
+ *
+ * @returns Chunk id.
+ * @param pVM The cross context VM structure.
+ */
+static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ /*
+ * Enumerate the age tree starting with the left most node.
+ */
+ STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
+ PGMR3PHYSCHUNKUNMAPCB Args;
+ Args.pVM = pVM;
+ Args.pChunk = NULL;
+ RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
+ Assert(Args.pChunk);
+ if (Args.pChunk)
+ {
+ Assert(Args.pChunk->cRefs == 0);
+ Assert(Args.pChunk->cPermRefs == 0);
+ STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
+ return Args.pChunk->Core.Key;
+ }
+
+ STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
+ return INT32_MAX;
+}
+
+
+/**
+ * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk
+ *
+ * This is only called on one of the EMTs while the other ones are waiting for
+ * it to complete this function.
+ *
+ * @returns VINF_SUCCESS (VBox strict status code).
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
+ * @param pvUser User pointer. Unused.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PGM_LOCK_VOID(pVM);
+ NOREF(pVCpu); NOREF(pvUser);
+
+ if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
+ {
+ /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
+ /** @todo also not really efficient to unmap a chunk that contains PD
+ * or PT pages. */
+ pgmR3PoolClearAllRendezvous(pVM, pVM->apCpusR3[0], NULL /* no need to flush the REM TLB as we already did that above */);
+
+ /*
+ * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
+ */
+ GMMMAPUNMAPCHUNKREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pvR3 = NULL;
+ Req.idChunkMap = NIL_GMM_CHUNKID;
+ Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
+ if (Req.idChunkUnmap != INT32_MAX)
+ {
+ STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkUnmap, a);
+ rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
+ STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkUnmap, a);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Remove the unmapped one.
+ */
+ PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
+ AssertRelease(pUnmappedChunk);
+ AssertRelease(!pUnmappedChunk->cRefs);
+ AssertRelease(!pUnmappedChunk->cPermRefs);
+ pUnmappedChunk->pv = NULL;
+ pUnmappedChunk->Core.Key = UINT32_MAX;
+ MMR3HeapFree(pUnmappedChunk);
+ pVM->pgm.s.ChunkR3Map.c--;
+ pVM->pgm.s.cUnmappedChunks++;
+
+ /*
+ * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
+ */
+ /** @todo We should not flush chunks which include cr3 mappings. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PPGMCPU pPGM = &pVM->apCpusR3[idCpu]->pgm.s;
+
+ pPGM->pGst32BitPdR3 = NULL;
+ pPGM->pGstPaePdptR3 = NULL;
+ pPGM->pGstAmd64Pml4R3 = NULL;
+ pPGM->pGstEptPml4R3 = NULL;
+ pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
+ pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
+ pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
+ pPGM->pGstEptPml4R0 = NIL_RTR0PTR;
+ for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
+ {
+ pPGM->apGstPaePDsR3[i] = NULL;
+ pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
+ }
+
+ /* Flush REM TLBs. */
+ CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+ }
+ }
+ }
+ }
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+/**
+ * Unmap a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap)
+ *
+ * @param pVM The cross context VM structure.
+ */
+static DECLCALLBACK(void) pgmR3PhysUnmapChunk(PVM pVM)
+{
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
+ AssertRC(rc);
+}
+
+
+/**
+ * Maps the given chunk into the ring-3 mapping cache.
+ *
+ * This will call ring-0.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idChunk The chunk in question.
+ * @param ppChunk Where to store the chunk tracking structure.
+ *
+ * @remarks Called from within the PGM critical section.
+ * @remarks Can be called from any thread!
+ */
+int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
+{
+ int rc;
+
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ /*
+ * Move the chunk time forward.
+ */
+ pVM->pgm.s.ChunkR3Map.iNow++;
+ if (pVM->pgm.s.ChunkR3Map.iNow == 0)
+ {
+ pVM->pgm.s.ChunkR3Map.iNow = 4;
+ RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
+ }
+
+ /*
+ * Allocate a new tracking structure first.
+ */
+ PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
+ AssertReturn(pChunk, VERR_NO_MEMORY);
+ pChunk->Core.Key = idChunk;
+ pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
+
+ /*
+ * Request the ring-0 part to map the chunk in question.
+ */
+ GMMMAPUNMAPCHUNKREQ Req;
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.pvR3 = NULL;
+ Req.idChunkMap = idChunk;
+ Req.idChunkUnmap = NIL_GMM_CHUNKID;
+
+ /* Must be callable from any thread, so can't use VMMR3CallR0. */
+ STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkMap, a);
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
+ STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkMap, a);
+ if (RT_SUCCESS(rc))
+ {
+ pChunk->pv = Req.pvR3;
+
+ /*
+ * If we're running out of virtual address space, then we should
+ * unmap another chunk.
+ *
+ * Currently, an unmap operation requires that all other virtual CPUs
+ * are idling and not by chance making use of the memory we're
+ * unmapping. So, we create an async unmap operation here.
+ *
+ * Now, when creating or restoring a saved state this won't work very
+ * well since we may want to restore all guest RAM + a little something.
+ * So, we have to do the unmap synchronously. Fortunately for us
+ * though, during these operations the other virtual CPUs are inactive
+ * and it should be safe to do this.
+ */
+ /** @todo Eventually we should lock all memory when used and do
+ * map+unmap as one kernel call without any rendezvous or
+ * other precautions. */
+ if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
+ {
+ switch (VMR3GetState(pVM))
+ {
+ case VMSTATE_LOADING:
+ case VMSTATE_SAVING:
+ {
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if ( pVCpu
+ && pVM->pgm.s.cDeprecatedPageLocks == 0)
+ {
+ pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
+ break;
+ }
+ }
+ RT_FALL_THRU();
+ default:
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
+ AssertRC(rc);
+ break;
+ }
+ }
+
+ /*
+ * Update the tree. We must do this after any unmapping to make sure
+ * the chunk we're going to return isn't unmapped by accident.
+ */
+ AssertPtr(Req.pvR3);
+ bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
+ AssertRelease(fRc);
+ pVM->pgm.s.ChunkR3Map.c++;
+ pVM->pgm.s.cMappedChunks++;
+ }
+ else
+ {
+ /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
+ * should probably restrict ourselves on linux. */
+ AssertRC(rc);
+ MMR3HeapFree(pChunk);
+ pChunk = NULL;
+ }
+
+ *ppChunk = pChunk;
+ return rc;
+}
+
+
+/**
+ * Invalidates the TLB for the ring-3 mapping cache.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
+{
+ PGM_LOCK_VOID(pVM);
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
+ {
+ pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
+ pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
+ }
+ /* The page map TLB references chunks, so invalidate that one too. */
+ pgmPhysInvalidatePageMapTLB(pVM);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Response to VM_FF_PGM_NEED_HANDY_PAGES and helper for pgmPhysEnsureHandyPage.
+ *
+ * This function will also handle the VM_FF_PGM_NO_MEMORY force action flag, to
+ * signal and clear the out-of-memory condition. When called, this API is used
+ * to try to clear the condition when the user wants to resume.
+ *
+ * @returns The following VBox status codes.
+ * @retval VINF_SUCCESS on success. FFs cleared.
+ * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
+ * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
+ * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
+ * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
+ * handler.
+ */
+VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
+{
+ PGM_LOCK_VOID(pVM);
+
+ /*
+ * Allocate more pages, noting down the index of the first new page.
+ */
+ uint32_t iClear = pVM->pgm.s.cHandyPages;
+ AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
+ Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+ int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
+ /** @todo we should split this up into an allocate and flush operation. sometimes you want to flush and not allocate more (which will trigger the vm account limit error) */
+ if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
+ && pVM->pgm.s.cHandyPages > 0)
+ {
+ /* Still handy pages left, so don't panic. */
+ rc = VINF_SUCCESS;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
+ Assert(pVM->pgm.s.cHandyPages > 0);
+#ifdef VBOX_STRICT
+ uint32_t i;
+ for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
+ if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
+ || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
+ || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & GUEST_PAGE_OFFSET_MASK))
+ break;
+ if (i != pVM->pgm.s.cHandyPages)
+ {
+ RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
+ RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
+ for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
+ RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
+ pVM->pgm.s.aHandyPages[j].idPage,
+ pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
+ pVM->pgm.s.aHandyPages[j].idSharedPage,
+ j == i ? " <---" : "");
+ RTAssertPanic();
+ }
+#endif
+ }
+ else
+ {
+ /*
+ * We should never get here unless there is a genuine shortage of
+ * memory (or some internal error). Flag the error so the VM can be
+ * suspended ASAP and the user informed. If we're totally out of
+ * handy pages we will return failure.
+ */
+ /* Report the failure. */
+ LogRel(("PGM: Failed to procure handy pages; rc=%Rrc cHandyPages=%#x\n"
+ " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
+ rc, pVM->pgm.s.cHandyPages,
+ pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cSharedPages, pVM->pgm.s.cZeroPages));
+
+ if ( rc != VERR_NO_MEMORY
+ && rc != VERR_NO_PHYS_MEMORY
+ && rc != VERR_LOCK_FAILED)
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+ {
+ LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
+ i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
+ pVM->pgm.s.aHandyPages[i].idSharedPage));
+ uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
+ if (idPage != NIL_GMM_PAGEID)
+ {
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
+ pRam;
+ pRam = pRam->pNextR3)
+ {
+ uint32_t const cPages = pRam->cb >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
+ LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
+ pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
+ }
+ }
+ }
+
+ if (rc == VERR_NO_MEMORY)
+ {
+ uint64_t cbHostRamAvail = 0;
+ int rc2 = RTSystemQueryAvailableRam(&cbHostRamAvail);
+ if (RT_SUCCESS(rc2))
+ LogRel(("Host RAM: %RU64MB available\n", cbHostRamAvail / _1M));
+ else
+ LogRel(("Cannot determine the amount of available host memory\n"));
+ }
+
+ /* Set the FFs and adjust rc. */
+ VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
+ VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
+ if ( rc == VERR_NO_MEMORY
+ || rc == VERR_NO_PHYS_MEMORY
+ || rc == VERR_LOCK_FAILED)
+ rc = VINF_EM_NO_MEMORY;
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
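+
+
+/* Sketch of the typical call-site pattern (editorial illustration; the actual
+ * force-action processing lives in EM):
+ *
+ *      if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+ *      {
+ *          int rc = PGMR3PhysAllocateHandyPages(pVM);
+ *          if (rc == VINF_EM_NO_MEMORY)
+ *              return VINF_EM_NO_MEMORY; // VM_FF_PGM_NO_MEMORY remains set
+ *      }
+ */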
+
+
+/*********************************************************************************************************************************
+* Other Stuff *
+*********************************************************************************************************************************/
+
+/**
+ * Sets the Address Gate 20 state.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param fEnable True if the gate should be enabled.
+ * False if the gate should be disabled.
+ */
+VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
+{
+ LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
+ if (pVCpu->pgm.s.fA20Enabled != fEnable)
+ {
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ if ( CPUMIsGuestInVmxRootMode(pCtx)
+ && !fEnable)
+ {
+ Log(("Cannot enter A20M mode while in VMX root mode\n"));
+ return;
+ }
+#endif
+ pVCpu->pgm.s.fA20Enabled = fEnable;
+ pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
+ if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)))
+ NEMR3NotifySetA20(pVCpu, fEnable);
+#ifdef PGM_WITH_A20
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ pgmR3RefreshShadowModeAfterA20Change(pVCpu);
+ HMFlushTlb(pVCpu);
+#endif
+#if 0 /* PGMGetPage will apply the A20 mask to the GCPhys it returns, so we must invalidate both sides of the TLB. */
+ IEMTlbInvalidateAllPhysical(pVCpu);
+#else
+ IEMTlbInvalidateAll(pVCpu);
+#endif
+ STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
+ }
+}
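+
+/* Call-site sketch (editorial illustration): the keyboard controller and the
+ * "fast A20" port 92h emulations would toggle the gate roughly like this,
+ * where u8PortVal is a hypothetical output-port value carrying A20 in bit 1:
+ *
+ *      PGMR3PhysSetA20(pVCpu, RT_BOOL(u8PortVal & 0x02));
+ */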
+
diff --git a/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h b/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h
new file mode 100644
index 00000000..19f35903
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h
@@ -0,0 +1,71 @@
+/* $Id: PGMPhysRWTmpl.h $ */
+/** @file
+ * PGM - Page Manager and Monitor, Physical Memory Access Template.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/**
+ * Read physical memory. (one byte/word/dword)
+ *
+ * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
+ * want to ignore those.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Physical address to start reading from.
+ * @param enmOrigin Who is calling.
+ */
+VMMDECL(PGMPHYS_DATATYPE) PGMPHYSFN_READNAME(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin)
+{
+ Assert(VM_IS_EMT(pVM));
+ PGMPHYS_DATATYPE val;
+ VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, &val, sizeof(val), enmOrigin);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
+ return val;
+}
+
+
+/**
+ * Write to physical memory. (one byte/word/dword)
+ *
+ * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
+ * want to ignore those.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Physical address to write to.
+ * @param val What to write.
+ * @param enmOrigin Who is calling.
+ */
+VMMDECL(void) PGMPHYSFN_WRITENAME(PVM pVM, RTGCPHYS GCPhys, PGMPHYS_DATATYPE val, PGMACCESSORIGIN enmOrigin)
+{
+ Assert(VM_IS_EMT(pVM));
+ VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, &val, sizeof(val), enmOrigin);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
+}
+
+#undef PGMPHYSFN_READNAME
+#undef PGMPHYSFN_WRITENAME
+#undef PGMPHYS_DATATYPE
+#undef PGMPHYS_DATASIZE
+
diff --git a/src/VBox/VMM/VMMR3/PGMPool.cpp b/src/VBox/VMM/VMMR3/PGMPool.cpp
new file mode 100644
index 00000000..476fb44e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMPool.cpp
@@ -0,0 +1,1345 @@
+/* $Id: PGMPool.cpp $ */
+/** @file
+ * PGM Shadow Page Pool.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_pgm_pool PGM Shadow Page Pool
+ *
+ * Motivations:
+ * -# Relationship between shadow page tables and physical guest pages. This
+ * should allow us to skip most of the global flushes now following access
+ * handler changes. The main expense is flushing shadow pages.
+ * -# Limit the pool size if necessary (default is kind of limitless).
+ * -# Allocate shadow pages from RC. We used to only do this in SyncCR3.
+ * -# Required for 64-bit guests.
+ * -# Combining the PD cache and page pool in order to simplify caching.
+ *
+ *
+ * @section sec_pgm_pool_outline Design Outline
+ *
+ * The shadow page pool tracks pages used for shadowing paging structures (i.e.
+ * page tables, page directory, page directory pointer table and page map
+ * level-4). Each page in the pool has a unique identifier. This identifier is
+ * used to link a guest physical page to a shadow PT. The identifier is a
+ * non-zero value and has a relatively low max value - say 14 bits. This makes it
+ * possible to fit it into the upper bits of the aHCPhys entries in the
+ * ram range.
+ *
+ * By restricting host physical memory to the first 48 bits (which is the
+ * announced physical memory range of the K8L chip (scheduled for 2008)), we
+ * can safely use the upper 16 bits for shadow page ID and reference counting.
+ *
+ * Update: The 48 bit assumption will be lifted with the new physical memory
+ * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
+ * into a box in some years.
+ *
+ * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
+ * or PD. This is solved by creating a list of physical cross reference extents
+ * whenever this happens. Each node in the list (extent) can contain 3 page
+ * pool indexes. The list itself is chained using indexes into the paPhysExt
+ * array.
+ *
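+ * As a rough sketch of an extent node (the authoritative PGMPOOLPHYSEXT
+ * definition lives in PGMInternal.h; the exact field types here are
+ * assumptions, for illustration only):
+ * @code
+ *      typedef struct PGMPOOLPHYSEXT
+ *      {
+ *          uint16_t iNext;    // Next extent, an index into paPhysExts (or NIL).
+ *          uint16_t aidx[3];  // Up to three page pool indexes per node.
+ *          uint16_t apte[3];  // The matching page table entry indexes.
+ *      } PGMPOOLPHYSEXT;
+ * @endcode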
+ *
+ * @section sec_pgm_pool_life Life Cycle of a Shadow Page
+ *
+ * -# The SyncPT function requests a page from the pool.
+ * The request includes the kind of page it is (PT/PD, PAE/legacy), the
+ * address of the page it's shadowing, and more.
+ * -# The pool responds to the request by allocating a new page.
+ * When the cache is enabled, it will first check if it's in the cache.
+ * Should the pool be exhausted, one of two things can be done:
+ * -# Flush the whole pool and current CR3.
+ * -# Use the cache to find a page which can be flushed (~age).
+ * -# The SyncPT function will sync one or more pages and insert them into the
+ *    shadow PD.
+ * -# The SyncPage function may sync more pages at later \#PFs.
+ * -# The page is freed / flushed in SyncCR3 (perhaps) and in some other cases.
+ *    When caching is enabled, the page isn't flushed but remains in the cache.
+ *
+ *
+ * @section sec_pgm_pool_monitoring Monitoring
+ *
+ * We always monitor GUEST_PAGE_SIZE chunks of memory. When we've got multiple
+ * shadow pages for the same GUEST_PAGE_SIZE of guest memory (PAE and mixed
+ * PD/PT) the pages sharing the monitor get linked using the
+ * iMonitoredNext/Prev. The head page is the pvUser to the access handlers.
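+ *
+ * As a sketch (illustrative only, using the members initialized in
+ * pgmR3PoolInit below), walking all pages sharing one monitor starting
+ * from the head page could look like:
+ * @code
+ *      for (uint16_t i = pPageHead->idx; i != NIL_PGMPOOL_IDX; i = pPool->aPages[i].iMonitoredNext)
+ *          // visit pPool->aPages[i] ...
+ * @endcode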
+ *
+ *
+ * @section sec_pgm_pool_impl Implementation
+ *
+ * The pool will take pages from the MM page pool. The tracking data
+ * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
+ * pool content can be accessed both by using the page id and the physical
+ * address (HC). The former is managed by means of an array, the latter by an
+ * offset based AVL tree.
+ *
+ * Flushing of a pool page means that we iterate the content (we know what kind
+ * it is) and update the link information in the ram range.
+ *
+ * ...
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM_POOL
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/mm.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/vmm/uvm.h>
+#include "PGMInline.h"
+
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <VBox/dbg.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+typedef struct PGMPOOLCHECKERSTATE
+{
+ PDBGCCMDHLP pCmdHlp;
+ PVM pVM;
+ PPGMPOOL pPool;
+ PPGMPOOLPAGE pPage;
+ bool fFirstMsg;
+ uint32_t cErrors;
+} PGMPOOLCHECKERSTATE;
+typedef PGMPOOLCHECKERSTATE *PPGMPOOLCHECKERSTATE;
+
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static FNDBGFHANDLERINT pgmR3PoolInfoPages;
+static FNDBGFHANDLERINT pgmR3PoolInfoRoots;
+
+#ifdef VBOX_WITH_DEBUGGER
+static FNDBGCCMD pgmR3PoolCmdCheck;
+
+/** Command descriptors. */
+static const DBGCCMD g_aCmds[] =
+{
+ /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ...pszDescription */
+ { "pgmpoolcheck", 0, 0, NULL, 0, 0, pgmR3PoolCmdCheck, "", "Check the pgm pool pages." },
+};
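+/* Once registered, the command is invoked from the VBox debugger console as
+ * '.pgmpoolcheck' (no arguments); it walks all pool pages and reports any
+ * inconsistencies it finds. */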
+#endif
+
+/**
+ * Initializes the pool
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+int pgmR3PoolInit(PVM pVM)
+{
+ int rc;
+
+ AssertCompile(NIL_PGMPOOL_IDX == 0);
+ /* pPage->cLocked is an unsigned byte. */
+ AssertCompile(VMM_MAX_CPU_COUNT <= 255);
+
+ /*
+ * Query Pool config.
+ */
+ PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");
+
+ /* Default pgm pool size is 1024 pages (4MB). */
+ uint16_t cMaxPages = 1024;
+
+ /* Adjust it up relative to the RAM size, using the nested paging formula. */
+ uint64_t cbRam;
+ rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0); AssertRCReturn(rc, rc);
+ /** @todo guest x86 specific */
+ uint64_t u64MaxPages = (cbRam >> 9)
+ + (cbRam >> 18)
+ + (cbRam >> 27)
+ + 32 * GUEST_PAGE_SIZE;
+ u64MaxPages >>= GUEST_PAGE_SHIFT;
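+ /* Worked example (illustrative): for cbRam = 1 GiB (and 4 KiB guest pages) the
+ * sum above is 2 MiB + 4 KiB + 8 + 128 KiB = 2232328, which shifted down by
+ * GUEST_PAGE_SHIFT (12) yields 545 pages, i.e. ~2.1 MiB of shadow structures. */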
+ if (u64MaxPages > PGMPOOL_IDX_LAST)
+ cMaxPages = PGMPOOL_IDX_LAST;
+ else
+ cMaxPages = (uint16_t)u64MaxPages;
+
+ /** @cfgm{/PGM/Pool/MaxPages, uint16_t, \#pages, 16, 0x3fff, F(ram-size)}
+ * The max size of the shadow page pool in pages. The pool will grow dynamically
+ * up to this limit.
+ */
+ rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, cMaxPages);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
+ ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
+ AssertCompile(RT_IS_POWER_OF_TWO(PGMPOOL_CFG_MAX_GROW));
+ if (cMaxPages < PGMPOOL_IDX_LAST)
+ cMaxPages = RT_ALIGN(cMaxPages, PGMPOOL_CFG_MAX_GROW / 2);
+ if (cMaxPages > PGMPOOL_IDX_LAST)
+ cMaxPages = PGMPOOL_IDX_LAST;
+ LogRel(("PGM: PGMPool: cMaxPages=%u (u64MaxPages=%llu)\n", cMaxPages, u64MaxPages));
+
+ /** @todo
+ * We need to be much more careful with our allocation strategy here.
+ * For nested paging we don't need pool user info nor extents at all, but
+ * we can't check for nested paging here (too early during init to get a
+ * confirmation it can be used). The default for large memory configs is a
+ * bit large for shadow paging, so I've restricted the extent maximum to 8k
+ * (8k * 16 = 128k of hyper heap).
+ *
+ * Also when large page support is enabled, we typically don't need so much,
+ * although that depends on the availability of 2 MB chunks on the host.
+ */
+
+ /** @cfgm{/PGM/Pool/MaxUsers, uint16_t, \#users, MaxUsers, 32K, MaxPages*2}
+ * The max number of shadow page user tracking records. Each shadow page has
+ * zero or more other shadow pages (or CR3s) that reference it, or use it if you
+ * like. The structures describing these relationships are allocated from a
+ * fixed sized pool. This configuration variable defines the pool size.
+ */
+ uint16_t cMaxUsers;
+ rc = CFGMR3QueryU16Def(pCfg, "MaxUsers", &cMaxUsers, cMaxPages * 2);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn(cMaxUsers >= cMaxPages && cMaxUsers <= _32K,
+ ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);
+
+ /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, \#extents, 16, MaxPages * 2, MIN(MaxPages*2\,8192)}
+ * The max number of extents for tracking aliased guest pages.
+ */
+ uint16_t cMaxPhysExts;
+ rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts,
+ RT_MIN(cMaxPages * 2, 8192 /* 8Ki max as this eats too much hyper heap */));
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelMsgReturn(cMaxPhysExts >= 16 && cMaxPhysExts <= PGMPOOL_IDX_LAST,
+ ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxPhysExts), VERR_INVALID_PARAMETER);
+
+ /** @cfgm{/PGM/Pool/CacheEnabled, bool, true}
+ * Enables or disables caching of shadow pages. Caching means that we will try
+ * to reuse shadow pages instead of recreating them every time SyncCR3, SyncPT or
+ * SyncPage requests one. When reusing a shadow page, we can save time
+ * reconstructing it and its children.
+ */
+ bool fCacheEnabled;
+ rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ LogRel(("PGM: pgmR3PoolInit: cMaxPages=%#RX16 cMaxUsers=%#RX16 cMaxPhysExts=%#RX16 fCacheEnable=%RTbool\n",
+ cMaxPages, cMaxUsers, cMaxPhysExts, fCacheEnabled));
+
+ /*
+ * Allocate the data structures.
+ */
+ uint32_t cb = RT_UOFFSETOF_DYN(PGMPOOL, aPages[cMaxPages]);
+ cb += cMaxUsers * sizeof(PGMPOOLUSER);
+ cb += cMaxPhysExts * sizeof(PGMPOOLPHYSEXT);
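+ /* Everything lives in a single contiguous allocation:
+ * [ PGMPOOL header + aPages[cMaxPages] | paUsers[cMaxUsers] | paPhysExts[cMaxPhysExts] ]
+ * which is why paUsersR3 and paPhysExtsR3 below are derived via pointer arithmetic. */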
+ PPGMPOOL pPool;
+ RTR0PTR pPoolR0;
+ rc = SUPR3PageAllocEx(RT_ALIGN_32(cb, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pPool, &pPoolR0, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+ Assert(ASMMemIsZero(pPool, cb));
+ pVM->pgm.s.pPoolR3 = pPool->pPoolR3 = pPool;
+ pVM->pgm.s.pPoolR0 = pPool->pPoolR0 = pPoolR0;
+
+ /*
+ * Initialize it.
+ */
+ pPool->pVMR3 = pVM;
+ pPool->pVMR0 = pVM->pVMR0ForCall;
+ pPool->cMaxPages = cMaxPages;
+ pPool->cCurPages = PGMPOOL_IDX_FIRST;
+ pPool->iUserFreeHead = 0;
+ pPool->cMaxUsers = cMaxUsers;
+ PPGMPOOLUSER paUsers = (PPGMPOOLUSER)&pPool->aPages[pPool->cMaxPages];
+ pPool->paUsersR3 = paUsers;
+ pPool->paUsersR0 = pPoolR0 + (uintptr_t)paUsers - (uintptr_t)pPool;
+ for (unsigned i = 0; i < cMaxUsers; i++)
+ {
+ paUsers[i].iNext = i + 1;
+ paUsers[i].iUser = NIL_PGMPOOL_IDX;
+ paUsers[i].iUserTable = 0xfffffffe;
+ }
+ paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
+ pPool->iPhysExtFreeHead = 0;
+ pPool->cMaxPhysExts = cMaxPhysExts;
+ PPGMPOOLPHYSEXT paPhysExts = (PPGMPOOLPHYSEXT)&paUsers[cMaxUsers];
+ pPool->paPhysExtsR3 = paPhysExts;
+ pPool->paPhysExtsR0 = pPoolR0 + (uintptr_t)paPhysExts - (uintptr_t)pPool;
+ for (unsigned i = 0; i < cMaxPhysExts; i++)
+ {
+ paPhysExts[i].iNext = i + 1;
+ paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ }
+ paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
+ for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
+ pPool->aiHash[i] = NIL_PGMPOOL_IDX;
+ pPool->iAgeHead = NIL_PGMPOOL_IDX;
+ pPool->iAgeTail = NIL_PGMPOOL_IDX;
+ pPool->fCacheEnabled = fCacheEnabled;
+
+ pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
+ rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
+ pgmPoolAccessHandler, "Guest Paging Access Handler", &pPool->hAccessHandlerType);
+ AssertLogRelRCReturn(rc, rc);
+
+ pPool->HCPhysTree = 0;
+
+ /*
+ * The NIL entry.
+ */
+ Assert(NIL_PGMPOOL_IDX == 0);
+ pPool->aPages[NIL_PGMPOOL_IDX].enmKind = PGMPOOLKIND_INVALID;
+ pPool->aPages[NIL_PGMPOOL_IDX].idx = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].Core.Key = NIL_RTHCPHYS;
+ pPool->aPages[NIL_PGMPOOL_IDX].GCPhys = NIL_RTGCPHYS;
+ pPool->aPages[NIL_PGMPOOL_IDX].iNext = NIL_PGMPOOL_IDX;
+ /* pPool->aPages[NIL_PGMPOOL_IDX].cLocked = INT32_MAX; - test this out... */
+ pPool->aPages[NIL_PGMPOOL_IDX].pvPageR3 = 0;
+ pPool->aPages[NIL_PGMPOOL_IDX].iUserHead = NIL_PGMPOOL_USER_INDEX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iModifiedNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iModifiedPrev = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredPrev = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iAgeNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iAgePrev = NIL_PGMPOOL_IDX;
+
+ Assert(pPool->aPages[NIL_PGMPOOL_IDX].idx == NIL_PGMPOOL_IDX);
+ Assert(pPool->aPages[NIL_PGMPOOL_IDX].GCPhys == NIL_RTGCPHYS);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fSeenNonGlobal);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fMonitored);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fCached);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fZeroed);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fReusedFlushPending);
+
+ /*
+ * Register statistics.
+ */
+ STAM_REL_REG(pVM, &pPool->StatGrow, STAMTYPE_PROFILE, "/PGM/Pool/Grow", STAMUNIT_TICKS_PER_CALL, "Profiling PGMR0PoolGrow");
+#ifdef VBOX_WITH_STATISTICS
+ STAM_REG(pVM, &pPool->cCurPages, STAMTYPE_U16, "/PGM/Pool/cCurPages", STAMUNIT_PAGES, "Current pool size.");
+ STAM_REG(pVM, &pPool->cMaxPages, STAMTYPE_U16, "/PGM/Pool/cMaxPages", STAMUNIT_PAGES, "Max pool size.");
+ STAM_REG(pVM, &pPool->cUsedPages, STAMTYPE_U16, "/PGM/Pool/cUsedPages", STAMUNIT_PAGES, "The number of pages currently in use.");
+ STAM_REG(pVM, &pPool->cUsedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/cUsedPagesHigh", STAMUNIT_PAGES, "The high watermark for cUsedPages.");
+ STAM_REG(pVM, &pPool->StatAlloc, STAMTYPE_PROFILE_ADV, "/PGM/Pool/Alloc", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolAlloc.");
+ STAM_REG(pVM, &pPool->StatClearAll, STAMTYPE_PROFILE, "/PGM/Pool/ClearAll", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolClearAll.");
+ STAM_REG(pVM, &pPool->StatR3Reset, STAMTYPE_PROFILE, "/PGM/Pool/R3Reset", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolReset.");
+ STAM_REG(pVM, &pPool->StatFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
+ STAM_REG(pVM, &pPool->StatFree, STAMTYPE_PROFILE, "/PGM/Pool/Free", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
+ STAM_REG(pVM, &pPool->StatForceFlushPage, STAMTYPE_COUNTER, "/PGM/Pool/FlushForce", STAMUNIT_OCCURENCES, "Counting explicit flushes by PGMPoolFlushPage().");
+ STAM_REG(pVM, &pPool->StatForceFlushDirtyPage, STAMTYPE_COUNTER, "/PGM/Pool/FlushForceDirty", STAMUNIT_OCCURENCES, "Counting explicit flushes of dirty pages by PGMPoolFlushPage().");
+ STAM_REG(pVM, &pPool->StatForceFlushReused, STAMTYPE_COUNTER, "/PGM/Pool/FlushReused", STAMUNIT_OCCURENCES, "Counting flushes for reused pages.");
+ STAM_REG(pVM, &pPool->StatZeroPage, STAMTYPE_PROFILE, "/PGM/Pool/ZeroPage", STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
+ STAM_REG(pVM, &pPool->cMaxUsers, STAMTYPE_U16, "/PGM/Pool/Track/cMaxUsers", STAMUNIT_COUNT, "Max user tracking records.");
+ STAM_REG(pVM, &pPool->cPresent, STAMTYPE_U32, "/PGM/Pool/Track/cPresent", STAMUNIT_COUNT, "Number of present page table entries.");
+ STAM_REG(pVM, &pPool->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Pool/Track/Deref", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackDeref.");
+ STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPT, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPT", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPT.");
+ STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTs, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTs", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTs.");
+ STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTsSlow, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTsSlow", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTsSlow.");
+ STAM_REG(pVM, &pPool->StatTrackFlushEntry, STAMTYPE_COUNTER, "/PGM/Pool/Track/Entry/Flush", STAMUNIT_COUNT, "Nr of flushed entries.");
+ STAM_REG(pVM, &pPool->StatTrackFlushEntryKeep, STAMTYPE_COUNTER, "/PGM/Pool/Track/Entry/Update", STAMUNIT_COUNT, "Nr of updated entries.");
+ STAM_REG(pVM, &pPool->StatTrackFreeUpOneUser, STAMTYPE_COUNTER, "/PGM/Pool/Track/FreeUpOneUser", STAMUNIT_TICKS_PER_CALL, "The number of times we were out of user tracking records.");
+ STAM_REG(pVM, &pPool->StatTrackDerefGCPhys, STAMTYPE_PROFILE, "/PGM/Pool/Track/DrefGCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling deref activity related to tracking GC physical pages.");
+ STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches.");
+ STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls.");
+
+ STAM_REG(pVM, &pPool->StatMonitorPfRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access handler.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access we've handled (except REP STOSD).");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
+ STAM_REG(pVM, &pPool->StatMonitorPfRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
+
+ STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM", STAMUNIT_TICKS_PER_CALL, "Profiling the regular access handler.");
+ STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the regular access handler.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[7], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[8], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[9], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[10], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[11], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[12], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[13], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[14], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[15], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[16], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[17], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZSizes[18], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6.");
+ STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7.");
+
+ STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults.");
+ STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults.");
+ STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults.");
+ STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults.");
+
+ STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler.");
+ STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[7], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[8], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[9], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[10], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[11], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[12], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[13], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[14], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[15], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses (R3).");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[16], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[17], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[18], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6 in R3.");
+ STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7 in R3.");
+
+ STAM_REG(pVM, &pPool->StatMonitorR3FaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults.");
+ STAM_REG(pVM, &pPool->StatMonitorR3FaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults.");
+ STAM_REG(pVM, &pPool->StatMonitorR3FaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults.");
+ STAM_REG(pVM, &pPool->StatMonitorR3FaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults.");
+
+ STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value.");
+ STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages.");
+ STAM_REG(pVM, &pPool->StatResetDirtyPages, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/Resets", STAMUNIT_OCCURENCES, "Times we've called pgmPoolResetDirtyPages (and there were dirty pages).");
+ STAM_REG(pVM, &pPool->StatDirtyPage, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/Pages", STAMUNIT_OCCURENCES, "Times we've called pgmPoolAddDirtyPage.");
+ STAM_REG(pVM, &pPool->StatDirtyPageDupFlush, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/FlushDup", STAMUNIT_OCCURENCES, "Times we've had to flush duplicates for dirty page management.");
+ STAM_REG(pVM, &pPool->StatDirtyPageOverFlowFlush, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/FlushOverflow",STAMUNIT_OCCURENCES, "Times we've had to flush because of overflow.");
+ STAM_REG(pVM, &pPool->StatCacheHits, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Hits", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls satisfied by the cache.");
+ STAM_REG(pVM, &pPool->StatCacheMisses, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Misses", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls not satisfied by the cache.");
+ STAM_REG(pVM, &pPool->StatCacheKindMismatches, STAMTYPE_COUNTER, "/PGM/Pool/Cache/KindMismatches", STAMUNIT_OCCURENCES, "The number of shadow page kind mismatches. (Better be low, preferably 0!)");
+ STAM_REG(pVM, &pPool->StatCacheFreeUpOne, STAMTYPE_COUNTER, "/PGM/Pool/Cache/FreeUpOne", STAMUNIT_OCCURENCES, "The number of times the cache was asked to free up a page.");
+ STAM_REG(pVM, &pPool->StatCacheCacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Cacheable", STAMUNIT_OCCURENCES, "The number of cacheable allocations.");
+ STAM_REG(pVM, &pPool->StatCacheUncacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Uncacheable", STAMUNIT_OCCURENCES, "The number of uncacheable allocations.");
+#endif /* VBOX_WITH_STATISTICS */
+
+ DBGFR3InfoRegisterInternalEx(pVM, "pgmpoolpages", "Lists page pool pages.", pgmR3PoolInfoPages, 0);
+ DBGFR3InfoRegisterInternalEx(pVM, "pgmpoolroots", "Lists page pool roots.", pgmR3PoolInfoRoots, 0);
+
+#ifdef VBOX_WITH_DEBUGGER
+ /*
+ * Debugger commands.
+ */
+ static bool s_fRegisteredCmds = false;
+ if (!s_fRegisteredCmds)
+ {
+ rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ if (RT_SUCCESS(rc))
+ s_fRegisteredCmds = true;
+ }
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Relocate the page pool data.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void pgmR3PoolRelocate(PVM pVM)
+{
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Grows the shadow page pool.
+ *
+ * I.e. adds more pages to it, assuming it hasn't reached cMaxPages yet.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+VMMR3_INT_DECL(int) PGMR3PoolGrow(PVM pVM, PVMCPU pVCpu)
+{
+ /* This used to do a lot of stuff, but it has moved to ring-0 (PGMR0PoolGrow). */
+ AssertReturn(pVM->pgm.s.pPoolR3->cCurPages < pVM->pgm.s.pPoolR3->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
+ int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_POOL_GROW, 0, NULL);
+ if (rc == VINF_SUCCESS)
+ return rc;
+ LogRel(("PGMR3PoolGrow: rc=%Rrc cCurPages=%#x cMaxPages=%#x\n",
+ rc, pVM->pgm.s.pPoolR3->cCurPages, pVM->pgm.s.pPoolR3->cMaxPages));
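+ /* The pool already has a workable number of pages, so turn the hard failure
+ * into a positive (informational) status and let the caller carry on with
+ * what is already allocated. */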
+ if (pVM->pgm.s.pPoolR3->cCurPages > 128 && RT_FAILURE_NP(rc))
+ return -rc;
+ return rc;
+}
+
+
+/**
+ * Rendezvous callback used by pgmR3PoolClearAll that clears all shadow pages
+ * and all modification counters.
+ *
+ * This is only called on one of the EMTs while the other ones are waiting for
+ * it to complete this function.
+ *
+ * @returns VINF_SUCCESS (VBox strict status code).
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
+ * @param fpvFlushRemTlb When not NULL, we'll flush the REM TLB as well.
+ * (This is the pvUser, so it has to be void *.)
+ *
+ */
+DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *fpvFlushRemTlb)
+{
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ STAM_PROFILE_START(&pPool->StatClearAll, c);
+ NOREF(pVCpu);
+
+ PGM_LOCK_VOID(pVM);
+ Log(("pgmR3PoolClearAllRendezvous: cUsedPages=%d fpvFlushRemTlb=%RTbool\n", pPool->cUsedPages, !!fpvFlushRemTlb));
+
+ /*
+ * Iterate all the pages until we've encountered all that are in use.
+ * This is a simple but not quite optimal solution.
+ */
+ unsigned cModifiedPages = 0; NOREF(cModifiedPages);
+ unsigned cLeft = pPool->cUsedPages;
+ uint32_t iPage = pPool->cCurPages;
+ while (--iPage >= PGMPOOL_IDX_FIRST)
+ {
+ PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
+ if (pPage->GCPhys != NIL_RTGCPHYS)
+ {
+ switch (pPage->enmKind)
+ {
+ /*
+ * We only care about shadow page tables that reference physical memory
+ */
+#ifdef PGM_WITH_LARGE_PAGES
+ case PGMPOOLKIND_PAE_PD_PHYS: /* Large pages reference 2 MB of physical memory, so we must clear them. */
+ if (pPage->cPresent)
+ {
+ PX86PDPAE pShwPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
+ for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
+ {
+ //Assert((pShwPD->a[i].u & UINT64_C(0xfff0000000000f80)) == 0); - bogus, includes X86_PDE_PS.
+ if ((pShwPD->a[i].u & (X86_PDE_P | X86_PDE_PS)) == (X86_PDE_P | X86_PDE_PS))
+ {
+ pShwPD->a[i].u = 0;
+ Assert(pPage->cPresent);
+ pPage->cPresent--;
+ }
+ }
+ if (pPage->cPresent == 0)
+ pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
+ }
+ goto default_case;
+
+ case PGMPOOLKIND_EPT_PD_FOR_PHYS: /* Large pages reference 2 MB of physical memory, so we must clear them. */
+ if (pPage->cPresent)
+ {
+ PEPTPD pShwPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
+ for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
+ {
+ if ((pShwPD->a[i].u & (EPT_E_READ | EPT_E_LEAF)) == (EPT_E_READ | EPT_E_LEAF))
+ {
+ pShwPD->a[i].u = 0;
+ Assert(pPage->cPresent);
+ pPage->cPresent--;
+ }
+ }
+ if (pPage->cPresent == 0)
+ pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
+ }
+ goto default_case;
+
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: /* Large pages reference 2 MB of physical memory, so we must clear them. */
+ if (pPage->cPresent)
+ {
+ PEPTPD pShwPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
+ for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
+ {
+ if ( (pShwPD->a[i].u & EPT_PRESENT_MASK)
+ && (pShwPD->a[i].u & EPT_E_LEAF))
+ {
+ pShwPD->a[i].u = 0;
+ Assert(pPage->cPresent);
+ pPage->cPresent--;
+ }
+ }
+ if (pPage->cPresent == 0)
+ pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
+ }
+ goto default_case;
+# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+#endif /* PGM_WITH_LARGE_PAGES */
+
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+ case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
+ case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+ case PGMPOOLKIND_EPT_PT_FOR_PHYS:
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_PT:
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_2MB:
+ case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT:
+ case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4:
+#endif
+ {
+ if (pPage->cPresent)
+ {
+ void *pvShw = PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
+ STAM_PROFILE_START(&pPool->StatZeroPage, z);
+#if 0
+ /* Useful check for leaking references; *very* expensive though. */
+ switch (pPage->enmKind)
+ {
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+ case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+ {
+ bool fFoundFirst = false;
+ PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)pvShw;
+ for (unsigned ptIndex = 0; ptIndex < RT_ELEMENTS(pPT->a); ptIndex++)
+ {
+ if (pPT->a[ptIndex].u)
+ {
+ if (!fFoundFirst)
+ {
+ AssertFatalMsg(pPage->iFirstPresent <= ptIndex, ("ptIndex = %d first present = %d\n", ptIndex, pPage->iFirstPresent));
+ if (pPage->iFirstPresent != ptIndex)
+ Log(("ptIndex = %d first present = %d\n", ptIndex, pPage->iFirstPresent));
+ fFoundFirst = true;
+ }
+ if (PGMSHWPTEPAE_IS_P(pPT->a[ptIndex]))
+ {
+ pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pPT->a[ptIndex]), NIL_RTGCPHYS);
+ if (pPage->iFirstPresent == ptIndex)
+ pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
+ }
+ }
+ }
+ AssertFatalMsg(pPage->cPresent == 0, ("cPresent = %d pPage = %RGv\n", pPage->cPresent, pPage->GCPhys));
+ break;
+ }
+ default:
+ break;
+ }
+#endif
+ ASMMemZeroPage(pvShw);
+ STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
+ pPage->cPresent = 0;
+ pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
+ }
+ }
+ RT_FALL_THRU();
+ default:
+#ifdef PGM_WITH_LARGE_PAGES
+ default_case:
+#endif
+ Assert(!pPage->cModifications || ++cModifiedPages);
+ Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
+ Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
+ pPage->iModifiedNext = NIL_PGMPOOL_IDX;
+ pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
+ pPage->cModifications = 0;
+ break;
+
+ }
+ if (!--cLeft)
+ break;
+ }
+ }
+
+#ifndef DEBUG_michael
+ AssertMsg(cModifiedPages == pPool->cModifiedPages, ("%d != %d\n", cModifiedPages, pPool->cModifiedPages));
+#endif
+ pPool->iModifiedHead = NIL_PGMPOOL_IDX;
+ pPool->cModifiedPages = 0;
+
+ /*
+ * Clear all the GCPhys links and rebuild the phys ext free list.
+ */
+ for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX);
+ pRam;
+ pRam = pRam->CTX_SUFF(pNext))
+ {
+ iPage = pRam->cb >> GUEST_PAGE_SHIFT;
+ while (iPage-- > 0)
+ PGM_PAGE_SET_TRACKING(pVM, &pRam->aPages[iPage], 0);
+ }
+
+ pPool->iPhysExtFreeHead = 0;
+ PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
+ const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
+ for (unsigned i = 0; i < cMaxPhysExts; i++)
+ {
+ paPhysExts[i].iNext = i + 1;
+ paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
+ paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
+ }
+ paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
+
+
+#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+ /* Reset all dirty pages to reactivate the page monitoring. */
+ /* Note: we must do this *after* clearing all page references and shadow page tables, as there might be stale references to
+ * recently removed MMIO ranges which would otherwise end up asserting in pgmPoolTracDerefGCPhysHint.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
+ {
+ unsigned idxPage = pPool->aidxDirtyPages[i];
+ if (idxPage == NIL_PGMPOOL_IDX)
+ continue;
+
+ PPGMPOOLPAGE pPage = &pPool->aPages[idxPage];
+ Assert(pPage->idx == idxPage);
+ Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
+
+ AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, i));
+
+ Log(("Reactivate dirty page %RGp\n", pPage->GCPhys));
+
+ /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
+ int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
+ AssertRCSuccess(rc);
+ pPage->fDirty = false;
+
+ pPool->aidxDirtyPages[i] = NIL_PGMPOOL_IDX;
+ }
+
+ /* Clear all dirty pages. */
+ pPool->idxFreeDirtyPage = 0;
+ pPool->cDirtyPages = 0;
+#endif
+
+ /* Clear the PGM_SYNC_CLEAR_PGM_POOL flag on all VCPUs to prevent redundant flushes. */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ pVM->apCpusR3[idCpu]->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
+
+ /* Flush job finished. */
+ VM_FF_CLEAR(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);
+ pPool->cPresent = 0;
+ PGM_UNLOCK(pVM);
+
+ PGM_INVL_ALL_VCPU_TLBS(pVM);
+
+ if (fpvFlushRemTlb)
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+
+ STAM_PROFILE_STOP(&pPool->StatClearAll, c);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Clears the shadow page pool.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFlushRemTlb When set, the REM TLB is scheduled for flushing as
+ * well.
+ */
+void pgmR3PoolClearAll(PVM pVM, bool fFlushRemTlb)
+{
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PoolClearAllRendezvous, &fFlushRemTlb);
+ AssertRC(rc);
+}
+
+
+/**
+ * Stringifies a PGMPOOLACCESS value.
+ */
+static const char *pgmPoolPoolAccessToStr(uint8_t enmAccess)
+{
+ switch ((PGMPOOLACCESS)enmAccess)
+ {
+ case PGMPOOLACCESS_DONTCARE: return "DONTCARE";
+ case PGMPOOLACCESS_USER_RW: return "USER_RW";
+ case PGMPOOLACCESS_USER_R: return "USER_R";
+ case PGMPOOLACCESS_USER_RW_NX: return "USER_RW_NX";
+ case PGMPOOLACCESS_USER_R_NX: return "USER_R_NX";
+ case PGMPOOLACCESS_SUPERVISOR_RW: return "SUPERVISOR_RW";
+ case PGMPOOLACCESS_SUPERVISOR_R: return "SUPERVISOR_R";
+ case PGMPOOLACCESS_SUPERVISOR_RW_NX: return "SUPERVISOR_RW_NX";
+ case PGMPOOLACCESS_SUPERVISOR_R_NX: return "SUPERVISOR_R_NX";
+ }
+ return "Unknown Access";
+}
+
+
+/**
+ * Stringifies a PGMPOOLKIND value.
+ */
+static const char *pgmPoolPoolKindToStr(uint8_t enmKind)
+{
+ switch ((PGMPOOLKIND)enmKind)
+ {
+ case PGMPOOLKIND_INVALID:
+ return "INVALID";
+ case PGMPOOLKIND_FREE:
+ return "FREE";
+ case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
+ return "32BIT_PT_FOR_PHYS";
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
+ return "32BIT_PT_FOR_32BIT_PT";
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
+ return "32BIT_PT_FOR_32BIT_4MB";
+ case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+ return "PAE_PT_FOR_PHYS";
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+ return "PAE_PT_FOR_32BIT_PT";
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+ return "PAE_PT_FOR_32BIT_4MB";
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+ return "PAE_PT_FOR_PAE_PT";
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+ return "PAE_PT_FOR_PAE_2MB";
+ case PGMPOOLKIND_32BIT_PD:
+ return "32BIT_PD";
+ case PGMPOOLKIND_32BIT_PD_PHYS:
+ return "32BIT_PD_PHYS";
+ case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
+ return "PAE_PD0_FOR_32BIT_PD";
+ case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
+ return "PAE_PD1_FOR_32BIT_PD";
+ case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
+ return "PAE_PD2_FOR_32BIT_PD";
+ case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
+ return "PAE_PD3_FOR_32BIT_PD";
+ case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
+ return "PAE_PD_FOR_PAE_PD";
+ case PGMPOOLKIND_PAE_PD_PHYS:
+ return "PAE_PD_PHYS";
+ case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
+ return "PAE_PDPT_FOR_32BIT";
+ case PGMPOOLKIND_PAE_PDPT:
+ return "PAE_PDPT";
+ case PGMPOOLKIND_PAE_PDPT_PHYS:
+ return "PAE_PDPT_PHYS";
+ case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
+ return "64BIT_PDPT_FOR_64BIT_PDPT";
+ case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
+ return "64BIT_PDPT_FOR_PHYS";
+ case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
+ return "64BIT_PD_FOR_64BIT_PD";
+ case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
+ return "64BIT_PD_FOR_PHYS";
+ case PGMPOOLKIND_64BIT_PML4:
+ return "64BIT_PML4";
+ case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
+ return "EPT_PDPT_FOR_PHYS";
+ case PGMPOOLKIND_EPT_PD_FOR_PHYS:
+ return "EPT_PD_FOR_PHYS";
+ case PGMPOOLKIND_EPT_PT_FOR_PHYS:
+ return "EPT_PT_FOR_PHYS";
+ case PGMPOOLKIND_ROOT_NESTED:
+ return "ROOT_NESTED";
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_PT:
+ return "EPT_PT_FOR_EPT_PT";
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_2MB:
+ return "EPT_PT_FOR_EPT_2MB";
+ case PGMPOOLKIND_EPT_PD_FOR_EPT_PD:
+ return "EPT_PD_FOR_EPT_PD";
+ case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT:
+ return "EPT_PDPT_FOR_EPT_PDPT";
+ case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4:
+ return "EPT_PML4_FOR_EPT_PML4";
+ }
+ return "Unknown kind!";
+}
+
+
+/**
+ * Write-protects all PGM pool page table entries in order to monitor writes.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @remarks ASSUMES the caller will flush all TLBs!!
+ */
+void pgmR3PoolWriteProtectPages(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ unsigned cLeft = pPool->cUsedPages;
+ unsigned iPage = pPool->cCurPages;
+ while (--iPage >= PGMPOOL_IDX_FIRST)
+ {
+ PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
+ if ( pPage->GCPhys != NIL_RTGCPHYS
+ && pPage->cPresent)
+ {
+ union
+ {
+ void *pv;
+ PX86PT pPT;
+ PPGMSHWPTPAE pPTPae;
+ PEPTPT pPTEpt;
+ } uShw;
+ uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
+
+ switch (pPage->enmKind)
+ {
+ /*
+ * We only care about shadow page tables.
+ */
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
+ case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
+ case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
+ for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPT->a); iShw++)
+ if (uShw.pPT->a[iShw].u & X86_PTE_P)
+ uShw.pPT->a[iShw].u &= ~(X86PGUINT)X86_PTE_RW;
+ break;
+
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+ case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+ for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPTPae->a); iShw++)
+ if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
+ PGMSHWPTEPAE_SET_RO(uShw.pPTPae->a[iShw]);
+ break;
+
+ case PGMPOOLKIND_EPT_PT_FOR_PHYS:
+ for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPTEpt->a); iShw++)
+ if (uShw.pPTEpt->a[iShw].u & EPT_E_READ)
+ uShw.pPTEpt->a[iShw].u &= ~(X86PGPAEUINT)EPT_E_WRITE;
+ break;
+
+ default:
+ break;
+ }
+ if (!--cLeft)
+ break;
+ }
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERINT, pgmpoolpages}
+ */
+static DECLCALLBACK(void) pgmR3PoolInfoPages(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+
+ PPGMPOOL const pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ unsigned const cPages = pPool->cCurPages;
+ unsigned cLeft = pPool->cUsedPages;
+ for (unsigned iPage = 0; iPage < cPages; iPage++)
+ {
+ PGMPOOLPAGE volatile const *pPage = (PGMPOOLPAGE volatile const *)&pPool->aPages[iPage];
+ RTGCPHYS const GCPhys = pPage->GCPhys;
+ uint8_t const enmKind = pPage->enmKind;
+ if ( enmKind != PGMPOOLKIND_INVALID
+ && enmKind != PGMPOOLKIND_FREE)
+ {
+ pHlp->pfnPrintf(pHlp, "#%04x: HCPhys=%RHp GCPhys=%RGp %s %s %s%s%s\n",
+ iPage, pPage->Core.Key, GCPhys, pPage->fA20Enabled ? "A20 " : "!A20",
+ pgmPoolPoolKindToStr(enmKind),
+ pPage->enmAccess == PGMPOOLACCESS_DONTCARE ? "" : pgmPoolPoolAccessToStr(pPage->enmAccess),
+ pPage->fCached ? " cached" : "", pPage->fMonitored ? " monitored" : "");
+ if (!--cLeft)
+ break;
+ }
+ }
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERINT, pgmpoolroots}
+ */
+static DECLCALLBACK(void) pgmR3PoolInfoRoots(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ RT_NOREF(pszArgs);
+
+ PPGMPOOL const pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ unsigned const cPages = pPool->cCurPages;
+ unsigned cLeft = pPool->cUsedPages;
+ for (unsigned iPage = 0; iPage < cPages; iPage++)
+ {
+ PGMPOOLPAGE volatile const *pPage = (PGMPOOLPAGE volatile const *)&pPool->aPages[iPage];
+ RTGCPHYS const GCPhys = pPage->GCPhys;
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ uint8_t const enmKind = pPage->enmKind;
+ switch (enmKind)
+ {
+ default:
+ break;
+
+ case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
+ case PGMPOOLKIND_PAE_PDPT:
+ case PGMPOOLKIND_PAE_PDPT_PHYS:
+ case PGMPOOLKIND_64BIT_PML4:
+ case PGMPOOLKIND_ROOT_NESTED:
+ case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4:
+ pHlp->pfnPrintf(pHlp, "#%04x: HCPhys=%RHp GCPhys=%RGp %s %s %s\n",
+ iPage, pPage->Core.Key, GCPhys, pPage->fA20Enabled ? "A20 " : "!A20",
+ pgmPoolPoolKindToStr(enmKind), pPage->fMonitored ? " monitored" : "");
+ break;
+ }
+ if (!--cLeft)
+ break;
+ }
+ }
+}
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/**
+ * Helper for pgmR3PoolCmdCheck that reports an error.
+ */
+static void pgmR3PoolCheckError(PPGMPOOLCHECKERSTATE pState, const char *pszFormat, ...)
+{
+ if (pState->fFirstMsg)
+ {
+ DBGCCmdHlpPrintf(pState->pCmdHlp, "Checking pool page #%i for %RGp %s\n",
+ pState->pPage->idx, pState->pPage->GCPhys, pgmPoolPoolKindToStr(pState->pPage->enmKind));
+ pState->fFirstMsg = false;
+ }
+
+ va_list va;
+ va_start(va, pszFormat);
+ pState->pCmdHlp->pfnPrintfV(pState->pCmdHlp, NULL, pszFormat, va);
+ va_end(va);
+}
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmpoolcheck' command.}
+ */
+static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs == 0);
+ NOREF(paArgs);
+
+ PGM_LOCK_VOID(pVM);
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ PGMPOOLCHECKERSTATE State = { pCmdHlp, pVM, pPool, NULL, true, 0 };
+ for (unsigned i = 0; i < pPool->cCurPages; i++)
+ {
+ PPGMPOOLPAGE pPage = &pPool->aPages[i];
+ State.pPage = pPage;
+ State.fFirstMsg = true;
+
+ if (pPage->idx != i)
+ pgmR3PoolCheckError(&State, "Invalid idx value: %#x, expected %#x", pPage->idx, i);
+
+ if (pPage->enmKind == PGMPOOLKIND_FREE)
+ continue;
+ if (pPage->enmKind > PGMPOOLKIND_LAST || pPage->enmKind <= PGMPOOLKIND_INVALID)
+ {
+ if (pPage->enmKind != PGMPOOLKIND_INVALID || pPage->idx != 0)
+ pgmR3PoolCheckError(&State, "Invalid enmKind value: %#x\n", pPage->enmKind);
+ continue;
+ }
+
+ void const *pvGuestPage = NULL;
+ PGMPAGEMAPLOCK LockPage;
+ if ( pPage->enmKind != PGMPOOLKIND_EPT_PDPT_FOR_PHYS
+ && pPage->enmKind != PGMPOOLKIND_EPT_PD_FOR_PHYS
+ && pPage->enmKind != PGMPOOLKIND_EPT_PT_FOR_PHYS
+ && pPage->enmKind != PGMPOOLKIND_ROOT_NESTED)
+ {
+ int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, pPage->GCPhys, &pvGuestPage, &LockPage);
+ if (RT_FAILURE(rc))
+ {
+ pgmR3PoolCheckError(&State, "PGMPhysGCPhys2CCPtrReadOnly failed for %RGp: %Rrc\n", pPage->GCPhys, rc);
+ continue;
+ }
+ }
+# define HCPHYS_TO_POOL_PAGE(a_HCPhys) (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, (a_HCPhys))
+
+ /*
+ * Check if something obvious is out of sync.
+ */
+ switch (pPage->enmKind)
+ {
+ case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+ {
+ PCPGMSHWPTPAE const pShwPT = (PCPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ PCX86PDPAE const pGstPT = (PCX86PDPAE)pvGuestPage;
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
+ if (PGMSHWPTEPAE_IS_P(pShwPT->a[j]))
+ {
+ RTHCPHYS HCPhys = NIL_RTHCPHYS;
+ int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[j].u & X86_PTE_PAE_PG_MASK, &HCPhys);
+ if ( rc != VINF_SUCCESS
+ || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[j]) != HCPhys)
+ pgmR3PoolCheckError(&State, "Mismatch HCPhys: rc=%Rrc idx=%#x guest %RX64 shw=%RX64 vs %RHp\n",
+ rc, j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
+ else if ( PGMSHWPTEPAE_IS_RW(pShwPT->a[j])
+ && !(pGstPT->a[j].u & X86_PTE_RW))
+ pgmR3PoolCheckError(&State, "Mismatch r/w gst/shw: idx=%#x guest %RX64 shw=%RX64 vs %RHp\n",
+ j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
+ }
+ break;
+ }
+
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_PT:
+ {
+ PCEPTPT const pShwPT = (PCEPTPT)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ PCEPTPT const pGstPT = (PCEPTPT)pvGuestPage;
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
+ {
+ uint64_t const uShw = pShwPT->a[j].u;
+ if (uShw & EPT_PRESENT_MASK)
+ {
+ uint64_t const uGst = pGstPT->a[j].u;
+ RTHCPHYS HCPhys = NIL_RTHCPHYS;
+ int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), uGst & EPT_E_PG_MASK, &HCPhys);
+ if ( rc != VINF_SUCCESS
+ || (uShw & EPT_E_PG_MASK) != HCPhys)
+ pgmR3PoolCheckError(&State, "Mismatch HCPhys: rc=%Rrc idx=%#x guest %RX64 shw=%RX64 vs %RHp\n",
+ rc, j, uGst, uShw, HCPhys);
+ if ( (uShw & (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
+ != (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE)
+ && ( ((uShw & EPT_E_READ) && !(uGst & EPT_E_READ))
+ || ((uShw & EPT_E_WRITE) && !(uGst & EPT_E_WRITE))
+ || ((uShw & EPT_E_EXECUTE) && !(uGst & EPT_E_EXECUTE)) ) )
+ pgmR3PoolCheckError(&State, "Mismatch r/w/x: idx=%#x guest %RX64 shw=%RX64\n", j, uGst, uShw);
+ }
+ }
+ break;
+ }
+
+ case PGMPOOLKIND_EPT_PT_FOR_EPT_2MB:
+ {
+ PCEPTPT const pShwPT = (PCEPTPT)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
+ {
+ uint64_t const uShw = pShwPT->a[j].u;
+ if (uShw & EPT_E_LEAF)
+ pgmR3PoolCheckError(&State, "Leafness-error: idx=%#x shw=%RX64 (2MB)\n", j, uShw);
+ else if (uShw & EPT_PRESENT_MASK)
+ {
+ RTGCPHYS const GCPhysSubPage = pPage->GCPhys | (j << PAGE_SHIFT);
+ RTHCPHYS HCPhys = NIL_RTHCPHYS;
+ int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), GCPhysSubPage, &HCPhys);
+ if ( rc != VINF_SUCCESS
+ || (uShw & EPT_E_PG_MASK) != HCPhys)
+ pgmR3PoolCheckError(&State, "Mismatch HCPhys: rc=%Rrc idx=%#x guest %RX64 shw=%RX64 vs %RHp\n",
+ rc, j, GCPhysSubPage, uShw, HCPhys);
+ }
+ }
+ break;
+ }
+
+ case PGMPOOLKIND_EPT_PD_FOR_EPT_PD:
+ {
+ PCEPTPD const pShwPD = (PCEPTPD)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ PCEPTPD const pGstPD = (PCEPTPD)pvGuestPage;
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPD->a); j++)
+ {
+ uint64_t const uShw = pShwPD->a[j].u;
+ if (uShw & EPT_PRESENT_MASK)
+ {
+ uint64_t const uGst = pGstPD->a[j].u;
+ if (uShw & EPT_E_LEAF)
+ {
+ if (!(uGst & EPT_E_LEAF))
+ pgmR3PoolCheckError(&State, "Leafness-mismatch: idx=%#x guest %RX64 shw=%RX64\n", j, uGst, uShw);
+ else
+ {
+ RTHCPHYS HCPhys = NIL_RTHCPHYS;
+ int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), uGst & EPT_PDE2M_PG_MASK, &HCPhys);
+ if ( rc != VINF_SUCCESS
+ || (uShw & EPT_E_PG_MASK) != HCPhys)
+ pgmR3PoolCheckError(&State, "Mismatch HCPhys: rc=%Rrc idx=%#x guest %RX64 shw=%RX64 vs %RHp (2MB)\n",
+ rc, j, uGst, uShw, HCPhys);
+ }
+ }
+ else
+ {
+ PPGMPOOLPAGE pSubPage = HCPHYS_TO_POOL_PAGE(uShw & EPT_E_PG_MASK);
+ if (pSubPage)
+ {
+ if ( pSubPage->enmKind != PGMPOOLKIND_EPT_PT_FOR_EPT_PT
+ && pSubPage->enmKind != PGMPOOLKIND_EPT_PT_FOR_EPT_2MB)
+ pgmR3PoolCheckError(&State, "Wrong sub-table type: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x %s\n",
+ j, uGst, uShw, pSubPage->idx, pgmPoolPoolKindToStr(pSubPage->enmKind));
+ if (pSubPage->fA20Enabled != pPage->fA20Enabled)
+ pgmR3PoolCheckError(&State, "Wrong sub-table A20: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x A20=%d, expected %d\n",
+ j, uGst, uShw, pSubPage->idx, pSubPage->fA20Enabled, pPage->fA20Enabled);
+ if (pSubPage->GCPhys != (uGst & EPT_E_PG_MASK))
+ pgmR3PoolCheckError(&State, "Wrong sub-table GCPhys: idx=%#x guest %RX64 shw=%RX64: GCPhys=%#RGp idxSub=%#x\n",
+ j, uGst, uShw, pSubPage->GCPhys, pSubPage->idx);
+ }
+ else
+ pgmR3PoolCheckError(&State, "sub table not found: idx=%#x shw=%RX64\n", j, uShw);
+ }
+ if ( (uShw & (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
+ != (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE)
+ && ( ((uShw & EPT_E_READ) && !(uGst & EPT_E_READ))
+ || ((uShw & EPT_E_WRITE) && !(uGst & EPT_E_WRITE))
+ || ((uShw & EPT_E_EXECUTE) && !(uGst & EPT_E_EXECUTE)) ) )
+ pgmR3PoolCheckError(&State, "Mismatch r/w/x: idx=%#x guest %RX64 shw=%RX64\n",
+ j, uGst, uShw);
+ }
+ }
+ break;
+ }
+
+ case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT:
+ {
+ PCEPTPDPT const pShwPDPT = (PCEPTPDPT)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ PCEPTPDPT const pGstPDPT = (PCEPTPDPT)pvGuestPage;
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPDPT->a); j++)
+ {
+ uint64_t const uShw = pShwPDPT->a[j].u;
+ if (uShw & EPT_PRESENT_MASK)
+ {
+ uint64_t const uGst = pGstPDPT->a[j].u;
+ if (uShw & EPT_E_LEAF)
+ pgmR3PoolCheckError(&State, "No 1GiB shadow pages: idx=%#x guest %RX64 shw=%RX64\n", j, uGst, uShw);
+ else
+ {
+ PPGMPOOLPAGE pSubPage = HCPHYS_TO_POOL_PAGE(uShw & EPT_E_PG_MASK);
+ if (pSubPage)
+ {
+ if (pSubPage->enmKind != PGMPOOLKIND_EPT_PD_FOR_EPT_PD)
+ pgmR3PoolCheckError(&State, "Wrong sub-table type: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x %s\n",
+ j, uGst, uShw, pSubPage->idx, pgmPoolPoolKindToStr(pSubPage->enmKind));
+ if (pSubPage->fA20Enabled != pPage->fA20Enabled)
+ pgmR3PoolCheckError(&State, "Wrong sub-table A20: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x A20=%d, expected %d\n",
+ j, uGst, uShw, pSubPage->idx, pSubPage->fA20Enabled, pPage->fA20Enabled);
+ if (pSubPage->GCPhys != (uGst & EPT_E_PG_MASK))
+ pgmR3PoolCheckError(&State, "Wrong sub-table GCPhys: idx=%#x guest %RX64 shw=%RX64: GCPhys=%#RGp idxSub=%#x\n",
+ j, uGst, uShw, pSubPage->GCPhys, pSubPage->idx);
+ }
+ else
+ pgmR3PoolCheckError(&State, "sub table not found: idx=%#x shw=%RX64\n", j, uShw);
+
+ }
+ if ( (uShw & (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
+ != (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE)
+ && ( ((uShw & EPT_E_READ) && !(uGst & EPT_E_READ))
+ || ((uShw & EPT_E_WRITE) && !(uGst & EPT_E_WRITE))
+ || ((uShw & EPT_E_EXECUTE) && !(uGst & EPT_E_EXECUTE)) ) )
+ pgmR3PoolCheckError(&State, "Mismatch r/w/x: idx=%#x guest %RX64 shw=%RX64\n",
+ j, uGst, uShw);
+ }
+ }
+ break;
+ }
+
+ case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4:
+ {
+ PCEPTPML4 const pShwPML4 = (PCEPTPML4)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+ PCEPTPML4 const pGstPML4 = (PCEPTPML4)pvGuestPage;
+ for (unsigned j = 0; j < RT_ELEMENTS(pShwPML4->a); j++)
+ {
+ uint64_t const uShw = pShwPML4->a[j].u;
+ if (uShw & EPT_PRESENT_MASK)
+ {
+ uint64_t const uGst = pGstPML4->a[j].u;
+ if (uShw & EPT_E_LEAF)
+ pgmR3PoolCheckError(&State, "No 0.5TiB shadow pages: idx=%#x guest %RX64 shw=%RX64\n", j, uGst, uShw);
+ else
+ {
+ PPGMPOOLPAGE pSubPage = HCPHYS_TO_POOL_PAGE(uShw & EPT_E_PG_MASK);
+ if (pSubPage)
+ {
+ if (pSubPage->enmKind != PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT)
+ pgmR3PoolCheckError(&State, "Wrong sub-table type: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x %s\n",
+ j, uGst, uShw, pSubPage->idx, pgmPoolPoolKindToStr(pSubPage->enmKind));
+ if (pSubPage->fA20Enabled != pPage->fA20Enabled)
+ pgmR3PoolCheckError(&State, "Wrong sub-table A20: idx=%#x guest %RX64 shw=%RX64: idxSub=%#x A20=%d, expected %d\n",
+ j, uGst, uShw, pSubPage->idx, pSubPage->fA20Enabled, pPage->fA20Enabled);
+ if (pSubPage->GCPhys != (uGst & EPT_E_PG_MASK))
+ pgmR3PoolCheckError(&State, "Wrong sub-table GCPhys: idx=%#x guest %RX64 shw=%RX64: GCPhys=%#RGp idxSub=%#x\n",
+ j, uGst, uShw, pSubPage->GCPhys, pSubPage->idx);
+ }
+ else
+ pgmR3PoolCheckError(&State, "sub table not found: idx=%#x shw=%RX64\n", j, uShw);
+ }
+ if ( (uShw & (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
+ != (EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE)
+ && ( ((uShw & EPT_E_READ) && !(uGst & EPT_E_READ))
+ || ((uShw & EPT_E_WRITE) && !(uGst & EPT_E_WRITE))
+ || ((uShw & EPT_E_EXECUTE) && !(uGst & EPT_E_EXECUTE)) ) )
+ pgmR3PoolCheckError(&State, "Mismatch r/w/x: idx=%#x guest %RX64 shw=%RX64\n",
+ j, uGst, uShw);
+ }
+ }
+ break;
+ }
+ }
+
+#undef HCPHYS_TO_POOL_PAGE
+ if (pvGuestPage)
+ PGMPhysReleasePageMappingLock(pVM, &LockPage);
+ }
+ PGM_UNLOCK(pVM);
+
+ if (State.cErrors > 0)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Found %#x errors", State.cErrors);
+ DBGCCmdHlpPrintf(pCmdHlp, "no errors found\n");
+ return VINF_SUCCESS;
+}
+
+#endif /* VBOX_WITH_DEBUGGER */
diff --git a/src/VBox/VMM/VMMR3/PGMR3DbgA.asm b/src/VBox/VMM/VMMR3/PGMR3DbgA.asm
new file mode 100644
index 00000000..a2201aa9
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMR3DbgA.asm
@@ -0,0 +1,485 @@
+; $Id: PGMR3DbgA.asm $
+;; @file
+; PGM - Page Manager and Monitor - Debugger & Debugging API Optimizations.
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_ASM_WITH_SEH64
+%include "VBox/asmdefs.mac"
+
+BEGINCODE ;; Doesn't end up in code seg on 64-bit darwin. weird.
+
+
+;
+; Common to all code below.
+;
+%ifdef ASM_CALL64_MSC
+ %define pvNeedle r8
+ %define cbNeedle r9d
+ %define bTmp dl
+%elifdef ASM_CALL64_GCC
+ %define pvNeedle rdx
+ %define cbNeedle esi
+ %define bTmp r9b
+%elifdef RT_ARCH_X86
+ %define pvNeedle dword [esp + 8h]
+ %define cbNeedle dword [esp + 10h]
+%else
+ %error "Unsupported arch!"
+%endif
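+;
+; Note! The GCC defines above reflect the register state after the 'xchg rcx, rsi'
+; done in each prologue below (which leaves cbNeedle in esi).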
+
+;;
+; Searches for an 8 byte needle in steps of 8.
+;
+; In 32-bit mode, only the first 8 bytes of the needle are actually matched.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:ebp+08h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:ebp+0ch] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:ebp+10h] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Used on AMD64 to match needle bytes past the first 8; otherwise ignored.
+;
+; @remarks ASSUMES pbHaystack is aligned at uAlign.
+;
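+; Roughly equivalent C for the common path (illustrative sketch only; the
+; AMD64 code additionally compares any needle bytes beyond the first eight):
+;
+;     uint8_t *scan8w8s(uint8_t *pbHaystack, uint32_t cbHaystack, void const *pvNeedle)
+;     {
+;         uint64_t const uNeedle = *(uint64_t const *)pvNeedle;
+;         for (uint32_t off = 0; off + 8 <= cbHaystack; off += 8)
+;             if (*(uint64_t const *)&pbHaystack[off] == uNeedle)
+;                 return &pbHaystack[off];
+;         return NULL;
+;     }
+;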
+BEGINPROC pgmR3DbgFixedMemScan8Wide8Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov rax, [r8] ; *(uint64_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov rax, [rdx] ; *(uint64_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ push ebp
+ mov ebp, esp
+ push edi ; save it
+ mov edi, [ebp + 08h] ; pbHaystack
+ mov ecx, [ebp + 0ch] ; cbHaystack
+ mov eax, [ebp + 10h] ; pvNeedle
+ mov edx, [eax + 4] ; ((uint32_t *)pvNeedle)[1]
+ mov eax, [eax] ; ((uint32_t *)pvNeedle)[0]
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+%ifdef RT_ARCH_X86
+ ;
+ ; No string instruction to help us here. Do a simple tight loop instead.
+ ;
+ shr ecx, 3
+ jz .return_null
+.again:
+ cmp [edi], eax
+ je .needle_check
+.continue:
+ add edi, 8
+ dec ecx
+ jnz .again
+ jmp .return_null
+
+ ; Check the needle 2nd dword, caller can do the rest.
+.needle_check:
+ cmp edx, [edi + 4]
+ jne .continue
+
+.return_edi:
+ mov eax, edi
+
+%else ; RT_ARCH_AMD64
+ cmp ecx, 8
+ jb .return_null
+.continue:
+ shr ecx, 3
+ repne scasq
+ jne .return_null
+ ; check more of the needle if we can.
+ mov r11d, 8
+ shl ecx, 3
+.needle_check:
+ cmp cbNeedle, r11d
+ je .return_edi
+ cmp ecx, r11d
+ jb .return_edi ; returns success here as we might've lost stuff while shifting ecx around.
+ mov bTmp, [pvNeedle + r11]
+ cmp bTmp, [xDI + r11 - 8]
+ jne .continue
+ inc r11d
+ jmp .needle_check
+
+.return_edi:
+ lea xAX, [xDI - 8]
+%endif ; RT_ARCH_AMD64
+
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ pop edi
+ leave
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+ jmp .return
+ENDPROC pgmR3DbgFixedMemScan8Wide8Step
+
+
+;;
+; Searches for a 4 byte needle in steps of 4.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:esp+04h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:esp+08h] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:esp+0ch] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Used on AMD64 to match needle bytes past the first 4; otherwise ignored.
+;
+; @remarks ASSUMES pbHaystack is aligned at uAlign.
+;
+BEGINPROC pgmR3DbgFixedMemScan4Wide4Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov eax, [r8] ; *(uint32_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov eax, [rdx] ; *(uint32_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ mov edx, edi ; save it
+ mov edi, [esp + 04h] ; pbHaystack
+ mov ecx, [esp + 08h] ; cbHaystack
+ mov eax, [esp + 0ch] ; pvNeedle
+ mov eax, [eax] ; *(uint32_t *)pvNeedle
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+.continue:
+ cmp ecx, 4
+ jb .return_null
+ shr ecx, 2
+ repne scasd
+ jne .return_null
+
+%ifdef RT_ARCH_AMD64
+ ; check more of the needle if we can.
+ mov r11d, 4
+.needle_check:
+ cmp cbNeedle, r11d
+ je .return_edi
+ cmp ecx, r11d ; don't bother converting ecx to bytes.
+ jb .return_edi
+ mov bTmp, [pvNeedle + r11]
+ cmp bTmp, [xDI + r11 - 4]
+ jne .continue
+ inc r11d
+ jmp .needle_check
+%endif
+
+.return_edi:
+ lea xAX, [xDI - 4]
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+ jmp .return
+ENDPROC pgmR3DbgFixedMemScan4Wide4Step
+
+
+;;
+; Searches for a 2 byte needle in steps of 2.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:esp+04h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:esp+08h] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:esp+0ch] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Used on AMD64 to match needle bytes past the first 2; otherwise ignored.
+;
+; @remarks ASSUMES pbHaystack is aligned at uAlign.
+;
+BEGINPROC pgmR3DbgFixedMemScan2Wide2Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov ax, [r8] ; *(uint16_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov ax, [rdx] ; *(uint16_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ mov edx, edi ; save it
+ mov edi, [esp + 04h] ; pbHaystack
+ mov ecx, [esp + 08h] ; cbHaystack
+ mov eax, [esp + 0ch] ; pvNeedle
+ mov ax, [eax] ; *(uint16_t *)pvNeedle
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+.continue:
+ cmp ecx, 2
+ jb .return_null
+ shr ecx, 1
+ repne scasw
+ jne .return_null
+
+%ifdef RT_ARCH_AMD64
+ ; check more of the needle if we can.
+ mov r11d, 2
+.needle_check:
+ cmp cbNeedle, r11d
+ je .return_edi
+ cmp ecx, r11d ; don't bother converting ecx to bytes.
+ jb .return_edi
+ mov bTmp, [pvNeedle + r11]
+ cmp bTmp, [xDI + r11 - 2]
+ jne .continue
+ inc r11d
+ jmp .needle_check
+%endif
+
+.return_edi:
+ lea xAX, [xDI - 2]
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+ jmp .return
+ENDPROC pgmR3DbgFixedMemScan2Wide2Step
+
+
+;;
+; Searches for a 1 byte needle in steps of 1.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:esp+04h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:esp+08h] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:esp+0ch] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Used on AMD64 to match needle bytes past the first byte; otherwise ignored.
+;
+BEGINPROC pgmR3DbgFixedMemScan1Wide1Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov al, [r8] ; *(uint8_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov al, [rdx] ; *(uint8_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ mov edx, edi ; save it
+ mov edi, [esp + 04h] ; pbHaystack
+ mov ecx, [esp + 08h] ; cbHaystack
+ mov eax, [esp + 0ch] ; pvNeedle
+ mov al, [eax] ; *(uint8_t *)pvNeedle
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+ cmp ecx, 1
+ jb .return_null
+.continue:
+ repne scasb
+ jne .return_null
+
+%ifdef RT_ARCH_AMD64
+ ; check more of the needle if we can.
+ mov r11d, 1
+.needle_check:
+ cmp cbNeedle, r11d
+ je .return_edi
+ cmp ecx, r11d
+ jb .return_edi
+ mov bTmp, [pvNeedle + r11]
+ cmp bTmp, [xDI + r11 - 1]
+ jne .continue
+ inc r11d
+ jmp .needle_check
+%endif
+
+.return_edi:
+ lea xAX, [xDI - 1]
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+ENDPROC pgmR3DbgFixedMemScan1Wide1Step
+
+
+;;
+; Searches for a 4 byte needle in steps of 1.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:esp+04h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:esp+08h] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:esp+0ch] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Currently ignored.
+;
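+; Roughly equivalent C (illustrative sketch; edge handling at the end of the
+; buffer differs slightly in the assembly):
+;
+;     uint8_t *scan4w1s(uint8_t *pbHaystack, uint32_t cbHaystack, void const *pvNeedle)
+;     {
+;         uint32_t const uNeedle = *(uint32_t const *)pvNeedle;
+;         for (uint32_t off = 0; off + 4 <= cbHaystack; off++)
+;             if (*(uint32_t const *)&pbHaystack[off] == uNeedle)
+;                 return &pbHaystack[off];
+;         return NULL;
+;     }
+;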
+BEGINPROC pgmR3DbgFixedMemScan4Wide1Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov eax, [r8] ; *(uint32_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov eax, [rdx] ; *(uint32_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ mov edx, edi ; save it
+ mov edi, [esp + 04h] ; pbHaystack
+ mov ecx, [esp + 08h] ; cbHaystack
+ mov eax, [esp + 0ch] ; pvNeedle
+ mov eax, [eax] ; *(uint32_t *)pvNeedle
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+ cmp ecx, 1
+ jb .return_null
+.continue:
+ repne scasb
+ jne .return_null
+ cmp ecx, 3
+ jb .return_null
+ cmp eax, [xDI - 1]
+ jne .continue
+
+.return_edi:
+ lea xAX, [xDI - 1]
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+ENDPROC pgmR3DbgFixedMemScan4Wide1Step
+
+;;
+; Searches for a 8 byte needle in steps of 1.
+;
+; @param pbHaystack [msc:rcx, gcc:rdi, x86:esp+04h] What to search thru.
+; @param cbHaystack [msc:edx, gcc:rsi, x86:esp+08h] The amount of hay to search.
+; @param pvNeedle [msc:r8, gcc:rdx, x86:esp+0ch] What we're searching for.
+; @param cbNeedle [msc:r9, gcc:rcx, x86:esp+10h] Size of what we're searching for. Currently ignored.
+;
+; @remarks The 32-bit version is currently identical to pgmR3DbgFixedMemScan4Wide1Step.
+;
+BEGINPROC pgmR3DbgFixedMemScan8Wide1Step
+%ifdef ASM_CALL64_MSC
+ mov r10, rdi ; save it
+ mov rdi, rcx ; rdi=pbHaystack
+ mov ecx, edx ; rcx=cbHaystack
+ mov rax, [r8] ; *(uint64_t *)pvNeedle
+%elifdef ASM_CALL64_GCC
+ xchg rcx, rsi ; rcx=cbHaystack, rsi=cbNeedle
+ mov rax, [rdx] ; *(uint64_t *)pvNeedle
+%elifdef RT_ARCH_X86
+ mov edx, edi ; save it
+ mov edi, [esp + 04h] ; pbHaystack
+ mov ecx, [esp + 08h] ; cbHaystack
+ mov eax, [esp + 0ch] ; pvNeedle
+ mov eax, [eax] ; *(uint32_t *)pvNeedle
+%else
+ %error "Unsupported arch!"
+%endif
+SEH64_END_PROLOGUE
+
+ cmp ecx, 1
+ jb .return_null
+.continue:
+ repne scasb
+ jne .return_null
+%ifdef RT_ARCH_AMD64
+ cmp ecx, 7
+ jb .check_smaller
+ cmp rax, [xDI - 1]
+ jne .continue
+ jmp .return_edi
+.check_smaller:
+%endif
+ cmp ecx, 3
+ jb .return_null
+ cmp eax, [xDI - 1]
+ jne .continue
+
+.return_edi:
+ lea xAX, [xDI - 1]
+.return:
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+
+.return_null:
+ xor eax, eax
+%ifdef ASM_CALL64_MSC
+ mov rdi, r10
+%elifdef RT_ARCH_X86
+ mov edi, edx
+%endif
+ ret
+ENDPROC pgmR3DbgFixedMemScan8Wide1Step
+
diff --git a/src/VBox/VMM/VMMR3/PGMSavedState.cpp b/src/VBox/VMM/VMMR3/PGMSavedState.cpp
new file mode 100644
index 00000000..fb1dbf97
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMSavedState.cpp
@@ -0,0 +1,3259 @@
+/* $Id: PGMSavedState.cpp $ */
+/** @file
+ * PGM - Page Manager and Monitor, The Saved State Part.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmdrv.h>
+#include <VBox/vmm/pdmdev.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include "PGMInline.h"
+
+#include <VBox/param.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/crc.h>
+#include <iprt/mem.h>
+#include <iprt/sha.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Saved state data unit version. */
+#define PGM_SAVED_STATE_VERSION 14
+/** Saved state data unit version before the PAE PDPE registers. */
+#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
+/** Saved state data unit version after this includes ballooned page flags in
+ * the state (see @bugref{5515}). */
+#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
+/** Saved state before the balloon change. */
+#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
+/** Saved state data unit version used during 3.1 development, misses the RAM
+ * config. */
+#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
+/** Saved state data unit version for 3.0 (pre teleportation). */
+#define PGM_SAVED_STATE_VERSION_3_0_0 9
+/** Saved state data unit version for 2.2.2 and later. */
+#define PGM_SAVED_STATE_VERSION_2_2_2 8
+/** Saved state data unit version for 2.2.0. */
+#define PGM_SAVED_STATE_VERSION_RR_DESC 7
+/** Saved state data unit version. */
+#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
+
+
+/** @name Sparse state record types
+ * @{ */
+/** Zero page. No data. */
+#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
+/** Raw page. */
+#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
+/** Raw MMIO2 page. */
+#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
+/** Zero MMIO2 page. */
+#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
+/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
+#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
+/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
+#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
+/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
+#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
+/** ROM protection (8-bit). */
+#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
+/** Ballooned page. No data. */
+#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
+/** The last record type. */
+#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
+/** End marker. */
+#define PGM_STATE_REC_END UINT8_C(0xff)
+/** Flag indicating that the data is preceded by the page address.
+ * For RAW pages this is an RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
+ * range ID and a 32-bit page index.
+ */
+#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
+/** @} */
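+
+/* Record stream sketch (illustrative): each record starts with a type byte;
+ * when PGM_STATE_REC_FLAG_ADDR is set the address follows, then any payload.
+ * E.g. an out-of-sequence shadowed ROM page is written roughly like this
+ * (cf. pgmR3SaveShadowedRomPages below):
+ *     SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_SHW_RAW | PGM_STATE_REC_FLAG_ADDR);
+ *     SSMR3PutU8(pSSM, idSavedState);             // 8-bit range ID
+ *     SSMR3PutU32(pSSM, iPage);                   // 32-bit page index
+ *     SSMR3PutU8(pSSM, (uint8_t)enmProt);         // protection
+ *     SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE); // raw bits
+ */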
+
+/** The CRC-32 for a zero page. */
+#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
+/** The CRC-32 for a zero half page. */
+#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
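+/* Both constants equal RTCrc32() over the corresponding number of zero bytes
+ * (a whole and a half guest page respectively, assuming the usual 4 KiB
+ * pages), so zero pages never need hashing at scan time. */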
+
+
+
+/** @name Old Page types used in older saved states.
+ * @{ */
+/** Old saved state: The usual invalid zero entry. */
+#define PGMPAGETYPE_OLD_INVALID 0
+/** Old saved state: RAM page. (RWX) */
+#define PGMPAGETYPE_OLD_RAM 1
+/** Old saved state: MMIO2 page. (RWX) */
+#define PGMPAGETYPE_OLD_MMIO2 1
+/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
+ * See PGMHandlerPhysicalPageAlias(). */
+#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 2
+/** Old saved state: Shadowed ROM. (RWX) */
+#define PGMPAGETYPE_OLD_ROM_SHADOW 3
+/** Old saved state: ROM page. (R-X) */
+#define PGMPAGETYPE_OLD_ROM 4
+/** Old saved state: MMIO page. (---) */
+#define PGMPAGETYPE_OLD_MMIO 5
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** For loading old saved states. (pre-smp) */
+typedef struct
+{
+ /** If set no conflict checks are required. (boolean) */
+ bool fMappingsFixed;
+ /** Size of fixed mapping */
+ uint32_t cbMappingFixed;
+ /** Base address (GC) of fixed mapping */
+ RTGCPTR GCPtrMappingFixed;
+ /** A20 gate mask.
+ * Our current approach to A20 emulation is to let REM do it and don't bother
+ * anywhere else. The interesting guests will be operating with it enabled anyway.
+ * But should the need arise, we'll subject physical addresses to this mask. */
+ RTGCPHYS GCPhysA20Mask;
+ /** A20 gate state - boolean! */
+ bool fA20Enabled;
+ /** The guest paging mode. */
+ PGMMODE enmGuestMode;
+} PGMOLD;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** PGM fields to save/load. */
+static const SSMFIELD s_aPGMFields[] =
+{
+ SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
+ SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
+ SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
+ SSMFIELD_ENTRY( PGM, cBalloonedPages),
+ SSMFIELD_ENTRY_TERM()
+};
+
+static const SSMFIELD s_aPGMFieldsPreBalloon[] =
+{
+ SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
+ SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
+ SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
+ SSMFIELD_ENTRY_TERM()
+};
+
+static const SSMFIELD s_aPGMCpuFields[] =
+{
+ SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
+ SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
+ SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
+ SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[0]),
+ SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[1]),
+ SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[2]),
+ SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[3]),
+ SSMFIELD_ENTRY_TERM()
+};
+
+static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
+{
+ SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
+ SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
+ SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
+ SSMFIELD_ENTRY_TERM()
+};
+
+static const SSMFIELD s_aPGMFields_Old[] =
+{
+ SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
+ SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
+ SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
+ SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
+ SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
+ SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
+ SSMFIELD_ENTRY_TERM()
+};
+
+
+/**
+ * Find the ROM tracking structure for the given page.
+ *
+ * @returns Pointer to the ROM page structure, or NULL if GCPhys is not
+ * within any ROM range (i.e. the caller didn't check that it's a ROM page).
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The address of the ROM page.
+ */
+static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
+{
+ for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
+ pRomRange;
+ pRomRange = pRomRange->CTX_SUFF(pNext))
+ {
+ RTGCPHYS off = GCPhys - pRomRange->GCPhys;
+ if (off < pRomRange->cb)
+ return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
+ }
+ return NULL;
+}
+
+
+/**
+ * Prepares the ROM pages for a live save.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int pgmR3PrepRomPages(PVM pVM)
+{
+ /*
+ * Initialize the live save tracking in the ROM page descriptors.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ PPGMRAMRANGE pRamHint = NULL;
+ uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
+
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
+ pRom->aPages[iPage].LiveSave.fWrittenTo = false;
+ pRom->aPages[iPage].LiveSave.fDirty = true;
+ pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
+ if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
+ {
+ if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
+ pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
+ else
+ {
+ RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ PPGMPAGE pPage;
+ int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
+ AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
+ if (RT_SUCCESS(rc))
+ pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
+ else
+ pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
+ }
+ }
+ }
+
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
+ if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
+ }
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Assigns IDs to the ROM ranges and saves them.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM Saved state handle.
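+ *
+ * @remarks Per-range stream layout (see the loop body): u8 id, device name
+ * string (empty here), u32 instance (0), u8 region (0), description
+ * string, GCPhys, cb. The list is terminated by a UINT8_MAX id.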
+ */
+static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
+{
+ PGM_LOCK_VOID(pVM);
+ uint8_t id = 1;
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
+ {
+ pRom->idSavedState = id;
+ SSMR3PutU8(pSSM, id);
+ SSMR3PutStrZ(pSSM, ""); /* device name */
+ SSMR3PutU32(pSSM, 0); /* device instance */
+ SSMR3PutU8(pSSM, 0); /* region */
+ SSMR3PutStrZ(pSSM, pRom->pszDesc);
+ SSMR3PutGCPhys(pSSM, pRom->GCPhys);
+ int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ PGM_UNLOCK(pVM);
+ return SSMR3PutU8(pSSM, UINT8_MAX);
+}
+
+
+/**
+ * Loads the ROM range ID assignments.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ pRom->idSavedState = UINT8_MAX;
+
+ for (;;)
+ {
+ /*
+ * Read the data.
+ */
+ uint8_t id;
+ int rc = SSMR3GetU8(pSSM, &id);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (id == UINT8_MAX)
+ {
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ if (pRom->idSavedState != UINT8_MAX)
+ { /* likely */ }
+ else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
+ LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
+ pRom->pszDesc));
+ else
+ AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
+ ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
+ pRom->pszDesc));
+ return VINF_SUCCESS; /* the end */
+ }
+ AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
+ rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
+ AssertLogRelRCReturn(rc, rc);
+
+ uint32_t uInstance;
+ SSMR3GetU32(pSSM, &uInstance);
+ uint8_t iRegion;
+ SSMR3GetU8(pSSM, &iRegion);
+
+ char szDesc[64];
+ rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
+ AssertLogRelRCReturn(rc, rc);
+
+ RTGCPHYS GCPhys;
+ SSMR3GetGCPhys(pSSM, &GCPhys);
+ RTGCPHYS cb;
+ rc = SSMR3GetGCPhys(pSSM, &cb);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /*
+ * Locate a matching ROM range.
+ */
+ AssertLogRelMsgReturn( uInstance == 0
+ && iRegion == 0
+ && szDevName[0] == '\0',
+ ("GCPhys=%RGp %s\n", GCPhys, szDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ PPGMROMRANGE pRom;
+ for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ if ( pRom->idSavedState == UINT8_MAX
+ && !strcmp(pRom->pszDesc, szDesc))
+ {
+ pRom->idSavedState = id;
+ break;
+ }
+ }
+ if (!pRom)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
+ } /* forever */
+}
+
+
+/**
+ * Scan ROM pages.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void pgmR3ScanRomPages(PVM pVM)
+{
+ /*
+ * The shadow ROMs.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+ uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
+ if (pRomPage->LiveSave.fWrittenTo)
+ {
+ pRomPage->LiveSave.fWrittenTo = false;
+ if (!pRomPage->LiveSave.fDirty)
+ {
+ pRomPage->LiveSave.fDirty = true;
+ pVM->pgm.s.LiveSave.Rom.cReadyPages--;
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
+ }
+ pRomPage->LiveSave.fDirtiedRecently = true;
+ }
+ else
+ pRomPage->LiveSave.fDirtiedRecently = false;
+ }
+ }
+ }
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Takes care of the virgin ROM pages in the first pass.
+ *
+ * This is an attempt at simplifying the handling of ROM pages a little bit.
+ * This ASSUMES that no new ROM ranges will be added and that they won't be
+ * relinked in any way.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param fLiveSave Whether we're in a live save or not.
+ */
+static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
+{
+ PGM_LOCK_VOID(pVM);
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;
+
+ /* Get the virgin page descriptor. */
+ PPGMPAGE pPage;
+ if (PGMROMPROT_IS_ROM(enmProt))
+ pPage = pgmPhysGetPage(pVM, GCPhys);
+ else
+ pPage = &pRom->aPages[iPage].Virgin;
+
+ /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
+ int rc = VINF_SUCCESS;
+ char abPage[GUEST_PAGE_SIZE];
+ if ( !PGM_PAGE_IS_ZERO(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ void const *pvPage;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (!PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)
+ pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
+ else
+#endif
+ rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
+ if (RT_SUCCESS(rc))
+ memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
+ }
+ else
+ RT_ZERO(abPage);
+ PGM_UNLOCK(pVM);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
+
+ /* Save it. */
+ if (iPage > 0)
+ SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
+ else
+ {
+ SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutU8(pSSM, pRom->idSavedState);
+ SSMR3PutU32(pSSM, iPage);
+ }
+ SSMR3PutU8(pSSM, (uint8_t)enmProt);
+ rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* Update state. */
+ PGM_LOCK_VOID(pVM);
+ pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
+ if (fLiveSave)
+ {
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
+ pVM->pgm.s.LiveSave.Rom.cReadyPages++;
+ pVM->pgm.s.LiveSave.cSavedPages++;
+ }
+ }
+ }
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Saves dirty pages in the shadowed ROM ranges.
+ *
+ * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param fLiveSave Whether it's a live save or not.
+ * @param fFinalPass Whether this is the final pass or not.
+ */
+static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
+{
+ /*
+ * The Shadowed ROMs.
+ *
+ * ASSUMES that the ROM ranges are fixed.
+ * ASSUMES that all the ROM ranges are mapped.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ {
+ if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
+ {
+ uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPrevPage = cPages;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
+ if ( !fLiveSave
+ || ( pRomPage->LiveSave.fDirty
+ && ( ( !pRomPage->LiveSave.fDirtiedRecently
+ && !pRomPage->LiveSave.fWrittenTo)
+ || fFinalPass
+ )
+ )
+ )
+ {
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ PGMROMPROT enmProt = pRomPage->enmProt;
+ RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
+ bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage);
+ Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
+ int rc = VINF_SUCCESS;
+ if (!fZero)
+ {
+ void const *pvPage;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)
+ pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
+ else
+#endif
+ rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
+ if (RT_SUCCESS(rc))
+ memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
+ }
+ if (fLiveSave && RT_SUCCESS(rc))
+ {
+ pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
+ pRomPage->LiveSave.fDirty = false;
+ pVM->pgm.s.LiveSave.Rom.cReadyPages++;
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
+ pVM->pgm.s.LiveSave.cSavedPages++;
+ }
+ PGM_UNLOCK(pVM);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
+
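+ /* Pages immediately following the previously saved one omit the
+ PGM_STATE_REC_FLAG_ADDR prefix; the loader is expected to advance
+ the page index implicitly. */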
+ if (iPage - 1U == iPrevPage && iPage > 0)
+ SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
+ else
+ {
+ SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutU8(pSSM, pRom->idSavedState);
+ SSMR3PutU32(pSSM, iPage);
+ }
+ rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
+ if (!fZero)
+ rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ PGM_LOCK_VOID(pVM);
+ iPrevPage = iPage;
+ }
+ /*
+ * In the final pass, make sure the protection is in sync.
+ */
+ else if ( fFinalPass
+ && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
+ {
+ PGMROMPROT enmProt = pRomPage->enmProt;
+ pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
+ PGM_UNLOCK(pVM);
+
+ if (iPage - 1U == iPrevPage && iPage > 0)
+ SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
+ else
+ {
+ SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutU8(pSSM, pRom->idSavedState);
+ SSMR3PutU32(pSSM, iPage);
+ }
+ int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ PGM_LOCK_VOID(pVM);
+ iPrevPage = iPage;
+ }
+ }
+ }
+ }
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Cleans up ROM pages after a live save.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void pgmR3DoneRomPages(PVM pVM)
+{
+ NOREF(pVM);
+}
+
+
+/**
+ * Prepares the MMIO2 pages for a live save.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int pgmR3PrepMmio2Pages(PVM pVM)
+{
+ /*
+ * Initialize the live save tracking in the MMIO2 ranges.
+ * ASSUME nothing changes here.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ {
+ uint32_t const cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ PGM_UNLOCK(pVM);
+
+ PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
+ sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
+ if (!paLSPages)
+ return VERR_NO_MEMORY;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ /* Initialize it as a dirty zero page. */
+ paLSPages[iPage].fDirty = true;
+ paLSPages[iPage].cUnchangedScans = 0;
+ paLSPages[iPage].fZero = true;
+ paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
+ paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
+ }
+
+ PGM_LOCK_VOID(pVM);
+ pRegMmio->paLSPages = paLSPages;
+ pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
+ }
+ PGM_UNLOCK(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Assigns IDs to the MMIO2 ranges and saves them.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM Saved state handle.
+ */
+static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
+{
+ PGM_LOCK_VOID(pVM);
+ uint8_t id = 1;
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ {
+ pRegMmio->idSavedState = id;
+ SSMR3PutU8(pSSM, id);
+ SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
+ SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
+ SSMR3PutU8(pSSM, pRegMmio->iRegion);
+ SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
+ int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
+ if (RT_FAILURE(rc))
+ break;
+ id++;
+ }
+ PGM_UNLOCK(pVM);
+ return SSMR3PutU8(pSSM, UINT8_MAX);
+}
+
+
+/**
+ * Loads the MMIO2 range ID assignments.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ pRegMmio->idSavedState = UINT8_MAX;
+
+ for (;;)
+ {
+ /*
+ * Read the data.
+ */
+ uint8_t id;
+ int rc = SSMR3GetU8(pSSM, &id);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (id == UINT8_MAX)
+ {
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX, ("%s\n", pRegMmio->RamRange.pszDesc));
+ return VINF_SUCCESS; /* the end */
+ }
+ AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
+ rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
+ AssertLogRelRCReturn(rc, rc);
+
+ uint32_t uInstance;
+ SSMR3GetU32(pSSM, &uInstance);
+ uint8_t iRegion;
+ SSMR3GetU8(pSSM, &iRegion);
+
+ char szDesc[64];
+ rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
+ AssertLogRelRCReturn(rc, rc);
+
+ RTGCPHYS cb;
+ rc = SSMR3GetGCPhys(pSSM, &cb);
+ AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /*
+ * Locate a matching MMIO2 range.
+ */
+ PPGMREGMMIO2RANGE pRegMmio;
+ for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ {
+ if ( pRegMmio->idSavedState == UINT8_MAX
+ && pRegMmio->iRegion == iRegion
+ && pRegMmio->pDevInsR3->iInstance == uInstance
+ && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
+ {
+ pRegMmio->idSavedState = id;
+ break;
+ }
+ }
+ if (!pRegMmio)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
+ szDesc, szDevName, uInstance, iRegion);
+
+ /*
+ * Validate the configuration, the size of the MMIO2 region should be
+ * the same.
+ */
+ if (cb != pRegMmio->RamRange.cb)
+ {
+ LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
+ pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
+ if (cb > pRegMmio->RamRange.cb) /* bad idea? */
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
+ pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
+ }
+ } /* forever */
+}
+
+
+/**
+ * Scans one MMIO2 page.
+ *
+ * @returns True if changed, false if unchanged.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pbPage The page bits.
+ * @param pLSPage The live save tracking structure for the page.
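+ *
+ * @remarks A page counts as unchanged only when both half-page CRCs match;
+ * when the first half changed we skip the second-half check until
+ * the next scan, trading a little detection latency for scan cost.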
+ */
+DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
+{
+ /*
+ * Special handling of zero pages.
+ */
+ bool const fZero = pLSPage->fZero;
+ if (fZero)
+ {
+ if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
+ {
+ /* Not modified. */
+ if (pLSPage->fDirty)
+ pLSPage->cUnchangedScans++;
+ return false;
+ }
+
+ pLSPage->fZero = false;
+ pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
+ }
+ else
+ {
+ /*
+ * CRC the first half, if it doesn't match the page is dirty and
+ * we won't check the 2nd half (we'll do that next time).
+ */
+ uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
+ if (u32CrcH1 == pLSPage->u32CrcH1)
+ {
+ uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
+ if (u32CrcH2 == pLSPage->u32CrcH2)
+ {
+ /* Probably not modified. */
+ if (pLSPage->fDirty)
+ pLSPage->cUnchangedScans++;
+ return false;
+ }
+
+ pLSPage->u32CrcH2 = u32CrcH2;
+ }
+ else
+ {
+ pLSPage->u32CrcH1 = u32CrcH1;
+ if ( u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
+ && ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
+ {
+ pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
+ pLSPage->fZero = true;
+ }
+ }
+ }
+
+ /* dirty page path */
+ pLSPage->cUnchangedScans = 0;
+ if (!pLSPage->fDirty)
+ {
+ pLSPage->fDirty = true;
+ pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
+ pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
+ if (fZero)
+ pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
+ }
+ return true;
+}
+
+
+/**
+ * Scan for MMIO2 page modifications.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uPass The pass number.
+ */
+static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
+{
+ /*
+ * Since this is a bit expensive we lower the scan rate after a little while.
+ */
+ if ( ( (uPass & 3) != 0
+ && uPass > 10)
+ || uPass == SSM_PASS_FINAL)
+ return;
+
+ PGM_LOCK_VOID(pVM); /* paranoia */
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ {
+ PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
+ uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ PGM_UNLOCK(pVM);
+
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * GUEST_PAGE_SIZE;
+ pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
+ }
+
+ PGM_LOCK_VOID(pVM);
+ }
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Save quiescent MMIO2 pages.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param fLiveSave Whether it's a live save or not.
+ * @param uPass The pass number.
+ */
+static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
+{
+ /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
+ * device that we wish to know about changes.) */
+
+ int rc = VINF_SUCCESS;
+ if (uPass == SSM_PASS_FINAL)
+ {
+ /*
+ * The mop up round.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
+ pRegMmio && RT_SUCCESS(rc);
+ pRegMmio = pRegMmio->pNextR3)
+ {
+ PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
+ uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
+ uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPageLast = cPages;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
+ {
+ uint8_t u8Type;
+ if (!fLiveSave)
+ u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
+ else
+ {
+ /* Try to figure out whether it's a clean page; compare the SHA-1 to be really sure. */
+ if ( !paLSPages[iPage].fDirty
+ && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
+ {
+ if (paLSPages[iPage].fZero)
+ continue;
+
+ uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
+ RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
+ if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
+ continue;
+ }
+ u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
+ pVM->pgm.s.LiveSave.cSavedPages++;
+ }
+
+ if (iPage != 0 && iPage == iPageLast + 1)
+ rc = SSMR3PutU8(pSSM, u8Type);
+ else
+ {
+ SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutU8(pSSM, pRegMmio->idSavedState);
+ rc = SSMR3PutU32(pSSM, iPage);
+ }
+ if (u8Type == PGM_STATE_REC_MMIO2_RAW)
+ rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ break;
+ iPageLast = iPage;
+ }
+ }
+ PGM_UNLOCK(pVM);
+ }
+ /*
+ * Reduce the rate after a little while since the current MMIO2 approach is
+ * a bit expensive.
+ * We position it two passes after the scan pass to avoid saving busy pages.
+ */
+ else if ( uPass <= 10
+ || (uPass & 3) == 2)
+ {
+ PGM_LOCK_VOID(pVM);
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
+ pRegMmio && RT_SUCCESS(rc);
+ pRegMmio = pRegMmio->pNextR3)
+ {
+ PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
+ uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
+ uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPageLast = cPages;
+ PGM_UNLOCK(pVM);
+
+ for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
+ {
+ /* Skip clean pages and pages which haven't quiesced. */
+ if (!paLSPages[iPage].fDirty)
+ continue;
+ if (paLSPages[iPage].cUnchangedScans < 3)
+ continue;
+ if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
+ continue;
+
+ /* Save it. */
+ bool const fZero = paLSPages[iPage].fZero;
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ if (!fZero)
+ {
+ memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
+ RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
+ }
+
+ uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
+ if (iPage != 0 && iPage == iPageLast + 1)
+ rc = SSMR3PutU8(pSSM, u8Type);
+ else
+ {
+ SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutU8(pSSM, pRegMmio->idSavedState);
+ rc = SSMR3PutU32(pSSM, iPage);
+ }
+ if (u8Type == PGM_STATE_REC_MMIO2_RAW)
+ rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ break;
+
+ /* Housekeeping. */
+ paLSPages[iPage].fDirty = false;
+ pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
+ pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
+ if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
+ pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
+ pVM->pgm.s.LiveSave.cSavedPages++;
+ iPageLast = iPage;
+ }
+
+ PGM_LOCK_VOID(pVM);
+ }
+ PGM_UNLOCK(pVM);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Cleans up MMIO2 pages after a live save.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void pgmR3DoneMmio2Pages(PVM pVM)
+{
+ /*
+ * Free the tracking structures for the MMIO2 pages.
+ * We do the freeing outside the lock in case the VM is running.
+ */
+ PGM_LOCK_VOID(pVM);
+ for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ {
+ void *pvMmio2ToFree = pRegMmio->paLSPages;
+ if (pvMmio2ToFree)
+ {
+ pRegMmio->paLSPages = NULL;
+ PGM_UNLOCK(pVM);
+ MMR3HeapFree(pvMmio2ToFree);
+ PGM_LOCK_VOID(pVM);
+ }
+ }
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Prepares the RAM pages for a live save.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int pgmR3PrepRamPages(PVM pVM)
+{
+ /*
+ * Try allocating tracking structures for the ram ranges.
+ *
+ * To avoid lock contention, we leave the lock every time we're allocating
+ * a new array. This means we'll have to ditch the allocation and start
+ * all over again if the RAM range list changes in-between.
+ *
+ * Note! pgmR3SaveDone will always be called and it is therefore responsible
+ * for cleaning up.
+ */
+ PPGMRAMRANGE pCur;
+ PGM_LOCK_VOID(pVM);
+ do
+ {
+ for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ if ( !pCur->paLSPages
+ && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
+ {
+ uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+ uint32_t const cPages = pCur->cb >> GUEST_PAGE_SHIFT;
+ PGM_UNLOCK(pVM);
+ PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
+ if (!paLSPages)
+ return VERR_NO_MEMORY;
+ PGM_LOCK_VOID(pVM);
+ if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
+ {
+ PGM_UNLOCK(pVM);
+ MMR3HeapFree(paLSPages);
+ PGM_LOCK_VOID(pVM);
+ break; /* try again */
+ }
+ pCur->paLSPages = paLSPages;
+
+ /*
+ * Initialize the array.
+ */
+ uint32_t iPage = cPages;
+ while (iPage-- > 0)
+ {
+ /** @todo yield critsect! (after moving this away from EMT0) */
+ PCPGMPAGE pPage = &pCur->aPages[iPage];
+ paLSPages[iPage].cDirtied = 0;
+ paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
+ paLSPages[iPage].fWriteMonitored = 0;
+ paLSPages[iPage].fWriteMonitoredJustNow = 0;
+ paLSPages[iPage].u2Reserved = 0;
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_RAM:
+ if ( PGM_PAGE_IS_ZERO(pPage)
+ || PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ paLSPages[iPage].fZero = 1;
+ paLSPages[iPage].fShared = 0;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
+#endif
+ }
+ else if (PGM_PAGE_IS_SHARED(pPage))
+ {
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX;
+#endif
+ }
+ else
+ {
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 0;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX;
+#endif
+ }
+ paLSPages[iPage].fIgnore = 0;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
+ break;
+
+ case PGMPAGETYPE_ROM_SHADOW:
+ case PGMPAGETYPE_ROM:
+ {
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 0;
+ paLSPages[iPage].fDirty = 0;
+ paLSPages[iPage].fIgnore = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX;
+#endif
+ pVM->pgm.s.LiveSave.cIgnoredPages++;
+ break;
+ }
+
+ default:
+ AssertMsgFailed(("%R[pgmpage]", pPage));
+ RT_FALL_THRU();
+ case PGMPAGETYPE_MMIO2:
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 0;
+ paLSPages[iPage].fDirty = 0;
+ paLSPages[iPage].fIgnore = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX;
+#endif
+ pVM->pgm.s.LiveSave.cIgnoredPages++;
+ break;
+
+ case PGMPAGETYPE_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 0;
+ paLSPages[iPage].fDirty = 0;
+ paLSPages[iPage].fIgnore = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX;
+#endif
+ pVM->pgm.s.LiveSave.cIgnoredPages++;
+ break;
+ }
+ }
+ }
+ }
+ } while (pCur);
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Saves the RAM configuration.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
+{
+ uint32_t cbRamHole = 0;
+ int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
+ AssertRCReturn(rc, rc);
+
+ uint64_t cbRam = 0;
+ rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
+ AssertRCReturn(rc, rc);
+
+ SSMR3PutU32(pSSM, cbRamHole);
+ return SSMR3PutU64(pSSM, cbRam);
+}
+
+
+/**
+ * Loads and verifies the RAM configuration.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
+{
+ uint32_t cbRamHoleCfg = 0;
+ int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
+ AssertRCReturn(rc, rc);
+
+ uint64_t cbRamCfg = 0;
+ rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
+ AssertRCReturn(rc, rc);
+
+ uint32_t cbRamHoleSaved;
+ SSMR3GetU32(pSSM, &cbRamHoleSaved);
+
+ uint64_t cbRamSaved;
+ rc = SSMR3GetU64(pSSM, &cbRamSaved);
+ AssertRCReturn(rc, rc);
+
+ if ( cbRamHoleCfg != cbRamHoleSaved
+ || cbRamCfg != cbRamSaved)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
+ cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
+ return VINF_SUCCESS;
+}
+
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+
+/**
+ * Calculates the CRC-32 for a RAM page and updates the live save page tracking
+ * info with it.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCur The current RAM range.
+ * @param paLSPages The current array of live save page tracking
+ * structures.
+ * @param iPage The page index.
+ */
+static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
+{
+ RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ PGMPAGEMAPLOCK PgMpLck;
+ void const *pvPage;
+ int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ else
+ paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
+}
+
+
+/**
+ * Verifies the CRC-32 for a page given its raw bits.
+ *
+ * @param pvPage The page bits.
+ * @param pCur The current RAM range.
+ * @param paLSPages The current array of live save page tracking
+ * structures.
+ * @param iPage The page index.
+ * @param pszWhere Where we're called from (for the assertion message).
+ */
+static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
+{
+ if (paLSPages[iPage].u32Crc != UINT32_MAX)
+ {
+ uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
+ Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
+ && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
+ || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
+ AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
+ ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
+ pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
+ }
+}
+
+
+/**
+ * Verifies the CRC-32 for a RAM page.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCur The current RAM range.
+ * @param paLSPages The current array of live save page tracking
+ * structures.
+ * @param iPage The page index.
+ * @param pszWhere Where we're called from (for the assertion message).
+ */
+static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
+{
+ if (paLSPages[iPage].u32Crc != UINT32_MAX)
+ {
+ RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ PGMPAGEMAPLOCK PgMpLck;
+ void const *pvPage;
+ int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ }
+}
+
+#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
+
+/**
+ * Scan for RAM page modifications and reprotect them.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFinalPass Whether this is the final pass or not.
+ */
+static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
+{
+ /*
+ * The RAM.
+ */
+ RTGCPHYS GCPhysCur = 0;
+ PPGMRAMRANGE pCur;
+ PGM_LOCK_VOID(pVM);
+ do
+ {
+ uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+ for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ if ( pCur->GCPhysLast > GCPhysCur
+ && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
+ {
+ PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
+ uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
+ GCPhysCur = 0;
+ for (; iPage < cPages; iPage++)
+ {
+ /* Do yield first. */
+ if ( !fFinalPass
+#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
+ && (iPage & 0x7ff) == 0x100
+#endif
+ && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
+ && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
+ {
+ GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ break; /* restart */
+ }
+
+ /* Skip already ignored pages. */
+ if (paLSPages[iPage].fIgnore)
+ continue;
+
+ if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
+ {
+ /*
+ * A RAM page.
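+ *
+ * Transitions tracked below: ALLOCATED pages are (re)write-monitored
+ * and marked dirty; WRITE_MONITORED pages stay clean unless they have
+ * outstanding write locks; ZERO/BALLOONED and SHARED transitions dirty
+ * the page once so the new state gets saved.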
+ */
+ switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
+ {
+ case PGM_PAGE_STATE_ALLOCATED:
+ /** @todo Optimize this: Don't always re-enable write
+ * monitoring if the page is known to be very busy. */
+ if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
+ {
+ AssertMsg(paLSPages[iPage].fWriteMonitored,
+ ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
+ PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
+ Assert(pVM->pgm.s.cWrittenToPages > 0);
+ pVM->pgm.s.cWrittenToPages--;
+ }
+ else
+ {
+ AssertMsg(!paLSPages[iPage].fWriteMonitored,
+ ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
+ pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
+ }
+
+ if (!paLSPages[iPage].fDirty)
+ {
+ pVM->pgm.s.LiveSave.Ram.cReadyPages--;
+ if (paLSPages[iPage].fZero)
+ pVM->pgm.s.LiveSave.Ram.cZeroPages--;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
+ if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
+ paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
+ }
+
+ pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
+ pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
+ paLSPages[iPage].fWriteMonitored = 1;
+ paLSPages[iPage].fWriteMonitoredJustNow = 1;
+ paLSPages[iPage].fDirty = 1;
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 0;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
+#endif
+ break;
+
+ case PGM_PAGE_STATE_WRITE_MONITORED:
+ Assert(paLSPages[iPage].fWriteMonitored);
+ if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
+ {
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ if (paLSPages[iPage].fWriteMonitoredJustNow)
+ pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
+ else
+ pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
+#endif
+ paLSPages[iPage].fWriteMonitoredJustNow = 0;
+ }
+ else
+ {
+ paLSPages[iPage].fWriteMonitoredJustNow = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
+#endif
+ if (!paLSPages[iPage].fDirty)
+ {
+ pVM->pgm.s.LiveSave.Ram.cReadyPages--;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
+ if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
+ paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
+ }
+ }
+ break;
+
+ case PGM_PAGE_STATE_ZERO:
+ case PGM_PAGE_STATE_BALLOONED:
+ if (!paLSPages[iPage].fZero)
+ {
+ if (!paLSPages[iPage].fDirty)
+ {
+ paLSPages[iPage].fDirty = 1;
+ pVM->pgm.s.LiveSave.Ram.cReadyPages--;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
+ }
+ paLSPages[iPage].fZero = 1;
+ paLSPages[iPage].fShared = 0;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
+#endif
+ }
+ break;
+
+ case PGM_PAGE_STATE_SHARED:
+ if (!paLSPages[iPage].fShared)
+ {
+ if (!paLSPages[iPage].fDirty)
+ {
+ paLSPages[iPage].fDirty = 1;
+ pVM->pgm.s.LiveSave.Ram.cReadyPages--;
+ if (paLSPages[iPage].fZero)
+ pVM->pgm.s.LiveSave.Ram.cZeroPages--;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
+ }
+ paLSPages[iPage].fZero = 0;
+ paLSPages[iPage].fShared = 1;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
+#endif
+ }
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * All other types => Ignore the page.
+ */
+ Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
+ paLSPages[iPage].fIgnore = 1;
+ if (paLSPages[iPage].fWriteMonitored)
+ {
+ /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
+ * pages! */
+ if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
+ {
+ AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
+ PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
+ Assert(pVM->pgm.s.cMonitoredPages > 0);
+ pVM->pgm.s.cMonitoredPages--;
+ }
+ if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
+ {
+ PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
+ Assert(pVM->pgm.s.cWrittenToPages > 0);
+ pVM->pgm.s.cWrittenToPages--;
+ }
+ pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
+ }
+
+ /** @todo the counting doesn't quite work out here. fix later? */
+ if (paLSPages[iPage].fDirty)
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
+ else
+ {
+ pVM->pgm.s.LiveSave.Ram.cReadyPages--;
+ if (paLSPages[iPage].fZero)
+ pVM->pgm.s.LiveSave.Ram.cZeroPages--;
+ }
+ pVM->pgm.s.LiveSave.cIgnoredPages++;
+ }
+ } /* for each page in range */
+
+ if (GCPhysCur != 0)
+ break; /* Yield + ramrange change */
+ GCPhysCur = pCur->GCPhysLast;
+ }
+ } /* for each range */
+ } while (pCur);
+ PGM_UNLOCK(pVM);
+}
+
+
+/**
+ * Save quiescent RAM pages.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param fLiveSave Whether it's a live save or not.
+ * @param uPass The pass number.
+ */
+static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
+{
+ NOREF(fLiveSave);
+
+ /*
+ * The RAM.
+ */
+ RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
+ RTGCPHYS GCPhysCur = 0;
+ PPGMRAMRANGE pCur;
+
+ PGM_LOCK_VOID(pVM);
+ do
+ {
+ uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+ for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ if ( pCur->GCPhysLast > GCPhysCur
+ && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
+ {
+ PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
+ uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
+ uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
+ GCPhysCur = 0;
+ for (; iPage < cPages; iPage++)
+ {
+ /* Do yield first. */
+ if ( uPass != SSM_PASS_FINAL
+ && (iPage & 0x7ff) == 0x100
+ && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
+ && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
+ {
+ GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ break; /* restart */
+ }
+
+ PPGMPAGE pCurPage = &pCur->aPages[iPage];
+
+ /*
+ * Only save dirty pages that haven't changed since the last scan.
+ */
+ if ( uPass != SSM_PASS_FINAL
+ && paLSPages)
+ {
+ if (!paLSPages[iPage].fDirty)
+ continue;
+ if (paLSPages[iPage].fWriteMonitoredJustNow)
+ continue;
+ if (paLSPages[iPage].fIgnore)
+ continue;
+ if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
+ continue;
+ if ( PGM_PAGE_GET_STATE(pCurPage)
+ != ( paLSPages[iPage].fZero
+ ? PGM_PAGE_STATE_ZERO
+ : paLSPages[iPage].fShared
+ ? PGM_PAGE_STATE_SHARED
+ : PGM_PAGE_STATE_WRITE_MONITORED))
+ continue;
+ if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
+ continue;
+ }
+ else
+ {
+ if ( paLSPages
+ && !paLSPages[iPage].fDirty
+ && !paLSPages[iPage].fIgnore)
+ {
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
+ pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
+#endif
+ continue;
+ }
+ if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
+ continue;
+ }
+
+ /*
+ * Do the saving outside the PGM critsect since SSM may block on I/O.
+ */
+ int rc;
+ RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+ bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
+ bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
+ bool fSkipped = false;
+
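+ /* Note: a record for a page that immediately follows the previously
+ saved one omits the address (no PGM_STATE_REC_FLAG_ADDR), which
+ keeps the stream compact. */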
+ if (!fZero && !fBallooned)
+ {
+ /*
+ * Copy the page and then save it outside the lock (since any
+ * SSM call may block).
+ */
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ PGMPAGEMAPLOCK PgMpLck;
+ void const *pvPage;
+ rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ if (paLSPages)
+ pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
+#endif
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ PGM_UNLOCK(pVM);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
+
+ /* Try to save some memory at restore time: all-zero pages get a ZERO record. */
+ if (!ASMMemIsZero(abPage, GUEST_PAGE_SIZE)) /* check the copy; the pvPage mapping was released above */
+ {
+ if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
+ SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
+ else
+ {
+ SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
+ SSMR3PutGCPhys(pSSM, GCPhys);
+ }
+ rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
+ }
+ else
+ {
+ if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
+ rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
+ else
+ {
+ SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
+ rc = SSMR3PutGCPhys(pSSM, GCPhys);
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Dirty zero or ballooned page.
+ */
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ if (paLSPages)
+ pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
+#endif
+ PGM_UNLOCK(pVM);
+
+ uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
+ if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
+ rc = SSMR3PutU8(pSSM, u8RecType);
+ else
+ {
+ SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
+ rc = SSMR3PutGCPhys(pSSM, GCPhys);
+ }
+ }
+ if (RT_FAILURE(rc))
+ return rc;
+
+ PGM_LOCK_VOID(pVM);
+ if (!fSkipped)
+ GCPhysLast = GCPhys;
+ if (paLSPages)
+ {
+ paLSPages[iPage].fDirty = 0;
+ pVM->pgm.s.LiveSave.Ram.cReadyPages++;
+ if (fZero)
+ pVM->pgm.s.LiveSave.Ram.cZeroPages++;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
+ pVM->pgm.s.LiveSave.cSavedPages++;
+ }
+ if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
+ {
+ GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
+ break; /* restart */
+ }
+
+ } /* for each page in range */
+
+ if (GCPhysCur != 0)
+ break; /* Yield + ramrange change */
+ GCPhysCur = pCur->GCPhysLast;
+ }
+ } /* for each range */
+ } while (pCur);
+
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Cleans up RAM pages after a live save.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void pgmR3DoneRamPages(PVM pVM)
+{
+ /*
+ * Free the tracking arrays and disable write monitoring.
+ *
+ * Play nice with the PGM lock in case we're called while the VM is still
+ * running. This means we have to delay the freeing since we wish to use
+ * paLSPages as an indicator of which RAM ranges we still need to scan for
+ * write monitored pages.
+ */
+ void *pvToFree = NULL;
+ PPGMRAMRANGE pCur;
+ uint32_t cMonitoredPages = 0;
+ PGM_LOCK_VOID(pVM);
+ do
+ {
+ for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
+ {
+ if (pCur->paLSPages)
+ {
+ if (pvToFree)
+ {
+ uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+ PGM_UNLOCK(pVM);
+ MMR3HeapFree(pvToFree);
+ pvToFree = NULL;
+ PGM_LOCK_VOID(pVM);
+ if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
+ break; /* start over again. */
+ }
+
+ pvToFree = pCur->paLSPages;
+ pCur->paLSPages = NULL;
+
+ uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
+ while (iPage--)
+ {
+ PPGMPAGE pPage = &pCur->aPages[iPage];
+ PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
+ if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
+ {
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
+ cMonitoredPages++;
+ }
+ }
+ }
+ }
+ } while (pCur);
+
+ Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
+ if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
+ pVM->pgm.s.cMonitoredPages = 0;
+ else
+ pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
+
+ PGM_UNLOCK(pVM);
+
+ MMR3HeapFree(pvToFree);
+ pvToFree = NULL;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLIVEEXEC}
+ */
+static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ int rc;
+
+ /*
+ * Save the RAM config and the MMIO2 and ROM range IDs in pass 0.
+ */
+ if (uPass == 0)
+ {
+ rc = pgmR3SaveRamConfig(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = pgmR3SaveRomRanges(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ /*
+ * Reset the pages-per-second estimate to avoid inflation by the initial
+ * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
+ */
+ else if (uPass == 7)
+ {
+ pVM->pgm.s.LiveSave.cSavedPages = 0;
+ pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
+ }
+
+ /*
+ * Do the scanning.
+ */
+ pgmR3ScanRomPages(pVM);
+ pgmR3ScanMmio2Pages(pVM, uPass);
+ pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
+ pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
+
+ /*
+ * Save the pages.
+ */
+ if (uPass == 0)
+ rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
+ else
+ rc = VINF_SUCCESS;
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
+ SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLIVEVOTE}
+ */
+static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ /*
+ * Update and calculate parameters used in the decision making.
+ */
+ const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
+
+ /* update history. */
+ PGM_LOCK_VOID(pVM);
+ uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
+ PGM_UNLOCK(pVM);
+ uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
+ + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
+ + pVM->pgm.s.LiveSave.Ram.cDirtyPages
+ + cWrittenToPages;
+ uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
+ pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
+ pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
+
+ /* calc short-term average (last 4 passes). */
+ AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
+ uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
+ cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
+ cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
+ cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
+ uint32_t const cDirtyPagesShort = cTotal / 4;
+ pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
+
+ /* calc long-term average. */
+ cTotal = 0;
+ if (uPass < cHistoryEntries)
+ for (i = 0; i < cHistoryEntries && i <= uPass; i++)
+ cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
+ else
+ for (i = 0; i < cHistoryEntries; i++)
+ cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
+ uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
+ pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
+
+ /* estimate the speed */
+ uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
+ uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
+ / ((long double)cNsElapsed / 1000000000.0) );
+ pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
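+ /* (Example: 2000 dirty pages at 8192 pages/sec comes to roughly 244 ms
+ of estimated downtime in the checks below.) */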
+
+ /*
+ * Try to make a decision.
+ */
+ if ( cDirtyPagesShort <= cDirtyPagesLong
+ && ( cDirtyNow <= cDirtyPagesShort
+ || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
+ )
+ )
+ {
+ if (uPass > 10)
+ {
+ uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
+ uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
+ uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
+ if (cMsMaxDowntime < 32)
+ cMsMaxDowntime = 32;
+ if ( ( cMsLeftLong <= cMsMaxDowntime
+ && cMsLeftShort < cMsMaxDowntime)
+ || cMsLeftShort < cMsMaxDowntime / 2
+ )
+ {
+ Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
+ uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ if ( ( cDirtyPagesShort <= 128
+ && cDirtyPagesLong <= 1024)
+ || cDirtyPagesLong <= 256
+ )
+ {
+ Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
+ return VINF_SUCCESS;
+ }
+ }
+ }
+
+ /*
+ * Come up with a completion percentage. Currently this is a simple
+ * dirty page (long term) vs. total pages ratio + some pass trickery.
+ */
+ unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong * 100.0
+ / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
+ if (uPctDirty <= 100)
+ SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
+ else
+ AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
+ uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
+
+ return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLIVEPREP}
+ *
+ * This will attempt to allocate and initialize the tracking structures. It
+ * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
+ * pgmR3SaveDone will do the cleanups.
+ */
+static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Indicate that we will be using the write monitoring.
+ */
+ PGM_LOCK_VOID(pVM);
+ /** @todo find a way of mediating this when more users are added. */
+ if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
+ {
+ PGM_UNLOCK(pVM);
+ AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
+ }
+ pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
+ PGM_UNLOCK(pVM);
+
+ /*
+ * Initialize the statistics.
+ */
+ pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
+ pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
+ pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
+ pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
+ pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
+ pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
+ pVM->pgm.s.LiveSave.cIgnoredPages = 0;
+ pVM->pgm.s.LiveSave.fActive = true;
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
+ pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
+ pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
+ pVM->pgm.s.LiveSave.cSavedPages = 0;
+ pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
+ pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
+
+ /*
+ * Per page type.
+ */
+ int rc = pgmR3PrepRomPages(pVM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3PrepMmio2Pages(pVM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3PrepRamPages(pVM);
+
+ NOREF(pSSM);
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTSAVEEXEC}
+ */
+static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
+{
+ PPGM pPGM = &pVM->pgm.s;
+
+ /*
+ * Lock PGM and set the no-more-writes indicator.
+ */
+ PGM_LOCK_VOID(pVM);
+ pVM->pgm.s.fNoMorePhysWrites = true;
+
+ /*
+ * Save basic data (required / unaffected by relocation).
+ */
+ int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
+ rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
+
+ /*
+ * Save the (remainder of the) memory.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ if (pVM->pgm.s.LiveSave.fActive)
+ {
+ pgmR3ScanRomPages(pVM);
+ pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
+ pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
+
+ rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
+ }
+ else
+ {
+ rc = pgmR3SaveRamConfig(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveRomRanges(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
+ }
+ SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTSAVEDONE}
+ */
+static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Do per page type cleanups first.
+ */
+ if (pVM->pgm.s.LiveSave.fActive)
+ {
+ pgmR3DoneRomPages(pVM);
+ pgmR3DoneMmio2Pages(pVM);
+ pgmR3DoneRamPages(pVM);
+ }
+
+ /*
+ * Clear the live save indicator and disengage write monitoring.
+ */
+ PGM_LOCK_VOID(pVM);
+ pVM->pgm.s.LiveSave.fActive = false;
+ /** @todo this is blindly assuming that we're the only user of write
+ * monitoring. Fix this when more users are added. */
+ pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
+ PGM_UNLOCK(pVM);
+
+ NOREF(pSSM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADPREP}
+ */
+static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Call the reset function to make sure all the memory is cleared.
+ */
+ PGMR3Reset(pVM);
+ pVM->pgm.s.LiveSave.fActive = false;
+ NOREF(pSSM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Load an ignored page.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ */
+static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
+{
+ uint8_t abPage[GUEST_PAGE_SIZE];
+ return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
+}
+
+
+/**
+ * Compares a page with an old save type value.
+ *
+ * @returns true if equal, false if not.
+ * @param pPage The page to compare.
+ * @param uOldType The old type value from the saved state.
+ */
+DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
+{
+ uint8_t uOldPageType;
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
+ case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
+ case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
+ case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
+ case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
+ case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
+ default:
+ AssertFailed();
+ uOldPageType = PGMPAGETYPE_OLD_INVALID;
+ break;
+ }
+ return uOldPageType == uOldType;
+}
+
+
+/**
+ * Loads a page without any bits in the saved state, i.e. making sure it's
+ * really zero.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
+ * state).
+ * @param pPage The guest page tracking structure.
+ * @param GCPhys The page address.
+ * @param pRam The ram range (logging).
+ */
+static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+{
+ if ( uOldType != PGMPAGETYPE_OLD_INVALID
+ && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
+ return VERR_SSM_UNEXPECTED_DATA;
+
+ /* I think this should be sufficient. */
+ if ( !PGM_PAGE_IS_ZERO(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
+ return VERR_SSM_UNEXPECTED_DATA;
+
+ NOREF(pVM);
+ NOREF(GCPhys);
+ NOREF(pRam);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads a page from the saved state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
+ * state).
+ * @param pPage The guest page tracking structure.
+ * @param GCPhys The page address.
+ * @param pRam The ram range (logging).
+ */
+static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+{
+ /*
+ * Match up the type, dealing with MMIO2 aliases (dropped).
+ */
+ AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
+ || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
+ /* kludge for the expanded PXE bios (r67885) - @bugref{5687}: */
+ || ( uOldType == PGMPAGETYPE_OLD_RAM
+ && GCPhys >= 0xed000
+ && GCPhys <= 0xeffff
+ && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
+ ,
+ ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
+ VERR_SSM_UNEXPECTED_DATA);
+
+ /*
+ * Load the page.
+ */
+ PGMPAGEMAPLOCK PgMpLck;
+ void *pvPage;
+ int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
+ if (RT_SUCCESS(rc))
+ {
+ rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Loads a page (counterpart to pgmR3SavePage).
+ *
+ * @returns VBox status code, fully bitched errors.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uOldType The page type.
+ * @param pPage The page.
+ * @param GCPhys The page address.
+ * @param pRam The RAM range (for error messages).
+ */
+static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+{
+ uint8_t uState;
+ int rc = SSMR3GetU8(pSSM, &uState);
+ AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
+ if (uState == 0 /* zero */)
+ rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
+ else if (uState == 1)
+ rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
+ else
+ rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
+ AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
+ pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
+ rc);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads a shadowed ROM page.
+ *
+ * @returns VBox status code, errors are fully bitched.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param pPage The page.
+ * @param GCPhys The page address.
+ * @param pRam The RAM range (for error messages).
+ */
+static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+{
+ /*
+ * Load and set the protection first, then load the two pages; the first
+ * one is the active page, the other the passive one.
+ */
+ PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
+ AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
+
+ uint8_t uProt;
+ int rc = SSMR3GetU8(pSSM, &uProt);
+ AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
+ PGMROMPROT enmProt = (PGMROMPROT)uProt;
+ AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
+ && enmProt < PGMROMPROT_END,
+ ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
+ VERR_SSM_UNEXPECTED_DATA);
+
+ if (pRomPage->enmProt != enmProt)
+ {
+ rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
+ }
+
+ PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
+ PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
+ uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
+ uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
+
+ /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
+ * used down the line (the 2nd page will be written to the first
+ * one because of a false TLB hit since the TLB is using GCPhys and
+ * doesn't check the HCPhys of the desired page). */
+ rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
+ if (RT_SUCCESS(rc))
+ {
+ *pPageActive = *pPage;
+ rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
+ }
+ return rc;
+}
+
+
+/**
+ * Loads the RAM range flags and bits from older versions of the saved state.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion The saved state version.
+ */
+static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ PPGM pPGM = &pVM->pgm.s;
+
+ /*
+ * Ram range flags and bits.
+ */
+ uint32_t i = 0;
+ for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
+ {
+ /* Check the sequence number / separator. */
+ uint32_t u32Sep;
+ int rc = SSMR3GetU32(pSSM, &u32Sep);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32Sep == ~0U)
+ break;
+ if (u32Sep != i)
+ {
+ AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /* Get the range details. */
+ RTGCPHYS GCPhys;
+ SSMR3GetGCPhys(pSSM, &GCPhys);
+ RTGCPHYS GCPhysLast;
+ SSMR3GetGCPhys(pSSM, &GCPhysLast);
+ RTGCPHYS cb;
+ SSMR3GetGCPhys(pSSM, &cb);
+ uint8_t fHaveBits;
+ rc = SSMR3GetU8(pSSM, &fHaveBits);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (fHaveBits & ~1)
+ {
+ AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ size_t cchDesc = 0;
+ char szDesc[256];
+ szDesc[0] = '\0';
+ if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
+ {
+ rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
+ if (RT_FAILURE(rc))
+ return rc;
+ /* Since we've modified the description strings in r45878, only compare
+ them if the saved state is more recent. */
+ if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
+ cchDesc = strlen(szDesc);
+ }
+
+ /*
+ * Match it up with the current range.
+ *
+ * Note there is a hack for dealing with the high BIOS mapping
+ * in the old saved state format; this means we might not have
+ * a 1:1 match on success.
+ */
+ if ( ( GCPhys != pRam->GCPhys
+ || GCPhysLast != pRam->GCPhysLast
+ || cb != pRam->cb
+ || ( cchDesc
+ && strcmp(szDesc, pRam->pszDesc)) )
+ /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
+ && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
+ || GCPhys != UINT32_C(0xfff80000)
+ || GCPhysLast != UINT32_C(0xffffffff)
+ || pRam->GCPhysLast != GCPhysLast
+ || pRam->GCPhys < GCPhys
+ || !fHaveBits)
+ )
+ {
+ LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
+ "State : %RGp-%RGp %RGp bytes %s %s\n",
+ pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
+ GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
+ /*
+ * If we're loading a state for debugging purpose, don't make a fuss if
+ * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
+ */
+ if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
+ || GCPhys < 8 * _1M)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
+ GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
+ pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
+
+ AssertMsgFailed(("debug skipping not implemented, sorry\n"));
+ continue;
+ }
+
+ uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
+ if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
+ {
+ /*
+ * Load the pages one by one.
+ */
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ uint8_t uOldType;
+ rc = SSMR3GetU8(pSSM, &uOldType);
+ AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
+ if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
+ rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
+ else
+ rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
+ }
+ }
+ else
+ {
+ /*
+ * Old format.
+ */
+
+ /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
+ The rest is generally irrelevant and wrong since it has to match the current registrations. */
+ uint32_t fFlags = 0;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ uint16_t u16Flags;
+ rc = SSMR3GetU16(pSSM, &u16Flags);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
+ fFlags |= u16Flags;
+ }
+
+ /* Load the bits */
+ if ( !fHaveBits
+ && GCPhysLast < UINT32_C(0xe0000000))
+ {
+ /*
+ * Dynamic chunks.
+ */
+ const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
+ AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
+ ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
+ {
+ uint8_t fPresent;
+ rc = SSMR3GetU8(pSSM, &fPresent);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
+ AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
+ ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
+ {
+ RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+ if (fPresent)
+ {
+ if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
+ rc = pgmR3LoadPageToDevNullOld(pSSM);
+ else
+ rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
+ }
+ else
+ rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
+ }
+ }
+ }
+ else if (pRam->pvR3)
+ {
+ /*
+ * MMIO2.
+ */
+ AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
+ ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelMsgReturn(pRam->pvR3,
+ ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
+ AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
+ }
+ else if (GCPhysLast < UINT32_C(0xfff80000))
+ {
+ /*
+ * PCI MMIO, no pages saved.
+ */
+ }
+ else
+ {
+ /*
+ * Load the 0xfff80000..0xffffffff BIOS range.
+ * It starts with X reserved pages that we have to skip over since
+ * the RAMRANGE created by the new code won't include those.
+ */
+ AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
+ && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
+ ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
+ ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ /* Skip wasted reserved pages before the ROM. */
+ while (GCPhys < pRam->GCPhys)
+ {
+ rc = pgmR3LoadPageToDevNullOld(pSSM);
+ GCPhys += GUEST_PAGE_SIZE;
+ }
+
+ /* Load the bios pages. */
+ cPages = pRam->cb >> GUEST_PAGE_SHIFT;
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ {
+ RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+
+ AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
+ ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
+ }
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for pgmR3Load and pgmR3LoadLocked.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion The PGM saved state unit version.
+ * @param uPass The pass number.
+ *
+ * @todo This needs splitting up if more record types or code twists are
+ * added...
+ */
+static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(uPass);
+
+ /*
+ * Process page records until we hit the terminator.
+ */
+ RTGCPHYS GCPhys = NIL_RTGCPHYS;
+ PPGMRAMRANGE pRamHint = NULL;
+ uint8_t id = UINT8_MAX;
+ uint32_t iPage = UINT32_MAX - 10;
+ PPGMROMRANGE pRom = NULL;
+ PPGMREGMMIO2RANGE pRegMmio = NULL;
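+
+ /* Note: GCPhys, id and iPage carry the implicit position between
+ records; a record without PGM_STATE_REC_FLAG_ADDR simply advances
+ from the previous one. */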
+
+ /*
+ * We batch up pages that should be freed instead of calling GMM for
+ * each and every one of them. Note that we'll lose the pages in most
+ * failure paths - this should probably be addressed one day.
+ */
+ uint32_t cPendingPages = 0;
+ PGMMFREEPAGESREQ pReq;
+ int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
+ AssertLogRelRCReturn(rc, rc);
+
+ for (;;)
+ {
+ /*
+ * Get the record type and flags.
+ */
+ uint8_t u8;
+ rc = SSMR3GetU8(pSSM, &u8);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u8 == PGM_STATE_REC_END)
+ {
+ /*
+ * Finish off any pages pending freeing.
+ */
+ if (cPendingPages)
+ {
+ Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
+ rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ GMMR3FreePagesCleanup(pReq);
+ return VINF_SUCCESS;
+ }
+ AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+ {
+ /*
+ * RAM page.
+ */
+ case PGM_STATE_REC_RAM_ZERO:
+ case PGM_STATE_REC_RAM_RAW:
+ case PGM_STATE_REC_RAM_BALLOONED:
+ {
+ /*
+ * Get the address and resolve it into a page descriptor.
+ */
+ if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
+ GCPhys += GUEST_PAGE_SIZE;
+ else
+ {
+ rc = SSMR3GetGCPhys(pSSM, &GCPhys);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ PPGMPAGE pPage;
+ rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
+
+ /*
+ * Take action according to the record type.
+ */
+ switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+ {
+ case PGM_STATE_REC_RAM_ZERO:
+ {
+ if (PGM_PAGE_IS_ZERO(pPage))
+ break;
+
+ /* Ballooned pages must be unmarked (live snapshot and
+ teleportation scenarios). */
+ if (PGM_PAGE_IS_BALLOONED(pPage))
+ {
+ Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
+ if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
+ break;
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+ break;
+ }
+
+ AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
+
+ /* If this is a ROM page, we must clear it and not try to
+ * free it. Ditto if the VM is using RamPreAlloc (see
+ * @bugref{6318}). */
+ if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ || pVM->pgm.s.fNemMode
+#endif
+ || pVM->pgm.s.fRamPreAlloc)
+ {
+ PGMPAGEMAPLOCK PgMpLck;
+ void *pvDstPage;
+ rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
+ AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
+
+ RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ }
+ /* Free it only if it's not part of a previously
+ allocated large page (no need to clear the page). */
+ else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
+ && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+ {
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
+ AssertRCReturn(rc, rc);
+ }
+ /** @todo handle large pages (see @bugref{5545}) */
+ break;
+ }
+
+ case PGM_STATE_REC_RAM_BALLOONED:
+ {
+ Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
+ if (PGM_PAGE_IS_BALLOONED(pPage))
+ break;
+
+ /* We don't map ballooned pages in our shadow page tables; let's
+ just free the page if allocated and mark it as ballooned. See @bugref{5515}. */
+ if (PGM_PAGE_IS_ALLOCATED(pPage))
+ {
+ /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
+ * @bugref{5545}). */
+ AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
+ && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
+ ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
+
+ rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
+ AssertRCReturn(rc, rc);
+ }
+ Assert(PGM_PAGE_IS_ZERO(pPage));
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
+ break;
+ }
+
+ case PGM_STATE_REC_RAM_RAW:
+ {
+ PGMPAGEMAPLOCK PgMpLck;
+ void *pvDstPage;
+ rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
+ AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
+ rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ if (RT_FAILURE(rc))
+ return rc;
+ break;
+ }
+
+ default:
+ AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
+ }
+ id = UINT8_MAX;
+ break;
+ }
+
+ /*
+ * MMIO2 page.
+ */
+ case PGM_STATE_REC_MMIO2_RAW:
+ case PGM_STATE_REC_MMIO2_ZERO:
+ {
+ /*
+ * Get the ID + page number and resolve that into an MMIO2 page.
+ */
+ if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
+ iPage++;
+ else
+ {
+ SSMR3GetU8(pSSM, &id);
+ rc = SSMR3GetU32(pSSM, &iPage);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ if ( !pRegMmio
+ || pRegMmio->idSavedState != id)
+ {
+ for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
+ if (pRegMmio->idSavedState == id)
+ break;
+ AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
+ }
+ AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT),
+ ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc),
+ VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
+ void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << GUEST_PAGE_SHIFT);
+
+ /*
+ * Load the page bits.
+ */
+ if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
+ RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
+ else
+ {
+ rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ GCPhys = NIL_RTGCPHYS;
+ break;
+ }
+
+ /*
+ * ROM pages.
+ */
+ case PGM_STATE_REC_ROM_VIRGIN:
+ case PGM_STATE_REC_ROM_SHW_RAW:
+ case PGM_STATE_REC_ROM_SHW_ZERO:
+ case PGM_STATE_REC_ROM_PROT:
+ {
+ /*
+ * Get the ID + page number and resolve that into a ROM page descriptor.
+ */
+ if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
+ iPage++;
+ else
+ {
+ SSMR3GetU8(pSSM, &id);
+ rc = SSMR3GetU32(pSSM, &iPage);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ if ( !pRom
+ || pRom->idSavedState != id)
+ {
+ for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
+ if (pRom->idSavedState == id)
+ break;
+ AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
+ }
+ AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
+ ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
+ VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
+ PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
+ GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
+
+ /*
+ * Get and set the protection.
+ */
+ uint8_t u8Prot;
+ rc = SSMR3GetU8(pSSM, &u8Prot);
+ if (RT_FAILURE(rc))
+ return rc;
+ PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
+ AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
+
+ if (enmProt != pRomPage->enmProt)
+ {
+ if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
+ GCPhys, enmProt, pRom->pszDesc);
+ rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
+ AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
+ AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
+ }
+ if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
+ break; /* done */
+
+ /*
+ * Get the right page descriptor.
+ */
+ PPGMPAGE pRealPage;
+ switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+ {
+ case PGM_STATE_REC_ROM_VIRGIN:
+ if (!PGMROMPROT_IS_ROM(enmProt))
+ pRealPage = &pRomPage->Virgin;
+ else
+ pRealPage = NULL;
+ break;
+
+ case PGM_STATE_REC_ROM_SHW_RAW:
+ case PGM_STATE_REC_ROM_SHW_ZERO:
+ if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
+ GCPhys, enmProt, pRom->pszDesc);
+ if (PGMROMPROT_IS_ROM(enmProt))
+ pRealPage = &pRomPage->Shadow;
+ else
+ pRealPage = NULL;
+ break;
+
+ default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
+ }
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ bool const fAltPage = pRealPage != NULL;
+#endif
+ if (!pRealPage)
+ {
+ rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
+ AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
+ }
+
+ /*
+ * Make it writable and map it (if necessary).
+ */
+ void *pvDstPage = NULL;
+ switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+ {
+ case PGM_STATE_REC_ROM_SHW_ZERO:
+ if ( PGM_PAGE_IS_ZERO(pRealPage)
+ || PGM_PAGE_IS_BALLOONED(pRealPage))
+ break;
+ /** @todo implement zero page replacing. */
+ RT_FALL_THRU();
+ case PGM_STATE_REC_ROM_VIRGIN:
+ case PGM_STATE_REC_ROM_SHW_RAW:
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ if (fAltPage && pVM->pgm.s.fNemMode)
+ pvDstPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
+ else
+#endif
+ {
+ rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
+ AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
+ }
+ break;
+ }
+
+ /*
+ * Load the bits.
+ */
+ switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+ {
+ case PGM_STATE_REC_ROM_SHW_ZERO:
+ if (pvDstPage)
+ RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
+ break;
+
+ case PGM_STATE_REC_ROM_VIRGIN:
+ case PGM_STATE_REC_ROM_SHW_RAW:
+ rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ return rc;
+ break;
+ }
+ GCPhys = NIL_RTGCPHYS;
+ break;
+ }
+
+ /*
+ * Unknown type.
+ */
+ default:
+ AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
+ }
+ } /* forever */
+}
+
+
+/**
+ * Worker for pgmR3Load.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion The saved state version.
+ */
+static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ PPGM pPGM = &pVM->pgm.s;
+ int rc;
+ uint32_t u32Sep;
+
+ /*
+ * Load basic data (required / unaffected by relocation).
+ */
+ if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
+ {
+ if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
+ rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
+ else
+ rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
+
+ AssertLogRelRCReturn(rc, rc);
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
+ rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
+ else
+ rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ }
+ else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
+ {
+ AssertRelease(pVM->cCpus == 1);
+
+ PGMOLD pgmOld;
+ rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
+ AssertLogRelRCReturn(rc, rc);
+
+ PVMCPU pVCpu0 = pVM->apCpusR3[0];
+ pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
+ pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
+ pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
+ }
+ else
+ {
+ AssertRelease(pVM->cCpus == 1);
+
+ SSMR3Skip(pSSM, sizeof(bool));
+ RTGCPTR GCPtrIgn;
+ SSMR3GetGCPtr(pSSM, &GCPtrIgn);
+ SSMR3Skip(pSSM, sizeof(uint32_t));
+
+ uint32_t cbRamSizeIgnored;
+ rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
+ if (RT_FAILURE(rc))
+ return rc;
+ PVMCPU pVCpu0 = pVM->apCpusR3[0];
+ SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
+
+ uint32_t u32 = 0;
+ SSMR3GetUInt(pSSM, &u32);
+ pVCpu0->pgm.s.fA20Enabled = !!u32;
+ SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
+ RTUINT uGuestMode;
+ SSMR3GetUInt(pSSM, &uGuestMode);
+ pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
+
+ /* check separator. */
+ rc = SSMR3GetU32(pSSM, &u32Sep);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32Sep != (uint32_t)~0)
+ {
+ AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ }
+
+ /*
+ * Fix the A20 mask.
+ */
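+ /* Note: with A20 disabled the mask clears bit 20 of every guest physical
+ address; with A20 enabled the mask is all ones. */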
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
+ pgmR3RefreshShadowModeAfterA20Change(pVCpu);
+ }
+
+ /*
+ * The guest mappings - skipped now, see re-fixation in the caller.
+ */
+ if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
+ {
+ for (uint32_t i = 0; ; i++)
+ {
+ rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32Sep == ~0U)
+ break;
+ AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+ char szDesc[256];
+ rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
+ if (RT_FAILURE(rc))
+ return rc;
+ RTGCPTR GCPtrIgnore;
+ SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
+ rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ }
+
+ /*
+ * Load the RAM contents.
+ */
+ if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
+ {
+ if (!pVM->pgm.s.LiveSave.fActive)
+ {
+ if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
+ {
+ rc = pgmR3LoadRamConfig(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ rc = pgmR3LoadRomRanges(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
+ }
+ else
+ rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
+
+ /* Refresh balloon accounting. */
+ if (pVM->pgm.s.cBalloonedPages)
+ {
+ Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
+ rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
+ AssertRCReturn(rc, rc);
+ }
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADEXEC}
+ */
+static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ int rc;
+
+ /*
+ * Validate version.
+ */
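+ /* Note: the live passes only accept the newer (live-save capable) unit
+ versions; the final pass also accepts the older ones. */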
+ if ( ( uPass != SSM_PASS_FINAL
+ && uVersion != PGM_SAVED_STATE_VERSION
+ && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
+ && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
+ && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
+ && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
+ || ( uVersion != PGM_SAVED_STATE_VERSION
+ && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
+ && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
+ && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
+ && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
+ && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
+ && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
+ && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
+ && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
+ )
+ {
+ AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ /*
+ * Do the loading while owning the lock because a bunch of the functions
+ * we're using require this.
+ */
+ if (uPass != SSM_PASS_FINAL)
+ {
+ PGM_LOCK_VOID(pVM);
+ if (uPass != 0)
+ rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
+ else
+ {
+ pVM->pgm.s.LiveSave.fActive = true;
+ if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
+ rc = pgmR3LoadRamConfig(pVM, pSSM);
+ else
+ rc = VINF_SUCCESS;
+ if (RT_SUCCESS(rc))
+ rc = pgmR3LoadRomRanges(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
+ }
+ PGM_UNLOCK(pVM);
+ }
+ else
+ {
+ PGM_LOCK_VOID(pVM);
+ rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
+ pVM->pgm.s.LiveSave.fActive = false;
+ PGM_UNLOCK(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * We require a full resync now.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ /** @todo For guest PAE, we might get the wrong
+ * aGCPhysGstPaePDs values now. We should use the
+ * saved ones... Postponing this since it's nothing new
+ * and PAE/PDPTR needs some general readjusting, see
+ * @bugref{5880}. */
+ }
+
+ pgmR3HandlerPhysicalUpdateAll(pVM);
+
+ /*
+ * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
+ * (Requires the CPUM state to be restored already!)
+ */
+ if (CPUMR3IsStateRestorePending(pVM))
+ return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
+ N_("PGM was unexpectedly restored before CPUM"));
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
+ AssertLogRelRCReturn(rc, rc);
+
+ /* Update the PSE, NX flags and validity masks. */
+ pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
+ PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADDONE}
+ */
+static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
+{
+ pVM->pgm.s.fRestoreRomPagesOnReset = true;
+ NOREF(pSSM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Registers the saved state callbacks with SSM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param cbRam The RAM size.
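+ *
+ * @note The live prep/exec/vote callbacks drive the multi-pass live save,
+ * pgmR3SaveExec/pgmR3SaveDone handle the final pass and cleanup, and
+ * pgmR3Load/pgmR3LoadDone handle the restore side.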
+ */
+int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
+{
+ return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
+ pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
+ NULL, pgmR3SaveExec, pgmR3SaveDone,
+ pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
+}
+
diff --git a/src/VBox/VMM/VMMR3/PGMSharedPage.cpp b/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
new file mode 100644
index 00000000..ce270176
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
@@ -0,0 +1,452 @@
+/* $Id: PGMSharedPage.cpp $ */
+/** @file
+ * PGM - Page Manager and Monitor, Shared page handling
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM_SHARED
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/uvm.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include <VBox/sup.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/VMMDev.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+#include "PGMInline.h"
+
+
+#ifdef VBOX_WITH_PAGE_SHARING
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+# ifdef VBOX_STRICT
+/** Keep a copy of all registered shared modules for the .pgmcheckduppages debugger command. */
+static PGMMREGISTERSHAREDMODULEREQ g_apSharedModules[512] = {0};
+static unsigned g_cSharedModules = 0;
+# endif /* VBOX_STRICT */
+
+
+/**
+ * Registers a new shared module for the VM
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmGuestOS Guest OS type.
+ * @param pszModuleName Module name.
+ * @param pszVersion Module version.
+ * @param GCBaseAddr Module base address.
+ * @param cbModule Module size.
+ * @param cRegions Number of shared region descriptors.
+ * @param paRegions Shared region(s).
+ *
+ * @todo This should be a GMMR3 call. No need to involve GMM here.
+ */
+VMMR3DECL(int) PGMR3SharedModuleRegister(PVM pVM, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
+ RTGCPTR GCBaseAddr, uint32_t cbModule, uint32_t cRegions,
+ VMMDEVSHAREDREGIONDESC const *paRegions)
+{
+ Log(("PGMR3SharedModuleRegister family=%d name=%s version=%s base=%RGv size=%x cRegions=%d\n",
+ enmGuestOS, pszModuleName, pszVersion, GCBaseAddr, cbModule, cRegions));
+
+ /*
+ * Sanity check.
+ */
+ AssertReturn(cRegions <= VMMDEVSHAREDREGIONDESC_MAX, VERR_INVALID_PARAMETER);
+ if (!pVM->pgm.s.fPageFusionAllowed)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Allocate and initialize a GMM request.
+ */
+ PGMMREGISTERSHAREDMODULEREQ pReq;
+ pReq = (PGMMREGISTERSHAREDMODULEREQ)RTMemAllocZ(RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]));
+ AssertReturn(pReq, VERR_NO_MEMORY);
+
+ pReq->enmGuestOS = enmGuestOS;
+ pReq->GCBaseAddr = GCBaseAddr;
+ pReq->cbModule = cbModule;
+ pReq->cRegions = cRegions;
+ for (uint32_t i = 0; i < cRegions; i++)
+ pReq->aRegions[i] = paRegions[i];
+
+ int rc = RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Issue the request. In strict builds, do some local tracking.
+ */
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ rc = GMMR3RegisterSharedModule(pVM, pReq);
+ if (RT_SUCCESS(rc))
+ rc = pReq->rc;
+ AssertMsg(rc == VINF_SUCCESS || rc == VINF_GMM_SHARED_MODULE_ALREADY_REGISTERED, ("%Rrc\n", rc));
+
+# ifdef VBOX_STRICT
+ if ( rc == VINF_SUCCESS
+ && g_cSharedModules < RT_ELEMENTS(g_apSharedModules))
+ {
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(g_apSharedModules); i++)
+ if (g_apSharedModules[i] == NULL)
+ {
+ size_t const cbSharedModule = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]);
+ g_apSharedModules[i] = (PGMMREGISTERSHAREDMODULEREQ)RTMemDup(pReq, cbSharedModule);
+ g_cSharedModules++;
+ break;
+ }
+ Assert(i < RT_ELEMENTS(g_apSharedModules));
+ }
+# endif /* VBOX_STRICT */
+ if (RT_SUCCESS(rc))
+ rc = VINF_SUCCESS;
+ }
+ }
+
+ RTMemFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Unregisters a shared module for the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszModuleName Module name.
+ * @param pszVersion Module version.
+ * @param GCBaseAddr Module base address.
+ * @param cbModule Module size.
+ *
+ * @todo This should be a GMMR3 call. No need to involve GMM here.
+ */
+VMMR3DECL(int) PGMR3SharedModuleUnregister(PVM pVM, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
+{
+ Log(("PGMR3SharedModuleUnregister name=%s version=%s base=%RGv size=%x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
+
+ AssertMsgReturn(cbModule > 0 && cbModule < _1G, ("%u\n", cbModule), VERR_OUT_OF_RANGE);
+ if (!pVM->pgm.s.fPageFusionAllowed)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Forward the request to GMM (ring-0).
+ */
+ PGMMUNREGISTERSHAREDMODULEREQ pReq = (PGMMUNREGISTERSHAREDMODULEREQ)RTMemAlloc(sizeof(*pReq));
+ AssertReturn(pReq, VERR_NO_MEMORY);
+
+ pReq->GCBaseAddr = GCBaseAddr;
+ pReq->u32Alignment = 0;
+ pReq->cbModule = cbModule;
+
+ int rc = RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion);
+ if (RT_SUCCESS(rc))
+ {
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ rc = GMMR3UnregisterSharedModule(pVM, pReq);
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+
+# ifdef VBOX_STRICT
+ /*
+ * Update our local tracking.
+ */
+ for (unsigned i = 0; i < g_cSharedModules; i++)
+ {
+ if ( g_apSharedModules[i]
+ && !strcmp(g_apSharedModules[i]->szName, pszModuleName)
+ && !strcmp(g_apSharedModules[i]->szVersion, pszVersion))
+ {
+ RTMemFree(g_apSharedModules[i]);
+ g_apSharedModules[i] = NULL;
+ g_cSharedModules--;
+ break;
+ }
+ }
+# endif /* VBOX_STRICT */
+ }
+ }
+
+ RTMemFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Rendezvous callback that will be called once.
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Pointer to a VMCPUID with the requester's ID.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) pgmR3SharedModuleRegRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VMCPUID idCpu = *(VMCPUID *)pvUser;
+
+ /* Execute on the VCPU that issued the original request to make sure we're in the right cr3 context. */
+ if (pVCpu->idCpu != idCpu)
+ {
+ Assert(pVM->cCpus > 1);
+ return VINF_SUCCESS;
+ }
+
+
+ /* Flush all pending handy page operations before changing any shared page assignments. */
+ int rc = PGMR3PhysAllocateHandyPages(pVM);
+ AssertRC(rc);
+
+ /*
+     * Take the PGM lock here, as the ring-0 path can't deal with busy locks.
+ */
+ LogFlow(("pgmR3SharedModuleRegRendezvous: start (%d)\n", pVM->pgm.s.cSharedPages));
+
+ PGM_LOCK_VOID(pVM);
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ rc = GMMR3CheckSharedModules(pVM);
+ pgmR3PhysAssertSharedPageChecksums(pVM);
+ PGM_UNLOCK(pVM);
+ AssertLogRelRC(rc);
+
+ LogFlow(("pgmR3SharedModuleRegRendezvous: done (%d)\n", pVM->pgm.s.cSharedPages));
+ return rc;
+}
+
+/**
+ * Shared module check helper (called on the way out).
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu VCPU id.
+ */
+static DECLCALLBACK(void) pgmR3CheckSharedModulesHelper(PVM pVM, VMCPUID idCpu)
+{
+ /* We must stall other VCPUs as we'd otherwise have to send IPI flush commands for every single change we make. */
+ STAM_REL_PROFILE_START(&pVM->pgm.s.StatShModCheck, a);
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, pgmR3SharedModuleRegRendezvous, &idCpu);
+ AssertRCSuccess(rc);
+ STAM_REL_PROFILE_STOP(&pVM->pgm.s.StatShModCheck, a);
+}
+
+
+/**
+ * Check all registered modules for changes.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) PGMR3SharedModuleCheckAll(PVM pVM)
+{
+ if (!pVM->pgm.s.fPageFusionAllowed)
+ return VERR_NOT_SUPPORTED;
+
+    /* Queue the actual check, as we are under the IOM lock right now; perform the operation on the way out. */
+ return VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3CheckSharedModulesHelper, 2, pVM, VMMGetCpuId(pVM));
+}
+
+
+# ifdef DEBUG
+/**
+ * Query the state of a page in a shared module.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPtrPage Page address.
+ * @param pfShared Shared status (out).
+ * @param pfPageFlags Page flags (out).
+ */
+VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
+{
+ /* Debug only API for the page fusion testcase. */
+ PGMPTWALK Walk;
+
+ PGM_LOCK_VOID(pVM);
+
+ int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &Walk);
+ switch (rc)
+ {
+ case VINF_SUCCESS:
+ {
+ PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
+ if (pPage)
+ {
+ *pfShared = PGM_PAGE_IS_SHARED(pPage);
+ *pfPageFlags = Walk.fEffective;
+ }
+ else
+ rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+ break;
+ }
+
+ case VERR_PAGE_NOT_PRESENT:
+ case VERR_PAGE_TABLE_NOT_PRESENT:
+ case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
+ case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
+ *pfShared = false;
+ *pfPageFlags = 0;
+ rc = VINF_SUCCESS;
+ break;
+
+ default:
+ break;
+ }
+
+ PGM_UNLOCK(pVM);
+ return rc;
+}
+# endif /* DEBUG */
+
+# ifdef VBOX_STRICT
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmcheckduppages' command.}
+ */
+DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ unsigned cBallooned = 0;
+ unsigned cShared = 0;
+ unsigned cZero = 0;
+ unsigned cUnique = 0;
+ unsigned cDuplicate = 0;
+ unsigned cAllocZero = 0;
+ unsigned cPages = 0;
+ NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PGM_LOCK_VOID(pVM);
+
+ for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
+ {
+ PPGMPAGE pPage = &pRam->aPages[0];
+ RTGCPHYS GCPhys = pRam->GCPhys;
+ uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
+ while (cLeft-- > 0)
+ {
+ if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
+ {
+ switch (PGM_PAGE_GET_STATE(pPage))
+ {
+ case PGM_PAGE_STATE_ZERO:
+ cZero++;
+ break;
+
+ case PGM_PAGE_STATE_BALLOONED:
+ cBallooned++;
+ break;
+
+ case PGM_PAGE_STATE_SHARED:
+ cShared++;
+ break;
+
+ case PGM_PAGE_STATE_ALLOCATED:
+ case PGM_PAGE_STATE_WRITE_MONITORED:
+ {
+ /* Check if the page was allocated, but completely zero. */
+ PGMPAGEMAPLOCK PgMpLck;
+ const void *pvPage;
+ int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
+ if ( RT_SUCCESS(rc)
+ && ASMMemIsZero(pvPage, GUEST_PAGE_SIZE))
+ cAllocZero++;
+ else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
+ cDuplicate++;
+ else
+ cUnique++;
+ if (RT_SUCCESS(rc))
+ pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+ break;
+ }
+
+ default:
+ AssertFailed();
+ break;
+ }
+ }
+
+ /* next */
+ pPage++;
+ GCPhys += GUEST_PAGE_SIZE;
+ cPages++;
+            /* Give some feedback for every 128 pages processed (512 KB). */
+ if ((cPages & 0x7f) == 0)
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, ".");
+ }
+ }
+ PGM_UNLOCK(pVM);
+
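+    /* Note: with 4 KiB guest pages, 256 pages make up one megabyte, hence the "/ 256" below. */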
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "\nNumber of zero pages %08x (%d MB)\n", cZero, cZero / 256);
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of alloczero pages %08x (%d MB)\n", cAllocZero, cAllocZero / 256);
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of ballooned pages %08x (%d MB)\n", cBallooned, cBallooned / 256);
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of shared pages %08x (%d MB)\n", cShared, cShared / 256);
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of unique pages %08x (%d MB)\n", cUnique, cUnique / 256);
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of duplicate pages %08x (%d MB)\n", cDuplicate, cDuplicate / 256);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsharedmodules' command.}
+ */
+DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PGM_LOCK_VOID(pVM);
+ for (unsigned i = 0; i < RT_ELEMENTS(g_apSharedModules); i++)
+ {
+ if (g_apSharedModules[i])
+ {
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Shared module %s (%s):\n", g_apSharedModules[i]->szName, g_apSharedModules[i]->szVersion);
+ for (unsigned j = 0; j < g_apSharedModules[i]->cRegions; j++)
+ pCmdHlp->pfnPrintf(pCmdHlp, NULL, "--- Region %d: base %RGv size %x\n", j, g_apSharedModules[i]->aRegions[j].GCRegionAddr, g_apSharedModules[i]->aRegions[j].cbRegion);
+ }
+ }
+ PGM_UNLOCK(pVM);
+
+ return VINF_SUCCESS;
+}
+
+# endif /* VBOX_STRICT*/
+#endif /* VBOX_WITH_PAGE_SHARING */
diff --git a/src/VBox/VMM/VMMR3/SELM.cpp b/src/VBox/VMM/VMMR3/SELM.cpp
new file mode 100644
index 00000000..e1d36e5e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/SELM.cpp
@@ -0,0 +1,685 @@
+/* $Id: SELM.cpp $ */
+/** @file
+ * SELM - The Selector Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_selm SELM - The Selector Manager
+ *
+ * SELM takes care of GDT, LDT and TSS shadowing in raw-mode, and the injection
+ * of a few hyper selectors for the raw-mode context. In the hardware assisted
+ * virtualization mode its only task is to decode entries in the guest GDT or
+ * LDT once in a while.
+ *
+ * @see grp_selm
+ *
+ *
+ * @section sec_selm_shadowing Shadowing
+ *
+ * SELMR3UpdateFromCPUM() and SELMR3SyncTSS() do the bulk synchronization
+ * work. The three structures (GDT, LDT, TSS) are all shadowed wholesale atm.
+ * The idea is to do it in a more on-demand fashion when we get time. There
+ * are also a whole bunch of issues with the current synchronization of all
+ * three tables, see notes and todos in the code.
+ *
+ * When the guest makes changes to the GDT we will try to update the shadow
+ * copy without involving SELMR3UpdateFromCPUM(), see selmGCSyncGDTEntry().
+ *
+ * When the guest makes LDT changes we'll trigger a full resync of the LDT
+ * (SELMR3UpdateFromCPUM()), which, needless to say, isn't optimal.
+ *
+ * The TSS shadowing is limited to the fields we need to care about, namely SS0
+ * and ESP0. The Patch Manager makes use of these. We monitor updates to the
+ * guest TSS and will try to keep our SS0 and ESP0 copies up to date this way
+ * rather than go the SELMR3SyncTSS() route.
+ *
+ * When in raw-mode SELM also injects a few extra GDT selectors which are used
+ * by the raw-mode (hyper) context. These start their life at the high end of
+ * the table and will be relocated when the guest tries to make use of them...
+ * Well, that was the idea at least; the code isn't quite there yet, which is
+ * why we have trouble with guests which actually have a full sized GDT.
+ *
+ * So, the summary of the current GDT, LDT and TSS shadowing is that there is a
+ * lot of relatively simple and enjoyable work to be done, see @bugref{3267}.
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_SELM
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/dbgf.h>
+#include "SELMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+
+/**
+ * Initializes the SELM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) SELMR3Init(PVM pVM)
+{
+ int rc;
+ LogFlow(("SELMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+     * (The TSS block requires contiguous backing.)
+ */
+ AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
+ AssertCompileMemberAlignment(VM, selm.s, 32); AssertRelease(!(RT_UOFFSETOF(VM, selm.s) & 31));
+
+ /*
+ * Register the saved state data unit.
+ */
+ rc = SSMR3RegisterStub(pVM, "selm", 1);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Statistics.
+ */
+ STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelGst, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables.");
+ STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelShw, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedShadow", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from shadow tables.");
+ STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelReadErrors, STAMTYPE_COUNTER, "/SELM/LoadHidSel/GstReadErrors", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Guest table read errors.");
+ STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelGstNoGood, STAMTYPE_COUNTER, "/SELM/LoadHidSel/NoGoodGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: No good guest table entry.");
+
+ /*
+ * Register info handlers.
+ */
+ DBGFR3InfoRegisterInternalEx(pVM, "gdt", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest, DBGFINFO_FLAGS_RUN_ON_EMT);
+ DBGFR3InfoRegisterInternalEx(pVM, "ldt", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest, DBGFINFO_FLAGS_RUN_ON_EMT);
+ //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest, DBGFINFO_FLAGS_RUN_ON_EMT);
+
+ return rc;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) SELMR3Relocate(PVM pVM)
+{
+ LogFlow(("SELMR3Relocate\n"));
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Terminates the SELM.
+ *
+ * Termination means cleaning up and freeing all resources;
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) SELMR3Term(PVM pVM)
+{
+ NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the SELM component this means that any GDT/LDT/TSS monitors
+ * needs to be removed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) SELMR3Reset(PVM pVM)
+{
+ LogFlow(("SELMR3Reset:\n"));
+ VM_ASSERT_EMT(pVM);
+ RT_NOREF(pVM);
+}
+
+
+/**
+ * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
+ *
+ * See SELMR3GetSelectorInfo for details.
+ *
+ * @returns VBox status code, see SELMR3GetSelectorInfo for details.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param Sel The selector to get info about.
+ * @param pSelInfo Where to store the information.
+ */
+static int selmR3GetSelectorInfo64(PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
+{
+ /*
+ * Read it from the guest descriptor table.
+ */
+/** @todo this is bogus wrt the LDT/GDT limit on long selectors. */
+ X86DESC64 Desc;
+ RTGCPTR GCPtrDesc;
+ if (!(Sel & X86_SEL_LDT))
+ {
+ /* GDT */
+ VBOXGDTR Gdtr;
+ CPUMGetGuestGDTR(pVCpu, &Gdtr);
+ if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
+ return VERR_INVALID_SELECTOR;
+ GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
+ }
+ else
+ {
+ /* LDT */
+ uint64_t GCPtrBase;
+ uint32_t cbLimit;
+ CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+ if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
+ return VERR_INVALID_SELECTOR;
+
+ /* calc the descriptor location. */
+ GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
+ }
+
+ /* read the descriptor. */
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
+ if (RT_FAILURE(rc))
+ {
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(X86DESC));
+ if (RT_FAILURE(rc))
+ return rc;
+ Desc.au64[1] = 0;
+ }
+
+ /*
+ * Extract the base and limit
+ * (We ignore the present bit here, which is probably a bit silly...)
+ */
+ pSelInfo->Sel = Sel;
+ pSelInfo->fFlags = DBGFSELINFO_FLAGS_LONG_MODE;
+ pSelInfo->u.Raw64 = Desc;
+ if (Desc.Gen.u1DescType)
+ {
+ /*
+ * 64-bit code selectors are wide open; it's not possible to detect
+ * 64-bit data or stack selectors without also dragging in assumptions
+ * about the current CS (i.e. that we're executing in 64-bit mode). So,
+ * the selinfo user unfortunately needs to deal with this in the context
+ * the info is used.
+ */
+ if ( Desc.Gen.u1Long
+ && !Desc.Gen.u1DefBig
+ && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
+ {
+            /* Note! We ignore the segment limit hacks that were added by AMD. */
+ pSelInfo->GCPtrBase = 0;
+ pSelInfo->cbLimit = ~(RTGCUINTPTR)0;
+ }
+ else
+ {
+ pSelInfo->cbLimit = X86DESC_LIMIT_G(&Desc);
+ pSelInfo->GCPtrBase = X86DESC_BASE(&Desc);
+ }
+ pSelInfo->SelGate = 0;
+ }
+ else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_LDT
+ || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_AVAIL
+ || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY)
+ {
+        /* Note! LDT descriptors are weird in long mode; we ignore the footnote
+           in the AMD manual here as a simplification. */
+ pSelInfo->GCPtrBase = X86DESC64_BASE(&Desc);
+ pSelInfo->cbLimit = X86DESC_LIMIT_G(&Desc);
+ pSelInfo->SelGate = 0;
+ }
+ else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE
+ || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_TRAP_GATE
+ || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_INT_GATE)
+ {
+ pSelInfo->cbLimit = X86DESC64_BASE(&Desc);
+ pSelInfo->GCPtrBase = Desc.Gate.u16OffsetLow
+ | ((uint32_t)Desc.Gate.u16OffsetHigh << 16)
+ | ((uint64_t)Desc.Gate.u32OffsetTop << 32);
+ pSelInfo->SelGate = Desc.Gate.u16Sel;
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
+ }
+ else
+ {
+ pSelInfo->cbLimit = 0;
+ pSelInfo->GCPtrBase = 0;
+ pSelInfo->SelGate = 0;
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
+ }
+ if (!Desc.Gen.u1Present)
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for selmR3GetSelectorInfo32 and SELMR3GetShadowSelectorInfo that
+ * interprets a legacy descriptor table entry and fills in the selector info
+ * structure from it.
+ *
+ * @param pSelInfo Where to store the selector info. Only the fFlags and
+ * Sel members have been initialized.
+ * @param pDesc The legacy descriptor to parse.
+ */
+DECLINLINE(void) selmR3SelInfoFromDesc32(PDBGFSELINFO pSelInfo, PCX86DESC pDesc)
+{
+ pSelInfo->u.Raw64.au64[1] = 0;
+ pSelInfo->u.Raw = *pDesc;
+ if ( pDesc->Gen.u1DescType
+ || !(pDesc->Gen.u4Type & 4))
+ {
+ pSelInfo->cbLimit = X86DESC_LIMIT_G(pDesc);
+ pSelInfo->GCPtrBase = X86DESC_BASE(pDesc);
+ pSelInfo->SelGate = 0;
+ }
+ else if (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_UNDEFINED4)
+ {
+ pSelInfo->cbLimit = 0;
+ if (pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_TASK_GATE)
+ pSelInfo->GCPtrBase = 0;
+ else
+ pSelInfo->GCPtrBase = pDesc->Gate.u16OffsetLow
+ | (uint32_t)pDesc->Gate.u16OffsetHigh << 16;
+ pSelInfo->SelGate = pDesc->Gate.u16Sel;
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_GATE;
+ }
+ else
+ {
+ pSelInfo->cbLimit = 0;
+ pSelInfo->GCPtrBase = 0;
+ pSelInfo->SelGate = 0;
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_INVALID;
+ }
+ if (!pDesc->Gen.u1Present)
+ pSelInfo->fFlags |= DBGFSELINFO_FLAGS_NOT_PRESENT;
+}
+
+
+/**
+ * Gets information about a 32-bit selector, SELMR3GetSelectorInfo helper.
+ *
+ * See SELMR3GetSelectorInfo for details.
+ *
+ * @returns VBox status code, see SELMR3GetSelectorInfo for details.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param Sel The selector to get info about.
+ * @param pSelInfo Where to store the information.
+ */
+static int selmR3GetSelectorInfo32(PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
+{
+ /*
+ * Read the descriptor entry
+ */
+ pSelInfo->fFlags = 0;
+ if (CPUMIsGuestInProtectedMode(pVCpu))
+ {
+ /*
+ * Read it from the guest descriptor table.
+ */
+ pSelInfo->fFlags = DBGFSELINFO_FLAGS_PROT_MODE;
+
+ RTGCPTR GCPtrDesc;
+ if (!(Sel & X86_SEL_LDT))
+ {
+ /* GDT */
+ VBOXGDTR Gdtr;
+ CPUMGetGuestGDTR(pVCpu, &Gdtr);
+ if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
+ return VERR_INVALID_SELECTOR;
+ GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
+ }
+ else
+ {
+ /* LDT */
+ uint64_t GCPtrBase;
+ uint32_t cbLimit;
+ CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+ if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
+ return VERR_INVALID_SELECTOR;
+
+ /* calc the descriptor location. */
+ GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
+ }
+
+ /* read the descriptor. */
+ X86DESC Desc;
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Extract the base and limit or sel:offset for gates.
+ */
+ pSelInfo->Sel = Sel;
+ selmR3SelInfoFromDesc32(pSelInfo, &Desc);
+
+ return VINF_SUCCESS;
+ }
+ return rc;
+ }
+
+ /*
+ * We're in real mode.
+ */
+ pSelInfo->Sel = Sel;
+ pSelInfo->GCPtrBase = Sel << 4;
+ pSelInfo->cbLimit = 0xffff;
+ pSelInfo->fFlags = DBGFSELINFO_FLAGS_REAL_MODE;
+ pSelInfo->u.Raw64.au64[0] = 0;
+ pSelInfo->u.Raw64.au64[1] = 0;
+ pSelInfo->SelGate = 0;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets information about a selector.
+ *
+ * Intended for the debugger mostly and will prefer the guest descriptor tables
+ * over the shadow ones.
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_INVALID_SELECTOR if the selector isn't fully inside the
+ * descriptor table.
+ * @retval VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
+ * is not returned if the selector itself isn't present, you have to
+ * check that for yourself (see DBGFSELINFO::fFlags).
+ * @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
+ * pagetable or page backing the selector table wasn't present.
+ * @returns Other VBox status code on other errors.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param Sel The selector to get info about.
+ * @param pSelInfo Where to store the information.
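+ *
+ * Typical usage from debugger-style code (an illustrative sketch only),
+ * remembering to check the not-present flag as noted above:
+ * @code
+ *      DBGFSELINFO SelInfo;
+ *      int rc = SELMR3GetSelectorInfo(pVCpu, Sel, &SelInfo);
+ *      if (RT_SUCCESS(rc) && !(SelInfo.fFlags & DBGFSELINFO_FLAGS_NOT_PRESENT))
+ *      {
+ *          // use SelInfo.GCPtrBase, SelInfo.cbLimit, ...
+ *      }
+ * @endcode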
+ */
+VMMR3DECL(int) SELMR3GetSelectorInfo(PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo)
+{
+ AssertPtr(pSelInfo);
+ if (CPUMIsGuestInLongMode(pVCpu))
+ return selmR3GetSelectorInfo64(pVCpu, Sel, pSelInfo);
+ return selmR3GetSelectorInfo32(pVCpu, Sel, pSelInfo);
+}
+
+
+/**
+ * Formats a descriptor.
+ *
+ * @param Desc Descriptor to format.
+ * @param Sel Selector number.
+ * @param pszOutput Output buffer.
+ * @param cchOutput Size of output buffer.
+ */
+static void selmR3FormatDescriptor(X86DESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
+{
+ /*
+ * Make variable description string.
+ */
+ static struct
+ {
+ unsigned cch;
+ const char *psz;
+ } const aTypes[32] =
+ {
+#define STRENTRY(str) { sizeof(str) - 1, str }
+ /* system */
+ STRENTRY("Reserved0 "), /* 0x00 */
+ STRENTRY("TSS16Avail "), /* 0x01 */
+ STRENTRY("LDT "), /* 0x02 */
+ STRENTRY("TSS16Busy "), /* 0x03 */
+ STRENTRY("Call16 "), /* 0x04 */
+ STRENTRY("Task "), /* 0x05 */
+ STRENTRY("Int16 "), /* 0x06 */
+ STRENTRY("Trap16 "), /* 0x07 */
+ STRENTRY("Reserved8 "), /* 0x08 */
+ STRENTRY("TSS32Avail "), /* 0x09 */
+ STRENTRY("ReservedA "), /* 0x0a */
+ STRENTRY("TSS32Busy "), /* 0x0b */
+ STRENTRY("Call32 "), /* 0x0c */
+ STRENTRY("ReservedD "), /* 0x0d */
+ STRENTRY("Int32 "), /* 0x0e */
+ STRENTRY("Trap32 "), /* 0x0f */
+ /* non system */
+ STRENTRY("DataRO "), /* 0x10 */
+ STRENTRY("DataRO Accessed "), /* 0x11 */
+ STRENTRY("DataRW "), /* 0x12 */
+ STRENTRY("DataRW Accessed "), /* 0x13 */
+ STRENTRY("DataDownRO "), /* 0x14 */
+ STRENTRY("DataDownRO Accessed "), /* 0x15 */
+ STRENTRY("DataDownRW "), /* 0x16 */
+ STRENTRY("DataDownRW Accessed "), /* 0x17 */
+ STRENTRY("CodeEO "), /* 0x18 */
+ STRENTRY("CodeEO Accessed "), /* 0x19 */
+ STRENTRY("CodeER "), /* 0x1a */
+ STRENTRY("CodeER Accessed "), /* 0x1b */
+ STRENTRY("CodeConfEO "), /* 0x1c */
+ STRENTRY("CodeConfEO Accessed "), /* 0x1d */
+ STRENTRY("CodeConfER "), /* 0x1e */
+ STRENTRY("CodeConfER Accessed ") /* 0x1f */
+#undef STRENTRY
+ };
+#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
+ char szMsg[128];
+ char *psz = &szMsg[0];
+ unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
+ memcpy(psz, aTypes[i].psz, aTypes[i].cch);
+ psz += aTypes[i].cch;
+
+ if (Desc.Gen.u1Present)
+ ADD_STR(psz, "Present ");
+ else
+ ADD_STR(psz, "Not-Present ");
+ if (Desc.Gen.u1Granularity)
+ ADD_STR(psz, "Page ");
+ if (Desc.Gen.u1DefBig)
+ ADD_STR(psz, "32-bit ");
+ else
+ ADD_STR(psz, "16-bit ");
+#undef ADD_STR
+ *psz = '\0';
+
+ /*
+ * Limit and Base and format the output.
+ */
+ uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
+ uint32_t u32Base = X86DESC_BASE(&Desc);
+
+ RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
+ Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
+}
+
+
+/**
+ * Dumps a descriptor.
+ *
+ * @param Desc Descriptor to dump.
+ * @param Sel Selector number.
+ * @param pszMsg Message to prepend the log entry with.
+ */
+VMMR3DECL(void) SELMR3DumpDescriptor(X86DESC Desc, RTSEL Sel, const char *pszMsg)
+{
+#ifdef LOG_ENABLED
+ if (LogIsEnabled())
+ {
+ char szOutput[128];
+ selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
+ Log(("%s: %s\n", pszMsg, szOutput));
+ }
+#else
+ RT_NOREF3(Desc, Sel, pszMsg);
+#endif
+}
+
+
+/**
+ * Display the guest gdt.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /** @todo SMP support! */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
+ | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_GDTR);
+
+ VBOXGDTR GDTR;
+ CPUMGetGuestGDTR(pVCpu, &GDTR);
+ RTGCPTR GCPtrGDT = GDTR.pGdt;
+ unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(X86DESC);
+
+ pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%RGv limit=%x):\n", GCPtrGDT, GDTR.cbGdt);
+ for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, GCPtrGDT += sizeof(X86DESC))
+ {
+ X86DESC GDTE;
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, &GDTE, GCPtrGDT, sizeof(GDTE));
+ if (RT_SUCCESS(rc))
+ {
+ if (GDTE.Gen.u1Present)
+ {
+ char szOutput[128];
+ selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
+ pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
+ }
+ }
+ else if (rc == VERR_PAGE_NOT_PRESENT)
+ {
+ if ((GCPtrGDT & GUEST_PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
+ pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", iGDT << X86_SEL_SHIFT, GCPtrGDT);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", iGDT << X86_SEL_SHIFT, rc, GCPtrGDT);
+ }
+ NOREF(pszArgs);
+}
+
+
+/**
+ * Display the guest ldt.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ /** @todo SMP support! */
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
+ | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
+
+ uint64_t GCPtrLdt;
+ uint32_t cbLdt;
+ RTSEL SelLdt = CPUMGetGuestLdtrEx(pVCpu, &GCPtrLdt, &cbLdt);
+ if (!(SelLdt & X86_SEL_MASK_OFF_RPL))
+ {
+ pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
+ return;
+ }
+
+ pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RX64 limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
+ unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
+ for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
+ {
+ X86DESC LdtE;
+ int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
+ if (RT_SUCCESS(rc))
+ {
+ if (LdtE.Gen.u1Present)
+ {
+ char szOutput[128];
+ selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
+ pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
+ }
+ }
+ else if (rc == VERR_PAGE_NOT_PRESENT)
+ {
+ if ((GCPtrLdt & GUEST_PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
+ pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, GCPtrLdt);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, GCPtrLdt);
+ }
+ NOREF(pszArgs);
+}
+
+
+/**
+ * Dumps the guest GDT.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
+{
+ DBGFR3Info(pVM->pUVM, "gdt", NULL, NULL);
+}
+
+
+/**
+ * Dumps the guest LDT.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
+{
+ DBGFR3Info(pVM->pUVM, "ldt", NULL, NULL);
+}
+
diff --git a/src/VBox/VMM/VMMR3/SSM.cpp b/src/VBox/VMM/VMMR3/SSM.cpp
new file mode 100644
index 00000000..f03a0ba9
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/SSM.cpp
@@ -0,0 +1,9944 @@
+/* $Id: SSM.cpp $ */
+/** @file
+ * SSM - Saved State Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/** @page pg_ssm SSM - The Saved State Manager
+ *
+ * The Saved State Manager (SSM) implements facilities for saving and loading a
+ * VM state in a structural manner using callbacks for named data units.
+ *
+ * At init time each of the VMM components, Devices, Drivers and one or two
+ * other things will register data units which they need to save and restore.
+ * Each unit has a unique name (ASCII), an instance number, and a set of
+ * callbacks associated with it. The name will be used to identify the unit
+ * during restore. The callbacks are for the two operations, save and restore.
+ * There are three callbacks for each of the two - a prepare, an execute and
+ * a complete
+ * - giving each component ample opportunity to perform actions both before and
+ * afterwards.
+ *
+ * The SSM provides a number of APIs for encoding and decoding the data: @see
+ * grp_ssm
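+ *
+ * As a minimal sketch (the unit name "foo", its state member and the version
+ * number here are made up for illustration), a pair of save/load execute
+ * callbacks typically mirror each other:
+ *
+ * @code
+ * static DECLCALLBACK(int) fooR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
+ * {
+ *     return SSMR3PutU32(pSSM, pVM->foo.s.u32Something);
+ * }
+ *
+ * static DECLCALLBACK(int) fooR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+ * {
+ *     AssertMsgReturn(uVersion == 1, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+ *     Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+ *     return SSMR3GetU32(pSSM, &pVM->foo.s.u32Something);
+ * }
+ * @endcode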
+ *
+ *
+ *
+ * @section sec_ssm_live_snapshots Live Snapshots
+ *
+ * The live snapshots feature (LS) is similar to teleportation (TP) and was a
+ * natural first step when implementing TP. The main differences between LS and
+ * TP are that after a live snapshot we will have a saved state file, disk image
+ * snapshots, and the VM will still be running.
+ *
+ * Compared to normal saved states and snapshots, the difference is that the
+ * VM is running while we do most of the saving. Prior to LS, there was only
+ * one round of callbacks during saving and the VM was paused during it. With
+ * LS there are 1 or more passes while the VM is still running and a final one
+ * after it has been paused. The runtime passes are executed on a dedicated
+ * thread running at the same priority as the EMTs so that the saving doesn't
+ * starve or lose out in scheduling (note: not implemented yet). The final
+ * pass is done on EMT(0).
+ *
+ * There are a couple of common reasons why LS and TP will fail:
+ * - Memory configuration changed (PCI memory mappings).
+ * - Takes too long (TP) / Too much output (LS).
+ *
+ *
+ * The live saving sequence is something like this:
+ *
+ * -# SSMR3LiveSave is called on EMT0. It returns a saved state
+ * handle.
+ * -# SSMR3LiveDoStep1 is called on a non-EMT. This will save the major
+ * parts of the state while the VM may still be running.
+ * -# The VM is suspended.
+ * -# SSMR3LiveDoStep2 is called on EMT0 to save the remainder of the state
+ * in the normal way.
+ * -# The client does any necessary reconfiguration of harddisks and
+ * similar.
+ * -# SSMR3LiveDone is called on EMT0 to close the handle.
+ * -# The VM is resumed or powered off and destroyed.
+ *
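+ * In code, the driver of this sequence (see VMR3Save and friends in VM.cpp)
+ * boils down to something like the following sketch; the SSMR3LiveSave
+ * arguments and the suspend step are elided here for brevity:
+ *
+ * @code
+ * PSSMHANDLE pSSM;
+ * int rc = SSMR3LiveSave(pVM, ..., &pSSM);       // on EMT(0)
+ * if (RT_SUCCESS(rc))
+ * {
+ *     rc = SSMR3LiveDoStep1(pSSM);               // on a non-EMT thread
+ *     if (RT_SUCCESS(rc))
+ *         rc = ...;                              // suspend the VM
+ *     if (RT_SUCCESS(rc))
+ *         rc = SSMR3LiveDoStep2(pSSM);           // back on EMT(0)
+ *     int rc2 = SSMR3LiveDone(pSSM);             // on EMT(0)
+ *     if (RT_SUCCESS(rc))
+ *         rc = rc2;
+ * }
+ * @endcode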
+ *
+ * @section sec_ssm_teleportation Teleportation
+ *
+ * As mentioned in the previous section, the main differences between this and
+ * live snapshots are in where the saved state is written and what state the
+ * local VM is in afterwards - at least from the VMM point of view. The
+ * necessary administrative work - establishing the connection to the remote
+ * machine, cloning the VM config on it and doing lowlevel saved state data
+ * transfer - is taken care of by the layer above the VMM (i.e. Main).
+ *
+ * The SSM data format was made streamable for the purpose of teleportation
+ * (v1.2 was the last non-streamable version).
+ *
+ *
+ * @section sec_ssm_format Saved State Format
+ *
+ * The stream format starts with a header (SSMFILEHDR) that indicates the
+ * version and such things, it is followed by zero or more saved state units
+ * (name + instance + pass), and the stream concludes with a footer
+ * (SSMFILEFTR) that contains unit counts and optionally a checksum for the
+ * entire file. (In version 1.2 and earlier, the checksum was in the header and
+ * there was no footer. This meant that the header was updated after the entire
+ * file was written.)
+ *
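+ * Schematically, a v2.0 stream thus looks something like this (the directory
+ * preceding the footer is an implementation detail, hinted at by the
+ * SSMFILEDIR_MAGIC define further down in this file):
+ *
+ * @verbatim
+ *   SSMFILEHDR          magic "\177VirtualBox SavedState V2.0\n"
+ *   SSMFILEUNITHDRV2    magic "\nUnit\n"  + data records + terminator record
+ *   ...
+ *   SSMFILEUNITHDRV2    magic "\nTheEnd"  (end marker)
+ *   directory           magic "\nDir\n"
+ *   SSMFILEFTR          magic "\nFooter"
+ * @endverbatim
+ *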
+ * Each saved state unit starts with a variable sized header
+ * (SSMFILEUNITHDRV2) that contains the name, instance and pass. The data
+ * follows the header and is encoded as records with a 2-8 byte record header
+ * indicating the type, flags and size. The first byte in the record header
+ * indicates the type and flags:
+ *
+ * - bits 0..3: Record type:
+ * - type 0: Invalid.
+ * - type 1: Terminator with CRC-32 and unit size.
+ * - type 2: Raw data record.
+ * - type 3: Raw data compressed by LZF. The data is prefixed by an 8-bit
+ * field containing the length of the uncompressed data given in
+ * 1KB units.
+ * - type 4: Zero data. The record header is followed by an 8-bit field
+ * counting the length of the zero data given in 1KB units.
+ * - type 5: Named data - length prefixed name followed by the data. This
+ * type is not implemented yet as we're missing the API part, so
+ * the type assignment is tentative.
+ * - types 6 thru 15 are currently undefined.
+ * - bit 4: Important (set), can be skipped (clear).
+ * - bit 5: Undefined flag, must be zero.
+ * - bit 6: Undefined flag, must be zero.
+ * - bit 7: "magic" bit, always set.
+ *
+ * Record header byte 2 (optionally thru 7) is the size of the following data
+ * encoded in UTF-8 style. To make buffering simpler and more efficient during
+ * the save operation, the strict checks enforcing optimal encoding have been
+ * relaxed for the 2 and 3 byte encodings.
+ *
+ * (In version 1.2 and earlier the unit data was compressed and not record
+ * based. The unit header contained the compressed size of the data, i.e. it
+ * needed updating after the data was written.)
+ *
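+ * A reader can thus validate and decode the leading type-and-flags byte of a
+ * record along these lines (an illustrative sketch only, using the SSM_REC_*
+ * macros defined further down in this file; abHdr is a hypothetical buffer
+ * holding the record header):
+ *
+ * @code
+ * uint8_t const u8TypeAndFlags = abHdr[0];
+ * AssertLogRelReturn(SSM_REC_ARE_TYPE_AND_FLAGS_VALID(u8TypeAndFlags), VERR_SSM_INTEGRITY);
+ * uint8_t const uType      = u8TypeAndFlags & SSM_REC_TYPE_MASK;
+ * bool    const fImportant = RT_BOOL(u8TypeAndFlags & SSM_REC_FLAGS_IMPORTANT);
+ * @endcode
+ *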
+ *
+ * @section sec_ssm_future Future Changes
+ *
+ * There are plans to extend SSM to make it easier to be both backwards and
+ * (somewhat) forwards compatible. One of the new features will be the ability
+ * to classify units and data items as unimportant (added to the format in
+ * v2.0). Another suggested feature is naming data items (also added to the
+ * format in v2.0), perhaps by extending the SSMR3PutStruct API. Both features
+ * will require API changes; the naming one may require both buffering of the
+ * stream and some helpers to manage it.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_SSM
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vmm.h>
+#include "SSMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/version.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/crc.h>
+#include <iprt/file.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+#include <iprt/thread.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/uuid.h>
+#include <iprt/zip.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The max length of a unit name. */
+#define SSM_MAX_NAME_SIZE 48
+
+/** Saved state file magic base string. */
+#define SSMFILEHDR_MAGIC_BASE "\177VirtualBox SavedState "
+/** Saved state file magic indicating version 1.x. */
+#define SSMFILEHDR_MAGIC_V1_X "\177VirtualBox SavedState V1."
+/** Saved state file v1.1 magic. */
+#define SSMFILEHDR_MAGIC_V1_1 "\177VirtualBox SavedState V1.1\n"
+/** Saved state file v1.2 magic. */
+#define SSMFILEHDR_MAGIC_V1_2 "\177VirtualBox SavedState V1.2\n\0\0\0"
+/** Saved state file v2.0 magic. */
+#define SSMFILEHDR_MAGIC_V2_0 "\177VirtualBox SavedState V2.0\n\0\0\0"
+
+/** @name SSMFILEHDR::fFlags
+ * @{ */
+/** The stream is checksummed up to the footer using CRC-32. */
+#define SSMFILEHDR_FLAGS_STREAM_CRC32 RT_BIT_32(0)
+/** Indicates that the file was produced by a live save. */
+#define SSMFILEHDR_FLAGS_STREAM_LIVE_SAVE RT_BIT_32(1)
+/** @} */
+
+/** The directory magic. */
+#define SSMFILEDIR_MAGIC "\nDir\n\0\0"
+
+/** The file footer magic. */
+#define SSMFILEFTR_MAGIC "\nFooter"
+
+/** Data unit magic. */
+#define SSMFILEUNITHDR_MAGIC "\nUnit\n\0"
+/** Data end marker magic. */
+#define SSMFILEUNITHDR_END "\nTheEnd"
+
+
+/** @name Record Types (data unit)
+ * @{ */
+/** The record type mask. */
+#define SSM_REC_TYPE_MASK UINT8_C(0x0f)
+/** Invalid record. */
+#define SSM_REC_TYPE_INVALID 0
+/** Normal termination record, see SSMRECTERM. */
+#define SSM_REC_TYPE_TERM 1
+/** Raw data. The data follows the size field without further ado. */
+#define SSM_REC_TYPE_RAW 2
+/** Raw data compressed by LZF.
+ * The record header is followed by an 8-bit field containing the size of the
+ * uncompressed data in 1KB units. The compressed data is after it. */
+#define SSM_REC_TYPE_RAW_LZF 3
+/** Raw zero data.
+ * The record header is followed by an 8-bit field containing the size of the
+ * zero data in 1KB units. */
+#define SSM_REC_TYPE_RAW_ZERO 4
+/** Named data items.
+ * A length-prefixed, zero-terminated string (i.e. max 255 chars) followed by the data. */
+#define SSM_REC_TYPE_NAMED 5
+/** Macro for validating the record type.
+ * This can be used with the flags+type byte, no need to mask out the type first. */
+#define SSM_REC_TYPE_IS_VALID(u8Type) ( ((u8Type) & SSM_REC_TYPE_MASK) > SSM_REC_TYPE_INVALID \
+ && ((u8Type) & SSM_REC_TYPE_MASK) <= SSM_REC_TYPE_NAMED )
+/** @} */
+
+/** The flag mask. */
+#define SSM_REC_FLAGS_MASK UINT8_C(0xf0)
+/** The record is important if this flag is set, if clear it can be omitted. */
+#define SSM_REC_FLAGS_IMPORTANT UINT8_C(0x10)
+/** This flag is always set. */
+#define SSM_REC_FLAGS_FIXED UINT8_C(0x80)
+/** Macro for validating the flags.
+ * No need to mask the flags out of the flags+type byte before invoking this macro. */
+#define SSM_REC_FLAGS_ARE_VALID(fFlags) ( ((fFlags) & UINT8_C(0xe0)) == UINT8_C(0x80) )
+
+/** Macro for validating the type and flags byte in a data record. */
+#define SSM_REC_ARE_TYPE_AND_FLAGS_VALID(u8) ( SSM_REC_FLAGS_ARE_VALID(u8) && SSM_REC_TYPE_IS_VALID(u8) )
+
+/** @name SSMRECTERM::fFlags
+ * @{ */
+/** There is a CRC-32 value for the stream. */
+#define SSMRECTERM_FLAGS_CRC32 UINT16_C(0x0001)
+/** @} */
+
+/** Start structure magic. (Isaac Asimov) */
+#define SSMR3STRUCT_BEGIN UINT32_C(0x19200102)
+/** End structure magic. (Isaac Asimov) */
+#define SSMR3STRUCT_END UINT32_C(0x19920406)
+
+
+/** Number of bytes to log in Log2 and Log4 statements. */
+#define SSM_LOG_BYTES 16
+
+/** SSMHANDLE::fCancelled value indicating that the operation has been
+ * cancelled. */
+#define SSMHANDLE_CANCELLED UINT32_C(0xdeadbeef)
+/** SSMHANDLE::fCancelled value indicating no cancellation. */
+#define SSMHANDLE_OK UINT32_C(0x77777777)
+
+
+/** Macro for checking the u32CRC field of a structure.
+ * The Msg can assume there are u32ActualCRC and u32CRC in the context. */
+#define SSM_CHECK_CRC32_RET(p, cb, Msg) \
+ do \
+ { \
+ uint32_t u32CRC = (p)->u32CRC; \
+ (p)->u32CRC = 0; \
+ uint32_t u32ActualCRC = RTCrc32((p), (cb)); \
+ (p)->u32CRC = u32CRC; \
+ AssertLogRelMsgReturn(u32ActualCRC == u32CRC, Msg, VERR_SSM_INTEGRITY_CRC); \
+ } while (0)
+
+/** The number of bytes to compress in one block.
+ * Must be a multiple of 1KB. */
+#define SSM_ZIP_BLOCK_SIZE _4K
+AssertCompile(SSM_ZIP_BLOCK_SIZE / _1K * _1K == SSM_ZIP_BLOCK_SIZE);
+
+
+/**
+ * Asserts that the handle is writable and returns with VERR_SSM_INVALID_STATE
+ * if it isn't.
+ */
+#define SSM_ASSERT_WRITEABLE_RET(pSSM) \
+ AssertMsgReturn( pSSM->enmOp == SSMSTATE_SAVE_EXEC \
+ || pSSM->enmOp == SSMSTATE_LIVE_EXEC,\
+ ("Invalid state %d\n", pSSM->enmOp), VERR_SSM_INVALID_STATE);
+
+/**
+ * Asserts that the handle is readable and returns with VERR_SSM_INVALID_STATE
+ * if it isn't.
+ */
+#define SSM_ASSERT_READABLE_RET(pSSM) \
+ AssertMsgReturn( pSSM->enmOp == SSMSTATE_LOAD_EXEC \
+ || pSSM->enmOp == SSMSTATE_OPEN_READ,\
+ ("Invalid state %d\n", pSSM->enmOp), VERR_SSM_INVALID_STATE);
+
+/** Checks for cancellation and returns if pending.
+ * Sets SSMHANDLE::rc to VERR_SSM_CANCELLED (if it still indicates success) and
+ * then returns SSMHANDLE::rc. (Debug logging only.) */
+#define SSM_CHECK_CANCELLED_RET(pSSM) \
+ do \
+ { \
+ if (RT_UNLIKELY(ASMAtomicUoReadU32(&(pSSM)->fCancelled) == SSMHANDLE_CANCELLED)) \
+ { \
+ LogFlow(("%Rfn: Cancelled -> VERR_SSM_CANCELLED\n", __PRETTY_FUNCTION__)); \
+ if (RT_SUCCESS((pSSM)->rc)) \
+ (pSSM)->rc = VERR_SSM_CANCELLED; \
+ return (pSSM)->rc; \
+ } \
+ } while (0)
+
+/**
+ * Asserts that the handle is somewhat valid. No returns as this is just a
+ * simple safeguard for catching bad API calls. */
+#define SSM_ASSERT_VALID_HANDLE(pSSM) \
+ do \
+ { \
+ AssertPtr(pSSM); \
+ Assert(pSSM->enmOp > SSMSTATE_INVALID && pSSM->enmOp < SSMSTATE_END); \
+ } while (0)
+
+
+/** @def SSM_HOST_IS_MSC_32
+ * Set to 1 if the host is 32-bit MSC, otherwise set to 0.
+ * */
+#if defined(_MSC_VER) && HC_ARCH_BITS == 32
+# define SSM_HOST_IS_MSC_32 1
+#else
+# define SSM_HOST_IS_MSC_32 0
+#endif
+
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** SSM state. */
+typedef enum SSMSTATE
+{
+ SSMSTATE_INVALID = 0,
+ SSMSTATE_LIVE_PREP,
+ SSMSTATE_LIVE_STEP1,
+ SSMSTATE_LIVE_EXEC,
+ SSMSTATE_LIVE_VOTE,
+ SSMSTATE_LIVE_STEP2,
+ SSMSTATE_SAVE_PREP,
+ SSMSTATE_SAVE_EXEC,
+ SSMSTATE_SAVE_DONE,
+ SSMSTATE_LOAD_PREP,
+ SSMSTATE_LOAD_EXEC,
+ SSMSTATE_LOAD_DONE,
+ SSMSTATE_OPEN_READ,
+ SSMSTATE_END
+} SSMSTATE;
+
+
+/** Pointer to a SSM stream buffer. */
+typedef struct SSMSTRMBUF *PSSMSTRMBUF;
+/**
+ * A SSM stream buffer.
+ */
+typedef struct SSMSTRMBUF
+{
+ /** The buffer data. */
+ uint8_t abData[_64K];
+
+ /** The stream position of this buffer. */
+ uint64_t offStream;
+ /** The amount of buffered data. */
+ uint32_t cb;
+ /** End of stream indicator (for read streams only). */
+ bool fEndOfStream;
+ /** The nano timestamp set by ssmR3StrmGetFreeBuf. */
+ uint64_t NanoTS;
+ /** Pointer to the next buffer in the chain. */
+ PSSMSTRMBUF volatile pNext;
+} SSMSTRMBUF;
+
+/**
+ * SSM stream.
+ *
+ * This is a typical producer / consumer setup with a dedicated I/O thread and
+ * fixed number of buffers for read ahead and write back.
+ */
+typedef struct SSMSTRM
+{
+ /** The stream method table. */
+ PCSSMSTRMOPS pOps;
+ /** The user argument for the stream methods.
+ * For file based streams, this is the file handle and not a pointer. */
+ void *pvUser;
+
+ /** Write (set) or read (clear) stream. */
+ bool fWrite;
+ /** Termination indicator. */
+ bool volatile fTerminating;
+ /** Indicates whether it is necessary to seek before the next buffer is
+ * read from the stream. This is used to avoid a seek in ssmR3StrmPeekAt. */
+ bool fNeedSeek;
+ /** Stream error status. */
+ int32_t volatile rc;
+ /** The handle of the I/O thread. This is set to nil when not active. */
+ RTTHREAD hIoThread;
+ /** Where to seek to. */
+ uint64_t offNeedSeekTo;
+
+ /** The head of the consumer queue.
+ * For save the consumer is the I/O thread. For load the I/O thread is the
+ * producer. */
+ PSSMSTRMBUF volatile pHead;
+ /** Chain of free buffers.
+ * The consumer/producer roles are the inverse of pHead. */
+ PSSMSTRMBUF volatile pFree;
+ /** Event that's signalled when pHead is updated. */
+ RTSEMEVENT hEvtHead;
+ /** Event that's signalled when pFree is updated. */
+ RTSEMEVENT hEvtFree;
+
+    /** List of pending buffers that have been dequeued from pHead and reversed. */
+ PSSMSTRMBUF pPending;
+ /** Pointer to the current buffer. */
+ PSSMSTRMBUF pCur;
+ /** The stream offset of the current buffer. */
+ uint64_t offCurStream;
+ /** The current buffer offset. */
+ uint32_t off;
+ /** Whether we're checksumming reads/writes. */
+ bool fChecksummed;
+ /** The stream CRC if fChecksummed is set. */
+ uint32_t u32StreamCRC;
+ /** How far into the buffer u32StreamCRC is up-to-date.
+ * This may lag behind off as it's desirable to checksum as large blocks as
+ * possible. */
+ uint32_t offStreamCRC;
+} SSMSTRM;
+/** Pointer to a SSM stream. */
+typedef SSMSTRM *PSSMSTRM;
+
+
+/**
+ * Handle structure.
+ */
+typedef struct SSMHANDLE
+{
+ /** Stream/buffer manager. */
+ SSMSTRM Strm;
+
+ /** Pointer to the VM. */
+ PVM pVM;
+ /** The current operation. */
+ SSMSTATE enmOp;
+ /** What to do after save completes. (move the enum) */
+ SSMAFTER enmAfter;
+ /** Flag indicating that the operation has been cancelled. */
+ uint32_t volatile fCancelled;
+ /** The current rc of the save operation. */
+ int32_t rc;
+ /** Number of compressed bytes left in the current data unit (V1). */
+ uint64_t cbUnitLeftV1;
+ /** The current compressed? offset into the data unit. */
+ uint64_t offUnit;
+ /** The current user data offset into the unit (debug purposes). */
+ uint64_t offUnitUser;
+ /** Indicates that this is a live save or restore operation. */
+ bool fLiveSave;
+
+ /** Pointer to the progress callback function. */
+ PFNVMPROGRESS pfnProgress;
+ /** User specified argument to the callback function. */
+ void *pvUser;
+ /** Next completion percentage. (corresponds to offEstProgress) */
+ unsigned uPercent;
+ /** The position of the next progress callback in the estimated file. */
+ uint64_t offEstProgress;
+ /** The estimated total byte count.
+ * (Only valid after the prep.) */
+ uint64_t cbEstTotal;
+ /** Current position in the estimated file. */
+ uint64_t offEst;
+ /** End of current unit in the estimated file. */
+ uint64_t offEstUnitEnd;
+ /** The amount of % we reserve for the 'live' stage */
+ unsigned uPercentLive;
+ /** The amount of % we reserve for the 'prepare' phase */
+ unsigned uPercentPrepare;
+ /** The amount of % we reserve for the 'done' stage */
+ unsigned uPercentDone;
+ /** The lowest value reported via SSMR3HandleReportLivePercent during one
+ * vote run. */
+ unsigned uReportedLivePercent;
+ /** The filename, NULL if remote stream. */
+ const char *pszFilename;
+
+ union
+ {
+ /** Write data. */
+ struct
+ {
+ /** Offset into the databuffer. */
+ uint32_t offDataBuffer;
+ /** Space for the record header. */
+ uint8_t abRecHdr[1+7];
+ /** Data buffer. */
+ uint8_t abDataBuffer[4096];
+ /** The maximum downtime given as milliseconds. */
+ uint32_t cMsMaxDowntime;
+ } Write;
+
+ /** Read data. */
+ struct
+ {
+ /** V1: The decompressor of the current data unit. */
+ PRTZIPDECOMP pZipDecompV1;
+ /** The major format version number. */
+ uint32_t uFmtVerMajor;
+ /** The minor format version number. */
+ uint32_t uFmtVerMinor;
+
+ /** V2: Unread bytes in the current record. */
+ uint32_t cbRecLeft;
+ /** V2: Bytes in the data buffer. */
+ uint32_t cbDataBuffer;
+ /** V2: Current buffer position. */
+ uint32_t offDataBuffer;
+ /** V2: End of data indicator. */
+ bool fEndOfData;
+            /** V2: The type and flags byte of the current record. */
+ uint8_t u8TypeAndFlags;
+
+ /** @name Context info for SSMR3SetLoadError.
+ * @{ */
+ /** Pointer to the header for the current unit. */
+ PSSMUNIT pCurUnit;
+ /** The version of the current unit if in the load exec stage. */
+ uint32_t uCurUnitVer;
+ /** The pass number of the current unit if in the load exec stage. */
+ uint32_t uCurUnitPass;
+ /** Whether SSMR3SetLoadError[V] has been called.
+ * @note Using ASMAtomicXchgBool because I'm very lazy. */
+ bool volatile fHaveSetError;
+ /** @} */
+
+ /** RTGCPHYS size in bytes. (Only applicable when loading/reading.) */
+ unsigned cbGCPhys;
+ /** RTGCPTR size in bytes. (Only applicable when loading/reading.) */
+ unsigned cbGCPtr;
+ /** Whether cbGCPtr is fixed or settable. */
+ bool fFixedGCPtrSize;
+
+ /** 32-bit MSC saved this? */
+ bool fIsHostMsc32;
+ /** "Host OS" dot "architecture", picked up from recent SSM data units. */
+ char szHostOSAndArch[32];
+
+ /** @name Header info (set by ssmR3ValidateFile)
+ * @{ */
+ /** The size of the file header. */
+ uint32_t cbFileHdr;
+ /** The major version number. */
+ uint16_t u16VerMajor;
+ /** The minor version number. */
+ uint16_t u16VerMinor;
+ /** The build number. */
+ uint32_t u32VerBuild;
+ /** The SVN revision. */
+ uint32_t u32SvnRev;
+ /** 32 or 64 depending on the host. */
+ uint8_t cHostBits;
+ /** Whether the stream is checksummed (SSMFILEHDR_FLAGS_STREAM_CRC32). */
+ bool fStreamCrc32;
+ /** The CRC of the loaded file. */
+ uint32_t u32LoadCRC;
+ /** The size of the load file. */
+ uint64_t cbLoadFile;
+ /** @} */
+
+ /** V2: Data buffer.
+ * @remarks Be extremely careful when changing the size of this buffer! */
+ uint8_t abDataBuffer[4096];
+
+ /** V2: Decompression buffer for when we cannot use the stream buffer. */
+ uint8_t abComprBuffer[4096];
+ } Read;
+ } u;
+} SSMHANDLE;
+
+
+/**
+ * Header of the saved state file.
+ *
+ * Added in r5xxxx on 2009-07-2?, VirtualBox v3.0.51.
+ */
+typedef struct SSMFILEHDR
+{
+ /** Magic string which identifies this file as a version of VBox saved state
+ * file format (SSMFILEHDR_MAGIC_V2_0). */
+ char szMagic[32];
+ /** The major version number. */
+ uint16_t u16VerMajor;
+ /** The minor version number. */
+ uint16_t u16VerMinor;
+ /** The build number. */
+ uint32_t u32VerBuild;
+ /** The SVN revision. */
+ uint32_t u32SvnRev;
+ /** 32 or 64 depending on the host. */
+ uint8_t cHostBits;
+ /** The size of RTGCPHYS. */
+ uint8_t cbGCPhys;
+ /** The size of RTGCPTR. */
+ uint8_t cbGCPtr;
+ /** Reserved header space - must be zero. */
+ uint8_t u8Reserved;
+ /** The number of units that (may) have stored data in the file. */
+ uint32_t cUnits;
+ /** Flags, see SSMFILEHDR_FLAGS_XXX. */
+ uint32_t fFlags;
+ /** The maximum size of decompressed data. */
+ uint32_t cbMaxDecompr;
+ /** The checksum of this header.
+ * This field is set to zero when calculating the checksum. */
+ uint32_t u32CRC;
+} SSMFILEHDR;
+AssertCompileSize(SSMFILEHDR, 64);
+AssertCompileMemberOffset(SSMFILEHDR, u32CRC, 60);
+AssertCompileMemberSize(SSMFILEHDR, szMagic, sizeof(SSMFILEHDR_MAGIC_V2_0));
+/** Pointer to a saved state file header. */
+typedef SSMFILEHDR *PSSMFILEHDR;
+/** Pointer to a const saved state file header. */
+typedef SSMFILEHDR const *PCSSMFILEHDR;
+
+
+/**
+ * Header of the saved state file.
+ *
+ * Added in r40980 on 2008-12-15, VirtualBox v2.0.51.
+ *
+ * @remarks This is a superset of SSMFILEHDRV11.
+ */
+typedef struct SSMFILEHDRV12
+{
+ /** Magic string which identifies this file as a version of VBox saved state
+ * file format (SSMFILEHDR_MAGIC_V1_2). */
+ char achMagic[32];
+ /** The size of this file. Used to check
+ * whether the save completed and that things are fine otherwise. */
+ uint64_t cbFile;
+ /** File checksum. The actual calculation skips past the u32CRC field. */
+ uint32_t u32CRC;
+ /** Padding. */
+ uint32_t u32Reserved;
+ /** The machine UUID. (Ignored if NIL.) */
+ RTUUID MachineUuid;
+
+ /** The major version number. */
+ uint16_t u16VerMajor;
+ /** The minor version number. */
+ uint16_t u16VerMinor;
+ /** The build number. */
+ uint32_t u32VerBuild;
+ /** The SVN revision. */
+ uint32_t u32SvnRev;
+
+ /** 32 or 64 depending on the host. */
+ uint8_t cHostBits;
+ /** The size of RTGCPHYS. */
+ uint8_t cbGCPhys;
+ /** The size of RTGCPTR. */
+ uint8_t cbGCPtr;
+ /** Padding. */
+ uint8_t au8Reserved;
+} SSMFILEHDRV12;
+AssertCompileSize(SSMFILEHDRV12, 64+16);
+AssertCompileMemberOffset(SSMFILEHDRV12, u32CRC, 40);
+AssertCompileMemberSize(SSMFILEHDRV12, achMagic, sizeof(SSMFILEHDR_MAGIC_V1_2));
+/** Pointer to a saved state file header. */
+typedef SSMFILEHDRV12 *PSSMFILEHDRV12;
+
+
+/**
+ * Header of the saved state file, version 1.1.
+ *
+ * Added in r23677 on 2007-08-17, VirtualBox v1.4.1.
+ */
+typedef struct SSMFILEHDRV11
+{
+ /** Magic string which identifies this file as a version of VBox saved state
+ * file format (SSMFILEHDR_MAGIC_V1_1). */
+ char achMagic[32];
+ /** The size of this file. Used to check
+ * that the save completed and as a basic sanity check otherwise. */
+ uint64_t cbFile;
+ /** File checksum. The actual calculation skips past the u32CRC field. */
+ uint32_t u32CRC;
+ /** Padding. */
+ uint32_t u32Reserved;
+ /** The machine UUID. (Ignored if NIL.) */
+ RTUUID MachineUuid;
+} SSMFILEHDRV11;
+AssertCompileSize(SSMFILEHDRV11, 64);
+AssertCompileMemberOffset(SSMFILEHDRV11, u32CRC, 40);
+/** Pointer to a saved state file header. */
+typedef SSMFILEHDRV11 *PSSMFILEHDRV11;
+
+
+/**
+ * Data unit header.
+ */
+typedef struct SSMFILEUNITHDRV2
+{
+ /** Magic (SSMFILEUNITHDR_MAGIC or SSMFILEUNITHDR_END). */
+ char szMagic[8];
+ /** The offset in the saved state stream of the start of this unit.
+ * This is mainly intended for sanity checking. */
+ uint64_t offStream;
+ /** The CRC-in-progress value this unit starts at. */
+ uint32_t u32CurStreamCRC;
+ /** The checksum of this structure, including the whole name.
+ * Calculated with this field set to zero. */
+ uint32_t u32CRC;
+ /** Data version. */
+ uint32_t u32Version;
+ /** Instance number. */
+ uint32_t u32Instance;
+ /** Data pass number. */
+ uint32_t u32Pass;
+ /** Flags reserved for future extensions. Must be zero. */
+ uint32_t fFlags;
+ /** Size of the data unit name including the terminator. (bytes) */
+ uint32_t cbName;
+ /** Data unit name, variable size. */
+ char szName[SSM_MAX_NAME_SIZE];
+} SSMFILEUNITHDRV2;
+AssertCompileMemberOffset(SSMFILEUNITHDRV2, szName, 44);
+AssertCompileMemberSize(SSMFILEUNITHDRV2, szMagic, sizeof(SSMFILEUNITHDR_MAGIC));
+AssertCompileMemberSize(SSMFILEUNITHDRV2, szMagic, sizeof(SSMFILEUNITHDR_END));
+/** Pointer to SSMFILEUNITHDRV2. */
+typedef SSMFILEUNITHDRV2 *PSSMFILEUNITHDRV2;
+
+
+/**
+ * Data unit header.
+ *
+ * This is used by v1.0, v1.1 and v1.2 of the format.
+ */
+typedef struct SSMFILEUNITHDRV1
+{
+ /** Magic (SSMFILEUNITHDR_MAGIC or SSMFILEUNITHDR_END). */
+ char achMagic[8];
+ /** Number of bytes in this data unit including the header. */
+ uint64_t cbUnit;
+ /** Data version. */
+ uint32_t u32Version;
+ /** Instance number. */
+ uint32_t u32Instance;
+ /** Size of the data unit name including the terminator. (bytes) */
+ uint32_t cchName;
+ /** Data unit name. */
+ char szName[1];
+} SSMFILEUNITHDRV1;
+/** Pointer to SSMFILEUNITHDR. */
+typedef SSMFILEUNITHDRV1 *PSSMFILEUNITHDRV1;
+
+
+/**
+ * Termination data record.
+ */
+typedef struct SSMRECTERM
+{
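+ /** Record type and flags. */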
+ uint8_t u8TypeAndFlags;
+ /** The record size (sizeof(SSMRECTERM) - 2). */
+ uint8_t cbRec;
+ /** Flags, see SSMRECTERM_FLAGS_CRC32. */
+ uint16_t fFlags;
+ /** The checksum of the stream up to fFlags (exclusive). */
+ uint32_t u32StreamCRC;
+ /** The length of this data unit in bytes (including this record). */
+ uint64_t cbUnit;
+} SSMRECTERM;
+AssertCompileSize(SSMRECTERM, 16);
+AssertCompileMemberAlignment(SSMRECTERM, cbUnit, 8);
+/** Pointer to a termination record. */
+typedef SSMRECTERM *PSSMRECTERM;
+/** Pointer to a const termination record. */
+typedef SSMRECTERM const *PCSSMRECTERM;
+
+
+/**
+ * Directory entry.
+ */
+typedef struct SSMFILEDIRENTRY
+{
+ /** The offset of the data unit. */
+ uint64_t off;
+ /** The instance number. */
+ uint32_t u32Instance;
+ /** The CRC-32 of the name excluding the terminator. (lazy bird) */
+ uint32_t u32NameCRC;
+} SSMFILEDIRENTRY;
+AssertCompileSize(SSMFILEDIRENTRY, 16);
+/** Pointer to a directory entry. */
+typedef SSMFILEDIRENTRY *PSSMFILEDIRENTRY;
+/** Pointer to a const directory entry. */
+typedef SSMFILEDIRENTRY const *PCSSMFILEDIRENTRY;
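+
+/* The name hash is presumably just the plain CRC-32 of the unit name, e.g.
+ (sketch; field names assumed from the structures in this file):
+
+ pEntry->u32NameCRC = RTCrc32(pUnit->szName, pUnit->cchName);
+
+ which lets a seek compare hashes before touching the stream data. */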
+
+/**
+ * Directory for the data units from the final pass.
+ *
+ * This is used to speed up SSMR3Seek (it would have to decompress and parse the
+ * whole stream otherwise).
+ */
+typedef struct SSMFILEDIR
+{
+ /** Magic string (SSMFILEDIR_MAGIC). */
+ char szMagic[8];
+ /** The CRC-32 for the whole directory.
+ * Calculated with this field set to zero. */
+ uint32_t u32CRC;
+ /** The number of directory entries. */
+ uint32_t cEntries;
+ /** The directory entries (variable size). */
+ SSMFILEDIRENTRY aEntries[1];
+} SSMFILEDIR;
+AssertCompileSize(SSMFILEDIR, 32);
+/** Pointer to a directory. */
+typedef SSMFILEDIR *PSSMFILEDIR;
+/** Pointer to a const directory. */
+typedef SSMFILEDIR const *PCSSMFILEDIR;
+
+
+/**
+ * Footer structure.
+ */
+typedef struct SSMFILEFTR
+{
+ /** Magic string (SSMFILEFTR_MAGIC). */
+ char szMagic[8];
+ /** The offset of this record in the stream. */
+ uint64_t offStream;
+ /** The CRC for the stream.
+ * This is set to zero if SSMFILEHDR_FLAGS_STREAM_CRC32 is clear. */
+ uint32_t u32StreamCRC;
+ /** Number of directory entries. */
+ uint32_t cDirEntries;
+ /** Reserved footer space - must be zero. */
+ uint32_t u32Reserved;
+ /** The CRC-32 for this structure.
+ * Calculated with this field set to zero. */
+ uint32_t u32CRC;
+} SSMFILEFTR;
+AssertCompileSize(SSMFILEFTR, 32);
+/** Pointer to a footer. */
+typedef SSMFILEFTR *PSSMFILEFTR;
+/** Pointer to a const footer. */
+typedef SSMFILEFTR const *PCSSMFILEFTR;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifndef SSM_STANDALONE
+/** Zeros used by the struct putter.
+ * This must be at least 8 bytes or the code breaks. */
+static uint8_t const g_abZero[_1K] = {0};
+#endif
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifndef SSM_STANDALONE
+static int ssmR3LazyInit(PVM pVM);
+static DECLCALLBACK(int) ssmR3SelfLiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
+static DECLCALLBACK(int) ssmR3SelfSaveExec(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) ssmR3SelfLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static DECLCALLBACK(int) ssmR3LiveControlLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static int ssmR3Register(PVM pVM, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess, const char *pszBefore, PSSMUNIT *ppUnit);
+static int ssmR3LiveControlEmit(PSSMHANDLE pSSM, long double lrdPct, uint32_t uPass);
+#endif
+
+static int ssmR3StrmWriteBuffers(PSSMSTRM pStrm);
+static int ssmR3StrmReadMore(PSSMSTRM pStrm);
+
+#ifndef SSM_STANDALONE
+static int ssmR3DataFlushBuffer(PSSMHANDLE pSSM);
+#endif
+static int ssmR3DataReadRecHdrV2(PSSMHANDLE pSSM);
+
+
+#ifndef SSM_STANDALONE
+
+/**
+ * Cleans up resources allocated by SSM on VM termination.
+ *
+ * @param pVM The cross context VM structure.
+ * @note Not using VMMR3_INT_DECL because of testcases.
+ */
+VMMR3DECL(void) SSMR3Term(PVM pVM)
+{
+ if (pVM->ssm.s.fInitialized)
+ {
+ pVM->ssm.s.fInitialized = false;
+ RTCritSectDelete(&pVM->ssm.s.CancelCritSect);
+ }
+}
+
+
+/**
+ * Performs lazy initialization of the SSM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int ssmR3LazyInit(PVM pVM)
+{
+ /*
+ * Register a saved state unit which we use to put the VirtualBox version,
+ * revision and similar stuff in.
+ */
+ pVM->ssm.s.fInitialized = true;
+ int rc = SSMR3RegisterInternal(pVM, "SSM", 0 /*uInstance*/, 1 /*uVersion*/, 64 /*cbGuess*/,
+ NULL /*pfnLivePrep*/, ssmR3SelfLiveExec, NULL /*pfnLiveVote*/,
+ NULL /*pfnSavePrep*/, ssmR3SelfSaveExec, NULL /*pfnSaveDone*/,
+ NULL /*pfnLoadPrep*/, ssmR3SelfLoadExec, NULL /*pfnLoadDone*/);
+ if (RT_SUCCESS(rc))
+ rc = SSMR3RegisterInternal(pVM, "SSMLiveControl", 0 /*uInstance*/, 1 /*uVersion*/, 1 /*cbGuess*/,
+ NULL /*pfnLivePrep*/, NULL /*pfnLiveExec*/, NULL /*pfnLiveVote*/,
+ NULL /*pfnSavePrep*/, NULL /*pfnSaveExec*/, NULL /*pfnSaveDone*/,
+ NULL /*pfnLoadPrep*/, ssmR3LiveControlLoadExec, NULL /*pfnLoadDone*/);
+
+ /*
+ * Initialize the cancellation critsect now.
+ */
+ if (RT_SUCCESS(rc))
+ rc = RTCritSectInit(&pVM->ssm.s.CancelCritSect);
+ if (RT_SUCCESS(rc))
+ {
+ STAM_REL_REG_USED(pVM, &pVM->ssm.s.uPass, STAMTYPE_U32, "/SSM/uPass", STAMUNIT_COUNT, "Current pass");
+ }
+
+ pVM->ssm.s.fInitialized = RT_SUCCESS(rc);
+ return rc;
+}
+
+
+/**
+ * Do ssmR3SelfSaveExec in pass 0.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uPass The data pass number.
+ */
+static DECLCALLBACK(int) ssmR3SelfLiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ if (uPass == 0)
+ {
+ int rc = ssmR3SelfSaveExec(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = VINF_SSM_DONT_CALL_AGAIN;
+ return rc;
+ }
+ AssertFailed();
+ return VERR_SSM_UNEXPECTED_PASS;
+}
+
+
+/**
+ * Saves useful build information without having to go through the tedious
+ * process of adding it to the header.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ */
+static DECLCALLBACK(int) ssmR3SelfSaveExec(PVM pVM, PSSMHANDLE pSSM)
+{
+ NOREF(pVM);
+
+ /*
+ * String table containing pairs of variable and value strings.
+ * Terminated by two empty strings.
+ */
+ SSMR3PutStrZ(pSSM, "Build Type");
+ SSMR3PutStrZ(pSSM, KBUILD_TYPE);
+ SSMR3PutStrZ(pSSM, "Host OS");
+ SSMR3PutStrZ(pSSM, KBUILD_TARGET "." KBUILD_TARGET_ARCH);
+#ifdef VBOX_OSE
+ SSMR3PutStrZ(pSSM, "OSE");
+ SSMR3PutStrZ(pSSM, "true");
+#endif
+
+ /* terminator */
+ SSMR3PutStrZ(pSSM, "");
+ return SSMR3PutStrZ(pSSM, "");
+}
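+
+/* Illustration of the resulting payload: a flat sequence of zero-terminated
+ strings such as "Build Type", "release", "Host OS", "linux.amd64", "", ""
+ where the two empty strings terminate the table (values vary by build). */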
+
+
+/**
+ * Loads the version, revision and similar information saved by ssmR3SelfSaveExec.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion The version (1).
+ * @param uPass The pass.
+ */
+static DECLCALLBACK(int) ssmR3SelfLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ AssertLogRelMsgReturn(uVersion == 1, ("%d\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+ NOREF(pVM); NOREF(uPass);
+
+ /*
+ * The first and last passes contain a {name, value} string table that is
+ * terminated by two empty strings. It contains useful informal build
+ * info and can be very handy when something goes wrong after restore.
+ */
+ if ( uPass == 0
+ || uPass == SSM_PASS_FINAL)
+ {
+ for (unsigned i = 0; ; i++)
+ {
+ char szVar[128];
+ char szValue[1024];
+ int rc = SSMR3GetStrZ(pSSM, szVar, sizeof(szVar));
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetStrZ(pSSM, szValue, sizeof(szValue));
+ AssertRCReturn(rc, rc);
+ if (!szVar[0] && !szValue[0])
+ break;
+ if (i == 0)
+ LogRel(("SSM: Saved state info:\n"));
+ LogRel(("SSM: %s: %s\n", szVar, szValue));
+
+ /*
+ * Detect 32-bit MSC for handling SSMFIELD_ENTRY_PAD_MSC32_AUTO.
+ * Save the Host OS string for SSMR3HandleHostOSAndArch.
+ */
+ if (!strcmp(szVar, "Host OS"))
+ {
+ bool fIsHostMsc32 = !strcmp(szValue, "win.x86");
+ if (fIsHostMsc32 != pSSM->u.Read.fIsHostMsc32)
+ {
+ LogRel(("SSM: (fIsHostMsc32 %RTbool => %RTbool)\n", pSSM->u.Read.fIsHostMsc32, fIsHostMsc32));
+ pSSM->u.Read.fIsHostMsc32 = fIsHostMsc32;
+ }
+
+ size_t cchValue = strlen(szValue);
+ size_t cchCopy = RT_MIN(cchValue, sizeof(pSSM->u.Read.szHostOSAndArch) - 1);
+ Assert(cchValue == cchCopy);
+ memcpy(pSSM->u.Read.szHostOSAndArch, szValue, cchCopy);
+ pSSM->u.Read.szHostOSAndArch[cchCopy] = '\0';
+ }
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Load exec callback for the special live save state unit that tracks the
+ * progress of a live save.
+ *
+ * This is saved by ssmR3LiveControlEmit().
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param uVersion The version (1).
+ * @param uPass The pass.
+ */
+static DECLCALLBACK(int) ssmR3LiveControlLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ AssertLogRelMsgReturn(uVersion == 1, ("%d\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+ NOREF(uPass);
+
+ uint16_t uPartsPerTenThousand;
+ int rc = SSMR3GetU16(pSSM, &uPartsPerTenThousand);
+ if (RT_SUCCESS(rc))
+ {
+ /* Scale it down to fit in our exec range. */
+ unsigned uPct = (unsigned)( (long double)uPartsPerTenThousand / 100
+ * (100 - pSSM->uPercentPrepare - pSSM->uPercentDone) / 100)
+ + pSSM->uPercentPrepare;
+ if (uPct != pSSM->uPercent)
+ {
+ AssertMsg(uPct < 100, ("uPct=%d uPartsPerTenThousand=%d uPercentPrepare=%d uPercentDone=%d\n", uPct, uPartsPerTenThousand, pSSM->uPercentPrepare, pSSM->uPercentDone));
+ pSSM->uPercent = uPct;
+ if (pSSM->pfnProgress)
+ pSSM->pfnProgress(pVM->pUVM, RT_MIN(uPct, 100 - pSSM->uPercentDone), pSSM->pvUser);
+ }
+ }
+ return rc;
+}
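+
+/* Worked example of the scaling above (illustrative numbers): with
+ uPartsPerTenThousand=5000 (live phase 50% done), uPercentPrepare=2 and
+ uPercentDone=2, uPct = 50 * 96 / 100 + 2 = 48 + 2 = 50, i.e. the live
+ progress is squeezed into the [2,98) part of the overall progress range. */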
+
+
+/**
+ * Internal registration worker.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @param uInstance The instance id.
+ * @param uVersion The data unit version.
+ * @param cbGuess The guessed data unit size.
+ * @param pszBefore Name of data unit to be placed in front of.
+ * Optional.
+ * @param ppUnit Where to store the inserted unit node.
+ * Caller must fill in the missing details.
+ */
+static int ssmR3Register(PVM pVM, const char *pszName, uint32_t uInstance,
+ uint32_t uVersion, size_t cbGuess, const char *pszBefore, PSSMUNIT *ppUnit)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtr(pszName);
+ AssertReturn(*pszName, VERR_INVALID_PARAMETER);
+ size_t cchName = strlen(pszName);
+ AssertMsgReturn(cchName < SSM_MAX_NAME_SIZE, ("%zu >= %u: %s\n", cchName, SSM_MAX_NAME_SIZE, pszName), VERR_OUT_OF_RANGE);
+
+ AssertReturn(!pszBefore || *pszBefore, VERR_INVALID_PARAMETER);
+ size_t cchBefore = pszBefore ? strlen(pszBefore) : 0;
+ AssertMsgReturn(cchBefore < SSM_MAX_NAME_SIZE, ("%zu >= %u: %s\n", cchBefore, SSM_MAX_NAME_SIZE, pszBefore), VERR_OUT_OF_RANGE);
+
+ /*
+ * Lazy init.
+ */
+ if (!pVM->ssm.s.fInitialized)
+ {
+ int rc = ssmR3LazyInit(pVM);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Walk to the end of the list checking for duplicates as we go.
+ */
+ PSSMUNIT pUnitBeforePrev = NULL;
+ PSSMUNIT pUnitBefore = NULL;
+ PSSMUNIT pUnitPrev = NULL;
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while (pUnit)
+ {
+ if ( pUnit->u32Instance == uInstance
+ && pUnit->cchName == cchName
+ && !memcmp(pUnit->szName, pszName, cchName))
+ {
+ AssertMsgFailed(("Duplicate registration %s\n", pszName));
+ return VERR_SSM_UNIT_EXISTS;
+ }
+ if ( pUnit->cchName == cchBefore
+ && !pUnitBefore
+ && !memcmp(pUnit->szName, pszBefore, cchBefore))
+ {
+ pUnitBeforePrev = pUnitPrev;
+ pUnitBefore = pUnit;
+ }
+
+ /* next */
+ pUnitPrev = pUnit;
+ pUnit = pUnit->pNext;
+ }
+
+ /*
+ * Allocate new node.
+ */
+ pUnit = (PSSMUNIT)MMR3HeapAllocZ(pVM, MM_TAG_SSM, RT_UOFFSETOF_DYN(SSMUNIT, szName[cchName + 1]));
+ if (!pUnit)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Fill in (some) data. (The rest is zeroed by MMR3HeapAllocZ.)
+ */
+ pUnit->u32Version = uVersion;
+ pUnit->u32Instance = uInstance;
+ pUnit->cbGuess = cbGuess;
+ pUnit->cchName = cchName;
+ memcpy(pUnit->szName, pszName, cchName);
+
+ /*
+ * Insert
+ */
+ if (pUnitBefore)
+ {
+ pUnit->pNext = pUnitBefore;
+ if (pUnitBeforePrev)
+ pUnitBeforePrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ }
+ else if (pUnitPrev)
+ pUnitPrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ pVM->ssm.s.cUnits++;
+
+ *ppUnit = pUnit;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Register a PDM Devices data unit.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pszName Data unit name.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @param uVersion Data layout version number.
+ * @param cbGuess The approximate amount of data in the unit.
+ * Only for progress indicators.
+ * @param pszBefore Name of data unit which we should be put in front
+ * of. Optional (NULL).
+ *
+ * @param pfnLivePrep Prepare live save callback, optional.
+ * @param pfnLiveExec Execute live save callback, optional.
+ * @param pfnLiveVote Vote live save callback, optional.
+ *
+ * @param pfnSavePrep Prepare save callback, optional.
+ * @param pfnSaveExec Execute save callback, optional.
+ * @param pfnSaveDone Done save callback, optional.
+ *
+ * @param pfnLoadPrep Prepare load callback, optional.
+ * @param pfnLoadExec Execute load callback, optional.
+ * @param pfnLoadDone Done load callback, optional.
+ */
+VMMR3_INT_DECL(int)
+SSMR3RegisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName,
+ uint32_t uInstance, uint32_t uVersion, size_t cbGuess, const char *pszBefore,
+ PFNSSMDEVLIVEPREP pfnLivePrep, PFNSSMDEVLIVEEXEC pfnLiveExec, PFNSSMDEVLIVEVOTE pfnLiveVote,
+ PFNSSMDEVSAVEPREP pfnSavePrep, PFNSSMDEVSAVEEXEC pfnSaveExec, PFNSSMDEVSAVEDONE pfnSaveDone,
+ PFNSSMDEVLOADPREP pfnLoadPrep, PFNSSMDEVLOADEXEC pfnLoadExec, PFNSSMDEVLOADDONE pfnLoadDone)
+{
+ PSSMUNIT pUnit;
+ int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, pszBefore, &pUnit);
+ if (RT_SUCCESS(rc))
+ {
+ pUnit->enmType = SSMUNITTYPE_DEV;
+ pUnit->u.Dev.pfnLivePrep = pfnLivePrep;
+ pUnit->u.Dev.pfnLiveExec = pfnLiveExec;
+ pUnit->u.Dev.pfnLiveVote = pfnLiveVote;
+ pUnit->u.Dev.pfnSavePrep = pfnSavePrep;
+ pUnit->u.Dev.pfnSaveExec = pfnSaveExec;
+ pUnit->u.Dev.pfnSaveDone = pfnSaveDone;
+ pUnit->u.Dev.pfnLoadPrep = pfnLoadPrep;
+ pUnit->u.Dev.pfnLoadExec = pfnLoadExec;
+ pUnit->u.Dev.pfnLoadDone = pfnLoadDone;
+ pUnit->u.Dev.pDevIns = pDevIns;
+ pUnit->pCritSect = PDMR3DevGetCritSect(pVM, pDevIns);
+ }
+ return rc;
+}
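+
+/* A registration from a device constructor might look like this (sketch;
+ MYDEV_SAVED_STATE_VERSION, MYDEVSTATE and the two callbacks are
+ hypothetical):
+
+ rc = SSMR3RegisterDevice(pVM, pDevIns, "mydev", iInstance,
+ MYDEV_SAVED_STATE_VERSION, sizeof(MYDEVSTATE), NULL, // no pszBefore
+ NULL, NULL, NULL, // live callbacks
+ NULL, mydevSaveExec, NULL, // save callbacks
+ NULL, mydevLoadExec, NULL); // load callbacks
+ */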
+
+
+/**
+ * Register a PDM driver data unit.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param pszName Data unit name.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @param uVersion Data layout version number.
+ * @param cbGuess The approximate amount of data in the unit.
+ * Only for progress indicators.
+ *
+ * @param pfnLivePrep Prepare live save callback, optional.
+ * @param pfnLiveExec Execute live save callback, optional.
+ * @param pfnLiveVote Vote live save callback, optional.
+ *
+ * @param pfnSavePrep Prepare save callback, optional.
+ * @param pfnSaveExec Execute save callback, optional.
+ * @param pfnSaveDone Done save callback, optional.
+ *
+ * @param pfnLoadPrep Prepare load callback, optional.
+ * @param pfnLoadExec Execute load callback, optional.
+ * @param pfnLoadDone Done load callback, optional.
+ */
+VMMR3_INT_DECL(int)
+SSMR3RegisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+ PFNSSMDRVLIVEPREP pfnLivePrep, PFNSSMDRVLIVEEXEC pfnLiveExec, PFNSSMDRVLIVEVOTE pfnLiveVote,
+ PFNSSMDRVSAVEPREP pfnSavePrep, PFNSSMDRVSAVEEXEC pfnSaveExec, PFNSSMDRVSAVEDONE pfnSaveDone,
+ PFNSSMDRVLOADPREP pfnLoadPrep, PFNSSMDRVLOADEXEC pfnLoadExec, PFNSSMDRVLOADDONE pfnLoadDone)
+{
+ PSSMUNIT pUnit;
+ int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL, &pUnit);
+ if (RT_SUCCESS(rc))
+ {
+ pUnit->enmType = SSMUNITTYPE_DRV;
+ pUnit->u.Drv.pfnLivePrep = pfnLivePrep;
+ pUnit->u.Drv.pfnLiveExec = pfnLiveExec;
+ pUnit->u.Drv.pfnLiveVote = pfnLiveVote;
+ pUnit->u.Drv.pfnSavePrep = pfnSavePrep;
+ pUnit->u.Drv.pfnSaveExec = pfnSaveExec;
+ pUnit->u.Drv.pfnSaveDone = pfnSaveDone;
+ pUnit->u.Drv.pfnLoadPrep = pfnLoadPrep;
+ pUnit->u.Drv.pfnLoadExec = pfnLoadExec;
+ pUnit->u.Drv.pfnLoadDone = pfnLoadDone;
+ pUnit->u.Drv.pDrvIns = pDrvIns;
+ }
+ return rc;
+}
+
+
+/**
+ * Register a PDM USB device data unit.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns USB instance.
+ * @param pszName Data unit name.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @param uVersion Data layout version number.
+ * @param cbGuess The approximate amount of data in the unit.
+ * Only for progress indicators.
+ *
+ * @param pfnLivePrep Prepare live save callback, optional.
+ * @param pfnLiveExec Execute live save callback, optional.
+ * @param pfnLiveVote Vote live save callback, optional.
+ *
+ * @param pfnSavePrep Prepare save callback, optional.
+ * @param pfnSaveExec Execute save callback, optional.
+ * @param pfnSaveDone Done save callback, optional.
+ *
+ * @param pfnLoadPrep Prepare load callback, optional.
+ * @param pfnLoadExec Execute load callback, optional.
+ * @param pfnLoadDone Done load callback, optional.
+ */
+VMMR3_INT_DECL(int)
+SSMR3RegisterUsb(PVM pVM, PPDMUSBINS pUsbIns, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+ PFNSSMUSBLIVEPREP pfnLivePrep, PFNSSMUSBLIVEEXEC pfnLiveExec, PFNSSMUSBLIVEVOTE pfnLiveVote,
+ PFNSSMUSBSAVEPREP pfnSavePrep, PFNSSMUSBSAVEEXEC pfnSaveExec, PFNSSMUSBSAVEDONE pfnSaveDone,
+ PFNSSMUSBLOADPREP pfnLoadPrep, PFNSSMUSBLOADEXEC pfnLoadExec, PFNSSMUSBLOADDONE pfnLoadDone)
+{
+ PSSMUNIT pUnit;
+ int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL, &pUnit);
+ if (RT_SUCCESS(rc))
+ {
+ pUnit->enmType = SSMUNITTYPE_USB;
+ pUnit->u.Usb.pfnLivePrep = pfnLivePrep;
+ pUnit->u.Usb.pfnLiveExec = pfnLiveExec;
+ pUnit->u.Usb.pfnLiveVote = pfnLiveVote;
+ pUnit->u.Usb.pfnSavePrep = pfnSavePrep;
+ pUnit->u.Usb.pfnSaveExec = pfnSaveExec;
+ pUnit->u.Usb.pfnSaveDone = pfnSaveDone;
+ pUnit->u.Usb.pfnLoadPrep = pfnLoadPrep;
+ pUnit->u.Usb.pfnLoadExec = pfnLoadExec;
+ pUnit->u.Usb.pfnLoadDone = pfnLoadDone;
+ pUnit->u.Usb.pUsbIns = pUsbIns;
+ }
+ return rc;
+}
+
+
+/**
+ * Register an internal data unit.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @param uVersion Data layout version number.
+ * @param cbGuess The approximate amount of data in the unit.
+ * Only for progress indicators.
+ *
+ * @param pfnLivePrep Prepare live save callback, optional.
+ * @param pfnLiveExec Execute live save callback, optional.
+ * @param pfnLiveVote Vote live save callback, optional.
+ *
+ * @param pfnSavePrep Prepare save callback, optional.
+ * @param pfnSaveExec Execute save callback, optional.
+ * @param pfnSaveDone Done save callback, optional.
+ *
+ * @param pfnLoadPrep Prepare load callback, optional.
+ * @param pfnLoadExec Execute load callback, optional.
+ * @param pfnLoadDone Done load callback, optional.
+ */
+VMMR3DECL(int) SSMR3RegisterInternal(PVM pVM, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+ PFNSSMINTLIVEPREP pfnLivePrep, PFNSSMINTLIVEEXEC pfnLiveExec, PFNSSMINTLIVEVOTE pfnLiveVote,
+ PFNSSMINTSAVEPREP pfnSavePrep, PFNSSMINTSAVEEXEC pfnSaveExec, PFNSSMINTSAVEDONE pfnSaveDone,
+ PFNSSMINTLOADPREP pfnLoadPrep, PFNSSMINTLOADEXEC pfnLoadExec, PFNSSMINTLOADDONE pfnLoadDone)
+{
+ PSSMUNIT pUnit;
+ int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL /* pszBefore */, &pUnit);
+ if (RT_SUCCESS(rc))
+ {
+ pUnit->enmType = SSMUNITTYPE_INTERNAL;
+ pUnit->u.Internal.pfnLivePrep = pfnLivePrep;
+ pUnit->u.Internal.pfnLiveExec = pfnLiveExec;
+ pUnit->u.Internal.pfnLiveVote = pfnLiveVote;
+ pUnit->u.Internal.pfnSavePrep = pfnSavePrep;
+ pUnit->u.Internal.pfnSaveExec = pfnSaveExec;
+ pUnit->u.Internal.pfnSaveDone = pfnSaveDone;
+ pUnit->u.Internal.pfnLoadPrep = pfnLoadPrep;
+ pUnit->u.Internal.pfnLoadExec = pfnLoadExec;
+ pUnit->u.Internal.pfnLoadDone = pfnLoadDone;
+ }
+ return rc;
+}
+
+
+/**
+ * Register an external data unit.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pszName Data unit name.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @param uVersion Data layout version number.
+ * @param cbGuess The approximate amount of data in the unit.
+ * Only for progress indicators.
+ *
+ * @param pfnLivePrep Prepare live save callback, optional.
+ * @param pfnLiveExec Execute live save callback, optional.
+ * @param pfnLiveVote Vote live save callback, optional.
+ *
+ * @param pfnSavePrep Prepare save callback, optional.
+ * @param pfnSaveExec Execute save callback, optional.
+ * @param pfnSaveDone Done save callback, optional.
+ *
+ * @param pfnLoadPrep Prepare load callback, optional.
+ * @param pfnLoadExec Execute load callback, optional.
+ * @param pfnLoadDone Done load callback, optional.
+ * @param pvUser User argument.
+ */
+VMMR3DECL(int) SSMR3RegisterExternal(PUVM pUVM, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+ PFNSSMEXTLIVEPREP pfnLivePrep, PFNSSMEXTLIVEEXEC pfnLiveExec, PFNSSMEXTLIVEVOTE pfnLiveVote,
+ PFNSSMEXTSAVEPREP pfnSavePrep, PFNSSMEXTSAVEEXEC pfnSaveExec, PFNSSMEXTSAVEDONE pfnSaveDone,
+ PFNSSMEXTLOADPREP pfnLoadPrep, PFNSSMEXTLOADEXEC pfnLoadExec, PFNSSMEXTLOADDONE pfnLoadDone, void *pvUser)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ PSSMUNIT pUnit;
+ int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL /* pszBefore */, &pUnit);
+ if (RT_SUCCESS(rc))
+ {
+ pUnit->enmType = SSMUNITTYPE_EXTERNAL;
+ pUnit->u.External.pfnLivePrep = pfnLivePrep;
+ pUnit->u.External.pfnLiveExec = pfnLiveExec;
+ pUnit->u.External.pfnLiveVote = pfnLiveVote;
+ pUnit->u.External.pfnSavePrep = pfnSavePrep;
+ pUnit->u.External.pfnSaveExec = pfnSaveExec;
+ pUnit->u.External.pfnSaveDone = pfnSaveDone;
+ pUnit->u.External.pfnLoadPrep = pfnLoadPrep;
+ pUnit->u.External.pfnLoadExec = pfnLoadExec;
+ pUnit->u.External.pfnLoadDone = pfnLoadDone;
+ pUnit->u.External.pvUser = pvUser;
+ }
+ return rc;
+}
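+
+/* Sketch of an external registration from a frontend; MyExtSaveExec,
+ MyExtLoadExec and pThis are hypothetical:
+
+ rc = SSMR3RegisterExternal(pUVM, "MyExtension", 0, 1, 64,
+ NULL, NULL, NULL, // live callbacks
+ NULL, MyExtSaveExec, NULL, // save callbacks
+ NULL, MyExtLoadExec, NULL, pThis); // load callbacks + user argument
+ */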
+
+
+/**
+ * @callback_method_impl{FNSSMINTLOADEXEC,
+ * Stub that skips the whole unit (see SSMR3RegisterStub).}
+ */
+static DECLCALLBACK(int) ssmR3LoadExecStub(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uVersion); NOREF(uPass);
+ return SSMR3SkipToEndOfUnit(pSSM);
+}
+
+
+/**
+ * Registers a stub state loader for working around legacy saved state units.
+ *
+ * This is used to deal with irrelevant PATM and CSAM saved state units in HM
+ * mode and when built without raw-mode support.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @param uInstance Instance number.
+ */
+VMMR3DECL(int) SSMR3RegisterStub(PVM pVM, const char *pszName, uint32_t uInstance)
+{
+ return SSMR3RegisterInternal(pVM, pszName, uInstance, UINT32_MAX, 0,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ NULL, ssmR3LoadExecStub, NULL);
+}
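+
+/* E.g. when raw-mode support is absent, the obsolete units are stubbed out
+ along these lines (illustrative; the actual call sites are elsewhere):
+
+ SSMR3RegisterStub(pVM, "PATM", 0);
+ SSMR3RegisterStub(pVM, "CSAM", 0);
+ */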
+
+
+/**
+ * Deregister one or more PDM Device data units.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param pszName Data unit name.
+ * Use NULL to deregister all data units for that device instance.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique.
+ * @remark Only for dynamic data units and dynamically unloaded modules.
+ */
+VMMR3_INT_DECL(int) SSMR3DeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName, uint32_t uInstance)
+{
+ /*
+ * Validate input.
+ */
+ if (!pDevIns)
+ {
+ AssertMsgFailed(("pDevIns is NULL!\n"));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Search the list.
+ */
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ int rc = pszName ? VERR_SSM_UNIT_NOT_FOUND : VINF_SUCCESS;
+ PSSMUNIT pUnitPrev = NULL;
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while (pUnit)
+ {
+ if ( pUnit->enmType == SSMUNITTYPE_DEV
+ && ( !pszName
+ || ( pUnit->cchName == cchName
+ && !memcmp(pUnit->szName, pszName, cchName)))
+ && pUnit->u32Instance == uInstance
+ )
+ {
+ if (pUnit->u.Dev.pDevIns == pDevIns)
+ {
+ /*
+ * Unlink it, advance pointer, and free the node.
+ */
+ PSSMUNIT pFree = pUnit;
+ pUnit = pUnit->pNext;
+ if (pUnitPrev)
+ pUnitPrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ pVM->ssm.s.cUnits--;
+ Log(("SSM: Removed data unit '%s' (pdm dev).\n", pFree->szName));
+ MMR3HeapFree(pFree);
+
+ if (pszName)
+ return VINF_SUCCESS;
+ rc = VINF_SUCCESS;
+ continue;
+ }
+ else if (pszName)
+ {
+ AssertMsgFailed(("Caller is not owner! Owner=%p Caller=%p %s\n",
+ pUnit->u.Dev.pDevIns, pDevIns, pszName));
+ return VERR_SSM_UNIT_NOT_OWNER;
+ }
+ }
+
+ /* next */
+ pUnitPrev = pUnit;
+ pUnit = pUnit->pNext;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Deregister one or more PDM Driver data units.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param pszName Data unit name.
+ * Use NULL to deregister all data units for that driver instance.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique. Ignored if pszName is NULL.
+ * @remark Only for dynamic data units and dynamically unloaded modules.
+ */
+VMMR3_INT_DECL(int) SSMR3DeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, uint32_t uInstance)
+{
+ /*
+ * Validate input.
+ */
+ if (!pDrvIns)
+ {
+ AssertMsgFailed(("pDrvIns is NULL!\n"));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Search the list.
+ */
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ int rc = pszName ? VERR_SSM_UNIT_NOT_FOUND : VINF_SUCCESS;
+ PSSMUNIT pUnitPrev = NULL;
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while (pUnit)
+ {
+ if ( pUnit->enmType == SSMUNITTYPE_DRV
+ && ( !pszName
+ || ( pUnit->cchName == cchName
+ && !memcmp(pUnit->szName, pszName, cchName)
+ && pUnit->u32Instance == uInstance))
+ )
+ {
+ if (pUnit->u.Drv.pDrvIns == pDrvIns)
+ {
+ /*
+ * Unlink it, advance pointer, and free the node.
+ */
+ PSSMUNIT pFree = pUnit;
+ pUnit = pUnit->pNext;
+ if (pUnitPrev)
+ pUnitPrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ pVM->ssm.s.cUnits--;
+ Log(("SSM: Removed data unit '%s' (pdm drv).\n", pFree->szName));
+ MMR3HeapFree(pFree);
+
+ if (pszName)
+ return VINF_SUCCESS;
+ rc = VINF_SUCCESS;
+ continue;
+ }
+
+ AssertMsgReturn(!pszName,
+ ("Caller is not owner! Owner=%p Caller=%p %s\n", pUnit->u.Drv.pDrvIns, pDrvIns, pszName),
+ VERR_SSM_UNIT_NOT_OWNER);
+ }
+
+ /* next */
+ pUnitPrev = pUnit;
+ pUnit = pUnit->pNext;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Deregister one or more PDM USB device data units.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns USB device instance.
+ * @param pszName Data unit name.
+ * Use NULL to deregister all data units for that USB device instance.
+ * @param uInstance The instance identifier of the data unit.
+ * This must together with the name be unique. Ignored if pszName is NULL.
+ * @remark Only for dynamic data units and dynamically unloaded modules.
+ */
+VMMR3_INT_DECL(int) SSMR3DeregisterUsb(PVM pVM, PPDMUSBINS pUsbIns, const char *pszName, uint32_t uInstance)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pUsbIns, VERR_INVALID_POINTER);
+
+ /*
+ * Search the list.
+ */
+ size_t cchName = pszName ? strlen(pszName) : 0;
+ int rc = pszName ? VERR_SSM_UNIT_NOT_FOUND : VINF_SUCCESS;
+ PSSMUNIT pUnitPrev = NULL;
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while (pUnit)
+ {
+ if ( pUnit->enmType == SSMUNITTYPE_USB
+ && ( !pszName
+ || ( pUnit->cchName == cchName
+ && !memcmp(pUnit->szName, pszName, cchName)
+ && pUnit->u32Instance == uInstance))
+ )
+ {
+ if (pUnit->u.Usb.pUsbIns == pUsbIns)
+ {
+ /*
+ * Unlink it, advance pointer, and free the node.
+ */
+ PSSMUNIT pFree = pUnit;
+ pUnit = pUnit->pNext;
+ if (pUnitPrev)
+ pUnitPrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ pVM->ssm.s.cUnits--;
+ Log(("SSM: Removed data unit '%s' (pdm drv).\n", pFree->szName));
+ MMR3HeapFree(pFree);
+
+ if (pszName)
+ return VINF_SUCCESS;
+ rc = VINF_SUCCESS;
+ continue;
+ }
+
+ AssertMsgReturn(!pszName,
+ ("Caller is not owner! Owner=%p Caller=%p %s\n", pUnit->u.Usb.pUsbIns, pUsbIns, pszName),
+ VERR_SSM_UNIT_NOT_OWNER);
+ }
+
+ /* next */
+ pUnitPrev = pUnit;
+ pUnit = pUnit->pNext;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Deregister a data unit.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @param enmType Unit type.
+ * @remark Only for dynamic data units.
+ */
+static int ssmR3DeregisterByNameAndType(PVM pVM, const char *pszName, SSMUNITTYPE enmType)
+{
+ /*
+ * Validate input.
+ */
+ if (!pszName)
+ {
+ AssertMsgFailed(("pszName is NULL!\n"));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Search the list.
+ */
+ size_t cchName = strlen(pszName);
+ int rc = VERR_SSM_UNIT_NOT_FOUND;
+ PSSMUNIT pUnitPrev = NULL;
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while (pUnit)
+ {
+ if ( pUnit->enmType == enmType
+ && pUnit->cchName == cchName
+ && !memcmp(pUnit->szName, pszName, cchName))
+ {
+ /*
+ * Unlink it, advance pointer, and free the node.
+ */
+ PSSMUNIT pFree = pUnit;
+ pUnit = pUnit->pNext;
+ if (pUnitPrev)
+ pUnitPrev->pNext = pUnit;
+ else
+ pVM->ssm.s.pHead = pUnit;
+ pVM->ssm.s.cUnits--;
+ Log(("SSM: Removed data unit '%s' (type=%d).\n", pFree->szName, enmType));
+ MMR3HeapFree(pFree);
+ return VINF_SUCCESS;
+ }
+
+ /* next */
+ pUnitPrev = pUnit;
+ pUnit = pUnit->pNext;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Deregister an internal data unit.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @remark Only for dynamic data units.
+ */
+VMMR3DECL(int) SSMR3DeregisterInternal(PVM pVM, const char *pszName)
+{
+ return ssmR3DeregisterByNameAndType(pVM, pszName, SSMUNITTYPE_INTERNAL);
+}
+
+
+/**
+ * Deregister an external data unit.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param pszName Data unit name.
+ * @remark Only for dynamic data units.
+ */
+VMMR3DECL(int) SSMR3DeregisterExternal(PUVM pUVM, const char *pszName)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ return ssmR3DeregisterByNameAndType(pVM, pszName, SSMUNITTYPE_EXTERNAL);
+}
+
+#endif /* !SSM_STANDALONE */
+
+
+/**
+ * Initializes the common stream state before/after opening the underlying file or stream.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param pStrm The stream handle.
+ * @param fChecksummed Whether the stream is to be checksummed while
+ * written/read.
+ * @param cBuffers The number of buffers.
+ */
+static int ssmR3StrmInitInternal(PSSMSTRM pStrm, bool fChecksummed, uint32_t cBuffers)
+{
+ Assert(cBuffers > 0);
+
+ /*
+ * Init the common data members.
+ */
+ pStrm->fTerminating = false;
+ pStrm->fNeedSeek = false;
+ pStrm->rc = VINF_SUCCESS;
+ pStrm->hIoThread = NIL_RTTHREAD;
+ pStrm->offNeedSeekTo = UINT64_MAX;
+
+ pStrm->pHead = NULL;
+ pStrm->pFree = NULL;
+ pStrm->hEvtHead = NIL_RTSEMEVENT;
+ pStrm->hEvtFree = NIL_RTSEMEVENT;
+
+ pStrm->pPending = NULL;
+ pStrm->pCur = NULL;
+ pStrm->offCurStream = 0;
+ pStrm->off = 0;
+ pStrm->fChecksummed = fChecksummed;
+ pStrm->u32StreamCRC = fChecksummed ? RTCrc32Start() : 0;
+ pStrm->offStreamCRC = 0;
+
+ /*
+ * Allocate the buffers. Page align them in case that makes the kernel
+ * and/or cpu happier in some way.
+ */
+ int rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cBuffers; i++)
+ {
+ PSSMSTRMBUF pBuf = (PSSMSTRMBUF)RTMemPageAllocZ(sizeof(*pBuf));
+ if (!pBuf)
+ {
+ if (i > 2)
+ {
+ LogRel(("ssmR3StrmAllocBuffer: WARNING: Could only get %d stream buffers.\n", i));
+ break;
+ }
+ LogRel(("ssmR3StrmAllocBuffer: Failed to allocate stream buffers. (i=%d)\n", i));
+ return VERR_NO_MEMORY;
+ }
+
+ /* link it */
+ pBuf->pNext = pStrm->pFree;
+ pStrm->pFree = pBuf;
+ }
+
+ /*
+ * Create the event semaphores.
+ */
+ rc = RTSemEventCreate(&pStrm->hEvtHead);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = RTSemEventCreate(&pStrm->hEvtFree);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a list of buffers.
+ *
+ * @param pHead Pointer to the head.
+ */
+static void ssmR3StrmDestroyBufList(PSSMSTRMBUF pHead)
+{
+ while (pHead)
+ {
+ PSSMSTRMBUF pCur = pHead;
+ pHead = pCur->pNext;
+ pCur->pNext = NULL;
+ RTMemPageFree(pCur, sizeof(*pCur));
+ }
+}
+
+
+/**
+ * Cleans up a stream after ssmR3StrmInitInternal has been called (regardless
+ * of whether it succeeded or not).
+ *
+ * @param pStrm The stream handle.
+ */
+static void ssmR3StrmDelete(PSSMSTRM pStrm)
+{
+ RTMemPageFree(pStrm->pCur, sizeof(*pStrm->pCur));
+ pStrm->pCur = NULL;
+ ssmR3StrmDestroyBufList(pStrm->pHead);
+ pStrm->pHead = NULL;
+ ssmR3StrmDestroyBufList(pStrm->pPending);
+ pStrm->pPending = NULL;
+ ssmR3StrmDestroyBufList(pStrm->pFree);
+ pStrm->pFree = NULL;
+
+ RTSemEventDestroy(pStrm->hEvtHead);
+ pStrm->hEvtHead = NIL_RTSEMEVENT;
+
+ RTSemEventDestroy(pStrm->hEvtFree);
+ pStrm->hEvtFree = NIL_RTSEMEVENT;
+}
+
+
+/**
+ * Initializes a stream that uses a method table.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream manager structure.
+ * @param pStreamOps The stream method table.
+ * @param pvUser The user argument for the stream methods.
+ * @param fWrite Whether to open for writing or reading.
+ * @param fChecksummed Whether the stream is to be checksummed while
+ * written/read.
+ * @param cBuffers The number of buffers.
+ */
+static int ssmR3StrmInit(PSSMSTRM pStrm, PCSSMSTRMOPS pStreamOps, void *pvUser, bool fWrite, bool fChecksummed, uint32_t cBuffers)
+{
+ int rc = ssmR3StrmInitInternal(pStrm, fChecksummed, cBuffers);
+ if (RT_SUCCESS(rc))
+ {
+ pStrm->pOps = pStreamOps;
+ pStrm->pvUser = pvUser;
+ pStrm->fWrite = fWrite;
+ return VINF_SUCCESS;
+ }
+
+ ssmR3StrmDelete(pStrm);
+ pStrm->rc = rc;
+ return rc;
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnWrite
+ */
+static DECLCALLBACK(int) ssmR3FileWrite(void *pvUser, uint64_t offStream, const void *pvBuf, size_t cbToWrite)
+{
+ return RTFileWriteAt((RTFILE)(uintptr_t)pvUser, offStream, pvBuf, cbToWrite, NULL); /** @todo use RTFileWrite */
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnRead
+ */
+static DECLCALLBACK(int) ssmR3FileRead(void *pvUser, uint64_t offStream, void *pvBuf, size_t cbToRead, size_t *pcbRead)
+{
+ Assert(RTFileTell((RTFILE)(uintptr_t)pvUser) == offStream); NOREF(offStream);
+ return RTFileRead((RTFILE)(uintptr_t)pvUser, pvBuf, cbToRead, pcbRead);
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnSeek
+ */
+static DECLCALLBACK(int) ssmR3FileSeek(void *pvUser, int64_t offSeek, unsigned uMethod, uint64_t *poffActual)
+{
+ return RTFileSeek((RTFILE)(uintptr_t)pvUser, offSeek, uMethod, poffActual);
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnTell
+ */
+static DECLCALLBACK(uint64_t) ssmR3FileTell(void *pvUser)
+{
+ return RTFileTell((RTFILE)(uintptr_t)pvUser);
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnSize
+ */
+static DECLCALLBACK(int) ssmR3FileSize(void *pvUser, uint64_t *pcb)
+{
+ return RTFileQuerySize((RTFILE)(uintptr_t)pvUser, pcb);
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnIsOk
+ */
+static DECLCALLBACK(int) ssmR3FileIsOk(void *pvUser)
+{
+ /*
+ * Check that there is still some space left on the disk.
+ */
+ RTFOFF cbFree;
+ int rc = RTFileQueryFsSizes((RTFILE)(uintptr_t)pvUser, NULL, &cbFree, NULL, NULL);
+#define SSM_MIN_DISK_FREE ((RTFOFF)( 10 * _1M ))
+ if (RT_SUCCESS(rc))
+ {
+ if (cbFree < SSM_MIN_DISK_FREE)
+ {
+ LogRel(("SSM: Giving up: Low on disk space. (cbFree=%RTfoff, SSM_MIN_DISK_FREE=%RTfoff).\n",
+ cbFree, SSM_MIN_DISK_FREE));
+ rc = VERR_SSM_LOW_ON_DISK_SPACE;
+ }
+ }
+ else if (rc == VERR_NOT_SUPPORTED)
+ rc = VINF_SUCCESS;
+ else
+ AssertLogRelRC(rc);
+ return rc;
+}
+
+
+/**
+ * @copydoc SSMSTRMOPS::pfnClose
+ */
+static DECLCALLBACK(int) ssmR3FileClose(void *pvUser, bool fCancelled)
+{
+ NOREF(fCancelled);
+ return RTFileClose((RTFILE)(uintptr_t)pvUser);
+}
+
+
+/**
+ * Method table for a file based stream.
+ */
+static SSMSTRMOPS const g_ssmR3FileOps =
+{
+ SSMSTRMOPS_VERSION,
+ ssmR3FileWrite,
+ ssmR3FileRead,
+ ssmR3FileSeek,
+ ssmR3FileTell,
+ ssmR3FileSize,
+ ssmR3FileIsOk,
+ ssmR3FileClose,
+ SSMSTRMOPS_VERSION
+};
+
+
+/**
+ * Opens a file stream.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream manager structure.
+ * @param pszFilename The file to open or create.
+ * @param fWrite Whether to open for writing or reading.
+ * @param fChecksummed Whether the stream is to be checksummed while
+ * written/read.
+ * @param cBuffers The number of buffers.
+ */
+static int ssmR3StrmOpenFile(PSSMSTRM pStrm, const char *pszFilename, bool fWrite, bool fChecksummed, uint32_t cBuffers)
+{
+ int rc = ssmR3StrmInitInternal(pStrm, fChecksummed, cBuffers);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t fFlags = fWrite
+ ? RTFILE_O_READWRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE
+ : RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE;
+ RTFILE hFile;
+ rc = RTFileOpen(&hFile, pszFilename, fFlags);
+ if (RT_SUCCESS(rc))
+ {
+ pStrm->pOps = &g_ssmR3FileOps;
+ pStrm->pvUser = (void *)(uintptr_t)hFile;
+ pStrm->fWrite = fWrite;
+ return VINF_SUCCESS;
+ }
+ }
+
+ ssmR3StrmDelete(pStrm);
+ pStrm->rc = rc;
+ return rc;
+}
+
+
+/**
+ * Raise an error condition on the stream.
+ *
+ * @returns true if we raised the error condition, false if the stream already
+ * had an error condition set.
+ *
+ * @param pStrm The stream handle.
+ * @param rc The VBox error status code.
+ *
+ * @thread Any.
+ */
+DECLINLINE(bool) ssmR3StrmSetError(PSSMSTRM pStrm, int rc)
+{
+ Assert(RT_FAILURE_NP(rc));
+ return ASMAtomicCmpXchgS32(&pStrm->rc, rc, VINF_SUCCESS);
+}
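+
+/* Only the first failure is latched; later calls are no-ops, e.g.:
+
+ ssmR3StrmSetError(pStrm, VERR_WRITE_ERROR); // returns true, rc is latched
+ ssmR3StrmSetError(pStrm, VERR_EOF); // returns false, rc unchanged
+
+ (illustrative; follows from the CmpXchg against VINF_SUCCESS above). */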
+
+
+/**
+ * Puts a buffer into the free list.
+ *
+ * @param pStrm The stream handle.
+ * @param pBuf The buffer.
+ *
+ * @thread The consumer.
+ */
+static void ssmR3StrmPutFreeBuf(PSSMSTRM pStrm, PSSMSTRMBUF pBuf)
+{
+ for (;;)
+ {
+ PSSMSTRMBUF pCurFreeHead = ASMAtomicUoReadPtrT(&pStrm->pFree, PSSMSTRMBUF);
+ ASMAtomicUoWritePtr(&pBuf->pNext, pCurFreeHead);
+ if (ASMAtomicCmpXchgPtr(&pStrm->pFree, pBuf, pCurFreeHead))
+ {
+ int rc = RTSemEventSignal(pStrm->hEvtFree);
+ AssertRC(rc);
+ return;
+ }
+ }
+}
+
+
+/**
+ * Gets a free buffer, waits for one if necessary.
+ *
+ * @returns Pointer to the buffer on success. NULL if we're terminating.
+ * @param pStrm The stream handle.
+ *
+ * @thread The producer.
+ */
+static PSSMSTRMBUF ssmR3StrmGetFreeBuf(PSSMSTRM pStrm)
+{
+ for (;;)
+ {
+ PSSMSTRMBUF pMine = ASMAtomicUoReadPtrT(&pStrm->pFree, PSSMSTRMBUF);
+ if (!pMine)
+ {
+ if (pStrm->fTerminating)
+ return NULL;
+ if (RT_FAILURE(pStrm->rc))
+ return NULL;
+ if ( pStrm->fWrite
+ && pStrm->hIoThread == NIL_RTTHREAD)
+ {
+ int rc = ssmR3StrmWriteBuffers(pStrm);
+ if (RT_FAILURE(rc))
+ return NULL;
+ }
+ int rc = RTSemEventWaitNoResume(pStrm->hEvtFree, 30000);
+ if ( rc == VERR_SEM_DESTROYED
+ || pStrm->fTerminating)
+ return NULL;
+ continue;
+ }
+
+ if (ASMAtomicCmpXchgPtr(&pStrm->pFree, pMine->pNext, pMine))
+ {
+ pMine->offStream = UINT64_MAX;
+ pMine->cb = 0;
+ pMine->pNext = NULL;
+ pMine->fEndOfStream = false;
+ pMine->NanoTS = RTTimeNanoTS();
+ return pMine;
+ }
+ }
+}
+
+
+/**
+ * Puts a buffer onto the queue.
+ *
+ * @param pStrm The stream handle.
+ * @param pBuf The stream buffer to put.
+ *
+ * @thread The producer.
+ */
+static void ssmR3StrmPutBuf(PSSMSTRM pStrm, PSSMSTRMBUF pBuf)
+{
+ for (;;)
+ {
+ PSSMSTRMBUF pCurHead = ASMAtomicUoReadPtrT(&pStrm->pHead, PSSMSTRMBUF);
+ ASMAtomicUoWritePtr(&pBuf->pNext, pCurHead);
+ if (ASMAtomicCmpXchgPtr(&pStrm->pHead, pBuf, pCurHead))
+ {
+ int rc = RTSemEventSignal(pStrm->hEvtHead);
+ AssertRC(rc);
+ return;
+ }
+ }
+}
+
+
+/**
+ * Reverses the list.
+ *
+ * @returns The head of the reversed list.
+ * @param pHead The head of the list to reverse.
+ */
+static PSSMSTRMBUF ssmR3StrmReverseList(PSSMSTRMBUF pHead)
+{
+ PSSMSTRMBUF pRevHead = NULL;
+ while (pHead)
+ {
+ PSSMSTRMBUF pCur = pHead;
+ pHead = pCur->pNext;
+ pCur->pNext = pRevHead;
+ pRevHead = pCur;
+ }
+ return pRevHead;
+}
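+
+/* Why the reversal: the producer pushes onto a LIFO (see ssmR3StrmPutBuf),
+ so buffers queued in the order A, B, C link up as C->B->A; reversing here
+ restores the FIFO order A->B->C that the consumer expects. */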
+
+
+/**
+ * Gets one buffer from the queue, will wait for one to become ready if
+ * necessary.
+ *
+ * @returns Pointer to the buffer on success. NULL if we're terminating.
+ * @param pStrm The stream handle.
+ *
+ * @thread The consumer.
+ */
+static PSSMSTRMBUF ssmR3StrmGetBuf(PSSMSTRM pStrm)
+{
+ for (;;)
+ {
+ PSSMSTRMBUF pMine = pStrm->pPending;
+ if (pMine)
+ {
+ pStrm->pPending = pMine->pNext;
+ pMine->pNext = NULL;
+ return pMine;
+ }
+
+ pMine = ASMAtomicXchgPtrT(&pStrm->pHead, NULL, PSSMSTRMBUF);
+ if (pMine)
+ pStrm->pPending = ssmR3StrmReverseList(pMine);
+ else
+ {
+ if (pStrm->fTerminating)
+ return NULL;
+ if (RT_FAILURE(pStrm->rc))
+ return NULL;
+ if ( !pStrm->fWrite
+ && pStrm->hIoThread == NIL_RTTHREAD)
+ {
+ int rc = ssmR3StrmReadMore(pStrm);
+ if (RT_FAILURE(rc))
+ return NULL;
+ continue;
+ }
+
+ int rc = RTSemEventWaitNoResume(pStrm->hEvtHead, 30000);
+ if ( rc == VERR_SEM_DESTROYED
+ || pStrm->fTerminating)
+ return NULL;
+ }
+ }
+}
+
+
+/**
+ * Flushes the current buffer (both write and read streams).
+ *
+ * @param pStrm The stream handle.
+ */
+static void ssmR3StrmFlushCurBuf(PSSMSTRM pStrm)
+{
+ if (pStrm->pCur)
+ {
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ pStrm->pCur = NULL;
+
+ if (pStrm->fWrite)
+ {
+ uint32_t cb = pStrm->off;
+ pBuf->cb = cb;
+ pBuf->offStream = pStrm->offCurStream;
+ if ( pStrm->fChecksummed
+ && pStrm->offStreamCRC < cb)
+ pStrm->u32StreamCRC = RTCrc32Process(pStrm->u32StreamCRC,
+ &pBuf->abData[pStrm->offStreamCRC],
+ cb - pStrm->offStreamCRC);
+ pStrm->offCurStream += cb;
+ pStrm->off = 0;
+ pStrm->offStreamCRC = 0;
+
+ ssmR3StrmPutBuf(pStrm, pBuf);
+ }
+ else
+ {
+ uint32_t cb = pBuf->cb;
+ if ( pStrm->fChecksummed
+ && pStrm->offStreamCRC < cb)
+ pStrm->u32StreamCRC = RTCrc32Process(pStrm->u32StreamCRC,
+ &pBuf->abData[pStrm->offStreamCRC],
+ cb - pStrm->offStreamCRC);
+ pStrm->offCurStream += cb;
+ pStrm->off = 0;
+ pStrm->offStreamCRC = 0;
+
+ ssmR3StrmPutFreeBuf(pStrm, pBuf);
+ }
+ }
+}
+
+
+/**
+ * Flush buffered data.
+ *
+ * @returns VBox status code. Returns VINF_EOF if we encounter a buffer with the
+ * fEndOfStream indicator set.
+ * @param pStrm The stream handle.
+ *
+ * @thread The producer thread.
+ */
+static int ssmR3StrmWriteBuffers(PSSMSTRM pStrm)
+{
+ Assert(pStrm->fWrite);
+
+ /*
+ * Just return if the stream has a pending error condition.
+ */
+ int rc = pStrm->rc;
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Grab the pending list and write it out.
+ */
+ PSSMSTRMBUF pHead = ASMAtomicXchgPtrT(&pStrm->pHead, NULL, PSSMSTRMBUF);
+ if (!pHead)
+ return VINF_SUCCESS;
+ pHead = ssmR3StrmReverseList(pHead);
+
+ while (pHead)
+ {
+ /* pop */
+ PSSMSTRMBUF pCur = pHead;
+ pHead = pCur->pNext;
+
+ /* flush */
+ rc = pStrm->pOps->pfnIsOk(pStrm->pvUser);
+ if (RT_SUCCESS(rc))
+ rc = pStrm->pOps->pfnWrite(pStrm->pvUser, pCur->offStream, &pCur->abData[0], pCur->cb);
+ if ( RT_FAILURE(rc)
+ && ssmR3StrmSetError(pStrm, rc))
+ LogRel(("ssmR3StrmWriteBuffers: Write failed with rc=%Rrc at offStream=%#llx\n", rc, pCur->offStream));
+
+ /* free */
+ bool fEndOfStream = pCur->fEndOfStream;
+ ssmR3StrmPutFreeBuf(pStrm, pCur);
+ if (fEndOfStream)
+ {
+ Assert(!pHead);
+ return VINF_EOF;
+ }
+ }
+
+ return pStrm->rc;
+}
+
+
+/**
+ * Closes the stream after first flushing any pending write.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param fCancelled Indicates whether the operation was cancelled or
+ * not.
+ */
+static int ssmR3StrmClose(PSSMSTRM pStrm, bool fCancelled)
+{
+ /*
+ * Flush, terminate the I/O thread, and close the stream.
+ */
+ if (pStrm->fWrite)
+ {
+ ssmR3StrmFlushCurBuf(pStrm);
+ if (pStrm->hIoThread == NIL_RTTHREAD)
+ ssmR3StrmWriteBuffers(pStrm);
+ }
+
+ if (pStrm->hIoThread != NIL_RTTHREAD)
+ ASMAtomicWriteBool(&pStrm->fTerminating, true);
+
+ int rc;
+ if (pStrm->fWrite)
+ {
+ if (pStrm->hIoThread != NIL_RTTHREAD)
+ {
+ int rc2 = RTSemEventSignal(pStrm->hEvtHead);
+ AssertLogRelRC(rc2);
+ int rc3 = RTThreadWait(pStrm->hIoThread, RT_INDEFINITE_WAIT, NULL);
+ AssertLogRelRC(rc3);
+ pStrm->hIoThread = NIL_RTTHREAD;
+ }
+
+ rc = pStrm->pOps->pfnClose(pStrm->pvUser, fCancelled);
+ if (RT_FAILURE(rc))
+ ssmR3StrmSetError(pStrm, rc);
+ }
+ else
+ {
+ rc = pStrm->pOps->pfnClose(pStrm->pvUser, fCancelled);
+ if (RT_FAILURE(rc))
+ ssmR3StrmSetError(pStrm, rc);
+
+ if (pStrm->hIoThread != NIL_RTTHREAD)
+ {
+ int rc2 = RTSemEventSignal(pStrm->hEvtFree);
+ AssertLogRelRC(rc2);
+ int rc3 = RTThreadWait(pStrm->hIoThread, RT_INDEFINITE_WAIT, NULL);
+ AssertLogRelRC(rc3);
+ pStrm->hIoThread = NIL_RTTHREAD;
+ }
+ }
+
+ pStrm->pOps = NULL;
+ pStrm->pvUser = NULL;
+
+ rc = pStrm->rc;
+ ssmR3StrmDelete(pStrm);
+
+ return rc;
+}
+
+#ifndef SSM_STANDALONE
+
+/**
+ * Stream output routine.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param pvBuf What to write.
+ * @param cbToWrite How much to write.
+ *
+ * @thread The producer in a write stream (never the I/O thread).
+ */
+static int ssmR3StrmWrite(PSSMSTRM pStrm, const void *pvBuf, size_t cbToWrite)
+{
+ AssertReturn(cbToWrite > 0, VINF_SUCCESS);
+ Assert(pStrm->fWrite);
+
+ /*
+ * Squeeze as much as possible into the current buffer.
+ */
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ if (RT_LIKELY(pBuf))
+ {
+ uint32_t cbLeft = RT_SIZEOFMEMB(SSMSTRMBUF, abData) - pStrm->off;
+ if (RT_LIKELY(cbLeft >= cbToWrite))
+ {
+ memcpy(&pBuf->abData[pStrm->off], pvBuf, cbToWrite);
+ pStrm->off += (uint32_t)cbToWrite;
+ return VINF_SUCCESS;
+ }
+
+ if (cbLeft > 0)
+ {
+ memcpy(&pBuf->abData[pStrm->off], pvBuf, cbLeft);
+ pStrm->off += cbLeft;
+ cbToWrite -= cbLeft;
+ pvBuf = (uint8_t const *)pvBuf + cbLeft;
+ }
+ Assert(pStrm->off == RT_SIZEOFMEMB(SSMSTRMBUF, abData));
+ }
+
+ /*
+ * Need one or more new buffers.
+ */
+ do
+ {
+ /*
+ * Flush the current buffer and replace it with a new one.
+ */
+ ssmR3StrmFlushCurBuf(pStrm);
+ pBuf = ssmR3StrmGetFreeBuf(pStrm);
+ if (!pBuf)
+ break;
+ pStrm->pCur = pBuf;
+ Assert(pStrm->off == 0);
+
+ /*
+ * Copy data to the buffer.
+ */
+ uint32_t cbCopy = RT_SIZEOFMEMB(SSMSTRMBUF, abData);
+ if (cbCopy > cbToWrite)
+ cbCopy = (uint32_t)cbToWrite;
+ memcpy(&pBuf->abData[0], pvBuf, cbCopy);
+ pStrm->off = cbCopy;
+ cbToWrite -= cbCopy;
+ pvBuf = (uint8_t const *)pvBuf + cbCopy;
+ } while (cbToWrite > 0);
+
+ return pStrm->rc;
+}
+
+
+/**
+ * Reserves space in the current buffer so the caller can write directly to the
+ * buffer instead of doing double buffering.
+ *
+ * @returns VBox status code
+ * @param pStrm The stream handle.
+ * @param cb The amount of buffer space to reserve.
+ * @param ppb Where to return the pointer.
+ */
+static int ssmR3StrmReserveWriteBufferSpace(PSSMSTRM pStrm, size_t cb, uint8_t **ppb)
+{
+ Assert(pStrm->fWrite);
+ Assert(RT_SIZEOFMEMB(SSMSTRMBUF, abData) / 4 >= cb);
+
+ /*
+ * Check if there is room in the current buffer; if not, flush it.
+ */
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ if (pBuf)
+ {
+ uint32_t cbLeft = RT_SIZEOFMEMB(SSMSTRMBUF, abData) - pStrm->off;
+ if (cbLeft >= cb)
+ {
+ *ppb = &pBuf->abData[pStrm->off];
+ return VINF_SUCCESS;
+ }
+
+ ssmR3StrmFlushCurBuf(pStrm);
+ }
+
+ /*
+ * Get a fresh buffer and return a pointer into it.
+ */
+ pBuf = ssmR3StrmGetFreeBuf(pStrm);
+ if (pBuf)
+ {
+ pStrm->pCur = pBuf;
+ Assert(pStrm->off == 0);
+ *ppb = &pBuf->abData[0];
+ }
+ else
+ *ppb = NULL; /* make gcc happy. */
+ return pStrm->rc;
+}
+
+
+/**
+ * Commits buffer space reserved by ssmR3StrmReserveWriteBufferSpace.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param cb The amount of buffer space to commit. This can be less
+ * than what was reserved initially.
+ */
+static int ssmR3StrmCommitWriteBufferSpace(PSSMSTRM pStrm, size_t cb)
+{
+ Assert(pStrm->pCur);
+ Assert(pStrm->off + cb <= RT_SIZEOFMEMB(SSMSTRMBUF, abData));
+ pStrm->off += (uint32_t)cb;
+ return VINF_SUCCESS;
+}
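+
+/* Typical pairing of the reserve/commit helpers above (sketch; putSomething
+ is a hypothetical encoder returning the byte count it produced):
+
+ uint8_t *pb;
+ int rc = ssmR3StrmReserveWriteBufferSpace(pStrm, cbNeeded, &pb);
+ if (RT_SUCCESS(rc))
+ {
+ size_t cbUsed = putSomething(pb, cbNeeded);
+ rc = ssmR3StrmCommitWriteBufferSpace(pStrm, cbUsed); // cbUsed <= cbNeeded
+ }
+ */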
+
+
+/**
+ * Marks the end of the stream.
+ *
+ * This will cause the I/O thread to quit waiting for more buffers.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ */
+static int ssmR3StrmSetEnd(PSSMSTRM pStrm)
+{
+ Assert(pStrm->fWrite);
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ if (RT_UNLIKELY(!pStrm->pCur))
+ {
+ pBuf = ssmR3StrmGetFreeBuf(pStrm);
+ if (!pBuf)
+ return pStrm->rc;
+ pStrm->pCur = pBuf;
+ Assert(pStrm->off == 0);
+ }
+ pBuf->fEndOfStream = true;
+ ssmR3StrmFlushCurBuf(pStrm);
+ return VINF_SUCCESS;
+}
+
+#endif /* !SSM_STANDALONE */
+
+/**
+ * Read more from the stream.
+ *
+ * @returns VBox status code. VERR_EOF gets translated into VINF_EOF.
+ * @param pStrm The stream handle.
+ *
+ * @thread The I/O thread when we got one, otherwise the stream user.
+ */
+static int ssmR3StrmReadMore(PSSMSTRM pStrm)
+{
+ int rc;
+ Log6(("ssmR3StrmReadMore:\n"));
+
+ /*
+ * Undo seek done by ssmR3StrmPeekAt.
+ */
+ if (pStrm->fNeedSeek)
+ {
+ rc = pStrm->pOps->pfnSeek(pStrm->pvUser, pStrm->offNeedSeekTo, RTFILE_SEEK_BEGIN, NULL);
+ if (RT_FAILURE(rc))
+ {
+ if (ssmR3StrmSetError(pStrm, rc))
+ LogRel(("ssmR3StrmReadMore: RTFileSeek(,%#llx,) failed with rc=%Rrc\n", pStrm->offNeedSeekTo, rc));
+ return rc;
+ }
+ pStrm->fNeedSeek = false;
+ pStrm->offNeedSeekTo = UINT64_MAX;
+ }
+
+ /*
+ * Get a free buffer and try fill it up.
+ */
+ PSSMSTRMBUF pBuf = ssmR3StrmGetFreeBuf(pStrm);
+ if (!pBuf)
+ return pStrm->rc;
+
+ pBuf->offStream = pStrm->pOps->pfnTell(pStrm->pvUser);
+ size_t cbRead = sizeof(pBuf->abData);
+ rc = pStrm->pOps->pfnRead(pStrm->pvUser, pBuf->offStream, &pBuf->abData[0], cbRead, &cbRead);
+ if ( RT_SUCCESS(rc)
+ && cbRead > 0)
+ {
+ pBuf->cb = (uint32_t)cbRead;
+ pBuf->fEndOfStream = false;
+ Log6(("ssmR3StrmReadMore: %#010llx %#x\n", pBuf->offStream, pBuf->cb));
+ ssmR3StrmPutBuf(pStrm, pBuf);
+ }
+ else if ( ( RT_SUCCESS_NP(rc)
+ && cbRead == 0)
+ || rc == VERR_EOF)
+ {
+ pBuf->cb = 0;
+ pBuf->fEndOfStream = true;
+ Log6(("ssmR3StrmReadMore: %#010llx 0 EOF!\n", pBuf->offStream));
+ ssmR3StrmPutBuf(pStrm, pBuf);
+ rc = VINF_EOF;
+ }
+ else
+ {
+ Log6(("ssmR3StrmReadMore: %#010llx rc=%Rrc!\n", pBuf->offStream, rc));
+ if (ssmR3StrmSetError(pStrm, rc))
+ LogRel(("ssmR3StrmReadMore: RTFileRead(,,%#x,) -> %Rrc at offset %#llx\n",
+ sizeof(pBuf->abData), rc, pBuf->offStream));
+ ssmR3StrmPutFreeBuf(pStrm, pBuf);
+ }
+ return rc;
+}
+
+
+/**
+ * Stream input routine.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param pvBuf Where to put what we read.
+ * @param cbToRead How much to read.
+ */
+static int ssmR3StrmRead(PSSMSTRM pStrm, void *pvBuf, size_t cbToRead)
+{
+ AssertReturn(cbToRead > 0, VINF_SUCCESS);
+ Assert(!pStrm->fWrite);
+
+ /*
+ * Read from the current buffer if we got one.
+ */
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ if (RT_LIKELY(pBuf))
+ {
+ Assert(pStrm->off <= pBuf->cb);
+ uint32_t cbLeft = pBuf->cb - pStrm->off;
+ if (cbLeft >= cbToRead)
+ {
+ memcpy(pvBuf, &pBuf->abData[pStrm->off], cbToRead);
+ pStrm->off += (uint32_t)cbToRead;
+ Assert(pStrm->off <= pBuf->cb);
+ return VINF_SUCCESS;
+ }
+ if (cbLeft)
+ {
+ memcpy(pvBuf, &pBuf->abData[pStrm->off], cbLeft);
+ pStrm->off += cbLeft;
+ cbToRead -= cbLeft;
+ pvBuf = (uint8_t *)pvBuf + cbLeft;
+ }
+ else if (pBuf->fEndOfStream)
+ return VERR_EOF;
+ Assert(pStrm->off == pBuf->cb);
+ }
+
+ /*
+ * Get more buffers from the stream.
+ */
+ int rc = VINF_SUCCESS;
+ do
+ {
+ /*
+ * Check for EOF first - never flush the EOF buffer.
+ */
+ if ( pBuf
+ && pBuf->fEndOfStream)
+ return VERR_EOF;
+
+ /*
+ * Flush the current buffer and get the next one.
+ */
+ ssmR3StrmFlushCurBuf(pStrm);
+ pBuf = ssmR3StrmGetBuf(pStrm);
+ if (!pBuf)
+ {
+ rc = pStrm->rc;
+ break;
+ }
+ pStrm->pCur = pBuf;
+ Assert(pStrm->off == 0);
+ Assert(pStrm->offCurStream == pBuf->offStream);
+ if (!pBuf->cb)
+ {
+ Assert(pBuf->fEndOfStream);
+ return VERR_EOF;
+ }
+
+ /*
+ * Read data from the buffer.
+ */
+ uint32_t cbCopy = pBuf->cb;
+ if (cbCopy > cbToRead)
+ cbCopy = (uint32_t)cbToRead;
+ memcpy(pvBuf, &pBuf->abData[0], cbCopy);
+ pStrm->off = cbCopy;
+ cbToRead -= cbCopy;
+ pvBuf = (uint8_t *)pvBuf + cbCopy;
+ Assert(!pStrm->pCur || pStrm->off <= pStrm->pCur->cb);
+ } while (cbToRead > 0);
+
+ return rc;
+}
+
+
+/**
+ * Reads data from the stream but instead of copying it to some output buffer
+ * the caller gets a pointer into the current stream buffer.
+ *
+ * The returned pointer becomes invalid after the next stream operation!
+ *
+ * @returns Pointer to the read data residing in the stream buffer. NULL is
+ * returned if the requested amount of data isn't available in the
+ * buffer. The caller must fall back on ssmR3StrmRead when this
+ * happens.
+ *
+ * @param pStrm The stream handle.
+ * @param cbToRead The number of bytes to read.
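+ *
+ * @par Example
+ *      Hypothetical zero-copy read with the documented fallback (sketch
+ *      only, sizes are made up):
+ * @code
+ *      uint8_t        abTmp[16];
+ *      uint8_t const *pb = ssmR3StrmReadDirect(pStrm, sizeof(abTmp));
+ *      if (!pb)
+ *      {
+ *          rc = ssmR3StrmRead(pStrm, abTmp, sizeof(abTmp));
+ *          if (RT_FAILURE(rc))
+ *              return rc;
+ *          pb = abTmp;
+ *      }
+ * @endcode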
+ */
+static uint8_t const *ssmR3StrmReadDirect(PSSMSTRM pStrm, size_t cbToRead)
+{
+ AssertReturn(cbToRead > 0, NULL);
+ Assert(!pStrm->fWrite);
+
+ /*
+ * Too lazy to fetch more data for the odd case that we're
+ * exactly at the boundary between two buffers.
+ */
+ PSSMSTRMBUF pBuf = pStrm->pCur;
+ if (RT_LIKELY(pBuf))
+ {
+ Assert(pStrm->off <= pBuf->cb);
+ uint32_t cbLeft = pBuf->cb - pStrm->off;
+ if (cbLeft >= cbToRead)
+ {
+ uint8_t const *pb = &pBuf->abData[pStrm->off];
+ pStrm->off += (uint32_t)cbToRead;
+ Assert(pStrm->off <= pBuf->cb);
+ return pb;
+ }
+ }
+ return NULL;
+}
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Check that the stream is OK and flush data that is getting old.
+ *
+ * The check is mainly for detecting cancellation and out-of-space
+ * conditions.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ */
+static int ssmR3StrmCheckAndFlush(PSSMSTRM pStrm)
+{
+ int rc = pStrm->pOps->pfnIsOk(pStrm->pvUser);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ if ( pStrm->fWrite
+ && pStrm->hIoThread != NIL_RTTHREAD
+ && !pStrm->pHead /* the worker is probably idle */
+ && pStrm->pCur
+ && RTTimeNanoTS() - pStrm->pCur->NanoTS > 500*1000*1000 /* 0.5s */
+ )
+ ssmR3StrmFlushCurBuf(pStrm);
+ return VINF_SUCCESS;
+}
+#endif /* !SSM_STANDALONE */
+
+
+#if !defined(SSM_STANDALONE) || defined(LOG_ENABLED)
+/**
+ * Tell current stream position.
+ *
+ * @returns stream position.
+ * @param pStrm The stream handle.
+ */
+static uint64_t ssmR3StrmTell(PSSMSTRM pStrm)
+{
+ return pStrm->offCurStream + pStrm->off;
+}
+#endif
+
+
+/**
+ * Gets the intermediate stream CRC up to the current position.
+ *
+ * @returns CRC.
+ * @param pStrm The stream handle.
+ */
+static uint32_t ssmR3StrmCurCRC(PSSMSTRM pStrm)
+{
+ if (!pStrm->fChecksummed)
+ return 0;
+ if (pStrm->offStreamCRC < pStrm->off)
+ {
+ PSSMSTRMBUF pBuf = pStrm->pCur; Assert(pBuf);
+ pStrm->u32StreamCRC = RTCrc32Process(pStrm->u32StreamCRC, &pBuf->abData[pStrm->offStreamCRC], pStrm->off - pStrm->offStreamCRC);
+ pStrm->offStreamCRC = pStrm->off;
+ }
+ else
+ Assert(pStrm->offStreamCRC == pStrm->off);
+ return pStrm->u32StreamCRC;
+}
+
+
+/**
+ * Gets the final stream CRC up to the current position.
+ *
+ * @returns CRC.
+ * @param pStrm The stream handle.
+ */
+static uint32_t ssmR3StrmFinalCRC(PSSMSTRM pStrm)
+{
+ if (!pStrm->fChecksummed)
+ return 0;
+ return RTCrc32Finish(ssmR3StrmCurCRC(pStrm));
+}
+
+
+/**
+ * Disables checksumming of the stream.
+ *
+ * @param pStrm The stream handle.
+ */
+static void ssmR3StrmDisableChecksumming(PSSMSTRM pStrm)
+{
+ pStrm->fChecksummed = false;
+}
+
+
+/**
+ * Used by SSMR3Seek to position the stream at the new unit.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param off The seek offset.
+ * @param uMethod The seek method.
+ * @param u32CurCRC The current CRC at the seek position.
+ */
+static int ssmR3StrmSeek(PSSMSTRM pStrm, int64_t off, uint32_t uMethod, uint32_t u32CurCRC)
+{
+ AssertReturn(!pStrm->fWrite, VERR_NOT_SUPPORTED);
+ AssertReturn(pStrm->hIoThread == NIL_RTTHREAD, VERR_WRONG_ORDER);
+
+ uint64_t offStream;
+ int rc = pStrm->pOps->pfnSeek(pStrm->pvUser, off, uMethod, &offStream);
+ if (RT_SUCCESS(rc))
+ {
+ pStrm->fNeedSeek = false;
+ pStrm->offNeedSeekTo = UINT64_MAX;
+ pStrm->offCurStream = offStream;
+ pStrm->off = 0;
+ pStrm->offStreamCRC = 0;
+ if (pStrm->fChecksummed)
+ pStrm->u32StreamCRC = u32CurCRC;
+ if (pStrm->pCur)
+ {
+ ssmR3StrmPutFreeBuf(pStrm, pStrm->pCur);
+ pStrm->pCur = NULL;
+ }
+ if (pStrm->pPending)
+ {
+ ssmR3StrmDestroyBufList(pStrm->pPending);
+ pStrm->pPending = NULL;
+ }
+ if (pStrm->pHead)
+ {
+ ssmR3StrmDestroyBufList(pStrm->pHead);
+ pStrm->pHead = NULL;
+ }
+ }
+ return rc;
+}
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Skip some bytes in the stream.
+ *
+ * This is only used if someone didn't read all of their data in the V1 format,
+ * so don't bother making this very efficient yet.
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle.
+ * @param offDst The destination offset.
+ */
+static int ssmR3StrmSkipTo(PSSMSTRM pStrm, uint64_t offDst)
+{
+ /* dead simple - lazy bird! */
+ for (;;)
+ {
+ uint64_t offCur = ssmR3StrmTell(pStrm);
+ AssertReturn(offCur <= offDst, VERR_SSM_SKIP_BACKWARDS);
+ if (offCur == offDst)
+ return VINF_SUCCESS;
+
+ uint8_t abBuf[4096];
+ size_t cbToRead = RT_MIN(sizeof(abBuf), offDst - offCur);
+ int rc = ssmR3StrmRead(pStrm, abBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+}
+#endif /* !SSM_STANDALONE */
+
+
+/**
+ * Get the size of the file.
+ *
+ * This does not work for non-file streams!
+ *
+ * @returns The file size, or UINT64_MAX if not a file stream.
+ * @param pStrm The stream handle.
+ */
+static uint64_t ssmR3StrmGetSize(PSSMSTRM pStrm)
+{
+ uint64_t cbFile;
+ int rc = pStrm->pOps->pfnSize(pStrm->pvUser, &cbFile);
+ AssertLogRelRCReturn(rc, UINT64_MAX);
+ return cbFile;
+}
+
+
+/**
+ * Tests if the stream is a file stream or not.
+ *
+ * @returns true / false.
+ * @param pStrm The stream handle.
+ */
+static bool ssmR3StrmIsFile(PSSMSTRM pStrm)
+{
+ return pStrm->pOps == &g_ssmR3FileOps;
+}
+
+
+/**
+ * Peeks at data in a file stream without buffering anything (or upsetting
+ * the buffering for that matter).
+ *
+ * @returns VBox status code.
+ * @param pStrm The stream handle
+ * @param off The offset to start peeking at. Use a negative offset to
+ * peek at something relative to the end of the file.
+ * @param pvBuf Output buffer.
+ * @param cbToRead How much to read.
+ * @param poff Where to optionally store the position. Useful when
+ * using a negative off.
+ *
+ * @remarks Failures occurring while peeking will not be raised on the stream.
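+ *
+ * @par Example
+ *      Peeking at a footer-sized blob at the end of the file (illustrative
+ *      sketch only; SSMFILEFTR is the footer type used by the writer code):
+ * @code
+ *      SSMFILEFTR Footer;
+ *      uint64_t   offFooter;
+ *      rc = ssmR3StrmPeekAt(pStrm, -(RTFOFF)sizeof(Footer), &Footer, sizeof(Footer), &offFooter);
+ * @endcode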
+ */
+static int ssmR3StrmPeekAt(PSSMSTRM pStrm, RTFOFF off, void *pvBuf, size_t cbToRead, uint64_t *poff)
+{
+ AssertReturn(!pStrm->fWrite, VERR_NOT_SUPPORTED);
+ AssertReturn(pStrm->hIoThread == NIL_RTTHREAD, VERR_WRONG_ORDER);
+
+ if (!pStrm->fNeedSeek)
+ {
+ pStrm->fNeedSeek = true;
+ pStrm->offNeedSeekTo = pStrm->offCurStream + (pStrm->pCur ? pStrm->pCur->cb : 0);
+ }
+ uint64_t offActual;
+ int rc = pStrm->pOps->pfnSeek(pStrm->pvUser, off, off >= 0 ? RTFILE_SEEK_BEGIN : RTFILE_SEEK_END, &offActual);
+ if (RT_SUCCESS(rc))
+ {
+ if (poff)
+ *poff = offActual;
+ rc = pStrm->pOps->pfnRead(pStrm->pvUser, offActual, pvBuf, cbToRead, NULL);
+ }
+
+ return rc;
+}
+
+#ifndef SSM_STANDALONE
+
+/**
+ * The I/O thread.
+ *
+ * @returns VINF_SUCCESS (ignored).
+ * @param hSelf The thread handle.
+ * @param pvStrm The stream handle.
+ */
+static DECLCALLBACK(int) ssmR3StrmIoThread(RTTHREAD hSelf, void *pvStrm)
+{
+ PSSMSTRM pStrm = (PSSMSTRM)pvStrm;
+ ASMAtomicWriteHandle(&pStrm->hIoThread, hSelf); /* paranoia */
+
+ Log(("ssmR3StrmIoThread: starts working\n"));
+ if (pStrm->fWrite)
+ {
+ /*
+ * Write until error or terminated.
+ */
+ for (;;)
+ {
+ int rc = ssmR3StrmWriteBuffers(pStrm);
+ if ( RT_FAILURE(rc)
+ || rc == VINF_EOF)
+ {
+ Log(("ssmR3StrmIoThread: quitting writing with rc=%Rrc.\n", rc));
+ break;
+ }
+ if (RT_FAILURE(pStrm->rc))
+ {
+ Log(("ssmR3StrmIoThread: quitting writing with stream rc=%Rrc\n", pStrm->rc));
+ break;
+ }
+
+ if (ASMAtomicReadBool(&pStrm->fTerminating))
+ {
+ if (!ASMAtomicReadPtrT(&pStrm->pHead, PSSMSTRMBUF))
+ {
+ Log(("ssmR3StrmIoThread: quitting writing because of pending termination.\n"));
+ break;
+ }
+ Log(("ssmR3StrmIoThread: postponing termination because of pending buffers.\n"));
+ }
+ else if (!ASMAtomicReadPtrT(&pStrm->pHead, PSSMSTRMBUF))
+ {
+ rc = RTSemEventWait(pStrm->hEvtHead, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ }
+ }
+
+ if (!ASMAtomicReadBool(&pStrm->fTerminating))
+ RTSemEventSignal(pStrm->hEvtFree);
+ }
+ else
+ {
+ /*
+ * Read until end of file, error or termination.
+ */
+ for (;;)
+ {
+ if (ASMAtomicReadBool(&pStrm->fTerminating))
+ {
+ Log(("ssmR3StrmIoThread: quitting reading because of pending termination.\n"));
+ break;
+ }
+
+ int rc = ssmR3StrmReadMore(pStrm);
+ if ( RT_FAILURE(rc)
+ || rc == VINF_EOF)
+ {
+ Log(("ssmR3StrmIoThread: quitting reading with rc=%Rrc\n", rc));
+ break;
+ }
+ if (RT_FAILURE(pStrm->rc))
+ {
+ Log(("ssmR3StrmIoThread: quitting reading with stream rc=%Rrc\n", pStrm->rc));
+ break;
+ }
+ }
+
+ if (!ASMAtomicReadBool(&pStrm->fTerminating))
+ RTSemEventSignal(pStrm->hEvtHead);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Starts the I/O thread for the specified stream.
+ *
+ * @param pStrm The stream handle.
+ */
+static void ssmR3StrmStartIoThread(PSSMSTRM pStrm)
+{
+ Assert(pStrm->hIoThread == NIL_RTTHREAD);
+
+ RTTHREAD hThread;
+ int rc = RTThreadCreate(&hThread, ssmR3StrmIoThread, pStrm, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "SSM-IO");
+ AssertRCReturnVoid(rc);
+ ASMAtomicWriteHandle(&pStrm->hIoThread, hThread); /* paranoia */
+}
+
+
+/**
+ * Stops the I/O thread.
+ *
+ * @param pStrm The stream handle.
+ */
+static void ssmR3StrmStopIoThread(PSSMSTRM pStrm)
+{
+ LogFlow(("ssmR3StrmStopIoThread: %p\n", pStrm->hIoThread));
+ if (pStrm->hIoThread != NIL_RTTHREAD)
+ {
+ /*
+ * Signal the I/O thread and wait for it to complete.
+ */
+ ASMAtomicWriteBool(&pStrm->fTerminating, true);
+ if (pStrm->fWrite)
+ {
+ int rc1 = RTSemEventSignal(pStrm->hEvtHead);
+ AssertLogRelRC(rc1);
+ }
+ else
+ {
+ int rc2 = RTSemEventSignal(pStrm->hEvtFree);
+ AssertLogRelRC(rc2);
+ }
+ int rc3 = RTThreadWait(pStrm->hIoThread, RT_INDEFINITE_WAIT, NULL);
+ AssertLogRelRC(rc3);
+ pStrm->hIoThread = NIL_RTTHREAD;
+ pStrm->fTerminating = false; /* Can't read stuff otherwise. */
+ }
+}
+
+#endif /* !SSM_STANDALONE */
+
+/**
+ * Works the progress calculation for non-live saves and restores.
+ *
+ * @param pSSM The SSM handle.
+ * @param cbAdvance Number of bytes to advance (within the current unit).
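+ *
+ * @par Example
+ *      Illustrative numbers only: with uPercentPrepare=5, uPercentLive=0,
+ *      uPercentDone=2 and cbEstTotal=100 MB, stepping uPercent to 50 sets
+ *      offEstProgress = (50 - 5 - 0) * 100 MB / (100 - 2 - 5 - 0), so the
+ *      next callback fires after roughly 48.4 MB of the estimate.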
+ */
+static void ssmR3ProgressByByte(PSSMHANDLE pSSM, uint64_t cbAdvance)
+{
+ if (!pSSM->fLiveSave)
+ {
+ /* Can't advance it beyond the estimated end of the unit. */
+ uint64_t cbLeft = pSSM->offEstUnitEnd - pSSM->offEst;
+ if (cbAdvance > cbLeft)
+ cbAdvance = cbLeft;
+ pSSM->offEst += cbAdvance;
+
+ /* uPercentPrepare% prepare, xx% exec, uPercentDone% done+crc. This is not
+ quite right for live save, but the non-live stage there is very short. */
+ while ( pSSM->offEst >= pSSM->offEstProgress
+ && pSSM->uPercent <= 100 - pSSM->uPercentDone)
+ {
+ if (pSSM->pfnProgress)
+ pSSM->pfnProgress(pSSM->pVM->pUVM, pSSM->uPercent, pSSM->pvUser);
+ pSSM->uPercent++;
+ pSSM->offEstProgress = (pSSM->uPercent - pSSM->uPercentPrepare - pSSM->uPercentLive) * pSSM->cbEstTotal
+ / (100 - pSSM->uPercentDone - pSSM->uPercentPrepare - pSSM->uPercentLive);
+ }
+ }
+}
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Makes the SSM operation cancellable or not (via SSMR3Cancel).
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle. (SSMHANDLE::rc may be set.)
+ * @param fCancellable The new state.
+ */
+static void ssmR3SetCancellable(PVM pVM, PSSMHANDLE pSSM, bool fCancellable)
+{
+ RTCritSectEnter(&pVM->ssm.s.CancelCritSect);
+ if (fCancellable)
+ {
+ Assert(!pVM->ssm.s.pSSM);
+ pVM->ssm.s.pSSM = pSSM;
+ }
+ else
+ {
+ if (pVM->ssm.s.pSSM == pSSM)
+ pVM->ssm.s.pSSM = NULL;
+
+ uint32_t fCancelled = ASMAtomicUoReadU32(&pSSM->fCancelled);
+ if ( fCancelled == SSMHANDLE_CANCELLED
+ && RT_SUCCESS(pSSM->rc))
+ pSSM->rc = VERR_SSM_CANCELLED;
+ }
+
+ RTCritSectLeave(&pVM->ssm.s.CancelCritSect);
+}
+#endif /* !SSM_STANDALONE */
+
+
+/**
+ * Gets the host bit count of the saved state.
+ *
+ * Works on both save and load handles.
+ *
+ * @returns 32 or 64.
+ * @param pSSM The saved state handle.
+ */
+DECLINLINE(uint32_t) ssmR3GetHostBits(PSSMHANDLE pSSM)
+{
+ if (pSSM->enmOp >= SSMSTATE_LOAD_PREP)
+ {
+ uint32_t cBits = pSSM->u.Read.cHostBits;
+ if (cBits)
+ return cBits;
+ }
+ return HC_ARCH_BITS;
+}
+
+
+/**
+ * Checks whether the saved state originated on a host using 32-bit MSC.
+ *
+ * Works on both save and load handles.
+ *
+ * @returns true/false.
+ * @param pSSM The saved state handle.
+ */
+DECLINLINE(bool) ssmR3IsHostMsc32(PSSMHANDLE pSSM)
+{
+ if (pSSM->enmOp >= SSMSTATE_LOAD_PREP)
+ return pSSM->u.Read.fIsHostMsc32;
+ return SSM_HOST_IS_MSC_32;
+}
+
+#ifndef SSM_STANDALONE
+
+/**
+ * Finishes a data unit.
+ * All buffers and compressor instances are flushed and destroyed.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DataWriteFinish(PSSMHANDLE pSSM)
+{
+ //Log2(("ssmR3DataWriteFinish: %#010llx start\n", ssmR3StrmTell(&pSSM->Strm)));
+ int rc = ssmR3DataFlushBuffer(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+ return VINF_SUCCESS;
+ }
+
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ Log2(("ssmR3DataWriteFinish: failure rc=%Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Begins writing the data of a data unit.
+ *
+ * Errors are signalled via pSSM->rc.
+ *
+ * @param pSSM The saved state handle.
+ */
+static void ssmR3DataWriteBegin(PSSMHANDLE pSSM)
+{
+ pSSM->offUnit = 0;
+ pSSM->offUnitUser = 0;
+}
+
+
+/**
+ * Writes a record to the current data item in the saved state file.
+ *
+ * @returns VBox status code. Sets pSSM->rc on failure.
+ * @param pSSM The saved state handle.
+ * @param pvBuf The bits to write.
+ * @param cbBuf The number of bytes to write.
+ */
+static int ssmR3DataWriteRaw(PSSMHANDLE pSSM, const void *pvBuf, size_t cbBuf)
+{
+ Log2(("ssmR3DataWriteRaw: %08llx|%08llx: pvBuf=%p cbBuf=%#x %.*Rhxs%s\n",
+ ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pvBuf, cbBuf, RT_MIN(cbBuf, SSM_LOG_BYTES), pvBuf, cbBuf > SSM_LOG_BYTES ? "..." : ""));
+
+ /*
+ * Check that everything is fine.
+ */
+ if (RT_FAILURE(pSSM->rc))
+ return pSSM->rc;
+
+ /*
+ * Write the data item in 1MB chunks for progress indicator reasons.
+ */
+ while (cbBuf > 0)
+ {
+ size_t cbChunk = RT_MIN(cbBuf, _1M);
+ int rc = ssmR3StrmWrite(&pSSM->Strm, pvBuf, cbChunk);
+ if (RT_FAILURE(rc))
+ return rc;
+ pSSM->offUnit += cbChunk;
+ cbBuf -= cbChunk;
+ pvBuf = (char *)pvBuf + cbChunk;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Writes a record header for the specified amount of data.
+ *
+ * @returns VBox status code. Sets pSSM->rc on failure.
+ * @param pSSM The saved state handle
+ * @param cb The amount of data.
+ * @param u8TypeAndFlags The record type and flags.
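+ *
+ * @par Example
+ *      The size is encoded UTF-8 style. For instance (illustrative only),
+ *      cb=0x1234 (4660 bytes) takes the cbHdr=4 branch and yields the
+ *      header bytes { u8TypeAndFlags, 0xe1, 0x88, 0xb4 }.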
+ */
+static int ssmR3DataWriteRecHdr(PSSMHANDLE pSSM, size_t cb, uint8_t u8TypeAndFlags)
+{
+ size_t cbHdr;
+ uint8_t abHdr[8];
+ abHdr[0] = u8TypeAndFlags;
+ if (cb < 0x80)
+ {
+ cbHdr = 2;
+ abHdr[1] = (uint8_t)cb;
+ }
+ else if (cb < 0x00000800)
+ {
+ cbHdr = 3;
+ abHdr[1] = (uint8_t)(0xc0 | (cb >> 6));
+ abHdr[2] = (uint8_t)(0x80 | (cb & 0x3f));
+ }
+ else if (cb < 0x00010000)
+ {
+ cbHdr = 4;
+ abHdr[1] = (uint8_t)(0xe0 | (cb >> 12));
+ abHdr[2] = (uint8_t)(0x80 | ((cb >> 6) & 0x3f));
+ abHdr[3] = (uint8_t)(0x80 | (cb & 0x3f));
+ }
+ else if (cb < 0x00200000)
+ {
+ cbHdr = 5;
+ abHdr[1] = (uint8_t)(0xf0 | (cb >> 18));
+ abHdr[2] = (uint8_t)(0x80 | ((cb >> 12) & 0x3f));
+ abHdr[3] = (uint8_t)(0x80 | ((cb >> 6) & 0x3f));
+ abHdr[4] = (uint8_t)(0x80 | (cb & 0x3f));
+ }
+ else if (cb < 0x04000000)
+ {
+ cbHdr = 6;
+ abHdr[1] = (uint8_t)(0xf8 | (cb >> 24));
+ abHdr[2] = (uint8_t)(0x80 | ((cb >> 18) & 0x3f));
+ abHdr[3] = (uint8_t)(0x80 | ((cb >> 12) & 0x3f));
+ abHdr[4] = (uint8_t)(0x80 | ((cb >> 6) & 0x3f));
+ abHdr[5] = (uint8_t)(0x80 | (cb & 0x3f));
+ }
+ else if (cb <= 0x7fffffff)
+ {
+ cbHdr = 7;
+ abHdr[1] = (uint8_t)(0xfc | (cb >> 30));
+ abHdr[2] = (uint8_t)(0x80 | ((cb >> 24) & 0x3f));
+ abHdr[3] = (uint8_t)(0x80 | ((cb >> 18) & 0x3f));
+ abHdr[4] = (uint8_t)(0x80 | ((cb >> 12) & 0x3f));
+ abHdr[5] = (uint8_t)(0x80 | ((cb >> 6) & 0x3f));
+ abHdr[6] = (uint8_t)(0x80 | (cb & 0x3f));
+ }
+ else
+ AssertLogRelMsgFailedReturn(("cb=%#x\n", cb), pSSM->rc = VERR_SSM_MEM_TOO_BIG);
+
+ Log3(("ssmR3DataWriteRecHdr: %08llx|%08llx/%08x: Type=%02x fImportant=%RTbool cbHdr=%u\n",
+ ssmR3StrmTell(&pSSM->Strm) + cbHdr, pSSM->offUnit + cbHdr, cb, u8TypeAndFlags & SSM_REC_TYPE_MASK, !!(u8TypeAndFlags & SSM_REC_FLAGS_IMPORTANT), cbHdr));
+
+ return ssmR3DataWriteRaw(pSSM, &abHdr[0], cbHdr);
+}
+
+
+/**
+ * Worker that flushes the buffered data.
+ *
+ * @returns VBox status code. Will set pSSM->rc on error.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DataFlushBuffer(PSSMHANDLE pSSM)
+{
+ /*
+ * Check how much there currently is in the buffer.
+ */
+ uint32_t cb = pSSM->u.Write.offDataBuffer;
+ if (!cb)
+ return pSSM->rc;
+ pSSM->u.Write.offDataBuffer = 0;
+
+ /*
+ * Write a record header and then the data.
+ * (No need for fancy optimizations here any longer since the stream is
+ * fully buffered.)
+ */
+ int rc = ssmR3DataWriteRecHdr(pSSM, cb, SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_RAW);
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DataWriteRaw(pSSM, pSSM->u.Write.abDataBuffer, cb);
+ ssmR3ProgressByByte(pSSM, cb);
+ return rc;
+}
+
+
+/**
+ * ssmR3DataWrite worker that writes big stuff.
+ *
+ * @returns VBox status code
+ * @param pSSM The saved state handle.
+ * @param pvBuf The bits to write.
+ * @param cbBuf The number of bytes to write.
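+ *
+ * @remarks For reference (derived from the code below): a compressed block
+ *          is emitted as one type byte, a 3-byte size field, one byte giving
+ *          the uncompressed size in KB, and then the LZF payload, while an
+ *          all-zero block shrinks to a 3-byte RAW_ZERO record.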
+ */
+static int ssmR3DataWriteBig(PSSMHANDLE pSSM, const void *pvBuf, size_t cbBuf)
+{
+ int rc = ssmR3DataFlushBuffer(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->offUnitUser += cbBuf;
+
+ /*
+ * Split it up into compression blocks.
+ */
+ for (;;)
+ {
+ if ( cbBuf >= SSM_ZIP_BLOCK_SIZE
+ && ( ((uintptr_t)pvBuf & 0xf)
+ || !ASMMemIsZero(pvBuf, SSM_ZIP_BLOCK_SIZE))
+ )
+ {
+ /*
+ * Compress it.
+ */
+ AssertCompile(1 + 3 + 1 + SSM_ZIP_BLOCK_SIZE < 0x00010000);
+ uint8_t *pb;
+ rc = ssmR3StrmReserveWriteBufferSpace(&pSSM->Strm, 1 + 3 + 1 + SSM_ZIP_BLOCK_SIZE, &pb);
+ if (RT_FAILURE(rc))
+ break;
+ size_t cbRec = SSM_ZIP_BLOCK_SIZE - (SSM_ZIP_BLOCK_SIZE / 16);
+ rc = RTZipBlockCompress(RTZIPTYPE_LZF, RTZIPLEVEL_FAST, 0 /*fFlags*/,
+ pvBuf, SSM_ZIP_BLOCK_SIZE,
+ pb + 1 + 3 + 1, cbRec, &cbRec);
+ if (RT_SUCCESS(rc))
+ {
+ pb[0] = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_RAW_LZF;
+ pb[4] = SSM_ZIP_BLOCK_SIZE / _1K;
+ cbRec += 1;
+ }
+ else
+ {
+ pb[0] = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_RAW;
+ memcpy(&pb[4], pvBuf, SSM_ZIP_BLOCK_SIZE);
+ cbRec = SSM_ZIP_BLOCK_SIZE;
+ }
+ pb[1] = (uint8_t)(0xe0 | ( cbRec >> 12));
+ pb[2] = (uint8_t)(0x80 | ((cbRec >> 6) & 0x3f));
+ pb[3] = (uint8_t)(0x80 | ( cbRec & 0x3f));
+ cbRec += 1 + 3;
+ rc = ssmR3StrmCommitWriteBufferSpace(&pSSM->Strm, cbRec);
+ if (RT_FAILURE(rc))
+ break;
+
+ pSSM->offUnit += cbRec;
+ ssmR3ProgressByByte(pSSM, SSM_ZIP_BLOCK_SIZE);
+
+ /* advance */
+ if (cbBuf == SSM_ZIP_BLOCK_SIZE)
+ return VINF_SUCCESS;
+ cbBuf -= SSM_ZIP_BLOCK_SIZE;
+ pvBuf = (uint8_t const*)pvBuf + SSM_ZIP_BLOCK_SIZE;
+ }
+ else if (cbBuf >= SSM_ZIP_BLOCK_SIZE)
+ {
+ /*
+ * Zero block.
+ */
+ uint8_t abRec[3];
+ abRec[0] = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_RAW_ZERO;
+ abRec[1] = 1;
+ abRec[2] = SSM_ZIP_BLOCK_SIZE / _1K;
+ Log3(("ssmR3DataWriteBig: %08llx|%08llx/%08x: ZERO\n", ssmR3StrmTell(&pSSM->Strm) + 2, pSSM->offUnit + 2, 1));
+ rc = ssmR3DataWriteRaw(pSSM, &abRec[0], sizeof(abRec));
+ if (RT_FAILURE(rc))
+ break;
+
+ /* advance */
+ ssmR3ProgressByByte(pSSM, SSM_ZIP_BLOCK_SIZE);
+ if (cbBuf == SSM_ZIP_BLOCK_SIZE)
+ return VINF_SUCCESS;
+ cbBuf -= SSM_ZIP_BLOCK_SIZE;
+ pvBuf = (uint8_t const*)pvBuf + SSM_ZIP_BLOCK_SIZE;
+ }
+ else
+ {
+ /*
+ * Less than one block left, store it the simple way.
+ */
+ rc = ssmR3DataWriteRecHdr(pSSM, cbBuf, SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_RAW);
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DataWriteRaw(pSSM, pvBuf, cbBuf);
+ ssmR3ProgressByByte(pSSM, cbBuf);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+
+/**
+ * ssmR3DataWrite worker that is called when there isn't enough room in the
+ * buffer for the current chunk of data.
+ *
+ * This will first flush the buffer and then add the new bits to it.
+ *
+ * @returns VBox status code
+ * @param pSSM The saved state handle.
+ * @param pvBuf The bits to write.
+ * @param cbBuf The number of bytes to write.
+ */
+static int ssmR3DataWriteFlushAndBuffer(PSSMHANDLE pSSM, const void *pvBuf, size_t cbBuf)
+{
+ int rc = ssmR3DataFlushBuffer(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ memcpy(&pSSM->u.Write.abDataBuffer[0], pvBuf, cbBuf);
+ pSSM->u.Write.offDataBuffer = (uint32_t)cbBuf;
+ pSSM->offUnitUser += cbBuf;
+ }
+ return rc;
+}
+
+
+/**
+ * Writes data to the current data unit.
+ *
+ * This is an inlined wrapper that optimizes the small writes that so many of
+ * the APIs make.
+ *
+ * @returns VBox status code
+ * @param pSSM The saved state handle.
+ * @param pvBuf The bits to write.
+ * @param cbBuf The number of bytes to write.
+ */
+DECLINLINE(int) ssmR3DataWrite(PSSMHANDLE pSSM, const void *pvBuf, size_t cbBuf)
+{
+ if (cbBuf > sizeof(pSSM->u.Write.abDataBuffer) / 8)
+ return ssmR3DataWriteBig(pSSM, pvBuf, cbBuf);
+ if (!cbBuf)
+ return VINF_SUCCESS;
+
+ uint32_t off = pSSM->u.Write.offDataBuffer;
+ if (RT_UNLIKELY(cbBuf + off > sizeof(pSSM->u.Write.abDataBuffer)))
+ return ssmR3DataWriteFlushAndBuffer(pSSM, pvBuf, cbBuf);
+
+ memcpy(&pSSM->u.Write.abDataBuffer[off], pvBuf, cbBuf);
+ pSSM->u.Write.offDataBuffer = off + (uint32_t)cbBuf;
+ pSSM->offUnitUser += cbBuf;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Puts a structure.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pvStruct The structure address.
+ * @param paFields The array of structure fields descriptions.
+ * The array must be terminated by a SSMFIELD_ENTRY_TERM().
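+ *
+ * @par Example
+ *      Illustrative field table for a hypothetical MYSTATE structure (macro
+ *      names as declared in VBox/vmm/ssm.h):
+ * @code
+ *      static SSMFIELD const s_aMyFields[] =
+ *      {
+ *          SSMFIELD_ENTRY(       MYSTATE, u32Flags),   // hypothetical members
+ *          SSMFIELD_ENTRY_GCPHYS(MYSTATE, GCPhysBase),
+ *          SSMFIELD_ENTRY_TERM()
+ *      };
+ *      rc = SSMR3PutStruct(pSSM, &MyState, &s_aMyFields[0]);
+ * @endcode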
+ */
+VMMR3DECL(int) SSMR3PutStruct(PSSMHANDLE pSSM, const void *pvStruct, PCSSMFIELD paFields)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ AssertPtr(pvStruct);
+ AssertPtr(paFields);
+
+ /* begin marker. */
+ int rc = SSMR3PutU32(pSSM, SSMR3STRUCT_BEGIN);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* put the fields */
+ for (PCSSMFIELD pCur = paFields;
+ pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
+ pCur++)
+ {
+ uint8_t const *pbField = (uint8_t const *)pvStruct + pCur->off;
+ switch ((uintptr_t)pCur->pfnGetPutOrTransformer)
+ {
+ case SSMFIELDTRANS_NO_TRANSFORMATION:
+ rc = ssmR3DataWrite(pSSM, pbField, pCur->cb);
+ break;
+
+ case SSMFIELDTRANS_GCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutGCPtr(pSSM, *(PRTGCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_GCPHYS:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutGCPhys(pSSM, *(PRTGCPHYS)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutRCPtr(pSSM, *(PRTRCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR_ARRAY:
+ {
+ uint32_t const cEntries = pCur->cb / sizeof(RTRCPTR);
+ AssertMsgBreakStmt(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = SSMR3PutRCPtr(pSSM, ((PRTRCPTR)pbField)[i]);
+ break;
+ }
+
+ default:
+ AssertMsgFailedBreakStmt(("%#x\n", pCur->pfnGetPutOrTransformer), rc = VERR_SSM_FIELD_COMPLEX);
+ }
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
+ }
+
+ /* end marker */
+ return SSMR3PutU32(pSSM, SSMR3STRUCT_END);
+}
+
+
+/**
+ * SSMR3PutStructEx helper that puts a HCPTR that is used as a NULL indicator.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The saved state handle.
+ * @param pv The value to put.
+ * @param fFlags SSMSTRUCT_FLAGS_XXX.
+ */
+DECLINLINE(int) ssmR3PutHCPtrNI(PSSMHANDLE pSSM, void *pv, uint32_t fFlags)
+{
+ int rc;
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3DataWrite(pSSM, &pv, sizeof(void *));
+ else
+ rc = SSMR3PutBool(pSSM, pv != NULL);
+ return rc;
+}
+
+
+/**
+ * SSMR3PutStructEx helper that puts an arbitrary number of zeros.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param cbToFill The number of zeros to stuff into the state.
+ */
+static int ssmR3PutZeros(PSSMHANDLE pSSM, uint32_t cbToFill)
+{
+ while (cbToFill > 0)
+ {
+ uint32_t cb = RT_MIN(sizeof(g_abZero), cbToFill);
+ int rc = ssmR3DataWrite(pSSM, g_abZero, cb);
+ if (RT_FAILURE(rc))
+ return rc;
+ cbToFill -= cb;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Puts a structure, extended API.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pvStruct The structure address.
+ * @param cbStruct The size of the struct (use for validation only).
+ * @param fFlags Combination of SSMSTRUCT_FLAGS_XXX defines.
+ * @param paFields The array of structure fields descriptions. The
+ * array must be terminated by a SSMFIELD_ENTRY_TERM().
+ * @param pvUser User argument for any callbacks that paFields might
+ * contain.
+ */
+VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cbStruct,
+ uint32_t fFlags, PCSSMFIELD paFields, void *pvUser)
+{
+ int rc;
+
+ /*
+ * Validation.
+ */
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), pSSM->rc = VERR_INVALID_PARAMETER);
+ AssertPtr(pvStruct);
+ AssertPtr(paFields);
+
+
+ /*
+ * Begin marker.
+ */
+ if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_LEAD_MARKER)))
+ {
+ rc = SSMR3PutU32(pSSM, SSMR3STRUCT_BEGIN);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Put the fields
+ */
+ rc = VINF_SUCCESS;
+ uint32_t off = 0;
+ for (PCSSMFIELD pCur = paFields;
+ pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
+ pCur++)
+ {
+ uint32_t const offField = (!SSMFIELDTRANS_IS_PADDING(pCur->pfnGetPutOrTransformer) || pCur->off != UINT32_MAX / 2)
+ && !SSMFIELDTRANS_IS_OLD(pCur->pfnGetPutOrTransformer)
+ ? pCur->off
+ : off;
+ uint32_t const cbField = SSMFIELDTRANS_IS_OLD(pCur->pfnGetPutOrTransformer)
+ ? 0
+ : SSMFIELDTRANS_IS_PADDING(pCur->pfnGetPutOrTransformer)
+ ? RT_HIWORD(pCur->cb)
+ : pCur->cb;
+ AssertMsgBreakStmt( cbField <= cbStruct
+ && offField + cbField <= cbStruct
+ && offField + cbField >= offField,
+ ("offField=%#x cbField=%#x cbStruct=%#x (%s)\n", offField, cbField, cbStruct, pCur->pszName),
+ rc = VERR_SSM_FIELD_OUT_OF_BOUNDS);
+ AssertMsgBreakStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == offField,
+ ("off=%#x offField=%#x (%s)\n", off, offField, pCur->pszName),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ rc = VINF_SUCCESS;
+ uint8_t const *pbField = (uint8_t const *)pvStruct + offField;
+ switch ((uintptr_t)pCur->pfnGetPutOrTransformer)
+ {
+ case SSMFIELDTRANS_NO_TRANSFORMATION:
+ rc = ssmR3DataWrite(pSSM, pbField, cbField);
+ break;
+
+ case SSMFIELDTRANS_GCPHYS:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutGCPhys(pSSM, *(PRTGCPHYS)pbField);
+ break;
+
+ case SSMFIELDTRANS_GCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutGCPtr(pSSM, *(PRTGCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3PutRCPtr(pSSM, *(PRTRCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR_ARRAY:
+ {
+ uint32_t const cEntries = cbField / sizeof(RTRCPTR);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = SSMR3PutRCPtr(pSSM, ((PRTRCPTR)pbField)[i]);
+ break;
+ }
+
+ case SSMFIELDTRANS_HCPTR_NI:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3PutHCPtrNI(pSSM, *(void * const *)pbField, fFlags);
+ break;
+
+ case SSMFIELDTRANS_HCPTR_NI_ARRAY:
+ {
+ uint32_t const cEntries = cbField / sizeof(void *);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = ssmR3PutHCPtrNI(pSSM, ((void * const *)pbField)[i], fFlags);
+ break;
+ }
+
+ case SSMFIELDTRANS_HCPTR_HACK_U32:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(*(uintptr_t *)pbField <= UINT32_MAX, ("%p (%s)\n", *(uintptr_t *)pbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_VALUE);
+ rc = ssmR3DataWrite(pSSM, pbField, sizeof(uint32_t));
+ if ((fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE) && sizeof(void *) != sizeof(uint32_t) && RT_SUCCESS(rc))
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(uint32_t));
+ break;
+
+ case SSMFIELDTRANS_U32_ZX_U64:
+ AssertFailedBreakStmt(rc = VERR_SSM_FIELD_LOAD_ONLY_TRANSFORMATION);
+ break;
+
+ case SSMFIELDTRANS_IGNORE:
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3PutZeros(pSSM, cbField);
+ break;
+
+ case SSMFIELDTRANS_IGN_GCPHYS:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPHYS));
+ break;
+
+ case SSMFIELDTRANS_IGN_GCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPTR));
+ break;
+
+ case SSMFIELDTRANS_IGN_RCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTRCPTR));
+ break;
+
+ case SSMFIELDTRANS_IGN_HCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(void *));
+ break;
+
+
+ case SSMFIELDTRANS_OLD:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3PutZeros(pSSM, pCur->cb);
+ break;
+
+ case SSMFIELDTRANS_OLD_GCPHYS:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPHYS));
+ break;
+
+ case SSMFIELDTRANS_OLD_GCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPTR));
+ break;
+
+ case SSMFIELDTRANS_OLD_RCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTRCPTR));
+ break;
+
+ case SSMFIELDTRANS_OLD_HCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(void *));
+ break;
+
+ case SSMFIELDTRANS_OLD_PAD_HC:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3PutZeros(pSSM, HC_ARCH_BITS == 64 ? RT_HIWORD(pCur->cb) : RT_LOWORD(pCur->cb));
+ break;
+
+ case SSMFIELDTRANS_OLD_PAD_MSC32:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (SSM_HOST_IS_MSC_32)
+ rc = ssmR3PutZeros(pSSM, pCur->cb);
+ break;
+
+
+ case SSMFIELDTRANS_PAD_HC:
+ case SSMFIELDTRANS_PAD_HC32:
+ case SSMFIELDTRANS_PAD_HC64:
+ case SSMFIELDTRANS_PAD_HC_AUTO:
+ case SSMFIELDTRANS_PAD_MSC32_AUTO:
+ {
+ uint32_t cb32 = RT_BYTE1(pCur->cb);
+ uint32_t cb64 = RT_BYTE2(pCur->cb);
+ uint32_t cbCtx = HC_ARCH_BITS == 64
+ || ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ && !SSM_HOST_IS_MSC_32)
+ ? cb64 : cb32;
+ uint32_t cbSaved = ssmR3GetHostBits(pSSM) == 64
+ || ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ && !ssmR3IsHostMsc32(pSSM))
+ ? cb64 : cb32;
+ AssertMsgBreakStmt( cbField == cbCtx
+ && ( ( pCur->off == UINT32_MAX / 2
+ && ( cbField == 0
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ )
+ )
+ || (pCur->off != UINT32_MAX / 2 && cbField != 0)
+ )
+ , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
+ cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
+ rc = VERR_SSM_FIELD_INVALID_PADDING_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = ssmR3PutZeros(pSSM, cbSaved);
+ break;
+ }
+
+ default:
+ AssertPtrBreakStmt(pCur->pfnGetPutOrTransformer, rc = VERR_SSM_FIELD_INVALID_CALLBACK);
+ rc = pCur->pfnGetPutOrTransformer(pSSM, pCur, (void *)pvStruct, fFlags, false /*fGetOrPut*/, pvUser);
+ break;
+ }
+ if (RT_FAILURE(rc))
+ break; /* Deal with failures in one place (see below). */
+
+ off = offField + cbField;
+ }
+
+ if (RT_SUCCESS(rc))
+ AssertMsgStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == cbStruct,
+ ("off=%#x cbStruct=%#x\n", off, cbStruct),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
+
+ /*
+ * End marker
+ */
+ if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_TAIL_MARKER)))
+ {
+ rc = SSMR3PutU32(pSSM, SSMR3STRUCT_END);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Saves a boolean item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param fBool Item to save.
+ */
+VMMR3DECL(int) SSMR3PutBool(PSSMHANDLE pSSM, bool fBool)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ uint8_t u8 = fBool; /* enforce 1 byte size */
+ return ssmR3DataWrite(pSSM, &u8, sizeof(u8));
+}
+
+
+/**
+ * Saves an 8-bit unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u8 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutU8(PSSMHANDLE pSSM, uint8_t u8)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u8, sizeof(u8));
+}
+
+
+/**
+ * Saves an 8-bit signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i8 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutS8(PSSMHANDLE pSSM, int8_t i8)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i8, sizeof(i8));
+}
+
+
+/**
+ * Saves a 16-bit unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u16 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutU16(PSSMHANDLE pSSM, uint16_t u16)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u16, sizeof(u16));
+}
+
+
+/**
+ * Saves a 16-bit signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i16 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutS16(PSSMHANDLE pSSM, int16_t i16)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i16, sizeof(i16));
+}
+
+
+/**
+ * Saves a 32-bit unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u32 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutU32(PSSMHANDLE pSSM, uint32_t u32)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u32, sizeof(u32));
+}
+
+
+/**
+ * Saves a 32-bit signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i32 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutS32(PSSMHANDLE pSSM, int32_t i32)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i32, sizeof(i32));
+}
+
+
+/**
+ * Saves a 64-bit unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u64 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutU64(PSSMHANDLE pSSM, uint64_t u64)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u64, sizeof(u64));
+}
+
+
+/**
+ * Saves a 64-bit signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i64 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutS64(PSSMHANDLE pSSM, int64_t i64)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i64, sizeof(i64));
+}
+
+
+/**
+ * Saves a 128-bit unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u128 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutU128(PSSMHANDLE pSSM, uint128_t u128)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u128, sizeof(u128));
+}
+
+
+/**
+ * Saves a 128-bit signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i128 Item to save.
+ */
+VMMR3DECL(int) SSMR3PutS128(PSSMHANDLE pSSM, int128_t i128)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i128, sizeof(i128));
+}
+
+
+/**
+ * Saves a VBox unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u Item to save.
+ */
+VMMR3DECL(int) SSMR3PutUInt(PSSMHANDLE pSSM, RTUINT u)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u, sizeof(u));
+}
+
+
+/**
+ * Saves a VBox signed integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param i Item to save.
+ */
+VMMR3DECL(int) SSMR3PutSInt(PSSMHANDLE pSSM, RTINT i)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &i, sizeof(i));
+}
+
+
+/**
+ * Saves a GC natural unsigned integer item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u Item to save.
+ *
+ * @deprecated Silly type, don't use it.
+ */
+VMMR3DECL(int) SSMR3PutGCUInt(PSSMHANDLE pSSM, RTGCUINT u)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u, sizeof(u));
+}
+
+
+/**
+ * Saves a GC unsigned integer register item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param u Item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCUIntReg(PSSMHANDLE pSSM, RTGCUINTREG u)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &u, sizeof(u));
+}
+
+
+/**
+ * Saves a 32-bit GC physical address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param GCPhys The item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCPhys32(PSSMHANDLE pSSM, RTGCPHYS32 GCPhys)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &GCPhys, sizeof(GCPhys));
+}
+
+
+/**
+ * Saves a 64-bit GC physical address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param GCPhys The item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCPhys64(PSSMHANDLE pSSM, RTGCPHYS64 GCPhys)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &GCPhys, sizeof(GCPhys));
+}
+
+
+/**
+ * Saves a GC physical address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param GCPhys The item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCPhys(PSSMHANDLE pSSM, RTGCPHYS GCPhys)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &GCPhys, sizeof(GCPhys));
+}
+
+
+/**
+ * Saves a GC virtual address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param GCPtr The item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCPtr(PSSMHANDLE pSSM, RTGCPTR GCPtr)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &GCPtr, sizeof(GCPtr));
+}
+
+
+/**
+ * Saves an RC virtual address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param RCPtr The item to save.
+ */
+VMMR3DECL(int) SSMR3PutRCPtr(PSSMHANDLE pSSM, RTRCPTR RCPtr)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &RCPtr, sizeof(RCPtr));
+}
+
+
+/**
+ * Saves a GC virtual address (represented as an unsigned integer) item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param GCPtr The item to save.
+ */
+VMMR3DECL(int) SSMR3PutGCUIntPtr(PSSMHANDLE pSSM, RTGCUINTPTR GCPtr)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &GCPtr, sizeof(GCPtr));
+}
+
+
+/**
+ * Saves an I/O port address item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param IOPort The item to save.
+ */
+VMMR3DECL(int) SSMR3PutIOPort(PSSMHANDLE pSSM, RTIOPORT IOPort)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &IOPort, sizeof(IOPort));
+}
+
+
+/**
+ * Saves a selector item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param Sel The item to save.
+ */
+VMMR3DECL(int) SSMR3PutSel(PSSMHANDLE pSSM, RTSEL Sel)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, &Sel, sizeof(Sel));
+}
+
+
+/**
+ * Saves a memory item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pv Item to save.
+ * @param cb Size of the item.
+ */
+VMMR3DECL(int) SSMR3PutMem(PSSMHANDLE pSSM, const void *pv, size_t cb)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataWrite(pSSM, pv, cb);
+}
+
+
+/**
+ * Saves a zero terminated string item to the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param psz Item to save.
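+ *
+ * @remarks Stored as a 32-bit length followed by the characters without the
+ *          terminator; e.g. "ok" becomes the six bytes
+ *          02 00 00 00 6f 6b (illustrative, little-endian length).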
+ */
+VMMR3DECL(int) SSMR3PutStrZ(PSSMHANDLE pSSM, const char *psz)
+{
+ SSM_ASSERT_WRITEABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+
+ size_t cch = strlen(psz);
+ if (cch > _1M)
+ {
+ AssertMsgFailed(("a %zu byte long string, what's this!?!\n", cch));
+ return VERR_TOO_MUCH_DATA;
+ }
+ uint32_t u32 = (uint32_t)cch;
+ int rc = ssmR3DataWrite(pSSM, &u32, sizeof(u32));
+ if (rc)
+ return rc;
+ return ssmR3DataWrite(pSSM, psz, cch);
+}
+
+
+/**
+ * Emits a SSMLiveControl unit with a new progress report.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param lrdPct The progress of the live save.
+ * @param uPass The current pass.
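+ *
+ * @par Example
+ *      Illustrative arithmetic for the payload below: with lrdPct=50.0 and
+ *      uPercentDone=2, u16PartsPerTenThousand = 50 * (100 - 2) = 4900,
+ *      i.e. 49.00% overall progress.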
+ */
+static int ssmR3LiveControlEmit(PSSMHANDLE pSSM, long double lrdPct, uint32_t uPass)
+{
+ AssertMsg(lrdPct <= 100.0, ("%u\n", lrdPct * 100));
+
+ /*
+ * Make sure we're in one of the two EXEC states or we may fail.
+ */
+ SSMSTATE enmSavedState = pSSM->enmOp;
+ if (enmSavedState == SSMSTATE_LIVE_VOTE)
+ pSSM->enmOp = SSMSTATE_LIVE_EXEC;
+ else if (enmSavedState == SSMSTATE_SAVE_DONE)
+ pSSM->enmOp = SSMSTATE_SAVE_EXEC;
+
+ /*
+ * Write the unit header.
+ */
+ SSMFILEUNITHDRV2 UnitHdr;
+ memcpy(&UnitHdr.szMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(UnitHdr.szMagic));
+ UnitHdr.offStream = ssmR3StrmTell(&pSSM->Strm);
+ UnitHdr.u32CurStreamCRC = ssmR3StrmCurCRC(&pSSM->Strm);
+ UnitHdr.u32CRC = 0;
+ UnitHdr.u32Version = 1;
+ UnitHdr.u32Instance = 0;
+ UnitHdr.u32Pass = uPass;
+ UnitHdr.fFlags = 0;
+ UnitHdr.cbName = sizeof("SSMLiveControl");
+ memcpy(&UnitHdr.szName[0], "SSMLiveControl", UnitHdr.cbName);
+ UnitHdr.u32CRC = RTCrc32(&UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ Log(("SSM: Unit at %#9llx: '%s', instance %u, pass %#x, version %u\n",
+ UnitHdr.offStream, UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Pass, UnitHdr.u32Version));
+ int rc = ssmR3StrmWrite(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write the payload.
+ */
+ ssmR3DataWriteBegin(pSSM);
+
+ uint16_t u16PartsPerTenThousand = (uint16_t)(lrdPct * (100 - pSSM->uPercentDone));
+ AssertMsg(u16PartsPerTenThousand <= 10000, ("%u\n", u16PartsPerTenThousand));
+ ssmR3DataWrite(pSSM, &u16PartsPerTenThousand, sizeof(u16PartsPerTenThousand));
+
+ rc = ssmR3DataFlushBuffer(pSSM); /* will return SSMHANDLE::rc if it is set */
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Write the termination record and flush the compression stream.
+ */
+ SSMRECTERM TermRec;
+ TermRec.u8TypeAndFlags = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_TERM;
+ TermRec.cbRec = sizeof(TermRec) - 2;
+ if (pSSM->Strm.fChecksummed)
+ {
+ TermRec.fFlags = SSMRECTERM_FLAGS_CRC32;
+ TermRec.u32StreamCRC = RTCrc32Finish(RTCrc32Process(ssmR3StrmCurCRC(&pSSM->Strm), &TermRec, 2));
+ }
+ else
+ {
+ TermRec.fFlags = 0;
+ TermRec.u32StreamCRC = 0;
+ }
+ TermRec.cbUnit = pSSM->offUnit + sizeof(TermRec);
+ rc = ssmR3DataWriteRaw(pSSM, &TermRec, sizeof(TermRec));
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DataWriteFinish(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->enmOp = enmSavedState;
+ return rc;
+ }
+ }
+ }
+
+ LogRel(("SSM: Failed to write live control unit. rc=%Rrc\n", rc));
+ if (RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ pSSM->enmOp = enmSavedState;
+ return rc;
+}
+
+
+/**
+ * Enters the critical section (optionally) associated with the unit.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUnit The unit.
+ */
+DECLINLINE(void) ssmR3UnitCritSectEnter(PVM pVM, PSSMUNIT pUnit)
+{
+ PPDMCRITSECT pCritSect = pUnit->pCritSect;
+ if (pCritSect)
+ {
+ int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+ AssertRC(rc);
+ }
+}
+
+
+/**
+ * Leaves the critical section (optionally) associated with the unit.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUnit The unit.
+ */
+DECLINLINE(void) ssmR3UnitCritSectLeave(PVM pVM, PSSMUNIT pUnit)
+{
+ PPDMCRITSECT pCritSect = pUnit->pCritSect;
+ if (pCritSect)
+ {
+ int rc = PDMCritSectLeave(pVM, pCritSect);
+ AssertRC(rc);
+ }
+}
+
+
+/**
+ * Do the pfnSaveDone run.
+ *
+ * @returns VBox status code (pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3SaveDoDoneRun(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Do the done run.
+ */
+ pSSM->enmOp = SSMSTATE_SAVE_DONE;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if ( pUnit->u.Common.pfnSaveDone
+ && ( pUnit->fCalled
+ || (!pUnit->u.Common.pfnSavePrep && !pUnit->u.Common.pfnSaveExec)))
+ {
+ int rcOld = pSSM->rc;
+ int rc;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnSaveDone(pUnit->u.Dev.pDevIns, pSSM);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnSaveDone(pUnit->u.Drv.pDrvIns, pSSM);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnSaveDone(pUnit->u.Usb.pUsbIns, pSSM);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnSaveDone(pVM, pSSM);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnSaveDone(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ if (RT_SUCCESS(rc) && pSSM->rc != rcOld)
+ rc = pSSM->rc;
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Done save failed with rc=%Rrc for data unit '%s.\n", rc, pUnit->szName));
+ if (RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ }
+ }
+ }
+ return pSSM->rc;
+}
+
+
+/**
+ * Worker for SSMR3LiveDone and SSMR3Save that closes the handle and deletes the
+ * saved state file on failure.
+ *
+ * @returns VBox status code (pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3SaveDoClose(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+ pVM->ssm.s.uPass = 0;
+
+ /*
+ * Make it non-cancellable, close the stream and delete the file on failure.
+ */
+ ssmR3SetCancellable(pVM, pSSM, false);
+ int rc = ssmR3StrmClose(&pSSM->Strm, pSSM->rc == VERR_SSM_CANCELLED);
+ if (RT_SUCCESS(rc))
+ rc = pSSM->rc;
+ if (RT_SUCCESS(rc))
+ {
+ Assert(pSSM->enmOp == SSMSTATE_SAVE_DONE);
+ if (pSSM->pfnProgress)
+ pSSM->pfnProgress(pVM->pUVM, 100, pSSM->pvUser);
+ LogRel(("SSM: Successfully saved the VM state to '%s'\n",
+ pSSM->pszFilename ? pSSM->pszFilename : "<remote-machine>"));
+ }
+ else
+ {
+ if (pSSM->pszFilename)
+ {
+ int rc2 = RTFileDelete(pSSM->pszFilename);
+ AssertRC(rc2);
+ if (RT_SUCCESS(rc2))
+ LogRel(("SSM: Failed to save the VM state to '%s' (file deleted): %Rrc\n",
+ pSSM->pszFilename, rc));
+ else
+ LogRel(("SSM: Failed to save the VM state to '%s' (file deletion failed, rc2=%Rrc): %Rrc\n",
+ pSSM->pszFilename, rc2, rc));
+ }
+ else
+ LogRel(("SSM: Failed to save the VM state.\n"));
+
+ Assert(pSSM->enmOp <= SSMSTATE_SAVE_DONE);
+ if (pSSM->enmOp != SSMSTATE_SAVE_DONE)
+ ssmR3SaveDoDoneRun(pVM, pSSM);
+ }
+
+ /*
+ * Trash the handle before freeing it.
+ */
+ ASMAtomicWriteU32(&pSSM->fCancelled, 0);
+ pSSM->pVM = NULL;
+ pSSM->enmAfter = SSMAFTER_INVALID;
+ pSSM->enmOp = SSMSTATE_INVALID;
+ RTMemFree(pSSM);
+
+ return rc;
+}
+
+
+/**
+ * Closes the SSM handle.
+ *
+ * This must always be called on a handle returned by SSMR3LiveSave.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The SSM handle returned by SSMR3LiveSave.
+ *
+ * @thread EMT(0).
+ */
+VMMR3_INT_DECL(int) SSMR3LiveDone(PSSMHANDLE pSSM)
+{
+ LogFlow(("SSMR3LiveDone: pSSM=%p\n", pSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pSSM, VERR_INVALID_POINTER);
+ PVM pVM = pSSM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT0(pVM);
+ AssertMsgReturn( pSSM->enmAfter == SSMAFTER_DESTROY
+ || pSSM->enmAfter == SSMAFTER_CONTINUE
+ || pSSM->enmAfter == SSMAFTER_TELEPORT,
+ ("%d\n", pSSM->enmAfter),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn( pSSM->enmOp >= SSMSTATE_LIVE_PREP
+ && pSSM->enmOp <= SSMSTATE_SAVE_DONE,
+ ("%d\n", pSSM->enmOp), VERR_INVALID_STATE);
+
+ /*
+ * Join paths with SSMR3Save again.
+ */
+ return ssmR3SaveDoClose(pVM, pSSM);
+}
+
+
+/**
+ * Writes the directory.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ * @param pcEntries Where to return the number of directory entries.
+ */
+static int ssmR3WriteDirectory(PVM pVM, PSSMHANDLE pSSM, uint32_t *pcEntries)
+{
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Grab some temporary memory for the directory.
+ */
+ size_t cbDir = RT_UOFFSETOF_DYN(SSMFILEDIR, aEntries[pVM->ssm.s.cUnits]);
+ PSSMFILEDIR pDir = (PSSMFILEDIR)RTMemTmpAlloc(cbDir);
+ if (!pDir)
+ {
+ LogRel(("ssmR3WriteDirectory: failed to allocate %zu bytes!\n", cbDir));
+ return VERR_NO_TMP_MEMORY;
+ }
+
+ /*
+ * Initialize it.
+ */
+ memcpy(pDir->szMagic, SSMFILEDIR_MAGIC, sizeof(pDir->szMagic));
+ pDir->u32CRC = 0;
+ pDir->cEntries = 0;
+
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ if (pUnit->offStream != RTFOFF_MIN)
+ {
+ PSSMFILEDIRENTRY pEntry = &pDir->aEntries[pDir->cEntries++];
+ Assert(pDir->cEntries <= pVM->ssm.s.cUnits);
+ Assert(pUnit->offStream >= (RTFOFF)sizeof(SSMFILEHDR));
+ pEntry->off = pUnit->offStream;
+ pEntry->u32Instance = pUnit->u32Instance;
+ pEntry->u32NameCRC = RTCrc32(pUnit->szName, pUnit->cchName);
+ }
+
+ /*
+ * Calculate the actual size and CRC-32, then write the directory
+ * out to the stream.
+ */
+ *pcEntries = pDir->cEntries;
+ cbDir = RT_UOFFSETOF_DYN(SSMFILEDIR, aEntries[pDir->cEntries]);
+ pDir->u32CRC = RTCrc32(pDir, cbDir);
+ int rc = ssmR3StrmWrite(&pSSM->Strm, pDir, cbDir);
+ RTMemTmpFree(pDir);
+ return rc;
+}
+
+
+/**
+ * Finalize the saved state stream, i.e. add the end unit, directory
+ * and footer.
+ *
+ * @returns VBox status code (pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3SaveDoFinalization(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+ Assert(RT_SUCCESS(pSSM->rc));
+
+ /*
+ * Write the end unit.
+ */
+ SSMFILEUNITHDRV2 UnitHdr;
+ memcpy(&UnitHdr.szMagic[0], SSMFILEUNITHDR_END, sizeof(UnitHdr.szMagic));
+ UnitHdr.offStream = ssmR3StrmTell(&pSSM->Strm);
+ UnitHdr.u32CurStreamCRC = ssmR3StrmCurCRC(&pSSM->Strm);
+ UnitHdr.u32CRC = 0;
+ UnitHdr.u32Version = 0;
+ UnitHdr.u32Instance = 0;
+ UnitHdr.u32Pass = SSM_PASS_FINAL;
+ UnitHdr.fFlags = 0;
+ UnitHdr.cbName = 0;
+ UnitHdr.u32CRC = RTCrc32(&UnitHdr, RT_UOFFSETOF(SSMFILEUNITHDRV2, szName[0]));
+ Log(("SSM: Unit at %#9llx: END UNIT\n", UnitHdr.offStream));
+ int rc = ssmR3StrmWrite(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF(SSMFILEUNITHDRV2, szName[0]));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed writing the end unit: %Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ /*
+ * Write the directory for the final units and then the footer.
+ */
+ SSMFILEFTR Footer;
+ rc = ssmR3WriteDirectory(pVM, pSSM, &Footer.cDirEntries);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed writing the directory: %Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ memcpy(Footer.szMagic, SSMFILEFTR_MAGIC, sizeof(Footer.szMagic));
+ Footer.offStream = ssmR3StrmTell(&pSSM->Strm);
+ Footer.u32StreamCRC = ssmR3StrmFinalCRC(&pSSM->Strm);
+ Footer.u32Reserved = 0;
+ Footer.u32CRC = 0;
+ Footer.u32CRC = RTCrc32(&Footer, sizeof(Footer));
+ Log(("SSM: Footer at %#9llx: \n", Footer.offStream));
+ rc = ssmR3StrmWrite(&pSSM->Strm, &Footer, sizeof(Footer));
+ if (RT_SUCCESS(rc))
+ rc = ssmR3StrmSetEnd(&pSSM->Strm);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed writing the footer: %Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ LogRel(("SSM: Footer at %#llx (%lld), %u directory entries.\n",
+ Footer.offStream, Footer.offStream, Footer.cDirEntries));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Works the progress calculation during the exec part of a live save.
+ *
+ * @param pSSM The SSM handle.
+ * @param iUnit The current unit number.
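+ *
+ * @par Example
+ *      Illustrative numbers only: with 40 units, iUnit=10, uPercentDone=2,
+ *      uPercentPrepare=5 and uPercentLive=73, the exec window is 20% wide
+ *      and lrdPct = 25 * 20 / 100 + 5 + 73 = 83.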
+ */
+static void ssmR3ProgressByUnit(PSSMHANDLE pSSM, uint32_t iUnit)
+{
+ if (pSSM->fLiveSave)
+ {
+ unsigned uPctExec = iUnit * 100 / pSSM->pVM->ssm.s.cUnits;
+ unsigned cPctExec = 100 - pSSM->uPercentDone - pSSM->uPercentPrepare - pSSM->uPercentLive;
+ long double lrdPct = (long double)uPctExec * cPctExec / 100 + pSSM->uPercentPrepare + pSSM->uPercentLive;
+ unsigned uPct = (unsigned)lrdPct;
+ if (uPct != pSSM->uPercent)
+ {
+ ssmR3LiveControlEmit(pSSM, lrdPct, SSM_PASS_FINAL);
+ pSSM->uPercent = uPct;
+ pSSM->pfnProgress(pSSM->pVM->pUVM, uPct, pSSM->pvUser);
+ }
+ }
+}
+
+
+/**
+ * Do the pfnSaveExec run.
+ *
+ * @returns VBox status code (pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3SaveDoExecRun(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+ AssertRC(pSSM->rc);
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->enmOp = SSMSTATE_SAVE_EXEC;
+ unsigned iUnit = 0;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext, iUnit++)
+ {
+ /*
+ * Not all units have a callback. Skip those that don't and
+ * make sure to keep the progress indicator up to date.
+ */
+ ssmR3ProgressByUnit(pSSM, iUnit);
+ pSSM->offEstUnitEnd += pUnit->cbGuess;
+ if (!pUnit->u.Common.pfnSaveExec)
+ {
+ pUnit->fCalled = true;
+ if (pUnit->cbGuess)
+ ssmR3ProgressByByte(pSSM, pSSM->offEstUnitEnd - pSSM->offEst);
+ continue;
+ }
+ pUnit->offStream = ssmR3StrmTell(&pSSM->Strm);
+
+ /*
+ * Check for cancellation.
+ */
+ if (RT_UNLIKELY(ASMAtomicUoReadU32(&(pSSM)->fCancelled) == SSMHANDLE_CANCELLED))
+ {
+ LogRel(("SSM: Cancelled!\n"));
+ AssertRC(pSSM->rc);
+ return pSSM->rc = VERR_SSM_CANCELLED;
+ }
+
+ /*
+ * Write data unit header
+ */
+ SSMFILEUNITHDRV2 UnitHdr;
+ memcpy(&UnitHdr.szMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(UnitHdr.szMagic));
+ UnitHdr.offStream = pUnit->offStream;
+ UnitHdr.u32CurStreamCRC = ssmR3StrmCurCRC(&pSSM->Strm);
+ UnitHdr.u32CRC = 0;
+ UnitHdr.u32Version = pUnit->u32Version;
+ UnitHdr.u32Instance = pUnit->u32Instance;
+ UnitHdr.u32Pass = SSM_PASS_FINAL;
+ UnitHdr.fFlags = 0;
+ UnitHdr.cbName = (uint32_t)pUnit->cchName + 1;
+ memcpy(&UnitHdr.szName[0], &pUnit->szName[0], UnitHdr.cbName);
+ UnitHdr.u32CRC = RTCrc32(&UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ Log(("SSM: Unit at %#9llx: '%s', instance %u, pass %#x, version %u\n",
+ UnitHdr.offStream, UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Pass, UnitHdr.u32Version));
+ int rc = ssmR3StrmWrite(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed to write unit header. rc=%Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ /*
+ * Call the execute handler.
+ */
+ ssmR3DataWriteBegin(pSSM);
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnSaveExec(pUnit->u.Dev.pDevIns, pSSM);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnSaveExec(pUnit->u.Drv.pDrvIns, pSSM);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnSaveExec(pUnit->u.Usb.pUsbIns, pSSM);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnSaveExec(pVM, pSSM);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnSaveExec(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ else
+ rc = ssmR3DataFlushBuffer(pSSM); /* will return SSMHANDLE::rc if it is set */
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Execute save failed with rc=%Rrc for data unit '%s'/#%u.\n", rc, pUnit->szName, pUnit->u32Instance));
+ return rc;
+ }
+
+ /*
+ * Write the termination record and flush the compression stream.
+ */
+ SSMRECTERM TermRec;
+ TermRec.u8TypeAndFlags = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_TERM;
+ TermRec.cbRec = sizeof(TermRec) - 2;
+ if (pSSM->Strm.fChecksummed)
+ {
+ TermRec.fFlags = SSMRECTERM_FLAGS_CRC32;
+ TermRec.u32StreamCRC = RTCrc32Finish(RTCrc32Process(ssmR3StrmCurCRC(&pSSM->Strm), &TermRec, 2));
+ }
+ else
+ {
+ TermRec.fFlags = 0;
+ TermRec.u32StreamCRC = 0;
+ }
+ TermRec.cbUnit = pSSM->offUnit + sizeof(TermRec);
+ rc = ssmR3DataWriteRaw(pSSM, &TermRec, sizeof(TermRec));
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DataWriteFinish(pSSM);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed terminating unit: %Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ /*
+ * Advance the progress indicator to the end of the current unit.
+ */
+ ssmR3ProgressByByte(pSSM, pSSM->offEstUnitEnd - pSSM->offEst);
+ } /* for each unit */
+ ssmR3ProgressByUnit(pSSM, pVM->ssm.s.cUnits);
+
+ /* (the progress indicator should now stand at 98 or 99%, pending the 'done' part) */
+ AssertMsg( pSSM->uPercent == 101 - pSSM->uPercentDone
+ || pSSM->uPercent == 100 - pSSM->uPercentDone,
+ ("%d\n", pSSM->uPercent));
+ return VINF_SUCCESS;
+}
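+
+/* Per-unit stream layout produced by the loop above (editor's illustrative
+ * sketch, not part of the original code):
+ *
+ *   SSMFILEUNITHDRV2 - magic, stream offset, CRCs, version, instance,
+ *                      pass (SSM_PASS_FINAL here) and the unit name.
+ *   data records     - whatever the unit's pfnSaveExec callback writes,
+ *                      stored as RAW / RAW_LZF / RAW_ZERO records.
+ *   SSMRECTERM       - terminator carrying the unit size and stream CRC.
+ */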
+
+
+/**
+ * Do the pfnSavePrep run.
+ *
+ * @returns VBox status code (pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3SaveDoPrepRun(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+ Assert(RT_SUCCESS(pSSM->rc));
+ pSSM->enmOp = SSMSTATE_SAVE_PREP;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if (pUnit->u.Common.pfnSavePrep)
+ {
+ int rc;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnSavePrep(pUnit->u.Dev.pDevIns, pSSM);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnSavePrep(pUnit->u.Drv.pDrvIns, pSSM);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnSavePrep(pUnit->u.Usb.pUsbIns, pSSM);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnSavePrep(pVM, pSSM);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnSavePrep(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ else
+ rc = pSSM->rc;
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Prepare save failed with rc=%Rrc for data unit '%s.\n", rc, pUnit->szName));
+ return rc;
+ }
+ }
+
+ pSSM->cbEstTotal += pUnit->cbGuess;
+ }
+
+ /*
+ * Work the progress indicator if we got one.
+ */
+ if (pSSM->pfnProgress)
+ pSSM->pfnProgress(pVM->pUVM, pSSM->uPercentPrepare + pSSM->uPercentLive - 1, pSSM->pvUser);
+ pSSM->uPercent = pSSM->uPercentPrepare + pSSM->uPercentLive;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSMR3Save and SSMR3LiveSave.
+ *
+ * @returns VBox status code (no need to check pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The state handle.
+ *
+ * @thread EMT(0)
+ */
+static int ssmR3SaveDoCommon(PVM pVM, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Do the work.
+ */
+ int rc = ssmR3SaveDoPrepRun(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = ssmR3SaveDoExecRun(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = ssmR3SaveDoFinalization(pVM, pSSM);
+ }
+ Assert(pSSM->rc == rc);
+ int rc2 = ssmR3SaveDoDoneRun(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = rc2;
+
+ return rc;
+}
+
+
+/**
+ * Saves the rest of the state on EMT0.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The SSM handle returned by SSMR3LiveSave.
+ *
+ * @thread EMT(0), as asserted below.
+ */
+VMMR3_INT_DECL(int) SSMR3LiveDoStep2(PSSMHANDLE pSSM)
+{
+ LogFlow(("SSMR3LiveDoStep2: pSSM=%p\n", pSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pSSM, VERR_INVALID_POINTER);
+ PVM pVM = pSSM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT0(pVM);
+ AssertMsgReturn( pSSM->enmAfter == SSMAFTER_DESTROY
+ || pSSM->enmAfter == SSMAFTER_CONTINUE
+ || pSSM->enmAfter == SSMAFTER_TELEPORT,
+ ("%d\n", pSSM->enmAfter),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pSSM->enmOp == SSMSTATE_LIVE_STEP2, ("%d\n", pSSM->enmOp), VERR_INVALID_STATE);
+ AssertRCReturn(pSSM->rc, pSSM->rc);
+
+ /*
+ * Join paths with VMMR3Save.
+ */
+ return ssmR3SaveDoCommon(pVM, pSSM);
+}
+
+
+/**
+ * Writes the file header and clears the per-unit data.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The SSM handle.
+ */
+static int ssmR3WriteHeaderAndClearPerUnitData(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Write the header.
+ */
+ SSMFILEHDR FileHdr;
+ memcpy(&FileHdr.szMagic, SSMFILEHDR_MAGIC_V2_0, sizeof(FileHdr.szMagic));
+ FileHdr.u16VerMajor = VBOX_VERSION_MAJOR;
+ FileHdr.u16VerMinor = VBOX_VERSION_MINOR;
+ FileHdr.u32VerBuild = VBOX_VERSION_BUILD;
+ FileHdr.u32SvnRev = VMMGetSvnRev();
+ FileHdr.cHostBits = HC_ARCH_BITS;
+ FileHdr.cbGCPhys = sizeof(RTGCPHYS);
+ FileHdr.cbGCPtr = sizeof(RTGCPTR);
+ FileHdr.u8Reserved = 0;
+ FileHdr.cUnits = pVM->ssm.s.cUnits;
+ FileHdr.fFlags = SSMFILEHDR_FLAGS_STREAM_CRC32;
+ if (pSSM->fLiveSave)
+ FileHdr.fFlags |= SSMFILEHDR_FLAGS_STREAM_LIVE_SAVE;
+ FileHdr.cbMaxDecompr = RT_SIZEOFMEMB(SSMHANDLE, u.Read.abDataBuffer);
+ FileHdr.u32CRC = 0;
+ FileHdr.u32CRC = RTCrc32(&FileHdr, sizeof(FileHdr));
+ int rc = ssmR3StrmWrite(&pSSM->Strm, &FileHdr, sizeof(FileHdr));
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Clear the per unit flags and offsets.
+ */
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ pUnit->fCalled = false;
+ pUnit->offStream = RTFOFF_MIN;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates a new saved state file.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pszFilename The name of the file. NULL if pStreamOps is
+ * used.
+ * @param pStreamOps The stream methods. NULL if pszFilename is
+ * used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param enmAfter What to do afterwards.
+ * @param pfnProgress The progress callback.
+ * @param pvProgressUser The progress callback user argument.
+ * @param ppSSM Where to return the pointer to the saved state
+ * handle upon successful return. Free it using
+ * RTMemFree after closing the stream.
+ */
+static int ssmR3SaveDoCreateFile(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
+{
+ PSSMHANDLE pSSM = (PSSMHANDLE)RTMemAllocZ(sizeof(*pSSM));
+ if (!pSSM)
+ return VERR_NO_MEMORY;
+
+ pSSM->pVM = pVM;
+ pSSM->enmOp = SSMSTATE_INVALID;
+ pSSM->enmAfter = enmAfter;
+ pSSM->fCancelled = SSMHANDLE_OK;
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->cbUnitLeftV1 = 0;
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+ pSSM->fLiveSave = false;
+ pSSM->pfnProgress = pfnProgress;
+ pSSM->pvUser = pvProgressUser;
+ pSSM->uPercent = 0;
+ pSSM->offEstProgress = 0;
+ pSSM->cbEstTotal = 0;
+ pSSM->offEst = 0;
+ pSSM->offEstUnitEnd = 0;
+ pSSM->uPercentLive = 0;
+ pSSM->uPercentPrepare = 0;
+ pSSM->uPercentDone = 0;
+ pSSM->uReportedLivePercent = 0;
+ pSSM->pszFilename = pszFilename;
+ pSSM->u.Write.offDataBuffer = 0;
+ pSSM->u.Write.cMsMaxDowntime = UINT32_MAX;
+
+ int rc;
+ if (pStreamOps)
+ rc = ssmR3StrmInit(&pSSM->Strm, pStreamOps, pvStreamOpsUser, true /*fWrite*/, true /*fChecksummed*/, 8 /*cBuffers*/);
+ else
+ rc = ssmR3StrmOpenFile(&pSSM->Strm, pszFilename, true /*fWrite*/, true /*fChecksummed*/, 8 /*cBuffers*/);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed to create save state file '%s', rc=%Rrc.\n", pszFilename, rc));
+ RTMemFree(pSSM);
+ return rc;
+ }
+
+ *ppSSM = pSSM;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Start VM save operation.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszFilename Name of the file to save the state in. NULL if pStreamOps is used.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param enmAfter What is planned after a successful save operation.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvUser User argument for the progress callback.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) SSMR3Save(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvUser)
+{
+ LogFlow(("SSMR3Save: pszFilename=%p:{%s} enmAfter=%d pfnProgress=%p pvUser=%p\n", pszFilename, pszFilename, enmAfter, pfnProgress, pvUser));
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn( enmAfter == SSMAFTER_DESTROY
+ || enmAfter == SSMAFTER_CONTINUE,
+ ("%d\n", enmAfter),
+ VERR_INVALID_PARAMETER);
+
+ AssertReturn(!pszFilename != !pStreamOps, VERR_INVALID_PARAMETER);
+ if (pStreamOps)
+ {
+ AssertReturn(pStreamOps->u32Version == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->u32EndVersion == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->pfnWrite, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnRead, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSeek, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnTell, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSize, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnClose, VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Create the saved state file and handle.
+ *
+ * Note that there is quite some work to do before and after executing the
+ * saving, so we reserve 20% for the prepare period and 2% for the 'Done' period.
+ */
+ PSSMHANDLE pSSM;
+ int rc = ssmR3SaveDoCreateFile(pVM, pszFilename, pStreamOps, pvStreamOpsUser,
+ enmAfter, pfnProgress, pvUser, &pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ pSSM->uPercentLive = 0;
+ pSSM->uPercentPrepare = 20;
+ pSSM->uPercentDone = 2;
+ pSSM->fLiveSave = false;
+
+ /*
+ * Write the saved state stream header and join paths with
+ * the other save methods for the rest of the job.
+ */
+ Log(("SSM: Starting state save to file '%s'...\n", pszFilename));
+ ssmR3StrmStartIoThread(&pSSM->Strm);
+ rc = ssmR3WriteHeaderAndClearPerUnitData(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ ssmR3SetCancellable(pVM, pSSM, true);
+ ssmR3SaveDoCommon(pVM, pSSM);
+ }
+
+ return ssmR3SaveDoClose(pVM, pSSM);
+}
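+
+/* Illustrative caller sketch (editor's example, not part of the original
+ * code; the file name is made up and error handling is elided):
+ *
+ *   int rc = SSMR3Save(pVM, "/tmp/mystate.sav", NULL, NULL,
+ *                      SSMAFTER_CONTINUE, NULL, NULL);
+ *   if (RT_FAILURE(rc))
+ *       LogRel(("Save failed: %Rrc\n", rc));
+ */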
+
+
+/**
+ * Used by PGM to report the completion percentage of the live stage during the
+ * vote run.
+ *
+ * @param pSSM The saved state handle.
+ * @param uPercent The completion percentage.
+ */
+VMMR3DECL(void) SSMR3HandleReportLivePercent(PSSMHANDLE pSSM, unsigned uPercent)
+{
+ AssertMsgReturnVoid(pSSM->enmOp == SSMSTATE_LIVE_VOTE, ("%d\n", pSSM->enmOp));
+ AssertReturnVoid(uPercent <= 100);
+ if (uPercent < pSSM->uReportedLivePercent)
+ pSSM->uReportedLivePercent = uPercent;
+}
+
+
+/**
+ * Calls pfnLiveVote for all units.
+ *
+ * @returns VBox status code (no need to check pSSM->rc).
+ * @retval VINF_SUCCESS if we can pass on to step 2.
+ * @retval VINF_SSM_VOTE_FOR_ANOTHER_PASS if we need another pass.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uPass The current pass.
+ */
+static int ssmR3LiveDoVoteRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ int rcRet = VINF_SUCCESS;
+ AssertRC(pSSM->rc);
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->enmOp = SSMSTATE_LIVE_VOTE;
+
+ unsigned uPrevPercent = pSSM->uReportedLivePercent;
+ pSSM->uReportedLivePercent = 101;
+
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if ( pUnit->u.Common.pfnLiveVote
+ && !pUnit->fDoneLive)
+ {
+ int rc;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLiveVote(pUnit->u.Dev.pDevIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLiveVote(pUnit->u.Drv.pDrvIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLiveVote(pUnit->u.Usb.pUsbIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLiveVote(pVM, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLiveVote(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser, uPass);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ Assert(pSSM->rc == VINF_SUCCESS);
+ if (rc != VINF_SUCCESS)
+ {
+ if (rc == VINF_SSM_VOTE_FOR_ANOTHER_PASS)
+ {
+ Log(("ssmR3DoLiveVoteRun: '%s'/#%u -> VINF_SSM_VOTE_FOR_ANOTHER_PASS (pass=%u)\n", pUnit->szName, pUnit->u32Instance, uPass));
+ rcRet = VINF_SSM_VOTE_FOR_ANOTHER_PASS;
+ }
+ else if (rc == VINF_SSM_VOTE_DONE_DONT_CALL_AGAIN)
+ {
+ pUnit->fDoneLive = true;
+ Log(("ssmR3DoLiveVoteRun: '%s'/#%u -> VINF_SSM_VOTE_DONE_DONT_CALL_AGAIN (pass=%u)\n", pUnit->szName, pUnit->u32Instance, uPass));
+ }
+ else
+ {
+ /*
+ * rc is usually VERR_SSM_VOTE_FOR_GIVING_UP here, but we allow
+ * other status codes for better user feedback. However, no
+ * other non-error status is allowed.
+ */
+ LogRel(("SSM: Error - '%s'/#%u voted %Rrc! (pass=%u)\n", pUnit->szName, pUnit->u32Instance, rc, uPass));
+ AssertMsgReturn(RT_FAILURE(rc), ("%Rrc; '%s'\n", rc, pUnit->szName), pSSM->rc = VERR_IPE_UNEXPECTED_INFO_STATUS);
+ return pSSM->rc = rc;
+ }
+ }
+ }
+ }
+ if (rcRet == VINF_SUCCESS)
+ {
+ LogRel(("SSM: Step 1 completed after pass %u.\n", uPass));
+ pSSM->uReportedLivePercent = 100;
+ }
+ else
+ {
+ /*
+ * Work the progress callback.
+ */
+ if (pSSM->uReportedLivePercent > 100)
+ pSSM->uReportedLivePercent = 0;
+ if ( pSSM->uReportedLivePercent != uPrevPercent
+ && pSSM->pfnProgress
+ && pSSM->uPercentLive)
+ {
+ long double lrdPct = (long double)pSSM->uReportedLivePercent * pSSM->uPercentLive / 100;
+ unsigned uPct = (unsigned)lrdPct;
+ if (uPct != pSSM->uPercent)
+ {
+ ssmR3LiveControlEmit(pSSM, lrdPct, uPass);
+ pSSM->uPercent = uPct;
+ pSSM->pfnProgress(pVM->pUVM, uPct, pSSM->pvUser);
+ }
+ }
+ }
+ return rcRet;
+}
+
+
+/**
+ * Calls pfnLiveExec for all units.
+ *
+ * @returns VBox status code (no need to check pSSM->rc).
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ * @param uPass The current pass.
+ */
+static int ssmR3LiveDoExecRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ AssertRC(pSSM->rc);
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->enmOp = SSMSTATE_LIVE_EXEC;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ /*
+ * Skip units without a callback (this is most).
+ */
+ if ( !pUnit->u.Common.pfnLiveExec
+ || pUnit->fDoneLive)
+ continue;
+ pUnit->offStream = ssmR3StrmTell(&pSSM->Strm);
+
+ /*
+ * Check for cancellation.
+ */
+ if (RT_UNLIKELY(ASMAtomicUoReadU32(&(pSSM)->fCancelled) == SSMHANDLE_CANCELLED))
+ {
+ LogRel(("SSM: Cancelled!\n"));
+ AssertRC(pSSM->rc);
+ return pSSM->rc = VERR_SSM_CANCELLED;
+ }
+
+ /*
+ * Write data unit header.
+ */
+ SSMFILEUNITHDRV2 UnitHdr;
+ memcpy(&UnitHdr.szMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(UnitHdr.szMagic));
+ UnitHdr.offStream = pUnit->offStream;
+ UnitHdr.u32CurStreamCRC = ssmR3StrmCurCRC(&pSSM->Strm);
+ UnitHdr.u32CRC = 0;
+ UnitHdr.u32Version = pUnit->u32Version;
+ UnitHdr.u32Instance = pUnit->u32Instance;
+ UnitHdr.u32Pass = uPass;
+ UnitHdr.fFlags = 0;
+ UnitHdr.cbName = (uint32_t)pUnit->cchName + 1;
+ memcpy(&UnitHdr.szName[0], &pUnit->szName[0], UnitHdr.cbName);
+ UnitHdr.u32CRC = RTCrc32(&UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ Log(("SSM: Unit at %#9llx: '%s', instance %u, pass %#x, version %u\n",
+ UnitHdr.offStream, UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Pass, UnitHdr.u32Version));
+ int rc = ssmR3StrmWrite(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed to write unit header. rc=%Rrc\n", rc));
+ return pSSM->rc = rc;
+ }
+
+ /*
+ * Call the execute handler.
+ */
+ ssmR3DataWriteBegin(pSSM);
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLiveExec(pUnit->u.Dev.pDevIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLiveExec(pUnit->u.Drv.pDrvIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLiveExec(pUnit->u.Usb.pUsbIns, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLiveExec(pVM, pSSM, uPass);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLiveExec(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser, uPass);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ else
+ {
+ if (rc == VINF_SSM_DONT_CALL_AGAIN)
+ pUnit->fDoneLive = true;
+ rc = ssmR3DataFlushBuffer(pSSM); /* will return SSMHANDLE::rc if it is set */
+ }
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Execute save failed with rc=%Rrc for data unit '%s'/#%u.\n", rc, pUnit->szName, pUnit->u32Instance));
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
+
+ /*
+ * Write the termination record and flush the compression stream.
+ */
+ SSMRECTERM TermRec;
+ TermRec.u8TypeAndFlags = SSM_REC_FLAGS_FIXED | SSM_REC_FLAGS_IMPORTANT | SSM_REC_TYPE_TERM;
+ TermRec.cbRec = sizeof(TermRec) - 2;
+ if (pSSM->Strm.fChecksummed)
+ {
+ TermRec.fFlags = SSMRECTERM_FLAGS_CRC32;
+ TermRec.u32StreamCRC = RTCrc32Finish(RTCrc32Process(ssmR3StrmCurCRC(&pSSM->Strm), &TermRec, 2));
+ }
+ else
+ {
+ TermRec.fFlags = 0;
+ TermRec.u32StreamCRC = 0;
+ }
+ TermRec.cbUnit = pSSM->offUnit + sizeof(TermRec);
+ rc = ssmR3DataWriteRaw(pSSM, &TermRec, sizeof(TermRec));
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DataWriteFinish(pSSM);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed terminating unit: %Rrc (pass=%u)\n", rc, uPass));
+ return pSSM->rc = rc;
+ }
+ } /* for each unit */
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements the live exec+vote loop.
+ *
+ * @returns VBox status code (no need to check pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DoLiveExecVoteLoop(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Calc the max saved state size before we should give up because of insane
+ * amounts of data.
+ */
+#define SSM_MAX_GROWTH_FILE 10000
+#define SSM_MAX_GROWTH_REMOTE 100000
+ uint64_t cbSum = 0;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ cbSum += pUnit->cbGuess;
+ uint64_t cbMax = cbSum * (pSSM->pszFilename ? SSM_MAX_GROWTH_FILE : SSM_MAX_GROWTH_REMOTE);
+ AssertLogRelMsgReturn(cbMax > cbSum, ("cbMax=%#RX64, cbSum=%#RX64\n", cbMax, cbSum), pSSM->rc = VERR_OUT_OF_RANGE);
+ if (cbMax < _1G)
+ cbMax = _1G;
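+ /* Worked example (editor's note): a VM whose units together guess
+ * cbSum = 256 MB and saves to a local file gets
+ * cbMax = 256 MB * 10000, roughly 2.5 TB; very small sums are lifted
+ * to the 1 GB floor by the check above. */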
+
+ /*
+ * The pass loop.
+ *
+ * The number of iterations is restricted to make sure the operation
+ * eventually terminates even if every unit keeps voting for another
+ * pass; the state size check below guards against runaway output meanwhile.
+ */
+#define SSM_MAX_PASSES _1M
+ for (uint32_t uPass = 0; uPass < SSM_MAX_PASSES; uPass++)
+ {
+ pVM->ssm.s.uPass = uPass;
+
+ /*
+ * Save state and vote on whether we need more passes or not.
+ */
+ int rc = ssmR3LiveDoExecRun(pVM, pSSM, uPass);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = ssmR3LiveDoVoteRun(pVM, pSSM, uPass);
+ if (rc == VINF_SUCCESS)
+ {
+ pSSM->enmOp = SSMSTATE_LIVE_STEP2;
+ return VINF_SUCCESS;
+ }
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Check that we're still within sane data amounts.
+ */
+ uint64_t cbSaved = ssmR3StrmTell(&pSSM->Strm);
+ if (cbSaved > cbMax)
+ {
+ LogRel(("SSM: Giving up: Exceeded max state size. (cbSaved=%#RX64, cbMax=%#RX64)\n", cbSaved, cbMax));
+ return pSSM->rc = VERR_SSM_STATE_GREW_TOO_BIG;
+ }
+
+ /*
+ * Check that the stream is still OK.
+ */
+ rc = ssmR3StrmCheckAndFlush(&pSSM->Strm);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ }
+
+ LogRel(("SSM: Giving up: Too many passes! (%u)\n", SSM_MAX_PASSES));
+ return pSSM->rc = VERR_SSM_TOO_MANY_PASSES;
+}
+
+
+/**
+ * Calls pfnLivePrep for all units.
+ *
+ * @returns VBox status code (no need to check pSSM->rc).
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DoLivePrepRun(PVM pVM, PSSMHANDLE pSSM)
+{
+ /*
+ * Do the prepare run.
+ */
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->enmOp = SSMSTATE_SAVE_PREP;
+ for (PSSMUNIT pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if (pUnit->u.Common.pfnLivePrep)
+ {
+ int rc;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLivePrep(pUnit->u.Dev.pDevIns, pSSM);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLivePrep(pUnit->u.Drv.pDrvIns, pSSM);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLivePrep(pUnit->u.Usb.pUsbIns, pSSM);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLivePrep(pVM, pSSM);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLivePrep(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ else
+ rc = pSSM->rc;
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Prepare save failed with rc=%Rrc for data unit '%s.\n", rc, pUnit->szName));
+ return rc;
+ }
+ }
+
+ pSSM->cbEstTotal += pUnit->cbGuess;
+ }
+
+ /*
+ * Work the progress indicator if we got one.
+ */
+ if (pSSM->pfnProgress)
+ pSSM->pfnProgress(pVM->pUVM, 2, pSSM->pvUser);
+ pSSM->uPercent = 2;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Continue a live state saving operation on the worker thread.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The SSM handle returned by SSMR3LiveSave.
+ *
+ * @thread Non-EMT thread. Will involve the EMT at the end of the operation.
+ */
+VMMR3_INT_DECL(int) SSMR3LiveDoStep1(PSSMHANDLE pSSM)
+{
+ LogFlow(("SSMR3LiveDoStep1: pSSM=%p\n", pSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pSSM, VERR_INVALID_POINTER);
+ PVM pVM = pSSM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_OTHER_THREAD(pVM);
+ AssertMsgReturn( pSSM->enmAfter == SSMAFTER_DESTROY
+ || pSSM->enmAfter == SSMAFTER_CONTINUE
+ || pSSM->enmAfter == SSMAFTER_TELEPORT,
+ ("%d\n", pSSM->enmAfter),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pSSM->enmOp == SSMSTATE_LIVE_STEP1, ("%d\n", pSSM->enmOp), VERR_INVALID_STATE);
+ AssertRCReturn(pSSM->rc, pSSM->rc);
+
+ /*
+ * Do the prep run, then the exec+vote cycle.
+ */
+ int rc = ssmR3DoLivePrepRun(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ rc = ssmR3DoLiveExecVoteLoop(pVM, pSSM);
+ return rc;
+}
+
+
+/**
+ * Start saving the live state.
+ *
+ * Call SSMR3LiveDoStep1, SSMR3LiveDoStep2 and finally SSMR3LiveDone on success.
+ * SSMR3LiveDone should be called even if SSMR3LiveDoStep1 or SSMR3LiveDoStep2
+ * fails.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param cMsMaxDowntime The maximum downtime given as milliseconds.
+ * @param pszFilename Name of the file to save the state in. This string
+ * must remain valid until SSMR3LiveDone is called.
+ * Must be NULL if pStreamOps is used.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param enmAfter What is planned after a successful save operation.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param ppSSM Where to return the saved state handle on success.
+ *
+ * @thread EMT0
+ */
+VMMR3_INT_DECL(int) SSMR3LiveSave(PVM pVM, uint32_t cMsMaxDowntime,
+ const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser,
+ PSSMHANDLE *ppSSM)
+{
+ LogFlow(("SSMR3LiveSave: cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p\n",
+ cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser));
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn( enmAfter == SSMAFTER_DESTROY
+ || enmAfter == SSMAFTER_CONTINUE
+ || enmAfter == SSMAFTER_TELEPORT,
+ ("%d\n", enmAfter),
+ VERR_INVALID_PARAMETER);
+ AssertReturn(!pszFilename != !pStreamOps, VERR_INVALID_PARAMETER);
+ if (pStreamOps)
+ {
+ AssertReturn(pStreamOps->u32Version == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->u32EndVersion == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->pfnWrite, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnRead, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSeek, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnTell, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSize, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnClose, VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Create the saved state file and handle.
+ *
+ * Note that there is work to do before and after executing the saving, so we
+ * reserve 2% each for the prepare and 'Done' periods, leaving 93% for the live stage.
+ */
+ PSSMHANDLE pSSM;
+ int rc = ssmR3SaveDoCreateFile(pVM, pszFilename, pStreamOps, pvStreamOpsUser,
+ enmAfter, pfnProgress, pvProgressUser, &pSSM);
+ if (RT_FAILURE(rc))
+ return rc;
+ pSSM->uPercentLive = 93;
+ pSSM->uPercentPrepare = 2;
+ pSSM->uPercentDone = 2;
+ pSSM->fLiveSave = true;
+ pSSM->u.Write.cMsMaxDowntime = cMsMaxDowntime;
+
+ /*
+ * Write the saved state stream header and do the prep run for live saving.
+ */
+ Log(("SSM: Starting state save to file '%s'...\n", pszFilename));
+ ssmR3StrmStartIoThread(&pSSM->Strm);
+ rc = ssmR3WriteHeaderAndClearPerUnitData(pVM, pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Return and let the requestor thread do the pfnLiveExec/Vote part
+ * via SSMR3LiveDoStep1.
+ */
+ pSSM->enmOp = SSMSTATE_LIVE_STEP1;
+ ssmR3SetCancellable(pVM, pSSM, true);
+ *ppSSM = pSSM;
+ return VINF_SUCCESS;
+ }
+ /* bail out. */
+ int rc2 = ssmR3StrmClose(&pSSM->Strm, pSSM->rc == VERR_SSM_CANCELLED);
+ RTMemFree(pSSM);
+ if (pszFilename)
+ {
+ rc2 = RTFileDelete(pszFilename);
+ AssertRC(rc2);
+ }
+ return rc;
+}
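+
+/* Illustrative call sequence for a live save (editor's sketch following the
+ * contract documented above; error handling elided, file name made up):
+ *
+ *   PSSMHANDLE pSSM;
+ *   int rc = SSMR3LiveSave(pVM, 250, "/tmp/live.sav", NULL, NULL,
+ *                          SSMAFTER_CONTINUE, NULL, NULL, &pSSM);   - EMT(0)
+ *   if (RT_SUCCESS(rc)) rc = SSMR3LiveDoStep1(pSSM);                - worker thread
+ *   if (RT_SUCCESS(rc)) rc = SSMR3LiveDoStep2(pSSM);                - EMT(0) again
+ *   rc = SSMR3LiveDone(pSSM);  - always, even after a step failure
+ */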
+
+#endif /* !SSM_STANDALONE */
+
+
+/* ... Loading and reading starts here ... */
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Closes the decompressor of a data unit.
+ *
+ * @returns pSSM->rc.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DataReadFinishV1(PSSMHANDLE pSSM)
+{
+ if (pSSM->u.Read.pZipDecompV1)
+ {
+ int rc = RTZipDecompDestroy(pSSM->u.Read.pZipDecompV1);
+ AssertRC(rc);
+ pSSM->u.Read.pZipDecompV1 = NULL;
+ }
+ return pSSM->rc;
+}
+#endif /* !SSM_STANDALONE */
+
+
+/**
+ * Callback for reading compressed data into the input buffer of the
+ * decompressor, for saved file format version 1.
+ *
+ * @returns VBox status code. Set pSSM->rc on error.
+ * @param pvSSM The SSM handle.
+ * @param pvBuf Where to store the compressed data.
+ * @param cbBuf Size of the buffer.
+ * @param pcbRead Number of bytes actually stored in the buffer.
+ */
+static DECLCALLBACK(int) ssmR3ReadInV1(void *pvSSM, void *pvBuf, size_t cbBuf, size_t *pcbRead)
+{
+ PSSMHANDLE pSSM = (PSSMHANDLE)pvSSM;
+ size_t cbRead = cbBuf;
+ if (pSSM->cbUnitLeftV1 < cbBuf)
+ cbRead = (size_t)pSSM->cbUnitLeftV1;
+ if (cbRead)
+ {
+ //Log2(("ssmR3ReadInV1: %#010llx cbBug=%#x cbRead=%#x\n", ssmR3StrmTell(&pSSM->Strm), cbBuf, cbRead));
+ int rc = ssmR3StrmRead(&pSSM->Strm, pvBuf, cbRead);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->cbUnitLeftV1 -= cbRead;
+ if (pcbRead)
+ *pcbRead = cbRead;
+ ssmR3ProgressByByte(pSSM, cbRead);
+ return VINF_SUCCESS;
+ }
+ return pSSM->rc = rc;
+ }
+
+ if (pSSM->enmAfter != SSMAFTER_DEBUG_IT)
+ AssertMsgFailed(("SSM: attempted reading more than the unit!\n"));
+ return pSSM->rc = VERR_SSM_LOADED_TOO_MUCH;
+}
+
+
+/**
+ * Internal read worker for reading data from a version 1 unit.
+ *
+ * @returns VBox status code, pSSM->rc is set on error.
+ *
+ * @param pSSM The saved state handle.
+ * @param pvBuf Where to store the read data.
+ * @param cbBuf Number of bytes to read.
+ */
+static int ssmR3DataReadV1(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
+{
+ /*
+ * Open the decompressor on the first read.
+ */
+ if (!pSSM->u.Read.pZipDecompV1)
+ {
+ pSSM->rc = RTZipDecompCreate(&pSSM->u.Read.pZipDecompV1, pSSM, ssmR3ReadInV1);
+ if (RT_FAILURE(pSSM->rc))
+ return pSSM->rc;
+ }
+
+ /*
+ * Do the requested read.
+ */
+ int rc = pSSM->rc = RTZipDecompress(pSSM->u.Read.pZipDecompV1, pvBuf, cbBuf, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ Log2(("ssmR3DataRead: pvBuf=%p cbBuf=%#x offUnit=%#llx %.*Rhxs%s\n", pvBuf, cbBuf, pSSM->offUnit, RT_MIN(cbBuf, SSM_LOG_BYTES), pvBuf, cbBuf > SSM_LOG_BYTES ? "..." : ""));
+ pSSM->offUnit += cbBuf;
+ pSSM->offUnitUser += cbBuf;
+ return VINF_SUCCESS;
+ }
+ AssertMsgFailed(("rc=%Rrc cbBuf=%#x\n", rc, cbBuf));
+ return rc;
+}
+
+
+/**
+ * Initializes the data read state for a version 2 data unit.
+ *
+ * @param pSSM The saved state handle.
+ */
+static void ssmR3DataReadBeginV2(PSSMHANDLE pSSM)
+{
+ Assert(!pSSM->u.Read.cbDataBuffer || pSSM->u.Read.cbDataBuffer == pSSM->u.Read.offDataBuffer);
+ Assert(!pSSM->u.Read.cbRecLeft);
+
+ pSSM->offUnit = 0;
+ pSSM->offUnitUser = 0;
+ pSSM->u.Read.cbRecLeft = 0;
+ pSSM->u.Read.cbDataBuffer = 0;
+ pSSM->u.Read.offDataBuffer = 0;
+ pSSM->u.Read.fEndOfData = false;
+ pSSM->u.Read.u8TypeAndFlags = 0;
+}
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Checks for the termination record at the end of the data unit.
+ *
+ * pSSM->rc will be set on error.
+ *
+ * @returns pSSM->rc.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DataReadFinishV2(PSSMHANDLE pSSM)
+{
+ /*
+ * If we haven't encountered the end of the record, it must be the next one.
+ */
+ int rc = pSSM->rc;
+ if ( !pSSM->u.Read.fEndOfData
+ && RT_SUCCESS(rc))
+ {
+ if ( pSSM->u.Read.cbDataBuffer != pSSM->u.Read.offDataBuffer
+ && pSSM->u.Read.cbDataBuffer > 0)
+ {
+ LogRel(("SSM: At least %#x bytes left to read\n", pSSM->u.Read.cbDataBuffer - pSSM->u.Read.offDataBuffer));
+ rc = VERR_SSM_LOADED_TOO_LITTLE;
+ }
+ else
+ {
+ rc = ssmR3DataReadRecHdrV2(pSSM);
+ if ( RT_SUCCESS(rc)
+ && !pSSM->u.Read.fEndOfData)
+ {
+ LogRel(("SSM: At least %#x bytes left to read\n", pSSM->u.Read.cbDataBuffer));
+ rc = VERR_SSM_LOADED_TOO_LITTLE;
+ AssertFailed();
+ }
+ }
+ pSSM->rc = rc;
+ }
+ return rc;
+}
+#endif /* !SSM_STANDALONE */
+
+
+/**
+ * Read raw record bytes, work the progress indicator and unit offset.
+ *
+ * @returns VBox status code. Does NOT set pSSM->rc.
+ * @param pSSM The saved state handle.
+ * @param pvBuf Where to put the bits
+ * @param cbToRead How many bytes to read.
+ */
+DECLINLINE(int) ssmR3DataReadV2Raw(PSSMHANDLE pSSM, void *pvBuf, size_t cbToRead)
+{
+ int rc = ssmR3StrmRead(&pSSM->Strm, pvBuf, cbToRead);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->offUnit += cbToRead;
+ ssmR3ProgressByByte(pSSM, cbToRead);
+ return VINF_SUCCESS;
+ }
+
+ if (rc == VERR_SSM_CANCELLED)
+ return rc;
+
+ if (pSSM->enmAfter != SSMAFTER_DEBUG_IT && rc == VERR_EOF)
+ AssertMsgFailedReturn(("SSM: attempted reading more than the unit! rc=%Rrc\n", rc), VERR_SSM_LOADED_TOO_MUCH);
+ return VERR_SSM_STREAM_ERROR;
+}
+
+
+/**
+ * Reads and checks the LZF "header".
+ *
+ * @returns VBox status code. Sets pSSM->rc on error.
+ * @param pSSM The saved state handle.
+ * @param pcbDecompr Where to store the size of the decompressed data.
+ */
+DECLINLINE(int) ssmR3DataReadV2RawLzfHdr(PSSMHANDLE pSSM, uint32_t *pcbDecompr)
+{
+ *pcbDecompr = 0; /* shuts up gcc. */
+ AssertLogRelMsgReturn( pSSM->u.Read.cbRecLeft > 1
+ && pSSM->u.Read.cbRecLeft <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abComprBuffer) + 2,
+ ("%#x\n", pSSM->u.Read.cbRecLeft),
+ pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
+
+ uint8_t cKB;
+ int rc = ssmR3DataReadV2Raw(pSSM, &cKB, 1);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pSSM->u.Read.cbRecLeft -= sizeof(cKB);
+
+ uint32_t cbDecompr = (uint32_t)cKB * _1K;
+ AssertLogRelMsgReturn( cbDecompr >= pSSM->u.Read.cbRecLeft
+ && cbDecompr <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abDataBuffer),
+ ("%#x\n", cbDecompr),
+ pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
+
+ *pcbDecompr = cbDecompr;
+ return VINF_SUCCESS;
+}
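+
+/* Worked example (editor's note): a compressed record covering 4 KB of data
+ * is stored as cKB = 4 followed by the LZF payload, so cbDecompr becomes
+ * 4 * _1K = 4096 here, while pSSM->u.Read.cbRecLeft holds the remaining
+ * compressed byte count for ssmR3DataReadV2RawLzf below. */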
+
+
+/**
+ * Reads an LZF block from the stream and decompresses into the specified
+ * buffer.
+ *
+ * @returns VBox status code. Sets pSSM->rc on error.
+ * @param pSSM The saved state handle.
+ * @param pvDst Pointer to the output buffer.
+ * @param cbDecompr The size of the decompressed data.
+ */
+static int ssmR3DataReadV2RawLzf(PSSMHANDLE pSSM, void *pvDst, size_t cbDecompr)
+{
+ int rc;
+ uint32_t cbCompr = pSSM->u.Read.cbRecLeft;
+ pSSM->u.Read.cbRecLeft = 0;
+
+ /*
+ * Try use the stream buffer directly to avoid copying things around.
+ */
+ uint8_t const *pb = ssmR3StrmReadDirect(&pSSM->Strm, cbCompr);
+ if (pb)
+ {
+ pSSM->offUnit += cbCompr;
+ ssmR3ProgressByByte(pSSM, cbCompr);
+ }
+ else
+ {
+ rc = ssmR3DataReadV2Raw(pSSM, &pSSM->u.Read.abComprBuffer[0], cbCompr);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pb = &pSSM->u.Read.abComprBuffer[0];
+ }
+
+ /*
+ * Decompress it.
+ */
+ size_t cbDstActual;
+ rc = RTZipBlockDecompress(RTZIPTYPE_LZF, 0 /*fFlags*/,
+ pb, cbCompr, NULL /*pcbSrcActual*/,
+ pvDst, cbDecompr, &cbDstActual);
+ if (RT_SUCCESS(rc))
+ {
+ AssertLogRelMsgReturn(cbDstActual == cbDecompr, ("%#x %#x\n", cbDstActual, cbDecompr), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
+ return VINF_SUCCESS;
+ }
+
+ AssertLogRelMsgFailed(("cbCompr=%#x cbDecompr=%#x rc=%Rrc\n", cbCompr, cbDecompr, rc));
+ return pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION;
+}
+
+
+/**
+ * Reads and checks the raw zero "header".
+ *
+ * @returns VBox status code. Sets pSSM->rc on error.
+ * @param pSSM The saved state handle.
+ * @param pcbZero Where to store the size of the zero data.
+ */
+DECLINLINE(int) ssmR3DataReadV2RawZeroHdr(PSSMHANDLE pSSM, uint32_t *pcbZero)
+{
+ *pcbZero = 0; /* shuts up gcc. */
+ AssertLogRelMsgReturn(pSSM->u.Read.cbRecLeft == 1, ("%#x\n", pSSM->u.Read.cbRecLeft), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
+
+ uint8_t cKB;
+ int rc = ssmR3DataReadV2Raw(pSSM, &cKB, 1);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pSSM->u.Read.cbRecLeft = 0;
+
+ uint32_t cbZero = (uint32_t)cKB * _1K;
+ AssertLogRelMsgReturn(cbZero <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abDataBuffer),
+ ("%#x\n", cbZero), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
+
+ *pcbZero = cbZero;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for reading the record header.
+ *
+ * It sets pSSM->u.Read.cbRecLeft, pSSM->u.Read.u8TypeAndFlags and
+ * pSSM->u.Read.fEndOfData. When a termination record is encountered, it will be
+ * read in full and validated, the fEndOfData indicator is set, and VINF_SUCCESS
+ * is returned.
+ *
+ * @returns VBox status code. Does not set pSSM->rc.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3DataReadRecHdrV2(PSSMHANDLE pSSM)
+{
+ AssertLogRelReturn(!pSSM->u.Read.fEndOfData, VERR_SSM_LOADED_TOO_MUCH);
+
+ /*
+ * Read the two mandatory bytes.
+ */
+ uint8_t abHdr[8];
+ int rc = ssmR3DataReadV2Raw(pSSM, abHdr, 2);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Validate the first byte and check for the termination records.
+ */
+ pSSM->u.Read.u8TypeAndFlags = abHdr[0];
+ AssertLogRelMsgReturn(SSM_REC_ARE_TYPE_AND_FLAGS_VALID(abHdr[0]), ("%#x %#x\n", abHdr[0], abHdr[1]), VERR_SSM_INTEGRITY_REC_HDR);
+ if ((abHdr[0] & SSM_REC_TYPE_MASK) == SSM_REC_TYPE_TERM)
+ {
+ pSSM->u.Read.cbRecLeft = 0;
+ pSSM->u.Read.fEndOfData = true;
+ AssertLogRelMsgReturn(abHdr[1] == sizeof(SSMRECTERM) - 2, ("%#x\n", abHdr[1]), VERR_SSM_INTEGRITY_REC_TERM);
+ AssertLogRelMsgReturn(abHdr[0] & SSM_REC_FLAGS_IMPORTANT, ("%#x\n", abHdr[0]), VERR_SSM_INTEGRITY_REC_TERM);
+
+ /* get the rest */
+ uint32_t u32StreamCRC = ssmR3StrmFinalCRC(&pSSM->Strm);
+ SSMRECTERM TermRec;
+ rc = ssmR3DataReadV2Raw(pSSM, (uint8_t *)&TermRec + 2, sizeof(SSMRECTERM) - 2);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* validate integrity */
+ AssertLogRelMsgReturn(TermRec.cbUnit == pSSM->offUnit,
+ ("cbUnit=%#llx offUnit=%#llx\n", TermRec.cbUnit, pSSM->offUnit),
+ VERR_SSM_INTEGRITY_REC_TERM);
+ AssertLogRelMsgReturn(!(TermRec.fFlags & ~SSMRECTERM_FLAGS_CRC32), ("%#x\n", TermRec.fFlags), VERR_SSM_INTEGRITY_REC_TERM);
+ if (!(TermRec.fFlags & SSMRECTERM_FLAGS_CRC32))
+ AssertLogRelMsgReturn(TermRec.u32StreamCRC == 0, ("%#x\n", TermRec.u32StreamCRC), VERR_SSM_INTEGRITY_REC_TERM);
+ else if (pSSM->Strm.fChecksummed)
+ AssertLogRelMsgReturn(TermRec.u32StreamCRC == u32StreamCRC, ("%#x, %#x\n", TermRec.u32StreamCRC, u32StreamCRC),
+ VERR_SSM_INTEGRITY_REC_TERM_CRC);
+
+ Log3(("ssmR3DataReadRecHdrV2: %08llx|%08llx: TERM\n", ssmR3StrmTell(&pSSM->Strm) - sizeof(SSMRECTERM), pSSM->offUnit));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Figure the size. The 2nd byte is encoded in UTF-8 fashion, so this
+ * can be highly enjoyable.
+ */
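+ /* Size encoding accepted by the decoder below (editor's sketch of the
+ * layout, mirroring UTF-8; x = size bits, continuation bytes 10xxxxxx):
+ * 0xxxxxxx - 7 bits, 0x00..0x7f
+ * 110xxxxx 10xxxxxx - 11 bits, up to 0x7ff
+ * 1110xxxx 10xxxxxx{2} - 16 bits, up to 0xffff
+ * 11110xxx 10xxxxxx{3} - 21 bits, up to 0x1fffff
+ * 111110xx 10xxxxxx{4} - 26 bits, up to 0x3ffffff
+ * 1111110x 10xxxxxx{5} - 31 bits, up to 0x7fffffff
+ */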
+ uint32_t cbHdr = 2;
+ uint32_t cb = abHdr[1];
+ if (!(cb & 0x80))
+ pSSM->u.Read.cbRecLeft = cb;
+ else
+ {
+ /*
+ * Need more data. Figure how much and read it.
+ */
+ if (!(cb & RT_BIT(5)))
+ cb = 2;
+ else if (!(cb & RT_BIT(4)))
+ cb = 3;
+ else if (!(cb & RT_BIT(3)))
+ cb = 4;
+ else if (!(cb & RT_BIT(2)))
+ cb = 5;
+ else if (!(cb & RT_BIT(1)))
+ cb = 6;
+ else
+ AssertLogRelMsgFailedReturn(("Invalid record size byte: %#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+ cbHdr = cb + 1;
+
+ rc = ssmR3DataReadV2Raw(pSSM, &abHdr[2], cb - 1);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Validate what we've read.
+ */
+ switch (cb)
+ {
+ case 6:
+ AssertLogRelMsgReturn((abHdr[6] & 0xc0) == 0x80, ("6/%u: %.*Rhxs\n", cb, cb + 1, &abHdr[0]), VERR_SSM_INTEGRITY_REC_HDR);
+ RT_FALL_THRU();
+ case 5:
+ AssertLogRelMsgReturn((abHdr[5] & 0xc0) == 0x80, ("5/%u: %.*Rhxs\n", cb, cb + 1, &abHdr[0]), VERR_SSM_INTEGRITY_REC_HDR);
+ RT_FALL_THRU();
+ case 4:
+ AssertLogRelMsgReturn((abHdr[4] & 0xc0) == 0x80, ("4/%u: %.*Rhxs\n", cb, cb + 1, &abHdr[0]), VERR_SSM_INTEGRITY_REC_HDR);
+ RT_FALL_THRU();
+ case 3:
+ AssertLogRelMsgReturn((abHdr[3] & 0xc0) == 0x80, ("3/%u: %.*Rhxs\n", cb, cb + 1, &abHdr[0]), VERR_SSM_INTEGRITY_REC_HDR);
+ RT_FALL_THRU();
+ case 2:
+ AssertLogRelMsgReturn((abHdr[2] & 0xc0) == 0x80, ("2/%u: %.*Rhxs\n", cb, cb + 1, &abHdr[0]), VERR_SSM_INTEGRITY_REC_HDR);
+ break;
+ default:
+ return VERR_IPE_NOT_REACHED_DEFAULT_CASE;
+ }
+
+ /*
+ * Decode it and validate the range.
+ */
+ switch (cb)
+ {
+ case 6:
+ cb = (abHdr[6] & 0x3f)
+ | ((uint32_t)(abHdr[5] & 0x3f) << 6)
+ | ((uint32_t)(abHdr[4] & 0x3f) << 12)
+ | ((uint32_t)(abHdr[3] & 0x3f) << 18)
+ | ((uint32_t)(abHdr[2] & 0x3f) << 24)
+ | ((uint32_t)(abHdr[1] & 0x01) << 30);
+ AssertLogRelMsgReturn(cb >= 0x04000000 && cb <= 0x7fffffff, ("cb=%#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+ break;
+ case 5:
+ cb = (abHdr[5] & 0x3f)
+ | ((uint32_t)(abHdr[4] & 0x3f) << 6)
+ | ((uint32_t)(abHdr[3] & 0x3f) << 12)
+ | ((uint32_t)(abHdr[2] & 0x3f) << 18)
+ | ((uint32_t)(abHdr[1] & 0x03) << 24);
+ AssertLogRelMsgReturn(cb >= 0x00200000 && cb <= 0x03ffffff, ("cb=%#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+ break;
+ case 4:
+ cb = (abHdr[4] & 0x3f)
+ | ((uint32_t)(abHdr[3] & 0x3f) << 6)
+ | ((uint32_t)(abHdr[2] & 0x3f) << 12)
+ | ((uint32_t)(abHdr[1] & 0x07) << 18);
+ AssertLogRelMsgReturn(cb >= 0x00010000 && cb <= 0x001fffff, ("cb=%#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+ break;
+ case 3:
+ cb = (abHdr[3] & 0x3f)
+ | ((uint32_t)(abHdr[2] & 0x3f) << 6)
+ | ((uint32_t)(abHdr[1] & 0x0f) << 12);
+#if 0 /* disabled to optimize buffering */
+ AssertLogRelMsgReturn(cb >= 0x00000800 && cb <= 0x0000ffff, ("cb=%#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+#endif
+ break;
+ case 2:
+ cb = (abHdr[2] & 0x3f)
+ | ((uint32_t)(abHdr[1] & 0x1f) << 6);
+#if 0 /* disabled to optimize buffering */
+ AssertLogRelMsgReturn(cb >= 0x00000080 && cb <= 0x000007ff, ("cb=%#x\n", cb), VERR_SSM_INTEGRITY_REC_HDR);
+#endif
+ break;
+ default:
+ return VERR_IPE_NOT_REACHED_DEFAULT_CASE;
+ }
+
+ pSSM->u.Read.cbRecLeft = cb;
+ }
+
+ Log3(("ssmR3DataReadRecHdrV2: %08llx|%08llx/%08x: Type=%02x fImportant=%RTbool cbHdr=%u\n",
+ ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft,
+ pSSM->u.Read.u8TypeAndFlags & SSM_REC_TYPE_MASK,
+ !!(pSSM->u.Read.u8TypeAndFlags & SSM_REC_FLAGS_IMPORTANT),
+ cbHdr
+ )); NOREF(cbHdr);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Buffer miss, do an unbuffered read.
+ *
+ * @returns VBox status code. Sets pSSM->rc on error.
+ * @param pSSM The saved state handle.
+ * @param pvBuf Where to store the read data.
+ * @param cbBuf Number of bytes to read.
+ */
+static int ssmR3DataReadUnbufferedV2(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
+{
+ void const *pvBufOrg = pvBuf; NOREF(pvBufOrg);
+ size_t const cbBufOrg = cbBuf; NOREF(cbBufOrg);
+
+ /*
+ * Copy out what we've got in the buffer.
+ */
+ uint32_t off = pSSM->u.Read.offDataBuffer;
+ int32_t cbInBuffer = pSSM->u.Read.cbDataBuffer - off;
+ Log4(("ssmR3DataReadUnbufferedV2: %08llx|%08llx/%08x/%08x: cbBuf=%#x\n", ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft, cbInBuffer, cbBufOrg));
+ if (cbInBuffer > 0)
+ {
+ uint32_t const cbToCopy = (uint32_t)cbInBuffer;
+ Assert(cbBuf > cbToCopy);
+ memcpy(pvBuf, &pSSM->u.Read.abDataBuffer[off], cbToCopy);
+ pvBuf = (uint8_t *)pvBuf + cbToCopy;
+ cbBuf -= cbToCopy;
+ pSSM->u.Read.cbDataBuffer = 0;
+ pSSM->u.Read.offDataBuffer = 0;
+ }
+
+ /*
+ * Read data.
+ */
+ do
+ {
+ /*
+ * Read the next record header if no more data.
+ */
+ if (!pSSM->u.Read.cbRecLeft)
+ {
+ int rc = ssmR3DataReadRecHdrV2(pSSM);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ }
+ AssertLogRelMsgReturn(!pSSM->u.Read.fEndOfData, ("cbBuf=%zu\n", cbBuf), pSSM->rc = VERR_SSM_LOADED_TOO_MUCH);
+
+ /*
+ * Read data from the current record.
+ */
+ uint32_t cbToRead;
+ switch (pSSM->u.Read.u8TypeAndFlags & SSM_REC_TYPE_MASK)
+ {
+ case SSM_REC_TYPE_RAW:
+ {
+ cbToRead = (uint32_t)RT_MIN(cbBuf, pSSM->u.Read.cbRecLeft);
+ int rc = ssmR3DataReadV2Raw(pSSM, pvBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pSSM->u.Read.cbRecLeft -= cbToRead;
+ break;
+ }
+
+ case SSM_REC_TYPE_RAW_LZF:
+ {
+ int rc = ssmR3DataReadV2RawLzfHdr(pSSM, &cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (cbToRead <= cbBuf)
+ {
+ rc = ssmR3DataReadV2RawLzf(pSSM, pvBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ else
+ {
+ /* The output buffer is too small, use the data buffer. */
+ rc = ssmR3DataReadV2RawLzf(pSSM, &pSSM->u.Read.abDataBuffer[0], cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ pSSM->u.Read.cbDataBuffer = cbToRead;
+ cbToRead = (uint32_t)cbBuf;
+ pSSM->u.Read.offDataBuffer = cbToRead;
+ memcpy(pvBuf, &pSSM->u.Read.abDataBuffer[0], cbToRead);
+ }
+ break;
+ }
+
+ case SSM_REC_TYPE_RAW_ZERO:
+ {
+ int rc = ssmR3DataReadV2RawZeroHdr(pSSM, &cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (cbToRead > cbBuf)
+ {
+ /* Spill the remainder into the data buffer. */
+ memset(&pSSM->u.Read.abDataBuffer[0], 0, cbToRead - cbBuf);
+ pSSM->u.Read.cbDataBuffer = cbToRead - (uint32_t)cbBuf;
+ pSSM->u.Read.offDataBuffer = 0;
+ cbToRead = (uint32_t)cbBuf;
+ }
+ memset(pvBuf, 0, cbToRead);
+ break;
+ }
+
+ default:
+ AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), pSSM->rc = VERR_SSM_BAD_REC_TYPE);
+ }
+
+ pSSM->offUnitUser += cbToRead;
+ cbBuf -= cbToRead;
+ pvBuf = (uint8_t *)pvBuf + cbToRead;
+ } while (cbBuf > 0);
+
+ Log4(("ssmR3DataReadUnBufferedV2: %08llx|%08llx/%08x/%08x: cbBuf=%#x %.*Rhxs%s\n",
+ ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft, 0, cbBufOrg, RT_MIN(SSM_LOG_BYTES, cbBufOrg), pvBufOrg, cbBufOrg > SSM_LOG_BYTES ? "..." : ""));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Buffer miss, do a buffered read.
+ *
+ * @returns VBox status code. Sets pSSM->rc on error.
+ *
+ * @param pSSM The saved state handle.
+ * @param pvBuf Where to store the read data.
+ * @param cbBuf Number of bytes to read.
+ */
+static int ssmR3DataReadBufferedV2(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
+{
+ void const *pvBufOrg = pvBuf; NOREF(pvBufOrg);
+ size_t const cbBufOrg = cbBuf; NOREF(cbBufOrg);
+
+ /*
+ * Copy out what we've got in the buffer.
+ */
+ uint32_t off = pSSM->u.Read.offDataBuffer;
+ int32_t cbInBuffer = pSSM->u.Read.cbDataBuffer - off;
+ Log4(("ssmR3DataReadBufferedV2: %08llx|%08llx/%08x/%08x: cbBuf=%#x\n", ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft, cbInBuffer, cbBufOrg));
+ if (cbInBuffer > 0)
+ {
+ uint32_t const cbToCopy = (uint32_t)cbInBuffer;
+ Assert(cbBuf > cbToCopy);
+ memcpy(pvBuf, &pSSM->u.Read.abDataBuffer[off], cbToCopy);
+ pvBuf = (uint8_t *)pvBuf + cbToCopy;
+ cbBuf -= cbToCopy;
+ pSSM->offUnitUser += cbToCopy;
+ pSSM->u.Read.cbDataBuffer = 0;
+ pSSM->u.Read.offDataBuffer = 0;
+ }
+
+ /*
+ * Buffer more data.
+ */
+ do
+ {
+ /*
+ * Read the next record header if no more data.
+ */
+ if (!pSSM->u.Read.cbRecLeft)
+ {
+ int rc = ssmR3DataReadRecHdrV2(pSSM);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ }
+ AssertLogRelMsgReturn(!pSSM->u.Read.fEndOfData, ("cbBuf=%zu\n", cbBuf), pSSM->rc = VERR_SSM_LOADED_TOO_MUCH);
+
+ /*
+ * Read data from the current record.
+ * LATER: optimize by reading directly into the output buffer for some cases.
+ */
+ uint32_t cbToRead;
+ switch (pSSM->u.Read.u8TypeAndFlags & SSM_REC_TYPE_MASK)
+ {
+ case SSM_REC_TYPE_RAW:
+ {
+ cbToRead = RT_MIN(sizeof(pSSM->u.Read.abDataBuffer), pSSM->u.Read.cbRecLeft);
+ int rc = ssmR3DataReadV2Raw(pSSM, &pSSM->u.Read.abDataBuffer[0], cbToRead);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pSSM->u.Read.cbRecLeft -= cbToRead;
+ pSSM->u.Read.cbDataBuffer = cbToRead;
+ break;
+ }
+
+ case SSM_REC_TYPE_RAW_LZF:
+ {
+ int rc = ssmR3DataReadV2RawLzfHdr(pSSM, &cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = ssmR3DataReadV2RawLzf(pSSM, &pSSM->u.Read.abDataBuffer[0], cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ pSSM->u.Read.cbDataBuffer = cbToRead;
+ break;
+ }
+
+ case SSM_REC_TYPE_RAW_ZERO:
+ {
+ int rc = ssmR3DataReadV2RawZeroHdr(pSSM, &cbToRead);
+ if (RT_FAILURE(rc))
+ return rc;
+ memset(&pSSM->u.Read.abDataBuffer[0], 0, cbToRead);
+ pSSM->u.Read.cbDataBuffer = cbToRead;
+ break;
+ }
+
+ default:
+ AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), pSSM->rc = VERR_SSM_BAD_REC_TYPE);
+ }
+ /*pSSM->u.Read.offDataBuffer = 0;*/
+
+ /*
+ * Copy data from the buffer.
+ */
+ uint32_t cbToCopy = (uint32_t)RT_MIN(cbBuf, cbToRead);
+ memcpy(pvBuf, &pSSM->u.Read.abDataBuffer[0], cbToCopy);
+ cbBuf -= cbToCopy;
+ pvBuf = (uint8_t *)pvBuf + cbToCopy;
+ pSSM->offUnitUser += cbToCopy;
+ pSSM->u.Read.offDataBuffer = cbToCopy;
+ } while (cbBuf > 0);
+
+ Log4(("ssmR3DataReadBufferedV2: %08llx|%08llx/%08x/%08x: cbBuf=%#x %.*Rhxs%s\n",
+ ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft, pSSM->u.Read.cbDataBuffer - pSSM->u.Read.offDataBuffer,
+ cbBufOrg, RT_MIN(SSM_LOG_BYTES, cbBufOrg), pvBufOrg, cbBufOrg > SSM_LOG_BYTES ? "..." : ""));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Inlined worker that handles format checks and buffered reads.
+ *
+ * @returns VBox status code (pSSM->rc on earlier failure).
+ * @param pSSM The saved state handle.
+ * @param pvBuf Where to store the read data.
+ * @param cbBuf Number of bytes to read.
+ */
+DECLINLINE(int) ssmR3DataRead(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
+{
+ /*
+ * Fend off previous errors and V1 data units.
+ */
+ if (RT_SUCCESS(pSSM->rc))
+ {
+ if (RT_LIKELY(pSSM->u.Read.uFmtVerMajor != 1))
+ {
+ /*
+ * Check if the requested data is buffered.
+ */
+ uint32_t off = pSSM->u.Read.offDataBuffer;
+ if ( off + cbBuf > pSSM->u.Read.cbDataBuffer
+ || cbBuf > sizeof(pSSM->u.Read.abDataBuffer))
+ {
+ if (cbBuf <= sizeof(pSSM->u.Read.abDataBuffer) / 8)
+ return ssmR3DataReadBufferedV2(pSSM, pvBuf, cbBuf);
+ return ssmR3DataReadUnbufferedV2(pSSM, pvBuf, cbBuf);
+ }
+
+ memcpy(pvBuf, &pSSM->u.Read.abDataBuffer[off], cbBuf);
+ pSSM->u.Read.offDataBuffer = off + (uint32_t)cbBuf;
+ pSSM->offUnitUser += cbBuf;
+ Log4((cbBuf
+ ? "ssmR3DataRead: %08llx|%08llx/%08x/%08x: cbBuf=%#x %.*Rhxs%s\n"
+ : "ssmR3DataRead: %08llx|%08llx/%08x/%08x: cbBuf=%#x\n",
+ ssmR3StrmTell(&pSSM->Strm), pSSM->offUnit, pSSM->u.Read.cbRecLeft, pSSM->u.Read.cbDataBuffer - pSSM->u.Read.offDataBuffer,
+ cbBuf, RT_MIN(SSM_LOG_BYTES, cbBuf), pvBuf, cbBuf > SSM_LOG_BYTES ? "..." : ""));
+
+ return VINF_SUCCESS;
+ }
+ return ssmR3DataReadV1(pSSM, pvBuf, cbBuf);
+ }
+ return pSSM->rc;
+}
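+
+/* Path selection above, summarized (editor's note): a request that fits
+ * entirely in abDataBuffer is served with a plain memcpy; on a miss, small
+ * reads (at most 1/8 of the buffer) refill via ssmR3DataReadBufferedV2 to
+ * amortize record header parsing, while larger reads go through
+ * ssmR3DataReadUnbufferedV2 straight into the caller's buffer. */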
+
+
+/**
+ * Gets a structure.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pvStruct The structure address.
+ * @param paFields The array of structure fields descriptions.
+ * The array must be terminated by a SSMFIELD_ENTRY_TERM().
+ */
+VMMR3DECL(int) SSMR3GetStruct(PSSMHANDLE pSSM, void *pvStruct, PCSSMFIELD paFields)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ AssertPtr(pvStruct);
+ AssertPtr(paFields);
+
+ /* begin marker. */
+ uint32_t u32Magic;
+ int rc = SSMR3GetU32(pSSM, &u32Magic);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
+
+ /* get the fields */
+ for (PCSSMFIELD pCur = paFields;
+ pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
+ pCur++)
+ {
+ if (pCur->uFirstVer <= pSSM->u.Read.uCurUnitVer)
+ {
+ uint8_t *pbField = (uint8_t *)pvStruct + pCur->off;
+ switch ((uintptr_t)pCur->pfnGetPutOrTransformer)
+ {
+ case SSMFIELDTRANS_NO_TRANSFORMATION:
+ rc = ssmR3DataRead(pSSM, pbField, pCur->cb);
+ break;
+
+ case SSMFIELDTRANS_GCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetGCPtr(pSSM, (PRTGCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_GCPHYS:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetGCPhys(pSSM, (PRTGCPHYS)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetRCPtr(pSSM, (PRTRCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR_ARRAY:
+ {
+ uint32_t const cEntries = pCur->cb / sizeof(RTRCPTR);
+ AssertMsgBreakStmt(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = SSMR3GetRCPtr(pSSM, &((PRTRCPTR)pbField)[i]);
+ break;
+ }
+
+ default:
+ AssertMsgFailedBreakStmt(("%#x\n", pCur->pfnGetPutOrTransformer), rc = VERR_SSM_FIELD_COMPLEX);
+ }
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
+ }
+ }
+
+ /* end marker */
+ rc = SSMR3GetU32(pSSM, &u32Magic);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
+ return rc;
+}
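+
+/* Illustrative usage (editor's sketch; MYDEVSTATE and its fields are
+ * hypothetical, the SSMFIELD_ENTRY* macros come from VBox/vmm/ssm.h):
+ *
+ *   typedef struct MYDEVSTATE { uint32_t u32Flags; RTGCPHYS GCPhysBase; } MYDEVSTATE;
+ *   static SSMFIELD const s_aFields[] =
+ *   {
+ *       SSMFIELD_ENTRY(       MYDEVSTATE, u32Flags),
+ *       SSMFIELD_ENTRY_GCPHYS(MYDEVSTATE, GCPhysBase),
+ *       SSMFIELD_ENTRY_TERM()
+ *   };
+ *   rc = SSMR3GetStruct(pSSM, &pThis->State, &s_aFields[0]);
+ */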
+
+
+/**
+ * SSMR3GetStructEx helper that gets a HCPTR that is used as a NULL indicator.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The saved state handle.
+ * @param ppv Where to return the value (0/1).
+ * @param fFlags SSMSTRUCT_FLAGS_XXX.
+ */
+DECLINLINE(int) ssmR3GetHCPtrNI(PSSMHANDLE pSSM, void **ppv, uint32_t fFlags)
+{
+ uintptr_t uPtrNI;
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ {
+ if (ssmR3GetHostBits(pSSM) == 64)
+ {
+ uint64_t u;
+ int rc = ssmR3DataRead(pSSM, &u, sizeof(u));
+ if (RT_FAILURE(rc))
+ return rc;
+ uPtrNI = u ? 1 : 0;
+ }
+ else
+ {
+ uint32_t u;
+ int rc = ssmR3DataRead(pSSM, &u, sizeof(u));
+ if (RT_FAILURE(rc))
+ return rc;
+ uPtrNI = u ? 1 : 0;
+ }
+ }
+ else
+ {
+ bool f;
+ int rc = SSMR3GetBool(pSSM, &f);
+ if (RT_FAILURE(rc))
+ return rc;
+ uPtrNI = f ? 1 : 0;
+ }
+ *ppv = (void *)uPtrNI;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets a structure, extended API.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pvStruct The structure address.
+ * @param cbStruct The size of the struct (use for validation only).
+ * @param fFlags Combination of SSMSTRUCT_FLAGS_XXX defines.
+ * @param paFields The array of structure fields descriptions. The
+ * array must be terminated by a SSMFIELD_ENTRY_TERM().
+ * @param pvUser User argument for any callbacks that paFields might
+ * contain.
+ */
+VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct,
+ uint32_t fFlags, PCSSMFIELD paFields, void *pvUser)
+{
+ int rc;
+ uint32_t u32Magic;
+
+ /*
+ * Validation.
+ */
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), pSSM->rc = VERR_INVALID_PARAMETER);
+ AssertPtr(pvStruct);
+ AssertPtr(paFields);
+
+ /*
+ * Begin marker.
+ */
+ if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_LEAD_MARKER)))
+ {
+ rc = SSMR3GetU32(pSSM, &u32Magic);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
+ }
+
+ /*
+ * Get the fields.
+ */
+ rc = VINF_SUCCESS;
+ uint32_t off = 0;
+ for (PCSSMFIELD pCur = paFields;
+ pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
+ pCur++)
+ {
+ uint32_t const offField = (!SSMFIELDTRANS_IS_PADDING(pCur->pfnGetPutOrTransformer) || pCur->off != UINT32_MAX / 2)
+ && !SSMFIELDTRANS_IS_OLD(pCur->pfnGetPutOrTransformer)
+ ? pCur->off
+ : off;
+ uint32_t const cbField = SSMFIELDTRANS_IS_OLD(pCur->pfnGetPutOrTransformer)
+ ? 0
+ : SSMFIELDTRANS_IS_PADDING(pCur->pfnGetPutOrTransformer)
+ ? RT_HIWORD(pCur->cb)
+ : pCur->cb;
+ AssertMsgReturn( cbField <= cbStruct
+ && offField + cbField <= cbStruct
+ && offField + cbField >= offField,
+ ("off=%#x cb=%#x cbStruct=%#x (%s)\n", cbField, offField, cbStruct, pCur->pszName),
+ pSSM->rc = VERR_SSM_FIELD_OUT_OF_BOUNDS);
+ AssertMsgReturn( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == offField,
+ ("off=%#x offField=%#x (%s)\n", off, offField, pCur->pszName),
+ pSSM->rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (pCur->uFirstVer <= pSSM->u.Read.uCurUnitVer)
+ {
+ rc = VINF_SUCCESS;
+ uint8_t *pbField = (uint8_t *)pvStruct + offField;
+ switch ((uintptr_t)pCur->pfnGetPutOrTransformer)
+ {
+ case SSMFIELDTRANS_NO_TRANSFORMATION:
+ rc = ssmR3DataRead(pSSM, pbField, cbField);
+ break;
+
+ case SSMFIELDTRANS_GCPHYS:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetGCPhys(pSSM, (PRTGCPHYS)pbField);
+ break;
+
+ case SSMFIELDTRANS_GCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetGCPtr(pSSM, (PRTGCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3GetRCPtr(pSSM, (PRTRCPTR)pbField);
+ break;
+
+ case SSMFIELDTRANS_RCPTR_ARRAY:
+ {
+ uint32_t const cEntries = cbField / sizeof(RTRCPTR);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = SSMR3GetRCPtr(pSSM, &((PRTRCPTR)pbField)[i]);
+ break;
+ }
+
+ case SSMFIELDTRANS_HCPTR_NI:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = ssmR3GetHCPtrNI(pSSM, (void **)pbField, fFlags);
+ break;
+
+ case SSMFIELDTRANS_HCPTR_NI_ARRAY:
+ {
+ uint32_t const cEntries = cbField / sizeof(void *);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
+ rc = ssmR3GetHCPtrNI(pSSM, &((void **)pbField)[i], fFlags);
+ break;
+ }
+
+ case SSMFIELDTRANS_HCPTR_HACK_U32:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ *(uintptr_t *)pbField = 0;
+ rc = ssmR3DataRead(pSSM, pbField, sizeof(uint32_t));
+ if ((fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE) && ssmR3GetHostBits(pSSM) == 64)
+ {
+ uint32_t u32;
+ rc = ssmR3DataRead(pSSM, &u32, sizeof(uint32_t));
+ AssertMsgBreakStmt(RT_FAILURE(rc) || u32 == 0 || (fFlags & SSMSTRUCT_FLAGS_SAVED_AS_MEM),
+ ("high=%#x low=%#x (%s)\n", u32, *(uint32_t *)pbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_VALUE);
+ }
+ break;
+
+ case SSMFIELDTRANS_U32_ZX_U64:
+ AssertMsgBreakStmt(cbField == sizeof(uint64_t), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ ((uint32_t *)pbField)[1] = 0;
+ rc = SSMR3GetU32(pSSM, (uint32_t *)pbField);
+ break;
+
+
+ case SSMFIELDTRANS_IGNORE:
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, cbField);
+ break;
+
+ case SSMFIELDTRANS_IGN_GCPHYS:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPhys);
+ break;
+
+ case SSMFIELDTRANS_IGN_GCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPtr);
+ break;
+
+ case SSMFIELDTRANS_IGN_RCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, sizeof(RTRCPTR));
+ break;
+
+ case SSMFIELDTRANS_IGN_HCPTR:
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) / 8);
+ break;
+
+
+ case SSMFIELDTRANS_OLD:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, pCur->cb);
+ break;
+
+ case SSMFIELDTRANS_OLD_GCPHYS:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPhys);
+ break;
+
+ case SSMFIELDTRANS_OLD_GCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPtr);
+ break;
+
+ case SSMFIELDTRANS_OLD_RCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, sizeof(RTRCPTR));
+ break;
+
+ case SSMFIELDTRANS_OLD_HCPTR:
+ AssertMsgBreakStmt(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) / 8);
+ break;
+
+ case SSMFIELDTRANS_OLD_PAD_HC:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) == 64 ? RT_HIWORD(pCur->cb) : RT_LOWORD(pCur->cb));
+ break;
+
+ case SSMFIELDTRANS_OLD_PAD_MSC32:
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ if (ssmR3IsHostMsc32(pSSM))
+ rc = SSMR3Skip(pSSM, pCur->cb);
+ break;
+
+
+ case SSMFIELDTRANS_PAD_HC:
+ case SSMFIELDTRANS_PAD_HC32:
+ case SSMFIELDTRANS_PAD_HC64:
+ case SSMFIELDTRANS_PAD_HC_AUTO:
+ case SSMFIELDTRANS_PAD_MSC32_AUTO:
+ {
+ uint32_t cb32 = RT_BYTE1(pCur->cb);
+ uint32_t cb64 = RT_BYTE2(pCur->cb);
+ uint32_t cbCtx = HC_ARCH_BITS == 64
+ || ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ && !SSM_HOST_IS_MSC_32)
+ ? cb64 : cb32;
+ uint32_t cbSaved = ssmR3GetHostBits(pSSM) == 64
+ || ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ && !ssmR3IsHostMsc32(pSSM))
+ ? cb64 : cb32;
+ AssertMsgBreakStmt( cbField == cbCtx
+ && ( ( pCur->off == UINT32_MAX / 2
+ && ( cbField == 0
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ )
+ )
+ || (pCur->off != UINT32_MAX / 2 && cbField != 0)
+ )
+ , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
+ cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
+ rc = VERR_SSM_FIELD_INVALID_PADDING_SIZE);
+ if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
+ rc = SSMR3Skip(pSSM, cbSaved);
+ break;
+ }
+
+ default:
+ AssertBreakStmt(pCur->pfnGetPutOrTransformer, rc = VERR_SSM_FIELD_INVALID_CALLBACK);
+ rc = pCur->pfnGetPutOrTransformer(pSSM, pCur, pvStruct, fFlags, true /*fGetOrPut*/, pvUser);
+ break;
+ }
+ if (RT_FAILURE(rc))
+ break;
+ }
+
+ off = offField + cbField;
+ }
+
+ if (RT_SUCCESS(rc))
+ AssertMsgStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == cbStruct,
+ ("off=%#x cbStruct=%#x\n", off, cbStruct),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
+
+ /*
+ * End marker
+ */
+ if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_TAIL_MARKER)))
+ {
+ rc = SSMR3GetU32(pSSM, &u32Magic);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads a boolean item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pfBool Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetBool(PSSMHANDLE pSSM, bool *pfBool)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ uint8_t u8; /* see SSMR3PutBool */
+ int rc = ssmR3DataRead(pSSM, &u8, sizeof(u8));
+ if (RT_SUCCESS(rc))
+ {
+ Assert(u8 <= 1);
+ *pfBool = RT_BOOL(u8);
+ }
+ return rc;
+}
+
+
+/**
+ * Loads a volatile boolean item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pfBool Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetBoolV(PSSMHANDLE pSSM, bool volatile *pfBool)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ uint8_t u8; /* see SSMR3PutBool */
+ int rc = ssmR3DataRead(pSSM, &u8, sizeof(u8));
+ if (RT_SUCCESS(rc))
+ {
+ Assert(u8 <= 1);
+ *pfBool = RT_BOOL(u8);
+ }
+ return rc;
+}
+
+
+/**
+ * Loads an 8-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu8 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU8(PSSMHANDLE pSSM, uint8_t *pu8)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu8, sizeof(*pu8));
+}
+
+
+/**
+ * Loads a volatile 8-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu8 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU8V(PSSMHANDLE pSSM, uint8_t volatile *pu8)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pu8, sizeof(*pu8));
+}
+
+
+/**
+ * Loads an 8-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi8 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS8(PSSMHANDLE pSSM, int8_t *pi8)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi8, sizeof(*pi8));
+}
+
+
+/**
+ * Loads a volatile 8-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi8 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS8V(PSSMHANDLE pSSM, int8_t volatile *pi8)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pi8, sizeof(*pi8));
+}
+
+
+/**
+ * Loads a 16-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu16 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU16(PSSMHANDLE pSSM, uint16_t *pu16)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu16, sizeof(*pu16));
+}
+
+
+/**
+ * Loads a volatile 16-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu16 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU16V(PSSMHANDLE pSSM, uint16_t volatile *pu16)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pu16, sizeof(*pu16));
+}
+
+
+/**
+ * Loads a 16-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi16 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS16(PSSMHANDLE pSSM, int16_t *pi16)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi16, sizeof(*pi16));
+}
+
+
+/**
+ * Loads a volatile 16-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi16 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS16V(PSSMHANDLE pSSM, int16_t volatile *pi16)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pi16, sizeof(*pi16));
+}
+
+
+/**
+ * Loads a 32-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu32 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU32(PSSMHANDLE pSSM, uint32_t *pu32)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu32, sizeof(*pu32));
+}
+
+
+/**
+ * Loads a volatile 32-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu32 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU32V(PSSMHANDLE pSSM, uint32_t volatile *pu32)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pu32, sizeof(*pu32));
+}
+
+
+/**
+ * Loads a 32-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi32 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS32(PSSMHANDLE pSSM, int32_t *pi32)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi32, sizeof(*pi32));
+}
+
+
+/**
+ * Loads a volatile 32-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi32 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS32V(PSSMHANDLE pSSM, int32_t volatile *pi32)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pi32, sizeof(*pi32));
+}
+
+
+/**
+ * Loads a 64-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu64 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU64(PSSMHANDLE pSSM, uint64_t *pu64)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu64, sizeof(*pu64));
+}
+
+
+/**
+ * Loads a volatile 64-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu64 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU64V(PSSMHANDLE pSSM, uint64_t volatile *pu64)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pu64, sizeof(*pu64));
+}
+
+
+/**
+ * Loads a 64-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi64 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS64(PSSMHANDLE pSSM, int64_t *pi64)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi64, sizeof(*pi64));
+}
+
+
+/**
+ * Loads a volatile 64-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi64 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS64V(PSSMHANDLE pSSM, int64_t volatile *pi64)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pi64, sizeof(*pi64));
+}
+
+
+/**
+ * Loads a 128-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu128 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU128(PSSMHANDLE pSSM, uint128_t *pu128)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu128, sizeof(*pu128));
+}
+
+
+/**
+ * Loads a volatile 128-bit unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu128 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetU128V(PSSMHANDLE pSSM, uint128_t volatile *pu128)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pu128, sizeof(*pu128));
+}
+
+
+/**
+ * Loads a 128-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi128 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS128(PSSMHANDLE pSSM, int128_t *pi128)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi128, sizeof(*pi128));
+}
+
+
+/**
+ * Loads a volatile 128-bit signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi128 Where to store the item.
+ */
+VMMR3DECL(int) SSMR3GetS128V(PSSMHANDLE pSSM, int128_t volatile *pi128)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pi128, sizeof(*pi128));
+}
+
+
+/**
+ * Loads a VBox unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu Where to store the integer.
+ */
+VMMR3DECL(int) SSMR3GetUInt(PSSMHANDLE pSSM, PRTUINT pu)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pu, sizeof(*pu));
+}
+
+
+/**
+ * Loads a VBox signed integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pi Where to store the integer.
+ */
+VMMR3DECL(int) SSMR3GetSInt(PSSMHANDLE pSSM, PRTINT pi)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pi, sizeof(*pi));
+}
+
+
+/**
+ * Loads a GC natural unsigned integer item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu Where to store the integer.
+ *
+ * @deprecated Silly type with an incorrect size, don't use it.
+ */
+VMMR3DECL(int) SSMR3GetGCUInt(PSSMHANDLE pSSM, PRTGCUINT pu)
+{
+ AssertCompile(sizeof(RTGCPTR) == sizeof(*pu));
+ return SSMR3GetGCPtr(pSSM, (PRTGCPTR)pu);
+}
+
+
+/**
+ * Loads a GC unsigned integer register item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pu Where to store the integer.
+ */
+VMMR3DECL(int) SSMR3GetGCUIntReg(PSSMHANDLE pSSM, PRTGCUINTREG pu)
+{
+ AssertCompile(sizeof(RTGCPTR) == sizeof(*pu));
+ return SSMR3GetGCPtr(pSSM, (PRTGCPTR)pu);
+}
+
+
+/**
+ * Loads a 32-bit GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhys32(PSSMHANDLE pSSM, PRTGCPHYS32 pGCPhys)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pGCPhys, sizeof(*pGCPhys));
+}
+
+
+/**
+ * Loads a volatile 32-bit GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhys32V(PSSMHANDLE pSSM, RTGCPHYS32 volatile *pGCPhys)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pGCPhys, sizeof(*pGCPhys));
+}
+
+
+/**
+ * Loads a 64-bit GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhys64(PSSMHANDLE pSSM, PRTGCPHYS64 pGCPhys)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pGCPhys, sizeof(*pGCPhys));
+}
+
+
+/**
+ * Loads a volatile 64-bit GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhys64V(PSSMHANDLE pSSM, RTGCPHYS64 volatile *pGCPhys)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, (void *)pGCPhys, sizeof(*pGCPhys));
+}
+
+
+/**
+ * Loads a GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhys(PSSMHANDLE pSSM, PRTGCPHYS pGCPhys)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+
+ /*
+ * Default size?
+ */
+ if (RT_LIKELY(sizeof(*pGCPhys) == pSSM->u.Read.cbGCPhys))
+ return ssmR3DataRead(pSSM, pGCPhys, sizeof(*pGCPhys));
+
+ /*
+ * Fiddly.
+ */
+ Assert(sizeof(*pGCPhys) == sizeof(uint64_t) || sizeof(*pGCPhys) == sizeof(uint32_t));
+ Assert(pSSM->u.Read.cbGCPhys == sizeof(uint64_t) || pSSM->u.Read.cbGCPhys == sizeof(uint32_t));
+ if (pSSM->u.Read.cbGCPhys == sizeof(uint64_t))
+ {
+        /* 64-bit saved, 32-bit load: try to truncate it. */
+ uint64_t u64;
+ int rc = ssmR3DataRead(pSSM, &u64, sizeof(uint64_t));
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u64 >= _4G)
+ return VERR_SSM_GCPHYS_OVERFLOW;
+ *pGCPhys = (RTGCPHYS)u64;
+ return rc;
+ }
+
+ /* 32-bit saved, 64-bit load: clear the high part. */
+ *pGCPhys = 0;
+ return ssmR3DataRead(pSSM, pGCPhys, sizeof(uint32_t));
+}
+
+
+/**
+ * Loads a volatile GC physical address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPhys Where to store the GC physical address.
+ */
+VMMR3DECL(int) SSMR3GetGCPhysV(PSSMHANDLE pSSM, RTGCPHYS volatile *pGCPhys)
+{
+ return SSMR3GetGCPhys(pSSM, (PRTGCPHYS)pGCPhys);
+}
+
+
+/**
+ * Sets the size of GC virtual addresses (RTGCPTR) in the saved state.
+ *
+ * Only applies to the following getters when loading the 1.1 format:
+ * - SSMR3GetGCPtr
+ * - SSMR3GetGCUIntPtr
+ * - SSMR3GetGCUInt
+ * - SSMR3GetGCUIntReg
+ *
+ * Put functions are not affected.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param   cbGCPtr         The size of RTGCPTR in the saved state.
+ *
+ * @remarks This interface only works with saved state version 1.1, if the
+ * format isn't 1.1 the call will be ignored.
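+ *
+ * A brief usage sketch; the fIs64BitGuest flag is hypothetical:
+ * @code
+ *  SSMR3HandleSetGCPtrSize(pSSM, fIs64BitGuest ? sizeof(RTGCPTR64) : sizeof(RTGCPTR32));
+ * @endcode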
+ */
+VMMR3_INT_DECL(int) SSMR3HandleSetGCPtrSize(PSSMHANDLE pSSM, unsigned cbGCPtr)
+{
+ Assert(cbGCPtr == sizeof(RTGCPTR32) || cbGCPtr == sizeof(RTGCPTR64));
+ if (!pSSM->u.Read.fFixedGCPtrSize)
+ {
+ Log(("SSMR3SetGCPtrSize: %u -> %u bytes\n", pSSM->u.Read.cbGCPtr, cbGCPtr));
+ pSSM->u.Read.cbGCPtr = cbGCPtr;
+ pSSM->u.Read.fFixedGCPtrSize = true;
+ }
+ else if ( pSSM->u.Read.cbGCPtr != cbGCPtr
+ && pSSM->u.Read.uFmtVerMajor == 1
+ && pSSM->u.Read.uFmtVerMinor == 1)
+ AssertMsgFailed(("SSMR3SetGCPtrSize: already fixed at %u bytes; requested %u bytes\n", pSSM->u.Read.cbGCPtr, cbGCPtr));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads a GC virtual address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPtr Where to store the GC virtual address.
+ */
+VMMR3DECL(int) SSMR3GetGCPtr(PSSMHANDLE pSSM, PRTGCPTR pGCPtr)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+
+ /*
+ * Default size?
+ */
+ if (RT_LIKELY(sizeof(*pGCPtr) == pSSM->u.Read.cbGCPtr))
+ return ssmR3DataRead(pSSM, pGCPtr, sizeof(*pGCPtr));
+
+ /*
+ * Fiddly.
+ */
+ Assert(sizeof(*pGCPtr) == sizeof(uint64_t) || sizeof(*pGCPtr) == sizeof(uint32_t));
+ Assert(pSSM->u.Read.cbGCPtr == sizeof(uint64_t) || pSSM->u.Read.cbGCPtr == sizeof(uint32_t));
+ if (pSSM->u.Read.cbGCPtr == sizeof(uint64_t))
+ {
+        /* 64-bit saved, 32-bit load: try to truncate it. */
+ uint64_t u64;
+ int rc = ssmR3DataRead(pSSM, &u64, sizeof(uint64_t));
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u64 >= _4G)
+ return VERR_SSM_GCPTR_OVERFLOW;
+ *pGCPtr = (RTGCPTR)u64;
+ return rc;
+ }
+
+ /* 32-bit saved, 64-bit load: clear the high part. */
+ *pGCPtr = 0;
+ return ssmR3DataRead(pSSM, pGCPtr, sizeof(uint32_t));
+}
+
+
+/**
+ * Loads a GC virtual address (represented as an unsigned integer) item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pGCPtr Where to store the GC virtual address.
+ */
+VMMR3DECL(int) SSMR3GetGCUIntPtr(PSSMHANDLE pSSM, PRTGCUINTPTR pGCPtr)
+{
+ AssertCompile(sizeof(RTGCPTR) == sizeof(*pGCPtr));
+ return SSMR3GetGCPtr(pSSM, (PRTGCPTR)pGCPtr);
+}
+
+
+/**
+ * Loads an RC virtual address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pRCPtr Where to store the RC virtual address.
+ */
+VMMR3DECL(int) SSMR3GetRCPtr(PSSMHANDLE pSSM, PRTRCPTR pRCPtr)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pRCPtr, sizeof(*pRCPtr));
+}
+
+
+/**
+ * Loads an I/O port address item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pIOPort Where to store the I/O port address.
+ */
+VMMR3DECL(int) SSMR3GetIOPort(PSSMHANDLE pSSM, PRTIOPORT pIOPort)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pIOPort, sizeof(*pIOPort));
+}
+
+
+/**
+ * Loads a selector item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pSel Where to store the selector.
+ */
+VMMR3DECL(int) SSMR3GetSel(PSSMHANDLE pSSM, PRTSEL pSel)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pSel, sizeof(*pSel));
+}
+
+
+/**
+ * Loads a memory item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param pv Where to store the item.
+ * @param cb Size of the item.
+ */
+VMMR3DECL(int) SSMR3GetMem(PSSMHANDLE pSSM, void *pv, size_t cb)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ return ssmR3DataRead(pSSM, pv, cb);
+}
+
+
+/**
+ * Loads a string item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param psz Where to store the item.
+ * @param cbMax Max size of the item (including '\\0').
+ */
+VMMR3DECL(int) SSMR3GetStrZ(PSSMHANDLE pSSM, char *psz, size_t cbMax)
+{
+ return SSMR3GetStrZEx(pSSM, psz, cbMax, NULL);
+}
+
+
+/**
+ * Loads a string item from the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ * @param psz Where to store the item.
+ * @param cbMax Max size of the item (including '\\0').
+ * @param pcbStr The length of the loaded string excluding the '\\0'. (optional)
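+ *
+ * A small usage sketch; the szName buffer is hypothetical:
+ * @code
+ *  char   szName[64];
+ *  size_t cchName;
+ *  int rc = SSMR3GetStrZEx(pSSM, szName, sizeof(szName), &cchName);
+ *  if (rc == VERR_TOO_MUCH_DATA)
+ *      return rc; // the saved string (terminator included) didn't fit
+ * @endcode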
+ */
+VMMR3DECL(int) SSMR3GetStrZEx(PSSMHANDLE pSSM, char *psz, size_t cbMax, size_t *pcbStr)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+
+ /* read size prefix. */
+ uint32_t u32;
+ int rc = SSMR3GetU32(pSSM, &u32);
+ if (RT_SUCCESS(rc))
+ {
+ if (pcbStr)
+ *pcbStr = u32;
+ if (u32 < cbMax)
+ {
+ /* terminate and read string content. */
+ psz[u32] = '\0';
+ return ssmR3DataRead(pSSM, psz, u32);
+ }
+ return VERR_TOO_MUCH_DATA;
+ }
+ return rc;
+}
+
+
+/**
+ * Skips a number of bytes in the current data unit.
+ *
+ * @returns VBox status code.
+ * @param pSSM The SSM handle.
+ * @param cb The number of bytes to skip.
+ */
+VMMR3DECL(int) SSMR3Skip(PSSMHANDLE pSSM, size_t cb)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ while (cb > 0)
+ {
+ uint8_t abBuf[8192];
+ size_t cbCur = RT_MIN(sizeof(abBuf), cb);
+ cb -= cbCur;
+ int rc = ssmR3DataRead(pSSM, abBuf, cbCur);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Skips to the end of the current data unit.
+ *
+ * Since version 2 of the format, the load exec callback has to call this API
+ * explicitly if it wishes to be lazy for some reason.  This is because there
+ * seldom is a good reason not to read the entire data unit, and skipping it
+ * silently was hiding bugs.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
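+ *
+ * A sketch of a deliberately lazy load callback; uInterestingField is
+ * hypothetical and version checking is elided:
+ * @code
+ *  int rc = SSMR3GetU32(pSSM, &pThis->uInterestingField);
+ *  AssertRCReturn(rc, rc);
+ *  return SSMR3SkipToEndOfUnit(pSSM); // explicitly skip the rest of the unit
+ * @endcode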
+ */
+VMMR3DECL(int) SSMR3SkipToEndOfUnit(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_READABLE_RET(pSSM);
+ SSM_CHECK_CANCELLED_RET(pSSM);
+ if (pSSM->u.Read.uFmtVerMajor >= 2)
+ {
+ /*
+         * Read until the end-of-data condition is raised.
+ */
+ pSSM->u.Read.cbDataBuffer = 0;
+ pSSM->u.Read.offDataBuffer = 0;
+ if (!pSSM->u.Read.fEndOfData)
+ {
+ do
+ {
+ /* read the rest of the current record */
+ while (pSSM->u.Read.cbRecLeft)
+ {
+ uint8_t abBuf[8192];
+ uint32_t cbToRead = RT_MIN(pSSM->u.Read.cbRecLeft, sizeof(abBuf));
+ int rc = ssmR3DataReadV2Raw(pSSM, abBuf, cbToRead);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ pSSM->u.Read.cbRecLeft -= cbToRead;
+ }
+
+ /* read the next header. */
+ int rc = ssmR3DataReadRecHdrV2(pSSM);
+ if (RT_FAILURE(rc))
+ return pSSM->rc = rc;
+ } while (!pSSM->u.Read.fEndOfData);
+ }
+ }
+ /* else: Doesn't matter for the version 1 loading. */
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Calculate the checksum of a file portion.
+ *
+ * @returns VBox status code.
+ * @param   pStrm       The stream handle.
+ * @param off Where to start checksumming.
+ * @param cb How much to checksum.
+ * @param pu32CRC Where to store the calculated checksum.
+ */
+static int ssmR3CalcChecksum(PSSMSTRM pStrm, uint64_t off, uint64_t cb, uint32_t *pu32CRC)
+{
+ /*
+ * Allocate a buffer.
+ */
+ const size_t cbBuf = _32K;
+ void *pvBuf = RTMemTmpAlloc(cbBuf);
+ if (!pvBuf)
+ return VERR_NO_TMP_MEMORY;
+
+ /*
+ * Loop reading and calculating CRC32.
+ */
+ int rc = VINF_SUCCESS;
+ uint32_t u32CRC = RTCrc32Start();
+ while (cb > 0)
+ {
+ /* read chunk */
+ size_t cbToRead = cbBuf;
+ if (cb < cbBuf)
+ cbToRead = cb;
+ rc = ssmR3StrmPeekAt(pStrm, off, pvBuf, cbToRead, NULL);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Failed with rc=%Rrc while calculating crc.\n", rc));
+ RTMemTmpFree(pvBuf);
+ return rc;
+ }
+
+ /* advance */
+ cb -= cbToRead;
+ off += cbToRead;
+
+ /* calc crc32. */
+ u32CRC = RTCrc32Process(u32CRC, pvBuf, cbToRead);
+ }
+ RTMemTmpFree(pvBuf);
+
+ /* store the calculated crc */
+ u32CRC = RTCrc32Finish(u32CRC);
+ Log(("SSM: u32CRC=0x%08x\n", u32CRC));
+ *pu32CRC = u32CRC;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates a version 2 footer.
+ *
+ * @returns VBox status code.
+ *
+ * @param pFooter The footer.
+ * @param offFooter The stream offset of the footer.
+ * @param cDirEntries The number of directory entries. UINT32_MAX if
+ * unknown.
+ * @param fStreamCrc32 Whether the stream is checksummed using CRC-32.
+ * @param u32StreamCRC The stream checksum.
+ */
+static int ssmR3ValidateFooter(PSSMFILEFTR pFooter, uint64_t offFooter, uint32_t cDirEntries, bool fStreamCrc32, uint32_t u32StreamCRC)
+{
+ if (memcmp(pFooter->szMagic, SSMFILEFTR_MAGIC, sizeof(pFooter->szMagic)))
+ {
+ LogRel(("SSM: Bad footer magic: %.*Rhxs\n", sizeof(pFooter->szMagic), &pFooter->szMagic[0]));
+ return VERR_SSM_INTEGRITY_FOOTER;
+ }
+ SSM_CHECK_CRC32_RET(pFooter, sizeof(*pFooter), ("Footer CRC mismatch: %08x, correct is %08x\n", u32CRC, u32ActualCRC));
+ if (pFooter->offStream != offFooter)
+ {
+ LogRel(("SSM: SSMFILEFTR::offStream is wrong: %llx, expected %llx\n", pFooter->offStream, offFooter));
+ return VERR_SSM_INTEGRITY_FOOTER;
+ }
+ if (pFooter->u32Reserved)
+ {
+ LogRel(("SSM: Reserved footer field isn't zero: %08x\n", pFooter->u32Reserved));
+ return VERR_SSM_INTEGRITY_FOOTER;
+ }
+ if (cDirEntries != UINT32_MAX)
+ AssertLogRelMsgReturn(pFooter->cDirEntries == cDirEntries,
+ ("Footer: cDirEntries=%#x, expected %#x\n", pFooter->cDirEntries, cDirEntries),
+ VERR_SSM_INTEGRITY_FOOTER);
+ else
+ AssertLogRelMsgReturn(pFooter->cDirEntries < _64K,
+ ("Footer: cDirEntries=%#x\n", pFooter->cDirEntries),
+ VERR_SSM_INTEGRITY_FOOTER);
+ if ( !fStreamCrc32
+ && pFooter->u32StreamCRC)
+ {
+ LogRel(("SSM: u32StreamCRC field isn't zero, but header says stream checksumming is disabled.\n"));
+ return VERR_SSM_INTEGRITY_FOOTER;
+ }
+ if ( fStreamCrc32
+ && pFooter->u32StreamCRC != u32StreamCRC)
+ {
+ LogRel(("SSM: Bad stream CRC: %#x, expected %#x.\n", pFooter->u32StreamCRC, u32StreamCRC));
+ return VERR_SSM_INTEGRITY_CRC;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates the header information stored in the handle.
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The handle.
+ * @param fHaveHostBits Set if the host bits field is valid.
+ * @param fHaveVersion Set if we have a version.
+ */
+static int ssmR3ValidateHeaderInfo(PSSMHANDLE pSSM, bool fHaveHostBits, bool fHaveVersion)
+{
+ Assert(pSSM->u.Read.cbFileHdr < 256 && pSSM->u.Read.cbFileHdr > 32);
+ Assert(pSSM->u.Read.uFmtVerMajor == 1 || pSSM->u.Read.uFmtVerMajor == 2);
+ Assert(pSSM->u.Read.uFmtVerMinor <= 2);
+
+ if (fHaveVersion)
+ {
+ if ( pSSM->u.Read.u16VerMajor == 0
+ || pSSM->u.Read.u16VerMajor > 1000
+ || pSSM->u.Read.u16VerMinor > 1000
+ || pSSM->u.Read.u32VerBuild > _1M
+ || pSSM->u.Read.u32SvnRev == 0
+ || pSSM->u.Read.u32SvnRev > 10000000 /*100M*/)
+ {
+ LogRel(("SSM: Incorrect version values: %u.%u.%u.r%u\n",
+ pSSM->u.Read.u16VerMajor, pSSM->u.Read.u16VerMinor, pSSM->u.Read.u32VerBuild, pSSM->u.Read.u32SvnRev));
+ return VERR_SSM_INTEGRITY_VBOX_VERSION;
+ }
+ }
+ else
+ AssertLogRelReturn( pSSM->u.Read.u16VerMajor == 0
+ && pSSM->u.Read.u16VerMinor == 0
+ && pSSM->u.Read.u32VerBuild == 0
+ && pSSM->u.Read.u32SvnRev == 0,
+ VERR_SSM_INTEGRITY_VBOX_VERSION);
+
+ if (fHaveHostBits)
+ {
+ if ( pSSM->u.Read.cHostBits != 32
+ && pSSM->u.Read.cHostBits != 64)
+ {
+ LogRel(("SSM: Incorrect cHostBits value: %u\n", pSSM->u.Read.cHostBits));
+ return VERR_SSM_INTEGRITY_HEADER;
+ }
+ }
+ else
+ AssertLogRelReturn(pSSM->u.Read.cHostBits == 0, VERR_SSM_INTEGRITY_HEADER);
+
+ if ( pSSM->u.Read.cbGCPhys != sizeof(uint32_t)
+ && pSSM->u.Read.cbGCPhys != sizeof(uint64_t))
+ {
+ LogRel(("SSM: Incorrect cbGCPhys value: %d\n", pSSM->u.Read.cbGCPhys));
+ return VERR_SSM_INTEGRITY_HEADER;
+ }
+ if ( pSSM->u.Read.cbGCPtr != sizeof(uint32_t)
+ && pSSM->u.Read.cbGCPtr != sizeof(uint64_t))
+ {
+ LogRel(("SSM: Incorrect cbGCPtr value: %d\n", pSSM->u.Read.cbGCPtr));
+ return VERR_SSM_INTEGRITY_HEADER;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Reads the header, detects the format version and performs integrity
+ * validations.
+ *
+ * @returns VBox status code.
+ * @param   pSSM            The saved state handle.  A number of fields will
+ * be updated, mostly header related information.
+ * fLiveSave is also set if appropriate.
+ * @param fChecksumIt Whether to checksum the file or not. This will
+ *                          be ignored if the stream isn't a file.
+ * @param fChecksumOnRead Whether to validate the checksum while reading
+ * the stream instead of up front. If not possible,
+ * verify the checksum up front.
+ */
+static int ssmR3HeaderAndValidate(PSSMHANDLE pSSM, bool fChecksumIt, bool fChecksumOnRead)
+{
+ /*
+ * Read and check the header magic.
+ */
+ union
+ {
+ SSMFILEHDR v2_0;
+ SSMFILEHDRV12 v1_2;
+ SSMFILEHDRV11 v1_1;
+ } uHdr;
+ int rc = ssmR3StrmRead(&pSSM->Strm, &uHdr, sizeof(uHdr.v2_0.szMagic));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed to read file magic header. rc=%Rrc\n", rc));
+ return rc;
+ }
+ if (memcmp(uHdr.v2_0.szMagic, SSMFILEHDR_MAGIC_BASE, sizeof(SSMFILEHDR_MAGIC_BASE) - 1))
+ {
+ Log(("SSM: Not a saved state file. magic=%.*s\n", sizeof(uHdr.v2_0.szMagic) - 1, uHdr.v2_0.szMagic));
+ return VERR_SSM_INTEGRITY_MAGIC;
+ }
+
+ /*
+ * Find the header size and read the rest.
+ */
+ static const struct
+ {
+ char szMagic[sizeof(SSMFILEHDR_MAGIC_V2_0)];
+ uint32_t cbHdr;
+ unsigned uFmtVerMajor;
+ unsigned uFmtVerMinor;
+ } s_aVers[] =
+ {
+ { SSMFILEHDR_MAGIC_V2_0, sizeof(SSMFILEHDR), 2, 0 },
+ { SSMFILEHDR_MAGIC_V1_2, sizeof(SSMFILEHDRV12), 1, 2 },
+ { SSMFILEHDR_MAGIC_V1_1, sizeof(SSMFILEHDRV11), 1, 1 },
+ };
+ int iVer = RT_ELEMENTS(s_aVers);
+ while (iVer-- > 0)
+ if (!memcmp(uHdr.v2_0.szMagic, s_aVers[iVer].szMagic, sizeof(uHdr.v2_0.szMagic)))
+ break;
+ if (iVer < 0)
+ {
+ Log(("SSM: Unknown file format version. magic=%.*s\n", sizeof(uHdr.v2_0.szMagic) - 1, uHdr.v2_0.szMagic));
+ return VERR_SSM_INTEGRITY_VERSION;
+ }
+ pSSM->u.Read.uFmtVerMajor = s_aVers[iVer].uFmtVerMajor;
+ pSSM->u.Read.uFmtVerMinor = s_aVers[iVer].uFmtVerMinor;
+ pSSM->u.Read.cbFileHdr = s_aVers[iVer].cbHdr;
+
+ rc = ssmR3StrmRead(&pSSM->Strm, (uint8_t *)&uHdr + sizeof(uHdr.v2_0.szMagic), pSSM->u.Read.cbFileHdr - sizeof(uHdr.v2_0.szMagic));
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: Failed to read the file header. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Make version specific adjustments.
+ */
+ if (pSSM->u.Read.uFmtVerMajor >= 2)
+ {
+ /*
+ * Version 2.0 and later.
+ */
+ if (pSSM->u.Read.uFmtVerMinor == 0)
+ {
+ /* validate the header. */
+ SSM_CHECK_CRC32_RET(&uHdr.v2_0, sizeof(uHdr.v2_0), ("Header CRC mismatch: %08x, correct is %08x\n", u32CRC, u32ActualCRC));
+ if (uHdr.v2_0.u8Reserved)
+ {
+ LogRel(("SSM: Reserved header field isn't zero: %02x\n", uHdr.v2_0.u8Reserved));
+ return VERR_SSM_INTEGRITY;
+ }
+ if (uHdr.v2_0.fFlags & ~(SSMFILEHDR_FLAGS_STREAM_CRC32 | SSMFILEHDR_FLAGS_STREAM_LIVE_SAVE))
+ {
+ LogRel(("SSM: Unknown header flags: %08x\n", uHdr.v2_0.fFlags));
+ return VERR_SSM_INTEGRITY;
+ }
+ if ( uHdr.v2_0.cbMaxDecompr > sizeof(pSSM->u.Read.abDataBuffer)
+ || uHdr.v2_0.cbMaxDecompr < _1K
+ || (uHdr.v2_0.cbMaxDecompr & 0xff) != 0)
+ {
+ LogRel(("SSM: The cbMaxDecompr header field is out of range: %#x\n", uHdr.v2_0.cbMaxDecompr));
+ return VERR_SSM_INTEGRITY;
+ }
+
+ /* set the header info. */
+ pSSM->u.Read.cHostBits = uHdr.v2_0.cHostBits;
+ pSSM->u.Read.u16VerMajor = uHdr.v2_0.u16VerMajor;
+ pSSM->u.Read.u16VerMinor = uHdr.v2_0.u16VerMinor;
+ pSSM->u.Read.u32VerBuild = uHdr.v2_0.u32VerBuild;
+ pSSM->u.Read.u32SvnRev = uHdr.v2_0.u32SvnRev;
+ pSSM->u.Read.cbGCPhys = uHdr.v2_0.cbGCPhys;
+ pSSM->u.Read.cbGCPtr = uHdr.v2_0.cbGCPtr;
+ pSSM->u.Read.fFixedGCPtrSize= true;
+ pSSM->u.Read.fStreamCrc32 = !!(uHdr.v2_0.fFlags & SSMFILEHDR_FLAGS_STREAM_CRC32);
+ pSSM->fLiveSave = !!(uHdr.v2_0.fFlags & SSMFILEHDR_FLAGS_STREAM_LIVE_SAVE);
+ }
+ else
+ AssertFailedReturn(VERR_SSM_IPE_2);
+ if (!pSSM->u.Read.fStreamCrc32)
+ ssmR3StrmDisableChecksumming(&pSSM->Strm);
+
+ /*
+ * Read and validate the footer if it's a file.
+ */
+ if (ssmR3StrmIsFile(&pSSM->Strm))
+ {
+ SSMFILEFTR Footer;
+ uint64_t offFooter;
+ rc = ssmR3StrmPeekAt(&pSSM->Strm, -(RTFOFF)sizeof(SSMFILEFTR), &Footer, sizeof(Footer), &offFooter);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = ssmR3ValidateFooter(&Footer, offFooter, UINT32_MAX, pSSM->u.Read.fStreamCrc32, Footer.u32StreamCRC);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pSSM->u.Read.cbLoadFile = offFooter + sizeof(Footer);
+ pSSM->u.Read.u32LoadCRC = Footer.u32StreamCRC;
+ }
+ else
+ {
+ pSSM->u.Read.cbLoadFile = UINT64_MAX;
+ pSSM->u.Read.u32LoadCRC = 0;
+ }
+
+ /*
+ * Validate the header info we've set in the handle.
+ */
+ rc = ssmR3ValidateHeaderInfo(pSSM, true /*fHaveHostBits*/, true /*fHaveVersion*/);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Check the checksum if that's called for and possible.
+ */
+ if ( pSSM->u.Read.fStreamCrc32
+ && fChecksumIt
+ && !fChecksumOnRead
+ && ssmR3StrmIsFile(&pSSM->Strm))
+ {
+ uint32_t u32CRC;
+ rc = ssmR3CalcChecksum(&pSSM->Strm, 0, pSSM->u.Read.cbLoadFile - sizeof(SSMFILEFTR), &u32CRC);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32CRC != pSSM->u.Read.u32LoadCRC)
+ {
+ LogRel(("SSM: Invalid CRC! Calculated %#010x, in footer %#010x\n", u32CRC, pSSM->u.Read.u32LoadCRC));
+ return VERR_SSM_INTEGRITY_CRC;
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Version 1.x of the format.
+ */
+ bool fHaveHostBits = true;
+ bool fHaveVersion = false;
+ RTUUID MachineUuidFromHdr;
+
+ ssmR3StrmDisableChecksumming(&pSSM->Strm);
+ if (pSSM->u.Read.uFmtVerMinor == 1)
+ {
+ pSSM->u.Read.cHostBits = 0; /* unknown */
+ pSSM->u.Read.u16VerMajor = 0;
+ pSSM->u.Read.u16VerMinor = 0;
+ pSSM->u.Read.u32VerBuild = 0;
+ pSSM->u.Read.u32SvnRev = 0;
+ pSSM->u.Read.cbLoadFile = uHdr.v1_1.cbFile;
+ pSSM->u.Read.u32LoadCRC = uHdr.v1_1.u32CRC;
+ pSSM->u.Read.cbGCPhys = sizeof(RTGCPHYS);
+ pSSM->u.Read.cbGCPtr = sizeof(RTGCPTR);
+ pSSM->u.Read.fFixedGCPtrSize = false; /* settable */
+ pSSM->u.Read.fStreamCrc32 = false;
+
+ MachineUuidFromHdr = uHdr.v1_1.MachineUuid;
+ fHaveHostBits = false;
+ }
+ else if (pSSM->u.Read.uFmtVerMinor == 2)
+ {
+ pSSM->u.Read.cHostBits = uHdr.v1_2.cHostBits;
+ pSSM->u.Read.u16VerMajor = uHdr.v1_2.u16VerMajor;
+ pSSM->u.Read.u16VerMinor = uHdr.v1_2.u16VerMinor;
+ pSSM->u.Read.u32VerBuild = uHdr.v1_2.u32VerBuild;
+ pSSM->u.Read.u32SvnRev = uHdr.v1_2.u32SvnRev;
+ pSSM->u.Read.cbLoadFile = uHdr.v1_2.cbFile;
+ pSSM->u.Read.u32LoadCRC = uHdr.v1_2.u32CRC;
+ pSSM->u.Read.cbGCPhys = uHdr.v1_2.cbGCPhys;
+ pSSM->u.Read.cbGCPtr = uHdr.v1_2.cbGCPtr;
+ pSSM->u.Read.fFixedGCPtrSize = true;
+ pSSM->u.Read.fStreamCrc32 = false;
+
+ MachineUuidFromHdr = uHdr.v1_2.MachineUuid;
+ fHaveVersion = true;
+ }
+ else
+ AssertFailedReturn(VERR_SSM_IPE_1);
+
+ /*
+ * The MachineUuid must be NULL (was never used).
+ */
+ if (!RTUuidIsNull(&MachineUuidFromHdr))
+ {
+ LogRel(("SSM: The UUID of the saved state doesn't match the running VM.\n"));
+ return VERR_SMM_INTEGRITY_MACHINE;
+ }
+
+ /*
+ * Verify the file size.
+ */
+ uint64_t cbFile = ssmR3StrmGetSize(&pSSM->Strm);
+ if (cbFile != pSSM->u.Read.cbLoadFile)
+ {
+ LogRel(("SSM: File size mismatch. hdr.cbFile=%lld actual %lld\n", pSSM->u.Read.cbLoadFile, cbFile));
+ return VERR_SSM_INTEGRITY_SIZE;
+ }
+
+ /*
+ * Validate the header info we've set in the handle.
+ */
+ rc = ssmR3ValidateHeaderInfo(pSSM, fHaveHostBits, fHaveVersion);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Verify the checksum if requested.
+ *
+         * Note! The checksum is not actually generated for the whole file;
+         *       this is of course a bug in the v1.x code that we cannot do
+         *       anything about.
+ */
+ if ( fChecksumIt
+ || fChecksumOnRead)
+ {
+ uint32_t u32CRC;
+ rc = ssmR3CalcChecksum(&pSSM->Strm,
+ RT_UOFFSETOF(SSMFILEHDRV11, u32CRC) + sizeof(uHdr.v1_1.u32CRC),
+ cbFile - pSSM->u.Read.cbFileHdr,
+ &u32CRC);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32CRC != pSSM->u.Read.u32LoadCRC)
+ {
+ LogRel(("SSM: Invalid CRC! Calculated %#010x, in header %#010x\n", u32CRC, pSSM->u.Read.u32LoadCRC));
+ return VERR_SSM_INTEGRITY_CRC;
+ }
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Open a saved state for reading.
+ *
+ * The file will be positioned at the first data unit upon successful return.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszFilename The filename. NULL if pStreamOps is used.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvUser The user argument to the stream methods.
+ * @param fChecksumIt Check the checksum for the entire file.
+ * @param fChecksumOnRead Whether to validate the checksum while reading
+ * the stream instead of up front. If not possible,
+ * verify the checksum up front.
+ * @param   cBuffers        The number of stream buffers.
+ * @param   pSSM            Pointer to the handle structure.  This will be
+ *                          completely initialized on success.
+ */
+static int ssmR3OpenFile(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvUser,
+ bool fChecksumIt, bool fChecksumOnRead, uint32_t cBuffers, PSSMHANDLE pSSM)
+{
+ /*
+ * Initialize the handle.
+ */
+ pSSM->pVM = pVM;
+ pSSM->enmOp = SSMSTATE_INVALID;
+ pSSM->enmAfter = SSMAFTER_INVALID;
+ pSSM->fCancelled = SSMHANDLE_OK;
+ pSSM->rc = VINF_SUCCESS;
+ pSSM->cbUnitLeftV1 = 0;
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+ pSSM->fLiveSave = false;
+ pSSM->pfnProgress = NULL;
+ pSSM->pvUser = NULL;
+ pSSM->uPercent = 0;
+ pSSM->offEstProgress = 0;
+ pSSM->cbEstTotal = 0;
+ pSSM->offEst = 0;
+ pSSM->offEstUnitEnd = 0;
+ pSSM->uPercentLive = 0;
+ pSSM->uPercentPrepare = 5;
+ pSSM->uPercentDone = 2;
+ pSSM->uReportedLivePercent = 0;
+ pSSM->pszFilename = pszFilename;
+
+ pSSM->u.Read.pZipDecompV1 = NULL;
+ pSSM->u.Read.uFmtVerMajor = UINT32_MAX;
+ pSSM->u.Read.uFmtVerMinor = UINT32_MAX;
+ pSSM->u.Read.cbFileHdr = UINT32_MAX;
+ pSSM->u.Read.cbGCPhys = UINT8_MAX;
+ pSSM->u.Read.cbGCPtr = UINT8_MAX;
+ pSSM->u.Read.fFixedGCPtrSize= false;
+ pSSM->u.Read.fIsHostMsc32 = SSM_HOST_IS_MSC_32;
+ RT_ZERO(pSSM->u.Read.szHostOSAndArch);
+ pSSM->u.Read.u16VerMajor = UINT16_MAX;
+ pSSM->u.Read.u16VerMinor = UINT16_MAX;
+ pSSM->u.Read.u32VerBuild = UINT32_MAX;
+ pSSM->u.Read.u32SvnRev = UINT32_MAX;
+ pSSM->u.Read.cHostBits = UINT8_MAX;
+ pSSM->u.Read.cbLoadFile = UINT64_MAX;
+
+ pSSM->u.Read.cbRecLeft = 0;
+ pSSM->u.Read.cbDataBuffer = 0;
+ pSSM->u.Read.offDataBuffer = 0;
+ pSSM->u.Read.fEndOfData = 0;
+ pSSM->u.Read.u8TypeAndFlags = 0;
+
+ pSSM->u.Read.pCurUnit = NULL;
+ pSSM->u.Read.uCurUnitVer = UINT32_MAX;
+ pSSM->u.Read.uCurUnitPass = 0;
+ pSSM->u.Read.fHaveSetError = false;
+
+ /*
+ * Try open and validate the file.
+ */
+ int rc;
+ if (pStreamOps)
+ rc = ssmR3StrmInit(&pSSM->Strm, pStreamOps, pvUser, false /*fWrite*/, fChecksumOnRead, cBuffers);
+ else
+ rc = ssmR3StrmOpenFile(&pSSM->Strm, pszFilename, false /*fWrite*/, fChecksumOnRead, cBuffers);
+ if (RT_SUCCESS(rc))
+ {
+ rc = ssmR3HeaderAndValidate(pSSM, fChecksumIt, fChecksumOnRead);
+ if (RT_SUCCESS(rc))
+ return rc;
+
+ /* failure path */
+ ssmR3StrmClose(&pSSM->Strm, pSSM->rc == VERR_SSM_CANCELLED);
+ }
+ else
+ Log(("SSM: Failed to open save state file '%s', rc=%Rrc.\n", pszFilename, rc));
+ return rc;
+}
+
+
+/**
+ * Verifies the directory.
+ *
+ * @returns VBox status code.
+ *
+ * @param pDir The full directory.
+ * @param cbDir The size of the directory.
+ * @param offDir The directory stream offset.
+ * @param cDirEntries The directory entry count from the footer.
+ * @param cbHdr The header size.
+ * @param uSvnRev The SVN revision that saved the state. Bug detection.
+ */
+static int ssmR3ValidateDirectory(PSSMFILEDIR pDir, size_t cbDir, uint64_t offDir, uint32_t cDirEntries,
+ uint32_t cbHdr, uint32_t uSvnRev)
+{
+ AssertLogRelReturn(!memcmp(pDir->szMagic, SSMFILEDIR_MAGIC, sizeof(pDir->szMagic)), VERR_SSM_INTEGRITY_DIR_MAGIC);
+ SSM_CHECK_CRC32_RET(pDir, cbDir, ("Bad directory CRC: %08x, actual %08x\n", u32CRC, u32ActualCRC));
+ AssertLogRelMsgReturn(pDir->cEntries == cDirEntries,
+ ("Bad directory entry count: %#x, expected %#x (from the footer)\n", pDir->cEntries, cDirEntries),
+ VERR_SSM_INTEGRITY_DIR);
+ AssertLogRelReturn(RT_UOFFSETOF_DYN(SSMFILEDIR, aEntries[pDir->cEntries]) == cbDir, VERR_SSM_INTEGRITY_DIR);
+
+ for (uint32_t i = 0; i < pDir->cEntries; i++)
+ {
+ AssertLogRelMsgReturn( ( pDir->aEntries[i].off >= cbHdr
+ && pDir->aEntries[i].off < offDir)
+ || ( pDir->aEntries[i].off == 0 /* bug in unreleased code */
+ && uSvnRev < 53365),
+ ("off=%#llx cbHdr=%#x offDir=%#llx\n", pDir->aEntries[i].off, cbHdr, offDir),
+ VERR_SSM_INTEGRITY_DIR);
+ }
+ return VINF_SUCCESS;
+}
+
+#ifndef SSM_STANDALONE
+
+/**
+ * LogRel the unit content.
+ *
+ * @param pSSM The save state handle.
+ * @param   pUnitHdr        The unit header (for cbName).
+ * @param offUnit The offset of the unit header.
+ * @param offStart Where to start.
+ * @param offEnd Where to end.
+ */
+static void ssmR3StrmLogUnitContent(PSSMHANDLE pSSM, SSMFILEUNITHDRV2 const *pUnitHdr, uint64_t offUnit,
+ uint64_t offStart, uint64_t offEnd)
+{
+ /*
+ * Stop the I/O thread (if present).
+ */
+ ssmR3StrmStopIoThread(&pSSM->Strm);
+
+ /*
+ * Save the current status, resetting it so we can read + log the unit bytes.
+ */
+ int rcSaved = pSSM->rc;
+ pSSM->rc = VINF_SUCCESS;
+
+ /*
+     * Rewind to the start of the unit if we can.
+ */
+ uint32_t cbUnitHdr = RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[pUnitHdr->cbName]);
+ int rc = ssmR3StrmSeek(&pSSM->Strm, offUnit/* + cbUnitHdr*/, RTFILE_SEEK_BEGIN, pUnitHdr->u32CurStreamCRC);
+ if (RT_SUCCESS(rc))
+ {
+ SSMFILEUNITHDRV2 UnitHdr2;
+ rc = ssmR3StrmRead(&pSSM->Strm, &UnitHdr2, cbUnitHdr);
+ if ( RT_SUCCESS(rc)
+ && memcmp(&UnitHdr2, pUnitHdr, cbUnitHdr) == 0)
+ {
+ pSSM->u.Read.cbDataBuffer = 0; /* avoid assertions */
+ pSSM->u.Read.cbRecLeft = 0;
+ ssmR3DataReadBeginV2(pSSM);
+
+ /*
+ * Read the unit, dumping the requested bits.
+ */
+ uint8_t cbLine = 0;
+ uint8_t abLine[16];
+ uint64_t offCur = 0;
+ offStart &= ~(uint64_t)(sizeof(abLine) - 1);
+ Assert(offStart < offEnd);
+ LogRel(("SSM: Unit '%s' contents:\n", pUnitHdr->szName));
+
+ do
+ {
+ /*
+ * Read the next 16 bytes into abLine. We have to take some care to
+ * get all the bytes in the unit, since we don't really know its size.
+ */
+ while ( cbLine < sizeof(abLine)
+ && !pSSM->u.Read.fEndOfData
+ && RT_SUCCESS(pSSM->rc))
+ {
+ uint32_t cbToRead = sizeof(abLine) - cbLine;
+ if (cbToRead > 1)
+ {
+ int32_t cbInBuffer = pSSM->u.Read.cbDataBuffer - pSSM->u.Read.offDataBuffer;
+ if ((int32_t)cbToRead > cbInBuffer)
+ {
+ if (cbInBuffer > 0)
+ cbToRead = cbInBuffer;
+ else if (pSSM->u.Read.cbRecLeft)
+ cbToRead = 1;
+ else
+ {
+ rc = ssmR3DataReadRecHdrV2(pSSM);
+ if (RT_FAILURE(rc))
+ {
+ pSSM->rc = rc;
+ break;
+ }
+ if (pSSM->u.Read.fEndOfData)
+ break;
+ }
+ }
+ }
+ rc = ssmR3DataRead(pSSM, &abLine[cbLine], cbToRead);
+ if (RT_SUCCESS(rc))
+ cbLine += cbToRead;
+ else
+ break;
+ }
+
+ /*
+ * Display the bytes if in the requested range.
+ */
+ if ( offCur >= offStart
+ && offCur <= offEnd)
+ {
+ char szLine[132];
+ char *pchDst = szLine;
+ uint8_t offSrc = 0;
+ while (offSrc < cbLine)
+ {
+ static char const s_szHex[17] = "0123456789abcdef";
+ uint8_t const b = abLine[offSrc++];
+ *pchDst++ = s_szHex[b >> 4];
+ *pchDst++ = s_szHex[b & 0xf];
+ *pchDst++ = offSrc != 8 ? ' ' : '-';
+ }
+ while (offSrc < sizeof(abLine))
+ {
+ *pchDst++ = ' ';
+ *pchDst++ = ' ';
+ *pchDst++ = offSrc != 7 ? ' ' : '-';
+ offSrc++;
+ }
+ *pchDst++ = ' ';
+
+ offSrc = 0;
+ while (offSrc < cbLine)
+ {
+ char const ch = (int8_t)abLine[offSrc++];
+ if (ch < 0x20 || ch >= 0x7f)
+ *pchDst++ = '.';
+ else
+ *pchDst++ = ch;
+ }
+ *pchDst = '\0';
+ Assert((uintptr_t)(pchDst - &szLine[0]) < sizeof(szLine));
+ Assert(strchr(szLine, '\0') == pchDst);
+
+ LogRel(("%#010llx: %s\n", offCur, szLine));
+ }
+ offCur += cbLine;
+ cbLine = 0;
+ } while ( !pSSM->u.Read.fEndOfData
+ && RT_SUCCESS(pSSM->rc));
+ LogRel(("SSM: offCur=%#llx fEndOfData=%d (rc=%Rrc)\n", offCur, pSSM->u.Read.fEndOfData, rc));
+ }
+ else if (RT_SUCCESS(rc))
+ LogRel(("SSM: Cannot dump unit - mismatching unit head\n"));
+ else
+ LogRel(("SSM: Cannot dump unit - unit header read error: %Rrc\n", rc));
+ }
+ else
+ LogRel(("SSM: Cannot dump unit - ssmR3StrmSeek error: %Rrc\n", rc));
+
+ pSSM->rc = rcSaved;
+}
+
+
+/**
+ * Find a data unit by name.
+ *
+ * @returns Pointer to the unit.
+ * @returns NULL if not found.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszName Data unit name.
+ * @param uInstance The data unit instance id.
+ */
+static PSSMUNIT ssmR3Find(PVM pVM, const char *pszName, uint32_t uInstance)
+{
+ size_t cchName = strlen(pszName);
+ PSSMUNIT pUnit = pVM->ssm.s.pHead;
+ while ( pUnit
+ && ( pUnit->u32Instance != uInstance
+ || pUnit->cchName != cchName
+ || memcmp(pUnit->szName, pszName, cchName)))
+ pUnit = pUnit->pNext;
+ return pUnit;
+}
+
+
+/**
+ * Executes the loading of a V1.X file.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3LoadExecV1(PVM pVM, PSSMHANDLE pSSM)
+{
+ int rc;
+ char *pszName = NULL;
+ size_t cchName = 0;
+ pSSM->enmOp = SSMSTATE_LOAD_EXEC;
+ for (;;)
+ {
+ /*
+ * Save the current file position and read the data unit header.
+ */
+ uint64_t offUnit = ssmR3StrmTell(&pSSM->Strm);
+ SSMFILEUNITHDRV1 UnitHdr;
+ rc = ssmR3StrmRead(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF(SSMFILEUNITHDRV1, szName));
+ if (RT_SUCCESS(rc))
+ {
+ /*
+             * Check the magic and see if it's valid and whether it is an end header or not.
+ */
+ if (memcmp(&UnitHdr.achMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(SSMFILEUNITHDR_MAGIC)))
+ {
+ if (!memcmp(&UnitHdr.achMagic[0], SSMFILEUNITHDR_END, sizeof(SSMFILEUNITHDR_END)))
+ {
+ Log(("SSM: EndOfFile: offset %#9llx size %9d\n", offUnit, UnitHdr.cbUnit));
+ /* Complete the progress bar (pending 99% afterwards). */
+ ssmR3ProgressByByte(pSSM, pSSM->cbEstTotal - pSSM->offEst);
+ break;
+ }
+ LogRel(("SSM: Invalid unit magic at offset %#llx (%lld), '%.*s'!\n",
+ offUnit, offUnit, sizeof(UnitHdr.achMagic) - 1, &UnitHdr.achMagic[0]));
+ rc = VERR_SSM_INTEGRITY_UNIT_MAGIC;
+ break;
+ }
+
+ /*
+ * Read the name.
+ * Adjust the name buffer first.
+ */
+ if (cchName < UnitHdr.cchName)
+ {
+ if (pszName)
+ RTMemTmpFree(pszName);
+ cchName = RT_ALIGN_Z(UnitHdr.cchName, 64);
+ pszName = (char *)RTMemTmpAlloc(cchName);
+ }
+ if (pszName)
+ {
+ rc = ssmR3StrmRead(&pSSM->Strm, pszName, UnitHdr.cchName);
+ if (RT_SUCCESS(rc))
+ {
+ if (pszName[UnitHdr.cchName - 1])
+ {
+ LogRel(("SSM: Unit name '%.*s' was not properly terminated.\n", UnitHdr.cchName, pszName));
+ rc = VERR_SSM_INTEGRITY_UNIT;
+ break;
+ }
+ Log(("SSM: Data unit: offset %#9llx size %9lld '%s'\n", offUnit, UnitHdr.cbUnit, pszName));
+
+ /*
+ * Find the data unit in our internal table.
+ */
+ PSSMUNIT pUnit = ssmR3Find(pVM, pszName, UnitHdr.u32Instance);
+ if (pUnit)
+ {
+ /*
+ * Call the execute handler.
+ */
+ pSSM->cbUnitLeftV1 = UnitHdr.cbUnit - RT_UOFFSETOF_DYN(SSMFILEUNITHDRV1, szName[UnitHdr.cchName]);
+ pSSM->offUnit = 0;
+ pSSM->offUnitUser = 0;
+ pSSM->u.Read.uCurUnitVer = UnitHdr.u32Version;
+ pSSM->u.Read.uCurUnitPass = SSM_PASS_FINAL;
+ pSSM->u.Read.pCurUnit = pUnit;
+ if (!pUnit->u.Common.pfnLoadExec)
+ {
+ LogRel(("SSM: No load exec callback for unit '%s'!\n", pszName));
+ pSSM->rc = rc = VERR_SSM_NO_LOAD_EXEC;
+ break;
+ }
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLoadExec(pUnit->u.Dev.pDevIns, pSSM, UnitHdr.u32Version, SSM_PASS_FINAL);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLoadExec(pUnit->u.Drv.pDrvIns, pSSM, UnitHdr.u32Version, SSM_PASS_FINAL);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLoadExec(pUnit->u.Usb.pUsbIns, pSSM, UnitHdr.u32Version, SSM_PASS_FINAL);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLoadExec(pVM, pSSM, UnitHdr.u32Version, SSM_PASS_FINAL);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLoadExec(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser,
+ UnitHdr.u32Version, SSM_PASS_FINAL);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+
+ /*
+ * Close the reader stream.
+ */
+ rc = ssmR3DataReadFinishV1(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Now, we'll check the current position to see if all, or
+ * more than all, the data was read.
+ *
+ * Note! Because of buffering / compression we'll only see the
+ * really bad ones here.
+ */
+ uint64_t off = ssmR3StrmTell(&pSSM->Strm);
+ int64_t i64Diff = off - (offUnit + UnitHdr.cbUnit);
+ if (i64Diff < 0)
+ {
+ Log(("SSM: Unit '%s' left %lld bytes unread!\n", pszName, -i64Diff));
+ rc = ssmR3StrmSkipTo(&pSSM->Strm, offUnit + UnitHdr.cbUnit);
+ ssmR3ProgressByByte(pSSM, offUnit + UnitHdr.cbUnit - pSSM->offEst);
+ }
+ else if (i64Diff > 0)
+ {
+ LogRel(("SSM: Unit '%s' read %lld bytes too much!\n", pszName, i64Diff));
+ if (!ASMAtomicXchgBool(&pSSM->u.Read.fHaveSetError, true))
+ rc = VMSetError(pVM, VERR_SSM_LOADED_TOO_MUCH, RT_SRC_POS,
+ N_("Unit '%s' read %lld bytes too much"), pszName, i64Diff);
+ break;
+ }
+
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+ }
+ else
+ {
+                    LogRel(("SSM: Load exec failed for '%s' instance #%u! (version %u)\n",
+ pszName, UnitHdr.u32Instance, UnitHdr.u32Version));
+ if (!ASMAtomicXchgBool(&pSSM->u.Read.fHaveSetError, true))
+ {
+ if (rc == VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION)
+ VMSetError(pVM, rc, RT_SRC_POS, N_("Unsupported version %u of data unit '%s' (instance #%u)"),
+ UnitHdr.u32Version, UnitHdr.szName, UnitHdr.u32Instance);
+ else
+ VMSetError(pVM, rc, RT_SRC_POS, N_("Load exec failed for '%s' instance #%u (version %u)"),
+ pszName, UnitHdr.u32Instance, UnitHdr.u32Version);
+ }
+ break;
+ }
+
+ pSSM->u.Read.pCurUnit = NULL;
+ pSSM->u.Read.uCurUnitVer = UINT32_MAX;
+ pSSM->u.Read.uCurUnitPass = 0;
+ }
+ else
+ {
+ /*
+ * SSM unit wasn't found - ignore this when loading for the debugger.
+ */
+ LogRel(("SSM: Found no handler for unit '%s'!\n", pszName));
+ rc = VERR_SSM_INTEGRITY_UNIT_NOT_FOUND;
+ if (pSSM->enmAfter != SSMAFTER_DEBUG_IT)
+ break;
+ rc = ssmR3StrmSkipTo(&pSSM->Strm, offUnit + UnitHdr.cbUnit);
+ }
+ }
+ }
+ else
+ rc = VERR_NO_TMP_MEMORY;
+ }
+
+ /*
+         * I/O errors end up here (yeah, I know, very nice programming).
+ */
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: I/O error. rc=%Rrc\n", rc));
+ break;
+ }
+
+ /*
+ * Check for cancellation.
+ */
+ if (RT_UNLIKELY(ASMAtomicUoReadU32(&(pSSM)->fCancelled) == SSMHANDLE_CANCELLED))
+ {
+            LogRel(("SSM: Cancelled!\n"));
+ rc = pSSM->rc;
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc = VERR_SSM_CANCELLED;
+ break;
+ }
+ }
+
+ RTMemTmpFree(pszName);
+ return rc;
+}
+
+
+/**
+ * Reads and verifies the directory and footer.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3LoadDirectoryAndFooter(PSSMHANDLE pSSM)
+{
+ /*
+ * The directory.
+ *
+ * Get the header containing the number of entries first. Then read the
+ * entries and pass the combined block to the validation function.
+ */
+ uint64_t off = ssmR3StrmTell(&pSSM->Strm);
+ size_t const cbDirHdr = RT_UOFFSETOF(SSMFILEDIR, aEntries);
+ SSMFILEDIR DirHdr;
+ int rc = ssmR3StrmRead(&pSSM->Strm, &DirHdr, cbDirHdr);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertLogRelMsgReturn(!memcmp(DirHdr.szMagic, SSMFILEDIR_MAGIC, sizeof(DirHdr.szMagic)),
+ ("Invalid directory magic at %#llx (%lld): %.*Rhxs\n", off, off, sizeof(DirHdr.szMagic), DirHdr.szMagic),
+ VERR_SSM_INTEGRITY_DIR_MAGIC);
+ AssertLogRelMsgReturn(DirHdr.cEntries < _64K,
+ ("Too many directory entries at %#llx (%lld): %#x\n", off, off, DirHdr.cEntries),
+ VERR_SSM_INTEGRITY_DIR);
+
+ size_t cbDir = RT_UOFFSETOF_DYN(SSMFILEDIR, aEntries[DirHdr.cEntries]);
+ PSSMFILEDIR pDir = (PSSMFILEDIR)RTMemTmpAlloc(cbDir);
+ if (!pDir)
+ return VERR_NO_TMP_MEMORY;
+ memcpy(pDir, &DirHdr, cbDirHdr);
+ rc = ssmR3StrmRead(&pSSM->Strm, (uint8_t *)pDir + cbDirHdr, cbDir - cbDirHdr);
+ if (RT_SUCCESS(rc))
+ rc = ssmR3ValidateDirectory(pDir, cbDir, off, DirHdr.cEntries, pSSM->u.Read.cbFileHdr, pSSM->u.Read.u32SvnRev);
+ RTMemTmpFree(pDir);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Read and validate the footer.
+ */
+ off = ssmR3StrmTell(&pSSM->Strm);
+ uint32_t u32StreamCRC = ssmR3StrmFinalCRC(&pSSM->Strm);
+ SSMFILEFTR Footer;
+ rc = ssmR3StrmRead(&pSSM->Strm, &Footer, sizeof(Footer));
+ if (RT_FAILURE(rc))
+ return rc;
+ return ssmR3ValidateFooter(&Footer, off, DirHdr.cEntries, pSSM->u.Read.fStreamCrc32, u32StreamCRC);
+}
+
+
+/**
+ * Executes the loading of a V2.X file.
+ *
+ * @returns VBox status code. May or may not set pSSM->rc, the returned
+ * status code is ALWAYS the more accurate of the two.
+ * @param pVM The cross context VM structure.
+ * @param pSSM The saved state handle.
+ */
+static int ssmR3LoadExecV2(PVM pVM, PSSMHANDLE pSSM)
+{
+ pSSM->enmOp = SSMSTATE_LOAD_EXEC;
+ for (;;)
+ {
+ /*
+ * Read the unit header and check its integrity.
+ */
+ uint64_t offUnit = ssmR3StrmTell(&pSSM->Strm);
+ uint32_t u32CurStreamCRC = ssmR3StrmCurCRC(&pSSM->Strm);
+ SSMFILEUNITHDRV2 UnitHdr;
+ int rc = ssmR3StrmRead(&pSSM->Strm, &UnitHdr, RT_UOFFSETOF(SSMFILEUNITHDRV2, szName));
+ if (RT_FAILURE(rc))
+ return rc;
+ if (RT_UNLIKELY( memcmp(&UnitHdr.szMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(UnitHdr.szMagic))
+ && memcmp(&UnitHdr.szMagic[0], SSMFILEUNITHDR_END, sizeof(UnitHdr.szMagic))))
+ {
+ LogRel(("SSM: Unit at %#llx (%lld): Invalid unit magic: %.*Rhxs!\n",
+ offUnit, offUnit, sizeof(UnitHdr.szMagic) - 1, &UnitHdr.szMagic[0]));
+ pSSM->u.Read.fHaveSetError = true;
+ return VMSetError(pVM, VERR_SSM_INTEGRITY_UNIT_MAGIC, RT_SRC_POS,
+ N_("Unit at %#llx (%lld): Invalid unit magic"), offUnit, offUnit);
+ }
+ if (UnitHdr.cbName)
+ {
+ AssertLogRelMsgReturn(UnitHdr.cbName <= sizeof(UnitHdr.szName),
+ ("Unit at %#llx (%lld): UnitHdr.cbName=%u > %u\n",
+ offUnit, offUnit, UnitHdr.cbName, sizeof(UnitHdr.szName)),
+ VERR_SSM_INTEGRITY_UNIT);
+ rc = ssmR3StrmRead(&pSSM->Strm, &UnitHdr.szName[0], UnitHdr.cbName);
+ if (RT_FAILURE(rc))
+ return rc;
+ AssertLogRelMsgReturn(!UnitHdr.szName[UnitHdr.cbName - 1],
+ ("Unit at %#llx (%lld): Name %.*Rhxs was not properly terminated.\n",
+ offUnit, offUnit, UnitHdr.cbName, UnitHdr.szName),
+ VERR_SSM_INTEGRITY_UNIT);
+ }
+ SSM_CHECK_CRC32_RET(&UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]),
+ ("Unit at %#llx (%lld): CRC mismatch: %08x, correct is %08x\n", offUnit, offUnit, u32CRC, u32ActualCRC));
+ AssertLogRelMsgReturn(UnitHdr.offStream == offUnit,
+ ("Unit at %#llx (%lld): offStream=%#llx, expected %#llx\n", offUnit, offUnit, UnitHdr.offStream, offUnit),
+ VERR_SSM_INTEGRITY_UNIT);
+ AssertLogRelMsgReturn(UnitHdr.u32CurStreamCRC == u32CurStreamCRC || !pSSM->Strm.fChecksummed,
+ ("Unit at %#llx (%lld): Stream CRC mismatch: %08x, correct is %08x\n", offUnit, offUnit, UnitHdr.u32CurStreamCRC, u32CurStreamCRC),
+ VERR_SSM_INTEGRITY_UNIT);
+ AssertLogRelMsgReturn(!UnitHdr.fFlags, ("Unit at %#llx (%lld): fFlags=%08x\n", offUnit, offUnit, UnitHdr.fFlags),
+ VERR_SSM_INTEGRITY_UNIT);
+ if (!memcmp(&UnitHdr.szMagic[0], SSMFILEUNITHDR_END, sizeof(UnitHdr.szMagic)))
+ {
+ AssertLogRelMsgReturn( UnitHdr.cbName == 0
+ && UnitHdr.u32Instance == 0
+ && UnitHdr.u32Version == 0
+ && UnitHdr.u32Pass == SSM_PASS_FINAL,
+ ("Unit at %#llx (%lld): Malformed END unit\n", offUnit, offUnit),
+ VERR_SSM_INTEGRITY_UNIT);
+
+ /*
+ * Complete the progress bar (pending 99% afterwards) and RETURN.
+ */
+ Log(("SSM: Unit at %#9llx: END UNIT\n", offUnit));
+ ssmR3ProgressByByte(pSSM, pSSM->cbEstTotal - pSSM->offEst);
+ return ssmR3LoadDirectoryAndFooter(pSSM);
+ }
+ AssertLogRelMsgReturn(UnitHdr.cbName > 1, ("Unit at %#llx (%lld): No name\n", offUnit, offUnit), VERR_SSM_INTEGRITY);
+
+ Log(("SSM: Unit at %#9llx: '%s', instance %u, pass %#x, version %u\n",
+ offUnit, UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Pass, UnitHdr.u32Version));
+
+ /*
+ * Find the data unit in our internal table.
+ */
+ PSSMUNIT pUnit = ssmR3Find(pVM, UnitHdr.szName, UnitHdr.u32Instance);
+ if (pUnit)
+ {
+ /*
+ * Call the execute handler.
+ */
+ AssertLogRelMsgReturn(pUnit->u.Common.pfnLoadExec,
+ ("SSM: No load exec callback for unit '%s'!\n", UnitHdr.szName),
+ VERR_SSM_NO_LOAD_EXEC);
+ pSSM->u.Read.uCurUnitVer = UnitHdr.u32Version;
+ pSSM->u.Read.uCurUnitPass = UnitHdr.u32Pass;
+ pSSM->u.Read.pCurUnit = pUnit;
+ ssmR3DataReadBeginV2(pSSM);
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLoadExec(pUnit->u.Dev.pDevIns, pSSM, UnitHdr.u32Version, UnitHdr.u32Pass);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLoadExec(pUnit->u.Drv.pDrvIns, pSSM, UnitHdr.u32Version, UnitHdr.u32Pass);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLoadExec(pUnit->u.Usb.pUsbIns, pSSM, UnitHdr.u32Version, UnitHdr.u32Pass);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLoadExec(pVM, pSSM, UnitHdr.u32Version, UnitHdr.u32Pass);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLoadExec(pSSM, VMMR3GetVTable(), pUnit->u.External.pvUser,
+ UnitHdr.u32Version, UnitHdr.u32Pass);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ pUnit->fCalled = true;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
+ pSSM->rc = rc;
+ rc = ssmR3DataReadFinishV2(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+ }
+ else
+ {
+ LogRel(("SSM: LoadExec failed for '%s' instance #%u (version %u, pass %#x): %Rrc\n",
+ UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Version, UnitHdr.u32Pass, rc));
+ LogRel(("SSM: Unit at %#llx, current position: offUnit=%#llx offUnitUser=%#llx\n",
+ offUnit, pSSM->offUnit, pSSM->offUnitUser));
+
+ if (!ASMAtomicXchgBool(&pSSM->u.Read.fHaveSetError, true))
+ {
+ if (rc == VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION)
+ rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Unsupported version %u of data unit '%s' (instance #%u, pass %#x)"),
+ UnitHdr.u32Version, UnitHdr.szName, UnitHdr.u32Instance, UnitHdr.u32Pass);
+ else
+ rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to load unit '%s'"), UnitHdr.szName);
+ }
+
+                /* Try to log the unit content, unless it's too big. */
+ if (pSSM->offUnitUser < _512K)
+ ssmR3StrmLogUnitContent(pSSM, &UnitHdr, offUnit, 0, pSSM->offUnitUser + _16K);
+ else
+ ssmR3StrmLogUnitContent(pSSM, &UnitHdr, offUnit, pSSM->offUnitUser - _256K, pSSM->offUnitUser + _16K);
+ return rc;
+ }
+ }
+ else
+ {
+ /*
+ * SSM unit wasn't found - ignore this when loading for the debugger.
+ */
+ LogRel(("SSM: Found no handler for unit '%s' instance #%u!\n", UnitHdr.szName, UnitHdr.u32Instance));
+ if (pSSM->enmAfter != SSMAFTER_DEBUG_IT)
+ {
+ pSSM->u.Read.fHaveSetError = true;
+ return VMSetError(pVM, VERR_SSM_INTEGRITY_UNIT_NOT_FOUND, RT_SRC_POS,
+ N_("Found no handler for unit '%s' instance #%u"), UnitHdr.szName, UnitHdr.u32Instance);
+ }
+ SSMR3SkipToEndOfUnit(pSSM);
+ ssmR3DataReadFinishV2(pSSM);
+ }
+
+ /*
+ * Check for cancellation.
+ */
+ if (RT_UNLIKELY(ASMAtomicUoReadU32(&(pSSM)->fCancelled) == SSMHANDLE_CANCELLED))
+ {
+ LogRel(("SSM: Cancelled!\n"));
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = VERR_SSM_CANCELLED;
+ return pSSM->rc;
+ }
+ }
+ /* won't get here */
+}
+
+
+
+
+/**
+ * Load VM save operation.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszFilename The name of the saved state file. NULL if pStreamOps
+ * is used.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvStreamOpsUser The user argument for the stream methods.
+ * @param enmAfter What is planned after a successful load operation.
+ *                          Only acceptable values are SSMAFTER_RESUME,
+ *                          SSMAFTER_TELEPORT and SSMAFTER_DEBUG_IT.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser)
+{
+ LogFlow(("SSMR3Load: pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p\n",
+ pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser));
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn( enmAfter == SSMAFTER_RESUME
+ || enmAfter == SSMAFTER_TELEPORT
+ || enmAfter == SSMAFTER_DEBUG_IT,
+ ("%d\n", enmAfter),
+ VERR_INVALID_PARAMETER);
+ AssertReturn(!pszFilename != !pStreamOps, VERR_INVALID_PARAMETER);
+ if (pStreamOps)
+ {
+ AssertReturn(pStreamOps->u32Version == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->u32EndVersion == SSMSTRMOPS_VERSION, VERR_INVALID_MAGIC);
+ AssertReturn(pStreamOps->pfnWrite, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnRead, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSeek, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnTell, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnSize, VERR_INVALID_PARAMETER);
+ AssertReturn(pStreamOps->pfnClose, VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Create the handle and open the file.
+ */
+ SSMHANDLE Handle;
+ int rc = ssmR3OpenFile(pVM, pszFilename, pStreamOps, pvStreamOpsUser, false /* fChecksumIt */,
+ true /* fChecksumOnRead */, 8 /*cBuffers*/, &Handle);
+ if (RT_SUCCESS(rc))
+ {
+ ssmR3StrmStartIoThread(&Handle.Strm);
+ ssmR3SetCancellable(pVM, &Handle, true);
+
+ Handle.enmAfter = enmAfter;
+ Handle.pfnProgress = pfnProgress;
+ Handle.pvUser = pvProgressUser;
+ Handle.uPercentLive = 0;
+ Handle.uPercentPrepare = 2;
+ Handle.uPercentDone = 2;
+
+ if (Handle.u.Read.u16VerMajor)
+ LogRel(("SSM: File header: Format %u.%u, VirtualBox Version %u.%u.%u r%u, %u-bit host, cbGCPhys=%u, cbGCPtr=%u\n",
+ Handle.u.Read.uFmtVerMajor, Handle.u.Read.uFmtVerMinor,
+ Handle.u.Read.u16VerMajor, Handle.u.Read.u16VerMinor, Handle.u.Read.u32VerBuild, Handle.u.Read.u32SvnRev,
+ Handle.u.Read.cHostBits, Handle.u.Read.cbGCPhys, Handle.u.Read.cbGCPtr));
+ else
+        LogRel(("SSM: File header: Format %u.%u, %u-bit host, cbGCPhys=%u, cbGCPtr=%u\n",
+ Handle.u.Read.uFmtVerMajor, Handle.u.Read.uFmtVerMinor,
+ Handle.u.Read.cHostBits, Handle.u.Read.cbGCPhys, Handle.u.Read.cbGCPtr));
+
+ if (pfnProgress)
+ pfnProgress(pVM->pUVM, Handle.uPercent, pvProgressUser);
+
+ /*
+ * Clear the per unit flags.
+ */
+ PSSMUNIT pUnit;
+ for (pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ pUnit->fCalled = false;
+
+ /*
+ * Do the prepare run.
+ */
+ Handle.rc = VINF_SUCCESS;
+ Handle.enmOp = SSMSTATE_LOAD_PREP;
+ for (pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if (pUnit->u.Common.pfnLoadPrep)
+ {
+ Handle.u.Read.pCurUnit = pUnit;
+ pUnit->fCalled = true;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLoadPrep(pUnit->u.Dev.pDevIns, &Handle);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLoadPrep(pUnit->u.Drv.pDrvIns, &Handle);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLoadPrep(pUnit->u.Usb.pUsbIns, &Handle);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLoadPrep(pVM, &Handle);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLoadPrep(&Handle, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ Handle.u.Read.pCurUnit = NULL;
+ if (RT_FAILURE(rc) && RT_SUCCESS_NP(Handle.rc))
+ Handle.rc = rc;
+ else
+ rc = Handle.rc;
+ if (RT_FAILURE(rc))
+ {
+                    LogRel(("SSM: Prepare load failed with rc=%Rrc for data unit '%s'.\n", rc, pUnit->szName));
+ break;
+ }
+ }
+ }
+
+ /* end of prepare % */
+ if (pfnProgress)
+ pfnProgress(pVM->pUVM, Handle.uPercentPrepare - 1, pvProgressUser);
+ Handle.uPercent = Handle.uPercentPrepare;
+ Handle.cbEstTotal = Handle.u.Read.cbLoadFile;
+ Handle.offEstUnitEnd = Handle.u.Read.cbLoadFile;
+
+ /*
+ * Do the execute run.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ if (Handle.u.Read.uFmtVerMajor >= 2)
+ rc = ssmR3LoadExecV2(pVM, &Handle);
+ else
+ rc = ssmR3LoadExecV1(pVM, &Handle);
+ Handle.u.Read.pCurUnit = NULL;
+ Handle.u.Read.uCurUnitVer = UINT32_MAX;
+ Handle.u.Read.uCurUnitPass = 0;
+
+ /* (progress should be pending 99% now) */
+ AssertMsg( Handle.fLiveSave
+ || RT_FAILURE(rc)
+ || Handle.uPercent == 101 - Handle.uPercentDone, ("%d\n", Handle.uPercent));
+ }
+
+ /*
+ * Do the done run.
+ */
+ Handle.rc = rc;
+ Handle.enmOp = SSMSTATE_LOAD_DONE;
+ for (pUnit = pVM->ssm.s.pHead; pUnit; pUnit = pUnit->pNext)
+ {
+ if ( pUnit->u.Common.pfnLoadDone
+ && ( pUnit->fCalled
+ || (!pUnit->u.Common.pfnLoadPrep && !pUnit->u.Common.pfnLoadExec)))
+ {
+ Handle.u.Read.pCurUnit = pUnit;
+ int const rcOld = Handle.rc;
+ rc = VINF_SUCCESS;
+ ssmR3UnitCritSectEnter(pVM, pUnit);
+ switch (pUnit->enmType)
+ {
+ case SSMUNITTYPE_DEV:
+ rc = pUnit->u.Dev.pfnLoadDone(pUnit->u.Dev.pDevIns, &Handle);
+ break;
+ case SSMUNITTYPE_DRV:
+ rc = pUnit->u.Drv.pfnLoadDone(pUnit->u.Drv.pDrvIns, &Handle);
+ break;
+ case SSMUNITTYPE_USB:
+ rc = pUnit->u.Usb.pfnLoadDone(pUnit->u.Usb.pUsbIns, &Handle);
+ break;
+ case SSMUNITTYPE_INTERNAL:
+ rc = pUnit->u.Internal.pfnLoadDone(pVM, &Handle);
+ break;
+ case SSMUNITTYPE_EXTERNAL:
+ rc = pUnit->u.External.pfnLoadDone(&Handle, VMMR3GetVTable(), pUnit->u.External.pvUser);
+ break;
+ default:
+ rc = VERR_SSM_IPE_1;
+ break;
+ }
+ ssmR3UnitCritSectLeave(pVM, pUnit);
+ Handle.u.Read.pCurUnit = NULL;
+ if (RT_SUCCESS(rc) && Handle.rc != rcOld)
+ rc = Handle.rc;
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("SSM: LoadDone failed with rc=%Rrc for data unit '%s' instance #%u.\n",
+ rc, pUnit->szName, pUnit->u32Instance));
+ if (!ASMAtomicXchgBool(&Handle.u.Read.fHaveSetError, true))
+ VMSetError(pVM, rc, RT_SRC_POS, N_("LoadDone failed with rc=%Rrc for data unit '%s' instance #%u."),
+ rc, pUnit->szName, pUnit->u32Instance);
+ if (RT_SUCCESS_NP(Handle.rc))
+ Handle.rc = rc;
+ }
+ }
+ }
+
+ /* progress */
+ if (pfnProgress)
+ pfnProgress(pVM->pUVM, 99, pvProgressUser);
+
+ ssmR3SetCancellable(pVM, &Handle, false);
+ ssmR3StrmClose(&Handle.Strm, Handle.rc == VERR_SSM_CANCELLED);
+ rc = Handle.rc;
+ }
+
+ /*
+ * Done
+ */
+ if (RT_SUCCESS(rc))
+ {
+ /* progress */
+ if (pfnProgress)
+ pfnProgress(pVM->pUVM, 100, pvProgressUser);
+ Log(("SSM: Load of '%s' completed!\n", pszFilename));
+ }
+ return rc;
+}
+
+
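+/*
+ * A minimal sketch of how a caller might drive SSMR3Load (the file name is
+ * hypothetical and progress reporting is skipped; assumes EMT(0) and a VM
+ * that is not running):
+ *
+ *     int rc = SSMR3Load(pVM, "MyVM.sav", NULL, NULL,
+ *                        SSMAFTER_RESUME, NULL, NULL);
+ *     if (RT_FAILURE(rc))
+ *         LogRel(("Restore failed: %Rrc\n", rc));
+ */
+
+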
+/**
+ * VMSetError wrapper for load errors that inserts the saved state details.
+ *
+ * @returns rc.
+ * @param pSSM The saved state handle.
+ * @param   rc              The status code of the error.
+ * @param   SRC_POS         The source location, use RT_SRC_POS.
+ * @param pszFormat The message format string.
+ * @param ... Variable argument list.
+ */
+VMMR3DECL(int) SSMR3SetLoadError(PSSMHANDLE pSSM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ rc = SSMR3SetLoadErrorV(pSSM, rc, RT_SRC_POS_ARGS, pszFormat, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * VMSetError wrapper for load errors that inserts the saved state details.
+ *
+ * @returns rc.
+ * @param pSSM The saved state handle.
+ * @param rc The status code of the error.
+ * @param SRC_POS The error location, use RT_SRC_POS.
+ * @param pszFormat The message format string.
+ * @param va Variable argument list.
+ */
+VMMR3DECL(int) SSMR3SetLoadErrorV(PSSMHANDLE pSSM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ /*
+ * Input validations.
+ */
+ SSM_ASSERT_READABLE_RET(pSSM);
+ AssertPtr(pszFormat);
+ Assert(RT_FAILURE_NP(rc));
+
+ /*
+ * Format the incoming error.
+ */
+ char *pszMsg;
+ RTStrAPrintfV(&pszMsg, pszFormat, va);
+ if (!pszMsg)
+ {
+ VMSetError(pSSM->pVM, VERR_NO_MEMORY, RT_SRC_POS,
+ N_("SSMR3SetLoadErrorV ran out of memory formatting: %s\n"), pszFormat);
+ return rc;
+ }
+
+ /*
+ * Forward to VMSetError with the additional info.
+ */
+ PSSMUNIT pUnit = pSSM->u.Read.pCurUnit;
+ const char *pszName = pUnit ? pUnit->szName : "unknown";
+ uint32_t uInstance = pUnit ? pUnit->u32Instance : 0;
+ if ( pSSM->enmOp == SSMSTATE_LOAD_EXEC
+ && pSSM->u.Read.uCurUnitPass == SSM_PASS_FINAL)
+ rc = VMSetError(pSSM->pVM, rc, RT_SRC_POS_ARGS, N_("%s#%u: %s [ver=%u pass=final]"),
+ pszName, uInstance, pszMsg, pSSM->u.Read.uCurUnitVer);
+ else if (pSSM->enmOp == SSMSTATE_LOAD_EXEC)
+ rc = VMSetError(pSSM->pVM, rc, RT_SRC_POS_ARGS, N_("%s#%u: %s [ver=%u pass=#%u]"),
+ pszName, uInstance, pszMsg, pSSM->u.Read.uCurUnitVer, pSSM->u.Read.uCurUnitPass);
+ else if (pSSM->enmOp == SSMSTATE_LOAD_PREP)
+ rc = VMSetError(pSSM->pVM, rc, RT_SRC_POS_ARGS, N_("%s#%u: %s [prep]"),
+ pszName, uInstance, pszMsg);
+ else if (pSSM->enmOp == SSMSTATE_LOAD_DONE)
+ rc = VMSetError(pSSM->pVM, rc, RT_SRC_POS_ARGS, N_("%s#%u: %s [done]"),
+ pszName, uInstance, pszMsg);
+ else if (pSSM->enmOp == SSMSTATE_OPEN_READ)
+ rc = VMSetError(pSSM->pVM, rc, RT_SRC_POS_ARGS, N_("%s#%u: %s [read]"),
+ pszName, uInstance, pszMsg);
+ else
+ AssertFailed();
+ pSSM->u.Read.fHaveSetError = true;
+ RTStrFree(pszMsg);
+ return rc;
+}
+
+
+/**
+ * SSMR3SetLoadError wrapper that returns VERR_SSM_LOAD_CONFIG_MISMATCH.
+ *
+ * @returns VERR_SSM_LOAD_CONFIG_MISMATCH.
+ * @param pSSM The saved state handle.
+ * @param SRC_POS The error location, use RT_SRC_POS.
+ * @param pszFormat The message format string.
+ * @param ... Variable argument list.
+ */
+VMMR3DECL(int) SSMR3SetCfgError(PSSMHANDLE pSSM, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = SSMR3SetLoadErrorV(pSSM, VERR_SSM_LOAD_CONFIG_MISMATCH, RT_SRC_POS_ARGS, pszFormat, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * SSMR3SetLoadError wrapper that returns VERR_SSM_LOAD_CONFIG_MISMATCH.
+ *
+ * @returns VERR_SSM_LOAD_CONFIG_MISMATCH.
+ * @param pSSM The saved state handle.
+ * @param SRC_POS The error location, use RT_SRC_POS.
+ * @param pszFormat The message format string.
+ * @param va Variable argument list.
+ */
+VMMR3DECL(int) SSMR3SetCfgErrorV(PSSMHANDLE pSSM, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ return SSMR3SetLoadErrorV(pSSM, VERR_SSM_LOAD_CONFIG_MISMATCH, RT_SRC_POS_ARGS, pszFormat, va);
+}
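+
+
+/*
+ * A sketch of how a load callback might use these helpers to fail a restore
+ * on a config mismatch (pThis and its port count are hypothetical; SSMR3GetU32
+ * is the regular SSM getter):
+ *
+ *     uint32_t cPorts;
+ *     int rc = SSMR3GetU32(pSSM, &cPorts);
+ *     AssertRCReturn(rc, rc);
+ *     if (cPorts != pThis->cPorts)
+ *         return SSMR3SetCfgError(pSSM, RT_SRC_POS,
+ *                                 N_("Port count mismatch: saved=%u config=%u"),
+ *                                 cPorts, pThis->cPorts);
+ */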
+
+#endif /* !SSM_STANDALONE */
+
+/**
+ * Validates a file as an SSM saved state.
+ *
+ * This will only verify the file format; the format and content of individual
+ * data units are not inspected.
+ *
+ * @returns VINF_SUCCESS if valid.
+ * @returns VBox status code on other failures.
+ *
+ * @param pszFilename The path to the file to validate.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvStreamOps The user argument to the stream methods.
+ * @param fChecksumIt Whether to checksum the file or not.
+ *
+ * @thread Any.
+ */
+VMMR3DECL(int) SSMR3ValidateFile(const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOps, bool fChecksumIt)
+{
+ LogFlow(("SSMR3ValidateFile: pszFilename=%p:{%s} fChecksumIt=%RTbool\n", pszFilename, pszFilename, fChecksumIt));
+
+ /*
+ * Try open the file and validate it.
+ */
+ SSMHANDLE Handle;
+ int rc = ssmR3OpenFile(NULL, pszFilename, pStreamOps, pvStreamOps, fChecksumIt,
+ false /*fChecksumOnRead*/, 1 /*cBuffers*/, &Handle);
+ if (RT_SUCCESS(rc))
+ ssmR3StrmClose(&Handle.Strm, false /*fCancelled*/);
+ else
+ Log(("SSM: Failed to open saved state file '%s', rc=%Rrc.\n", pszFilename, rc));
+ return rc;
+}
+
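+/*
+ * A sketch of a pre-flight check before attempting a restore (hypothetical
+ * file name; passing true for fChecksumIt makes this read and checksum the
+ * whole file):
+ *
+ *     int rc = SSMR3ValidateFile("MyVM.sav", NULL, NULL, true);
+ *     if (RT_FAILURE(rc))
+ *         return rc;
+ */
+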
+
+/**
+ * Opens a saved state file for reading.
+ *
+ * @returns VBox status code.
+ *
+ * @param pszFilename The path to the saved state file.
+ * @param pStreamOps The stream method table. NULL if pszFilename is
+ * used.
+ * @param pvStreamOps The user argument to the stream methods.
+ * @param fFlags Open flags. Reserved, must be 0.
+ * @param ppSSM Where to store the SSM handle.
+ *
+ * @thread Any.
+ */
+VMMR3DECL(int) SSMR3Open(const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOps,
+ unsigned fFlags, PSSMHANDLE *ppSSM)
+{
+ LogFlow(("SSMR3Open: pszFilename=%p:{%s} fFlags=%#x ppSSM=%p\n", pszFilename, pszFilename, fFlags, ppSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertReturn(!pszFilename != !pStreamOps, VERR_INVALID_POINTER);
+ AssertMsgReturn(!fFlags, ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppSSM, VERR_INVALID_POINTER);
+
+ /*
+ * Allocate a handle.
+ */
+ PSSMHANDLE pSSM = (PSSMHANDLE)RTMemAllocZ(sizeof(*pSSM));
+ AssertReturn(pSSM, VERR_NO_MEMORY);
+
+ /*
+ * Try open the file and validate it.
+ */
+ int rc = ssmR3OpenFile(NULL, pszFilename, pStreamOps, pvStreamOps, false /*fChecksumIt*/,
+ true /*fChecksumOnRead*/, 1 /*cBuffers*/, pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ pSSM->enmAfter = SSMAFTER_OPENED;
+ pSSM->enmOp = SSMSTATE_OPEN_READ;
+ *ppSSM = pSSM;
+ LogFlow(("SSMR3Open: returns VINF_SUCCESS *ppSSM=%p\n", *ppSSM));
+ return VINF_SUCCESS;
+ }
+
+ Log(("SSMR3Open: Failed to open saved state file '%s', rc=%Rrc.\n", pszFilename, rc));
+ RTMemFree(pSSM);
+ return rc;
+}
+
+
+/**
+ * Closes a saved state file opened by SSMR3Open().
+ *
+ * @returns VBox status code.
+ *
+ * @param pSSM The SSM handle returned by SSMR3Open().
+ *
+ * @thread Any, but the caller is responsible for serializing calls per handle.
+ */
+VMMR3DECL(int) SSMR3Close(PSSMHANDLE pSSM)
+{
+ LogFlow(("SSMR3Close: pSSM=%p\n", pSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pSSM, VERR_INVALID_POINTER);
+    AssertMsgReturn(pSSM->enmAfter == SSMAFTER_OPENED, ("%d\n", pSSM->enmAfter), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pSSM->enmOp == SSMSTATE_OPEN_READ, ("%d\n", pSSM->enmOp), VERR_INVALID_PARAMETER);
+ Assert(pSSM->fCancelled == SSMHANDLE_OK);
+
+ /*
+ * Close the stream and free the handle.
+ */
+ int rc = ssmR3StrmClose(&pSSM->Strm, pSSM->rc == VERR_SSM_CANCELLED);
+ if (pSSM->u.Read.pZipDecompV1)
+ {
+ RTZipDecompDestroy(pSSM->u.Read.pZipDecompV1);
+ pSSM->u.Read.pZipDecompV1 = NULL;
+ }
+ RTMemFree(pSSM);
+ return rc;
+}
+
+
+/**
+ * Worker for SSMR3Seek that seeks version 1 saved state files.
+ *
+ * @returns VBox status code.
+ * @param pSSM The SSM handle.
+ * @param pszUnit The unit to seek to.
+ * @param iInstance The particular instance we seek.
+ * @param piVersion Where to store the unit version number.
+ */
+static int ssmR3FileSeekV1(PSSMHANDLE pSSM, const char *pszUnit, uint32_t iInstance, uint32_t *piVersion)
+{
+ /*
+ * Walk the data units until we find EOF or a match.
+ */
+ size_t cbUnitNm = strlen(pszUnit) + 1;
+ AssertLogRelReturn(cbUnitNm <= SSM_MAX_NAME_SIZE, VERR_SSM_UNIT_NOT_FOUND);
+ char szName[SSM_MAX_NAME_SIZE];
+ SSMFILEUNITHDRV1 UnitHdr;
+ for (RTFOFF off = pSSM->u.Read.cbFileHdr; ; off += UnitHdr.cbUnit)
+ {
+ /*
+ * Read the unit header and verify it.
+ */
+ int rc = ssmR3StrmPeekAt(&pSSM->Strm, off, &UnitHdr, RT_UOFFSETOF(SSMFILEUNITHDRV1, szName), NULL);
+ AssertRCReturn(rc, rc);
+ if (!memcmp(&UnitHdr.achMagic[0], SSMFILEUNITHDR_MAGIC, sizeof(SSMFILEUNITHDR_MAGIC)))
+ {
+ /*
+ * Does what we've got match, if so read the name.
+ */
+ if ( UnitHdr.u32Instance == iInstance
+ && UnitHdr.cchName == cbUnitNm)
+ {
+ rc = ssmR3StrmPeekAt(&pSSM->Strm, off + RT_UOFFSETOF(SSMFILEUNITHDRV1, szName), szName, cbUnitNm, NULL);
+ AssertRCReturn(rc, rc);
+ AssertLogRelMsgReturn(!szName[UnitHdr.cchName - 1],
+ (" Unit name '%.*s' was not properly terminated.\n", cbUnitNm, szName),
+ VERR_SSM_INTEGRITY_UNIT);
+
+ /*
+ * Does the name match?
+ */
+ if (!memcmp(szName, pszUnit, cbUnitNm))
+ {
+ rc = ssmR3StrmSeek(&pSSM->Strm, off + RT_UOFFSETOF(SSMFILEUNITHDRV1, szName) + cbUnitNm, RTFILE_SEEK_BEGIN, 0);
+ pSSM->cbUnitLeftV1 = UnitHdr.cbUnit - RT_UOFFSETOF_DYN(SSMFILEUNITHDRV1, szName[cbUnitNm]);
+ pSSM->offUnit = 0;
+ pSSM->offUnitUser = 0;
+ if (piVersion)
+ *piVersion = UnitHdr.u32Version;
+ return VINF_SUCCESS;
+ }
+ }
+ }
+ else if (!memcmp(&UnitHdr.achMagic[0], SSMFILEUNITHDR_END, sizeof(SSMFILEUNITHDR_END)))
+ return VERR_SSM_UNIT_NOT_FOUND;
+ else
+ AssertLogRelMsgFailedReturn(("Invalid unit magic at offset %RTfoff, '%.*s'!\n",
+ off, sizeof(UnitHdr.achMagic) - 1, &UnitHdr.achMagic[0]),
+ VERR_SSM_INTEGRITY_UNIT_MAGIC);
+ }
+ /* won't get here. */
+}
+
+
+/**
+ * Worker for ssmR3FileSeekV2 for simplifying memory cleanup.
+ *
+ * @returns VBox status code.
+ * @param pSSM The SSM handle.
+ * @param pDir The directory buffer.
+ * @param cbDir The size of the directory.
+ * @param cDirEntries The number of directory entries.
+ * @param offDir The directory offset in the file.
+ * @param pszUnit The unit to seek to.
+ * @param iInstance The particular instance we seek.
+ * @param piVersion Where to store the unit version number.
+ */
+static int ssmR3FileSeekSubV2(PSSMHANDLE pSSM, PSSMFILEDIR pDir, size_t cbDir, uint32_t cDirEntries, uint64_t offDir,
+ const char *pszUnit, uint32_t iInstance, uint32_t *piVersion)
+{
+ /*
+ * Read it.
+ */
+ int rc = ssmR3StrmPeekAt(&pSSM->Strm, offDir, pDir, cbDir, NULL);
+ AssertLogRelRCReturn(rc, rc);
+ rc = ssmR3ValidateDirectory(pDir, (uint32_t)cbDir, offDir, cDirEntries, pSSM->u.Read.cbFileHdr, pSSM->u.Read.u32SvnRev);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Search the directory.
+ */
+ size_t cbUnitNm = strlen(pszUnit) + 1;
+ uint32_t const u32NameCRC = RTCrc32(pszUnit, cbUnitNm - 1);
+ for (uint32_t i = 0; i < cDirEntries; i++)
+ {
+ if ( pDir->aEntries[i].u32NameCRC == u32NameCRC
+ && pDir->aEntries[i].u32Instance == iInstance
+ && pDir->aEntries[i].off != 0 /* bug in unreleased code */
+ )
+ {
+ /*
+ * Read and validate the unit header.
+ */
+ SSMFILEUNITHDRV2 UnitHdr;
+ size_t cbToRead = sizeof(UnitHdr);
+ if (pDir->aEntries[i].off + cbToRead > offDir)
+ {
+ cbToRead = offDir - pDir->aEntries[i].off;
+ RT_ZERO(UnitHdr);
+ }
+ rc = ssmR3StrmPeekAt(&pSSM->Strm, pDir->aEntries[i].off, &UnitHdr, cbToRead, NULL);
+ AssertLogRelRCReturn(rc, rc);
+
+ AssertLogRelMsgReturn(!memcmp(UnitHdr.szMagic, SSMFILEUNITHDR_MAGIC, sizeof(UnitHdr.szMagic)),
+ ("Bad unit header or dictionary offset: i=%u off=%lld\n", i, pDir->aEntries[i].off),
+ VERR_SSM_INTEGRITY_UNIT);
+ AssertLogRelMsgReturn(UnitHdr.offStream == pDir->aEntries[i].off,
+ ("Bad unit header: i=%d off=%lld offStream=%lld\n", i, pDir->aEntries[i].off, UnitHdr.offStream),
+ VERR_SSM_INTEGRITY_UNIT);
+ AssertLogRelMsgReturn(UnitHdr.u32Instance == pDir->aEntries[i].u32Instance,
+ ("Bad unit header: i=%d off=%lld u32Instance=%u Dir.u32Instance=%u\n",
+ i, pDir->aEntries[i].off, UnitHdr.u32Instance, pDir->aEntries[i].u32Instance),
+ VERR_SSM_INTEGRITY_UNIT);
+ uint32_t cbUnitHdr = RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]);
+ AssertLogRelMsgReturn( UnitHdr.cbName > 0
+ && UnitHdr.cbName < sizeof(UnitHdr)
+ && cbUnitHdr <= cbToRead,
+ ("Bad unit header: i=%u off=%lld cbName=%#x cbToRead=%#x\n", i, pDir->aEntries[i].off, UnitHdr.cbName, cbToRead),
+ VERR_SSM_INTEGRITY_UNIT);
+ SSM_CHECK_CRC32_RET(&UnitHdr, RT_UOFFSETOF_DYN(SSMFILEUNITHDRV2, szName[UnitHdr.cbName]),
+ ("Bad unit header CRC: i=%u off=%lld u32CRC=%#x u32ActualCRC=%#x\n",
+ i, pDir->aEntries[i].off, u32CRC, u32ActualCRC));
+
+ /*
+ * Ok, it is valid, get on with the comparing now.
+ */
+ if ( UnitHdr.cbName == cbUnitNm
+ && !memcmp(UnitHdr.szName, pszUnit, cbUnitNm))
+ {
+ if (piVersion)
+ *piVersion = UnitHdr.u32Version;
+ rc = ssmR3StrmSeek(&pSSM->Strm, pDir->aEntries[i].off + cbUnitHdr, RTFILE_SEEK_BEGIN,
+ RTCrc32Process(UnitHdr.u32CurStreamCRC, &UnitHdr, cbUnitHdr));
+ AssertLogRelRCReturn(rc, rc);
+ ssmR3DataReadBeginV2(pSSM);
+ return VINF_SUCCESS;
+ }
+ }
+ }
+
+ return VERR_SSM_UNIT_NOT_FOUND;
+}
+
+
+/**
+ * Worker for SSMR3Seek that seeks version 2 saved state files.
+ *
+ * @returns VBox status code.
+ * @param pSSM The SSM handle.
+ * @param pszUnit The unit to seek to.
+ * @param iInstance The particular instance we seek.
+ * @param piVersion Where to store the unit version number.
+ */
+static int ssmR3FileSeekV2(PSSMHANDLE pSSM, const char *pszUnit, uint32_t iInstance, uint32_t *piVersion)
+{
+ /*
+ * Read the footer, allocate a temporary buffer for the dictionary and
+ * pass it down to a worker to simplify cleanup.
+ */
+ uint64_t offFooter;
+ SSMFILEFTR Footer;
+ int rc = ssmR3StrmPeekAt(&pSSM->Strm, -(RTFOFF)sizeof(Footer), &Footer, sizeof(Footer), &offFooter);
+ AssertLogRelRCReturn(rc, rc);
+ AssertLogRelReturn(!memcmp(Footer.szMagic, SSMFILEFTR_MAGIC, sizeof(Footer.szMagic)), VERR_SSM_INTEGRITY);
+ SSM_CHECK_CRC32_RET(&Footer, sizeof(Footer), ("Bad footer CRC: %08x, actual %08x\n", u32CRC, u32ActualCRC));
+
+ size_t const cbDir = RT_UOFFSETOF_DYN(SSMFILEDIR, aEntries[Footer.cDirEntries]);
+ PSSMFILEDIR pDir = (PSSMFILEDIR)RTMemTmpAlloc(cbDir);
+ if (RT_UNLIKELY(!pDir))
+ return VERR_NO_TMP_MEMORY;
+ rc = ssmR3FileSeekSubV2(pSSM, pDir, cbDir, Footer.cDirEntries, offFooter - cbDir,
+ pszUnit, iInstance, piVersion);
+ RTMemTmpFree(pDir);
+
+ return rc;
+}
+
+
+/**
+ * Seeks to a specific data unit.
+ *
+ * After seeking it's possible to use the getters on that
+ * data unit.
+ *
+ * @returns VBox status code.
+ * @returns VERR_SSM_UNIT_NOT_FOUND if the unit+instance wasn't found.
+ *
+ * @param pSSM The SSM handle returned by SSMR3Open().
+ * @param pszUnit The name of the data unit.
+ * @param iInstance The instance number.
+ * @param piVersion Where to store the version number. (Optional)
+ *
+ * @thread Any, but the caller is responsible for serializing calls per handle.
+ */
+VMMR3DECL(int) SSMR3Seek(PSSMHANDLE pSSM, const char *pszUnit, uint32_t iInstance, uint32_t *piVersion)
+{
+ LogFlow(("SSMR3Seek: pSSM=%p pszUnit=%p:{%s} iInstance=%RU32 piVersion=%p\n",
+ pSSM, pszUnit, pszUnit, iInstance, piVersion));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pSSM, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pSSM->enmAfter == SSMAFTER_OPENED, ("%d\n", pSSM->enmAfter), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pSSM->enmOp == SSMSTATE_OPEN_READ, ("%d\n", pSSM->enmOp), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszUnit, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(piVersion, VERR_INVALID_POINTER);
+
+ /*
+ * Reset the state.
+ */
+ if (pSSM->u.Read.pZipDecompV1)
+ {
+ RTZipDecompDestroy(pSSM->u.Read.pZipDecompV1);
+ pSSM->u.Read.pZipDecompV1 = NULL;
+ }
+ pSSM->cbUnitLeftV1 = 0;
+ pSSM->offUnit = UINT64_MAX;
+ pSSM->offUnitUser = UINT64_MAX;
+
+ /*
+ * Call the version specific workers.
+ */
+ if (pSSM->u.Read.uFmtVerMajor >= 2)
+ pSSM->rc = ssmR3FileSeekV2(pSSM, pszUnit, iInstance, piVersion);
+ else
+ pSSM->rc = ssmR3FileSeekV1(pSSM, pszUnit, iInstance, piVersion);
+ return pSSM->rc;
+}
+
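+/*
+ * A sketch of inspecting a single data unit offline via the open/seek API
+ * (unit name and getter sequence are hypothetical; error handling trimmed):
+ *
+ *     PSSMHANDLE pSSM;
+ *     int rc = SSMR3Open("MyVM.sav", NULL, NULL, 0, &pSSM);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         uint32_t uVersion;
+ *         rc = SSMR3Seek(pSSM, "mm", 0, &uVersion);
+ *         if (RT_SUCCESS(rc))
+ *         {
+ *             uint64_t cb;
+ *             rc = SSMR3GetU64(pSSM, &cb);
+ *         }
+ *         SSMR3Close(pSSM);
+ *     }
+ */
+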
+
+
+/* ... Misc APIs ... */
+
+
+
+/**
+ * Queries the VBox status code of the operation.
+ *
+ * This can be used for putting and getting a batch of values
+ * without bothering to check the result till all the calls have
+ * been made.
+ *
+ * @returns VBox status code.
+ * @param pSSM The saved state handle.
+ */
+VMMR3DECL(int) SSMR3HandleGetStatus(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ return pSSM->rc;
+}
+
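+/*
+ * A sketch of batching getters and checking the status once at the end, as
+ * described above (the fields are hypothetical; the handle keeps the first
+ * failure, so intermediate checks can be skipped):
+ *
+ *     SSMR3GetU32(pSSM, &pThis->uState);
+ *     SSMR3GetBool(pSSM, &pThis->fEnabled);
+ *     SSMR3GetU64(pSSM, &pThis->cTicks);
+ *     int rc = SSMR3HandleGetStatus(pSSM);
+ *     AssertRCReturn(rc, rc);
+ */
+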
+
+/**
+ * Fail the load operation.
+ *
+ * This is mainly intended for sub item loaders (like timers) whose
+ * return code isn't necessarily heeded by the caller but is
+ * important to SSM.
+ *
+ * @returns VBox status code of the handle, or VERR_INVALID_PARAMETER.
+ * @param pSSM The saved state handle.
+ * @param iStatus Failure status code. This MUST be a VERR_*.
+ */
+VMMR3DECL(int) SSMR3HandleSetStatus(PSSMHANDLE pSSM, int iStatus)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ Assert(pSSM->enmOp != SSMSTATE_LIVE_VOTE);
+ if (RT_FAILURE(iStatus))
+ {
+ int rc = pSSM->rc;
+ if (RT_SUCCESS(rc))
+ pSSM->rc = rc = iStatus;
+ return rc;
+ }
+ AssertMsgFailed(("iStatus=%d %Rrc\n", iStatus, iStatus));
+ return VERR_INVALID_PARAMETER;
+}
+
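+/*
+ * A sketch of a sub item loader making sure its failure sticks even when the
+ * caller ignores the return value (MYDEV_SAVED_STATE_VERSION is hypothetical):
+ *
+ *     if (uVersion > MYDEV_SAVED_STATE_VERSION)
+ *         return SSMR3HandleSetStatus(pSSM, VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
+ */
+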
+
+/**
+ * Get what to do after this operation.
+ *
+ * @returns SSMAFTER enum value.
+ * @param pSSM The saved state handle.
+ */
+VMMR3DECL(SSMAFTER) SSMR3HandleGetAfter(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ return pSSM->enmAfter;
+}
+
+
+/**
+ * Checks if it is a live save operation or not.
+ *
+ * @returns True if it is, false if it isn't.
+ * @param pSSM The saved state handle.
+ */
+VMMR3DECL(bool) SSMR3HandleIsLiveSave(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ return pSSM->fLiveSave;
+}
+
+
+/**
+ * Gets the maximum downtime for a live operation.
+ *
+ * @returns The max downtime in milliseconds. Can be anything from 0 thru
+ * UINT32_MAX.
+ *
+ * @param pSSM The saved state handle.
+ */
+VMMR3DECL(uint32_t) SSMR3HandleMaxDowntime(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ if (pSSM->enmOp <= SSMSTATE_SAVE_DONE)
+ return pSSM->u.Write.cMsMaxDowntime;
+ return UINT32_MAX;
+}
+
+
+/**
+ * Gets the host bit count of a saved state.
+ *
+ * @returns 32 or 64. If pSSM is invalid, 0 is returned.
+ * @param pSSM The saved state handle.
+ *
+ * @remarks This method should ONLY be used for hacks when loading OLDER saved
+ * state that have data layout or semantic changes without the
+ * compulsory version number change.
+ */
+VMMR3DECL(uint32_t) SSMR3HandleHostBits(PSSMHANDLE pSSM)
+{
+ SSM_ASSERT_VALID_HANDLE(pSSM);
+ return ssmR3GetHostBits(pSSM);
+}
+
+
+/**
+ * Get the VirtualBox SVN revision that created the saved state.
+ *
+ * @returns The revision number on success.
+ *          If we don't know, it's 0.
+ * @param pSSM The saved state handle.
+ *
+ * @remarks This method should ONLY be used for hacks when loading OLDER saved
+ * state that have data layout or semantic changes without the
+ * compulsory version number change. Be VERY careful with this
+ * function since it will return different values for OSE builds!
+ */
+VMMR3DECL(uint32_t) SSMR3HandleRevision(PSSMHANDLE pSSM)
+{
+ if (pSSM->enmOp >= SSMSTATE_LOAD_PREP)
+ return pSSM->u.Read.u32SvnRev;
+#ifdef SSM_STANDALONE
+ return 0;
+#else
+ return VMMGetSvnRev();
+#endif
+}
+
+
+/**
+ * Gets the VirtualBox version that created the saved state.
+ *
+ * @returns VBOX_FULL_VERSION style version number.
+ * Returns UINT32_MAX if unknown or somehow out of range.
+ *
+ * @param pSSM The saved state handle.
+ *
+ * @remarks This method should ONLY be used for hacks when loading OLDER saved
+ * state that have data layout or semantic changes without the
+ * compulsory version number change.
+ */
+VMMR3DECL(uint32_t) SSMR3HandleVersion(PSSMHANDLE pSSM)
+{
+ if (pSSM->enmOp >= SSMSTATE_LOAD_PREP)
+ {
+ if ( !pSSM->u.Read.u16VerMajor
+ && !pSSM->u.Read.u16VerMinor
+ && !pSSM->u.Read.u32VerBuild)
+ return UINT32_MAX;
+ AssertReturn(pSSM->u.Read.u16VerMajor <= 0xff, UINT32_MAX);
+ AssertReturn(pSSM->u.Read.u16VerMinor <= 0xff, UINT32_MAX);
+ AssertReturn(pSSM->u.Read.u32VerBuild <= 0xffff, UINT32_MAX);
+ return VBOX_FULL_VERSION_MAKE(pSSM->u.Read.u16VerMajor, pSSM->u.Read.u16VerMinor, pSSM->u.Read.u32VerBuild);
+ }
+ return VBOX_FULL_VERSION;
+}
+
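+/*
+ * A sketch of gating a semantic quirk on the creating version without a unit
+ * version bump, per the remark above (the quirk flag is hypothetical):
+ *
+ *     if (SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(4, 2, 0))
+ *         pThis->fOldLayoutQuirk = true;
+ */
+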
+
+/**
+ * Get the host OS and architecture where the saved state was created.
+ *
+ * @returns Pointer to a read only string. When known, this is on the os.arch
+ * form. If we don't know, it's an empty string.
+ * @param pSSM The saved state handle.
+ *
+ * @remarks This method should ONLY be used for hacks when loading OLDER saved
+ * state that have data layout or semantic changes without the
+ * compulsory version number change.
+ */
+VMMR3DECL(const char *) SSMR3HandleHostOSAndArch(PSSMHANDLE pSSM)
+{
+ if (pSSM->enmOp >= SSMSTATE_LOAD_PREP)
+ return pSSM->u.Read.szHostOSAndArch;
+ return KBUILD_TARGET "." KBUILD_TARGET_ARCH;
+}
+
+
+#ifdef DEBUG
+/**
+ * Gets current data offset, relative to the start of the unit - only for debugging
+ */
+VMMR3DECL(uint64_t) SSMR3HandleTellInUnit(PSSMHANDLE pSSM)
+{
+ return ssmR3StrmTell(&pSSM->Strm) - pSSM->offUnitUser;
+}
+#endif
+
+
+#ifndef SSM_STANDALONE
+/**
+ * Asynchronously cancels the current SSM operation ASAP.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval  VERR_SSM_NO_PENDING_OPERATION if there is no pending operation
+ *          that can be cancelled.
+ * @retval  VERR_SSM_ALREADY_CANCELLED if the operation has already been
+ *          cancelled.
+ *
+ * @param pUVM The VM handle.
+ *
+ * @thread Any.
+ */
+VMMR3DECL(int) SSMR3Cancel(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ int rc = RTCritSectEnter(&pVM->ssm.s.CancelCritSect);
+ AssertRCReturn(rc, rc);
+
+ PSSMHANDLE pSSM = pVM->ssm.s.pSSM;
+ if (pSSM)
+ {
+ uint32_t u32Old;
+ if (ASMAtomicCmpXchgExU32(&pSSM->fCancelled, SSMHANDLE_CANCELLED, SSMHANDLE_OK, &u32Old))
+ {
+ LogRel(("SSM: Cancelled pending operation\n"));
+ rc = VINF_SUCCESS;
+ }
+ else if (u32Old == SSMHANDLE_CANCELLED)
+ rc = VERR_SSM_ALREADY_CANCELLED;
+ else
+ {
+ AssertLogRelMsgFailed(("fCancelled=%RX32 enmOp=%d\n", u32Old, pSSM->enmOp));
+ rc = VERR_SSM_IPE_3;
+ }
+ }
+ else
+ rc = VERR_SSM_NO_PENDING_OPERATION;
+
+ RTCritSectLeave(&pVM->ssm.s.CancelCritSect);
+ return rc;
+}
+#endif /* !SSM_STANDALONE */
+
diff --git a/src/VBox/VMM/VMMR3/STAM.cpp b/src/VBox/VMM/VMMR3/STAM.cpp
new file mode 100644
index 00000000..017baa9b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/STAM.cpp
@@ -0,0 +1,3196 @@
+/* $Id: STAM.cpp $ */
+/** @file
+ * STAM - The Statistics Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_stam STAM - The Statistics Manager
+ *
+ * The purpose of the statistics manager is to present the rest of the system
+ * with a somewhat uniform way of accessing VMM statistics. STAM sports a
+ * couple of different APIs for accessing them: STAMR3EnumU, STAMR3SnapshotU,
+ * STAMR3DumpU, STAMR3DumpToReleaseLogU and the debugger. Main is exposing the
+ * XML based one, STAMR3SnapshotU.
+ *
+ * The rest of the VMM together with the devices and drivers registers their
+ * statistics with STAM giving them a name. The name is hierarchical, the
+ * components separated by slashes ('/') and must start with a slash.
+ *
+ * Each item registered with STAM - also, half incorrectly, called a sample -
+ * has a type, unit, visibility, data pointer and description associated with it
+ * in addition to the name (described above). The type tells STAM what kind of
+ * structure the pointer is pointing to. The visibility keeps unused
+ * statistics from cluttering the output or showing up in the GUI. All the bits
+ * together make STAM able to present the items in a sensible way to the user.
+ * Some types also allow STAM to reset the data, which is very convenient when
+ * digging into specific operations and such.
+ *
+ * PS. The VirtualBox Debugger GUI has a viewer for inspecting the statistics
+ * STAM provides. You will also find statistics in the release and debug logs.
+ * And as mentioned in the introduction, the debugger console features a couple
+ * of commands: .stats and .statsreset.
+ *
+ * @see grp_stam
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_STAM
+#include <VBox/vmm/stam.h>
+#include "STAMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/err.h>
+#include <VBox/dbg.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/mem.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The maximum name length excluding the terminator. */
+#define STAM_MAX_NAME_LEN 239
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Argument structure for stamR3PrintOne().
+ */
+typedef struct STAMR3PRINTONEARGS
+{
+ PUVM pUVM;
+ void *pvArg;
+ DECLCALLBACKMEMBER(void, pfnPrintf,(struct STAMR3PRINTONEARGS *pvArg, const char *pszFormat, ...));
+} STAMR3PRINTONEARGS, *PSTAMR3PRINTONEARGS;
+
+
+/**
+ * Argument structure to stamR3EnumOne().
+ */
+typedef struct STAMR3ENUMONEARGS
+{
+ PVM pVM;
+ PFNSTAMR3ENUM pfnEnum;
+ void *pvUser;
+} STAMR3ENUMONEARGS, *PSTAMR3ENUMONEARGS;
+
+
+/**
+ * The snapshot status structure.
+ * Argument package passed to stamR3SnapshotOne, stamR3SnapshotPrintf and stamR3SnapshotOutput.
+ */
+typedef struct STAMR3SNAPSHOTONE
+{
+ /** Pointer to the buffer start. */
+ char *pszStart;
+ /** Pointer to the buffer end. */
+ char *pszEnd;
+ /** Pointer to the current buffer position. */
+ char *psz;
+ /** Pointer to the VM. */
+ PVM pVM;
+ /** The number of bytes allocated. */
+ size_t cbAllocated;
+ /** The status code. */
+ int rc;
+ /** Whether to include the description strings. */
+ bool fWithDesc;
+} STAMR3SNAPSHOTONE, *PSTAMR3SNAPSHOTONE;
+
+
+/**
+ * Init record for a ring-0 statistic sample.
+ */
+typedef struct STAMR0SAMPLE
+{
+ /** The GVMMSTATS structure offset of the variable. */
+ unsigned offVar;
+ /** The type. */
+ STAMTYPE enmType;
+ /** The unit. */
+ STAMUNIT enmUnit;
+ /** The name. */
+ const char *pszName;
+ /** The description. */
+ const char *pszDesc;
+} STAMR0SAMPLE;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void stamR3LookupDestroyTree(PSTAMLOOKUP pRoot);
+static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset,
+ PFNSTAMR3CALLBACKPRINT pfnPrint, STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ const char *pszName, STAMUNIT enmUnit, const char *pszDesc, uint8_t iRefreshGrp);
+static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg);
+static DECLCALLBACK(void) stamR3EnumLogPrintf(PSTAMR3PRINTONEARGS pvArg, const char *pszFormat, ...);
+static DECLCALLBACK(void) stamR3EnumRelLogPrintf(PSTAMR3PRINTONEARGS pvArg, const char *pszFormat, ...);
+static DECLCALLBACK(void) stamR3EnumPrintf(PSTAMR3PRINTONEARGS pvArg, const char *pszFormat, ...);
+static int stamR3SnapshotOne(PSTAMDESC pDesc, void *pvArg);
+static int stamR3SnapshotPrintf(PSTAMR3SNAPSHOTONE pThis, const char *pszFormat, ...);
+static int stamR3PrintOne(PSTAMDESC pDesc, void *pvArg);
+static int stamR3EnumOne(PSTAMDESC pDesc, void *pvArg);
+static bool stamR3MultiMatch(const char * const *papszExpressions, unsigned cExpressions, unsigned *piExpression, const char *pszName);
+static char ** stamR3SplitPattern(const char *pszPat, unsigned *pcExpressions, char **ppszCopy);
+static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0, int (pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg);
+static void stamR3Ring0StatsRegisterU(PUVM pUVM);
+
+#ifdef VBOX_WITH_DEBUGGER
+static FNDBGCCMD stamR3CmdStats;
+static DECLCALLBACK(void) stamR3EnumDbgfPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...);
+static FNDBGCCMD stamR3CmdStatsReset;
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef VBOX_WITH_DEBUGGER
+/** Pattern argument. */
+static const DBGCVARDESC g_aArgPat[] =
+{
+ /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
+ { 0, 1, DBGCVAR_CAT_STRING, 0, "pattern", "Which samples the command shall be applied to. Use '*' as wildcard. Use ';' to separate expression." }
+};
+
+/** Command descriptors. */
+static const DBGCCMD g_aCmds[] =
+{
+ /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
+ { "stats", 0, 1, &g_aArgPat[0], RT_ELEMENTS(g_aArgPat), 0, stamR3CmdStats, "[pattern]", "Display statistics." },
+ { "statsreset", 0, 1, &g_aArgPat[0], RT_ELEMENTS(g_aArgPat), 0, stamR3CmdStatsReset,"[pattern]", "Resets statistics." }
+};
+#endif
+
+
+/**
+ * The GVMM mapping records - sans the host cpus.
+ */
+static const STAMR0SAMPLE g_aGVMMStats[] =
+{
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cHaltCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/HaltCalls", "The number of calls to GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cHaltBlocking), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/HaltBlocking", "The number of times we did go to sleep in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cHaltTimeouts), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/HaltTimeouts", "The number of times we timed out in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cHaltNotBlocking), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/HaltNotBlocking", "The number of times we didn't go to sleep in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cHaltWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/HaltWakeUps", "The number of wake ups done during GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cWakeUpCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/WakeUpCalls", "The number of calls to GVMMR0WakeUp." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cWakeUpNotHalted), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/WakeUpNotHalted", "The number of times the EMT thread wasn't actually halted when GVMMR0WakeUp was called." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cWakeUpWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/WakeUpWakeUps", "The number of wake ups done during GVMMR0WakeUp (not counting the explicit one)." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cPokeCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/PokeCalls", "The number of calls to GVMMR0Poke." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cPokeNotBusy), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/PokeNotBusy", "The number of times the EMT thread wasn't actually busy when GVMMR0Poke was called." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cPollCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/PollCalls", "The number of calls to GVMMR0SchedPoll." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cPollHalts), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/PollHalts", "The number of times the EMT has halted in a GVMMR0SchedPoll call." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedVM.cPollWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/VM/PollWakeUps", "The number of wake ups done during GVMMR0SchedPoll." },
+
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cHaltCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/HaltCalls", "The number of calls to GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cHaltBlocking), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/HaltBlocking", "The number of times we did go to sleep in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cHaltTimeouts), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/HaltTimeouts", "The number of times we timed out in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cHaltNotBlocking), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/HaltNotBlocking", "The number of times we didn't go to sleep in GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cHaltWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/HaltWakeUps", "The number of wake ups done during GVMMR0SchedHalt." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cWakeUpCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/WakeUpCalls", "The number of calls to GVMMR0WakeUp." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cWakeUpNotHalted), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/WakeUpNotHalted", "The number of times the EMT thread wasn't actually halted when GVMMR0WakeUp was called." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cWakeUpWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/WakeUpWakeUps", "The number of wake ups done during GVMMR0WakeUp (not counting the explicit one)." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cPokeCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/PokeCalls", "The number of calls to GVMMR0Poke." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cPokeNotBusy), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/PokeNotBusy", "The number of times the EMT thread wasn't actually busy when GVMMR0Poke was called." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cPollCalls), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/PollCalls", "The number of calls to GVMMR0SchedPoll." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cPollHalts), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/PollHalts", "The number of times the EMT has halted in a GVMMR0SchedPoll call." },
+ { RT_UOFFSETOF(GVMMSTATS, SchedSum.cPollWakeUps), STAMTYPE_U64_RESET, STAMUNIT_CALLS, "/GVMM/Sum/PollWakeUps", "The number of wake ups done during GVMMR0SchedPoll." },
+
+ { RT_UOFFSETOF(GVMMSTATS, cVMs), STAMTYPE_U32, STAMUNIT_COUNT, "/GVMM/VMs", "The number of VMs accessible to the caller." },
+ { RT_UOFFSETOF(GVMMSTATS, cEMTs), STAMTYPE_U32, STAMUNIT_COUNT, "/GVMM/EMTs", "The number of emulation threads." },
+ { RT_UOFFSETOF(GVMMSTATS, cHostCpus), STAMTYPE_U32, STAMUNIT_COUNT, "/GVMM/HostCPUs", "The number of host CPUs." },
+};
+
+
+/**
+ * The GMM mapping records.
+ */
+static const STAMR0SAMPLE g_aGMMStats[] =
+{
+ { RT_UOFFSETOF(GMMSTATS, cMaxPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cMaxPages", "The maximum number of pages GMM is allowed to allocate." },
+ { RT_UOFFSETOF(GMMSTATS, cReservedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cReservedPages", "The number of pages that has been reserved." },
+ { RT_UOFFSETOF(GMMSTATS, cOverCommittedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cOverCommittedPages", "The number of pages that we have over-committed in reservations." },
+ { RT_UOFFSETOF(GMMSTATS, cAllocatedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cAllocatedPages", "The number of actually allocated (committed if you like) pages." },
+ { RT_UOFFSETOF(GMMSTATS, cSharedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cSharedPages", "The number of pages that are shared. A subset of cAllocatedPages." },
+ { RT_UOFFSETOF(GMMSTATS, cDuplicatePages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cDuplicatePages", "The number of pages that are actually shared between VMs." },
+ { RT_UOFFSETOF(GMMSTATS, cLeftBehindSharedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cLeftBehindSharedPages", "The number of pages that are shared that has been left behind by VMs not doing proper cleanups." },
+ { RT_UOFFSETOF(GMMSTATS, cBalloonedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/cBalloonedPages", "The number of current ballooned pages." },
+ { RT_UOFFSETOF(GMMSTATS, cChunks), STAMTYPE_U32, STAMUNIT_COUNT, "/GMM/cChunks", "The number of allocation chunks." },
+ { RT_UOFFSETOF(GMMSTATS, cFreedChunks), STAMTYPE_U32, STAMUNIT_COUNT, "/GMM/cFreedChunks", "The number of freed chunks ever." },
+ { RT_UOFFSETOF(GMMSTATS, cShareableModules), STAMTYPE_U32, STAMUNIT_COUNT, "/GMM/cShareableModules", "The number of shareable modules." },
+ { RT_UOFFSETOF(GMMSTATS, idFreeGeneration), STAMTYPE_U64, STAMUNIT_NONE, "/GMM/idFreeGeneration", "The current chunk freeing generation number (for per-VM chunk lookup TLB versioning)." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Reserved.cBasePages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/Reserved/cBasePages", "The amount of base memory (RAM, ROM, ++) reserved by the VM." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Reserved.cShadowPages), STAMTYPE_U32, STAMUNIT_PAGES, "/GMM/VM/Reserved/cShadowPages", "The amount of memory reserved for shadow/nested page tables." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Reserved.cFixedPages), STAMTYPE_U32, STAMUNIT_PAGES, "/GMM/VM/Reserved/cFixedPages", "The amount of memory reserved for fixed allocations like MMIO2 and the hyper heap." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Allocated.cBasePages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/Allocated/cBasePages", "The amount of base memory (RAM, ROM, ++) allocated by the VM." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Allocated.cShadowPages), STAMTYPE_U32, STAMUNIT_PAGES, "/GMM/VM/Allocated/cShadowPages", "The amount of memory allocated for shadow/nested page tables." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.Allocated.cFixedPages), STAMTYPE_U32, STAMUNIT_PAGES, "/GMM/VM/Allocated/cFixedPages", "The amount of memory allocated for fixed allocations like MMIO2 and the hyper heap." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cPrivatePages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cPrivatePages", "The current number of private pages." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cSharedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cSharedPages", "The current number of shared pages." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cBalloonedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cBalloonedPages", "The current number of ballooned pages." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cMaxBalloonedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cMaxBalloonedPages", "The max number of pages that can be ballooned." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cReqBalloonedPages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cReqBalloonedPages", "The number of pages we've currently requested the guest to give us." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cReqActuallyBalloonedPages),STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cReqActuallyBalloonedPages","The number of pages the guest has given us in response to the request." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cReqDeflatePages), STAMTYPE_U64, STAMUNIT_PAGES, "/GMM/VM/cReqDeflatePages", "The number of pages we've currently requested the guest to take back." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.cShareableModules), STAMTYPE_U32, STAMUNIT_COUNT, "/GMM/VM/cShareableModules", "The number of shareable modules traced by the VM." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.enmPolicy), STAMTYPE_U32, STAMUNIT_NONE, "/GMM/VM/enmPolicy", "The current over-commit policy." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.enmPriority), STAMTYPE_U32, STAMUNIT_NONE, "/GMM/VM/enmPriority", "The VM priority for arbitrating VMs in low and out of memory situation." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.fBallooningEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fBallooningEnabled", "Whether ballooning is enabled or not." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.fSharedPagingEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fSharedPagingEnabled", "Whether shared paging is enabled or not." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.fMayAllocate), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fMayAllocate", "Whether the VM is allowed to allocate memory or not." },
+};
+
+
+/**
+ * Initializes the STAM.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ */
+VMMR3DECL(int) STAMR3InitUVM(PUVM pUVM)
+{
+ LogFlow(("STAMR3InitUVM\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompile(sizeof(pUVM->stam.s) <= sizeof(pUVM->stam.padding));
+ AssertRelease(sizeof(pUVM->stam.s) <= sizeof(pUVM->stam.padding));
+
+ /*
+ * Initialize the read/write lock and list.
+ */
+ int rc = RTSemRWCreate(&pUVM->stam.s.RWSem);
+ AssertRCReturn(rc, rc);
+
+ RTListInit(&pUVM->stam.s.List);
+
+ /*
+ * Initialize the root node.
+ */
+ PSTAMLOOKUP pRoot = (PSTAMLOOKUP)RTMemAlloc(sizeof(STAMLOOKUP));
+ if (!pRoot)
+ {
+ RTSemRWDestroy(pUVM->stam.s.RWSem);
+ pUVM->stam.s.RWSem = NIL_RTSEMRW;
+ return VERR_NO_MEMORY;
+ }
+ pRoot->pParent = NULL;
+ pRoot->papChildren = NULL;
+ pRoot->pDesc = NULL;
+ pRoot->cDescsInTree = 0;
+ pRoot->cChildren = 0;
+ pRoot->iParent = UINT16_MAX;
+ pRoot->off = 0;
+ pRoot->cch = 0;
+ pRoot->szName[0] = '\0';
+
+ pUVM->stam.s.pRoot = pRoot;
+
+ /*
+ * Register the ring-0 statistics (GVMM/GMM).
+ */
+ if (!SUPR3IsDriverless())
+ stamR3Ring0StatsRegisterU(pUVM);
+
+#ifdef VBOX_WITH_DEBUGGER
+ /*
+ * Register debugger commands.
+ */
+ static bool fRegisteredCmds = false;
+ if (!fRegisteredCmds)
+ {
+ rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ if (RT_SUCCESS(rc))
+ fRegisteredCmds = true;
+ }
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Terminates the STAM.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3DECL(void) STAMR3TermUVM(PUVM pUVM)
+{
+ /*
+ * Free used memory and the RWLock.
+ */
+ PSTAMDESC pCur, pNext;
+ RTListForEachSafe(&pUVM->stam.s.List, pCur, pNext, STAMDESC, ListEntry)
+ {
+ pCur->pLookup->pDesc = NULL;
+ RTMemFree(pCur);
+ }
+
+ stamR3LookupDestroyTree(pUVM->stam.s.pRoot);
+ pUVM->stam.s.pRoot = NULL;
+
+ Assert(pUVM->stam.s.RWSem != NIL_RTSEMRW);
+ RTSemRWDestroy(pUVM->stam.s.RWSem);
+ pUVM->stam.s.RWSem = NIL_RTSEMRW;
+}
+
+
+/**
+ * Registers a sample with the statistics manager.
+ *
+ * Statistics are maintained on a per-VM basis and are normally registered
+ * during the VM init stage, but there is nothing preventing you from
+ * registering them at runtime.
+ *
+ * Use STAMR3Deregister() to deregister statistics at runtime, however do
+ * not bother calling it at termination time.
+ *
+ * It is not possible to register the same sample twice.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param pszName Sample name. The name is of the form "/<component>/<sample>".
+ * Further nesting is possible.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ */
+VMMR3DECL(int) STAMR3RegisterU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName,
+ STAMUNIT enmUnit, const char *pszDesc)
+{
+ AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc, STAM_REFRESH_GRP_NONE);
+}
+
+
+/**
+ * Registers a sample with the statistics manager.
+ *
+ * Statistics are maintained on a per-VM basis and are normally registered
+ * during the VM init stage, but there is nothing preventing you from
+ * registering them at runtime.
+ *
+ * Use STAMR3Deregister() to deregister statistics at runtime, however do
+ * not bother calling it at termination time.
+ *
+ * It is not possible to register the same sample twice.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param pszName Sample name. The name is of the form "/<component>/<sample>".
+ * Further nesting is possible.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ */
+VMMR3DECL(int) STAMR3Register(PVM pVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName,
+ STAMUNIT enmUnit, const char *pszDesc)
+{
+ AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
+ return stamR3RegisterU(pVM->pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc,
+ STAM_REFRESH_GRP_NONE);
+}
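+
+/* Usage sketch (illustrative only; the component name and counter variable are
+ * hypothetical, while the types and constants are the ones used by this file):
+ *
+ *      static STAMCOUNTER s_StatEvents;
+ *      int rc = STAMR3Register(pVM, &s_StatEvents, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ *                              "/MyComponent/cEvents", STAMUNIT_COUNT, "Number of events processed.");
+ *      AssertRC(rc);
+ */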
+
+
+/**
+ * Same as STAMR3RegisterU except that the name is specified in an
+ * RTStrPrintf like fashion.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param ... Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterFU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ const char *pszDesc, const char *pszName, ...)
+{
+ va_list args;
+ va_start(args, pszName);
+ int rc = STAMR3RegisterVU(pUVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, args);
+ va_end(args);
+ return rc;
+}
+
+
+/**
+ * Same as STAMR3Register except that the name is specified in an
+ * RTStrPrintf like fashion.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param ... Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterF(PVM pVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ const char *pszDesc, const char *pszName, ...)
+{
+ va_list args;
+ va_start(args, pszName);
+ int rc = STAMR3RegisterVU(pVM->pUVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, args);
+ va_end(args);
+ return rc;
+}
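+
+/* Usage sketch (the device name, state member and instance variable are
+ * hypothetical; note that in the F/V variants the unit and description come
+ * before the name format string):
+ *
+ *      rc = STAMR3RegisterF(pVM, &pThis->StatIrqs, STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ *                           STAMUNIT_COUNT, "Number of IRQs raised.", "/Devices/MyDev%u/cIrqs", iInstance);
+ */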
+
+
+/**
+ * Same as STAMR3RegisterU except that the name is specified in an
+ * RTStrPrintfV like fashion.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param args Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterVU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ const char *pszDesc, const char *pszName, va_list args)
+{
+ AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
+
+ char szFormattedName[STAM_MAX_NAME_LEN + 8];
+ size_t cch = RTStrPrintfV(szFormattedName, sizeof(szFormattedName), pszName, args);
+ AssertReturn(cch <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+
+ return STAMR3RegisterU(pUVM, pvSample, enmType, enmVisibility, szFormattedName, enmUnit, pszDesc);
+}
+
+
+/**
+ * Same as STAMR3Register except that the name is specified in an
+ * RTStrPrintfV like fashion.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param args Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterV(PVM pVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ const char *pszDesc, const char *pszName, va_list args)
+{
+ return STAMR3RegisterVU(pVM->pUVM, pvSample, enmType, enmVisibility, enmUnit, pszDesc, pszName, args);
+}
+
+
+/**
+ * Similar to STAMR3Register except for the two callbacks, the implied type (STAMTYPE_CALLBACK),
+ * and name given in an RTStrPrintf like fashion.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pfnReset Callback for resetting the sample. NULL should be used if the sample can't be reset.
+ * @param pfnPrint Print the sample.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param ... Arguments to the format string.
+ * @remark There is currently no device or driver variant of this API. Add one if it should become necessary!
+ */
+VMMR3DECL(int) STAMR3RegisterCallback(PVM pVM, void *pvSample, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint,
+ const char *pszDesc, const char *pszName, ...)
+{
+ va_list args;
+ va_start(args, pszName);
+ int rc = STAMR3RegisterCallbackV(pVM, pvSample, enmVisibility, enmUnit, pfnReset, pfnPrint, pszDesc, pszName, args);
+ va_end(args);
+ return rc;
+}
+
+
+/**
+ * Same as STAMR3RegisterCallback() except for the ellipsis which is a va_list here.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param pfnReset Callback for resetting the sample. NULL should be used if the sample can't be reset.
+ * @param pfnPrint Print the sample.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param args Arguments to the format string.
+ * @remark There is currently no device or driver variant of this API. Add one if it should become necessary!
+ */
+VMMR3DECL(int) STAMR3RegisterCallbackV(PVM pVM, void *pvSample, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint,
+ const char *pszDesc, const char *pszName, va_list args)
+{
+ char *pszFormattedName;
+ RTStrAPrintfV(&pszFormattedName, pszName, args);
+ if (!pszFormattedName)
+ return VERR_NO_MEMORY;
+
+ int rc = stamR3RegisterU(pVM->pUVM, pvSample, pfnReset, pfnPrint, STAMTYPE_CALLBACK, enmVisibility, pszFormattedName,
+ enmUnit, pszDesc, STAM_REFRESH_GRP_NONE);
+ RTStrFree(pszFormattedName);
+ return rc;
+}
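+
+/* Illustrative sketch of a callback sample (MYSTATE and all names are
+ * hypothetical; the callback signatures follow the way stamR3ResetOne and
+ * stamR3SnapshotOne invoke pfnReset/pfnPrint further down in this file):
+ *
+ *      static DECLCALLBACK(void) myStatPrint(PVM pVM, void *pvSample, char *pszBuf, size_t cbBuf)
+ *      {
+ *          RTStrPrintf(pszBuf, cbBuf, "%u", ((MYSTATE *)pvSample)->uValue);
+ *      }
+ *
+ *      rc = STAMR3RegisterCallback(pVM, pMyState, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+ *                                  NULL, myStatPrint,  // no reset callback
+ *                                  "Current value.", "/MyComponent/Value");
+ */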
+
+
+/**
+ * Same as STAMR3RegisterFU, except there is an extra refresh group parameter.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param ... Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterRefresh(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ uint8_t iRefreshGrp, const char *pszDesc, const char *pszName, ...)
+{
+ va_list args;
+ va_start(args, pszName);
+ int rc = STAMR3RegisterRefreshV(pUVM, pvSample, enmType, enmVisibility, enmUnit, iRefreshGrp, pszDesc, pszName, args);
+ va_end(args);
+ return rc;
+}
+
+
+/**
+ * Same as STAMR3RegisterVU, except there is an extra refresh group parameter.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param enmUnit Sample unit.
+ * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX.
+ * @param pszDesc Sample description.
+ * @param pszName The sample name format string.
+ * @param va Arguments to the format string.
+ */
+VMMR3DECL(int) STAMR3RegisterRefreshV(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, STAMUNIT enmUnit,
+ uint8_t iRefreshGrp, const char *pszDesc, const char *pszName, va_list va)
+{
+ AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
+
+ char szFormattedName[STAM_MAX_NAME_LEN + 8];
+ size_t cch = RTStrPrintfV(szFormattedName, sizeof(szFormattedName), pszName, va);
+ AssertReturn(cch <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, szFormattedName, enmUnit, pszDesc, iRefreshGrp);
+}
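+
+/* Usage sketch (the refresh group constant is an assumption -- see the
+ * STAM_REFRESH_GRP_XXX defines; the sample member is hypothetical):
+ *
+ *      rc = STAMR3RegisterRefresh(pUVM, &pUVM->stam.s.SomeRing0Mirror, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ *                                 STAMUNIT_COUNT, STAM_REFRESH_GRP_GVMM, "A ring-0 mirrored value.",
+ *                                 "/MyComponent/R0Value");
+ */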
+
+
+#ifdef VBOX_STRICT
+/**
+ * Divide the strings into sub-strings using '/' as delimiter
+ * and then compare them in strcmp fashion.
+ *
+ * @returns Difference.
+ * @retval 0 if equal.
+ * @retval < 0 if psz1 is less than psz2.
+ * @retval > 0 if psz1 greater than psz2.
+ *
+ * @param psz1 The first string.
+ * @param psz2 The second string.
+ */
+static int stamR3SlashCompare(const char *psz1, const char *psz2)
+{
+ for (;;)
+ {
+ unsigned int ch1 = *psz1++;
+ unsigned int ch2 = *psz2++;
+ if (ch1 != ch2)
+ {
+ /* slash is end-of-sub-string, so it trumps everything but '\0'. */
+ if (ch1 == '/')
+ return ch2 ? -1 : 1;
+ if (ch2 == '/')
+ return ch1 ? 1 : -1;
+ return ch1 - ch2;
+ }
+
+ /* done? */
+ if (ch1 == '\0')
+ return 0;
+ }
+}
+#endif /* VBOX_STRICT */
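+
+/* Worked example: plain strcmp() orders "/Foo-bar/x" before "/Foo/x" because '-'
+ * (0x2d) sorts before '/' (0x2f), while stamR3SlashCompare treats '/' as an
+ * end-of-component marker and orders "/Foo/x" first -- the same order the lookup
+ * tree produces and the sort-order assertions in stamR3RegisterU rely on. */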
+
+
+/**
+ * Compares a lookup node with a name.
+ *
+ * @returns like strcmp and memcmp.
+ * @param pNode The lookup node.
+ * @param pchName The name, not necessarily terminated.
+ * @param cchName The length of the name.
+ */
+DECL_FORCE_INLINE(int) stamR3LookupCmp(PSTAMLOOKUP pNode, const char *pchName, uint32_t cchName)
+{
+ uint32_t cchComp = RT_MIN(pNode->cch, cchName);
+ int iDiff = memcmp(pNode->szName, pchName, cchComp);
+ if (!iDiff && pNode->cch != cchName)
+ iDiff = pNode->cch > cchName ? 2 : -2;
+ return iDiff;
+}
+
+
+/**
+ * Creates a new lookup child node.
+ *
+ * @returns Pointer to the newly created lookup node.
+ * @param pParent The parent node.
+ * @param pchName The name (not necessarily terminated).
+ * @param cchName The length of the name.
+ * @param offName The offset of the node in a path.
+ * @param iChild Child index of a node that's before the one
+ * we're inserting (returned by
+ * stamR3LookupFindChild).
+ */
+static PSTAMLOOKUP stamR3LookupNewChild(PSTAMLOOKUP pParent, const char *pchName, uint32_t cchName, uint32_t offName,
+ uint32_t iChild)
+{
+ Assert(cchName <= UINT8_MAX);
+ Assert(offName <= UINT8_MAX);
+ Assert(iChild < UINT16_MAX);
+
+ /*
+ * Allocate a new entry.
+ */
+ PSTAMLOOKUP pNew = (PSTAMLOOKUP)RTMemAlloc(RT_UOFFSETOF_DYN(STAMLOOKUP, szName[cchName + 1]));
+ if (!pNew)
+ return NULL;
+ pNew->pParent = pParent;
+ pNew->papChildren = NULL;
+ pNew->pDesc = NULL;
+ pNew->cDescsInTree = 0;
+ pNew->cChildren = 0;
+ pNew->cch = (uint16_t)cchName;
+ pNew->off = (uint16_t)offName;
+ memcpy(pNew->szName, pchName, cchName);
+ pNew->szName[cchName] = '\0';
+
+ /*
+ * Reallocate the array?
+ */
+ if (RT_IS_POWER_OF_TWO(pParent->cChildren))
+ {
+ uint32_t cNew = pParent->cChildren ? (uint32_t)pParent->cChildren * 2 : 8;
+ AssertReturnStmt(cNew <= 0x8000, RTMemFree(pNew), NULL);
+ void *pvNew = RTMemRealloc(pParent->papChildren, cNew * sizeof(pParent->papChildren[0]));
+ if (!pvNew)
+ {
+ RTMemFree(pNew);
+ return NULL;
+ }
+ pParent->papChildren = (PSTAMLOOKUP *)pvNew;
+ }
+
+ /*
+ * Find the exact insertion point using iChild as a very good clue from
+ * the find function.
+ */
+ if (!pParent->cChildren)
+ iChild = 0;
+ else
+ {
+ if (iChild >= pParent->cChildren)
+ iChild = pParent->cChildren - 1;
+ while ( iChild < pParent->cChildren
+ && stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName) < 0)
+ iChild++;
+ }
+
+ /*
+ * Insert it.
+ */
+ if (iChild < pParent->cChildren)
+ {
+ /* Do shift. */
+ uint32_t i = pParent->cChildren;
+ while (i > iChild)
+ {
+ PSTAMLOOKUP pNode = pParent->papChildren[i - 1];
+ pParent->papChildren[i] = pNode;
+ pNode->iParent = i;
+ i--;
+ }
+ }
+
+ pNew->iParent = iChild;
+ pParent->papChildren[iChild] = pNew;
+ pParent->cChildren++;
+
+ return pNew;
+}
+
+
+/**
+ * Looks up a child.
+ *
+ * @returns Pointer to child node if found, NULL if not.
+ * @param pParent The parent node.
+ * @param pchName The name (not necessarily terminated).
+ * @param cchName The length of the name.
+ * @param piChild Where to store a child index suitable for
+ * passing to stamR3LookupNewChild when NULL is
+ * returned.
+ */
+static PSTAMLOOKUP stamR3LookupFindChild(PSTAMLOOKUP pParent, const char *pchName, uint32_t cchName, uint32_t *piChild)
+{
+ uint32_t iChild = pParent->cChildren;
+ if (iChild > 4)
+ {
+ uint32_t iFirst = 0;
+ uint32_t iEnd = iChild;
+ iChild /= 2;
+ for (;;)
+ {
+ int iDiff = stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName);
+ if (!iDiff)
+ {
+ if (piChild)
+ *piChild = iChild;
+ return pParent->papChildren[iChild];
+ }
+
+ /* Split. */
+ if (iDiff < 0)
+ {
+ iFirst = iChild + 1;
+ if (iFirst >= iEnd)
+ {
+ if (piChild)
+ *piChild = iChild;
+ break;
+ }
+ }
+ else
+ {
+ if (iChild == iFirst)
+ {
+ if (piChild)
+ *piChild = iChild ? iChild - 1 : 0;
+ break;
+ }
+ iEnd = iChild;
+ }
+
+ /* Calc next child. */
+ iChild = (iEnd - iFirst) / 2 + iFirst;
+ }
+ return NULL;
+ }
+
+ /*
+ * Linear search.
+ */
+ while (iChild-- > 0)
+ {
+ int iDiff = stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName);
+ if (iDiff <= 0)
+ {
+ if (piChild)
+ *piChild = iChild;
+ return !iDiff ? pParent->papChildren[iChild] : NULL;
+ }
+ }
+ if (piChild)
+ *piChild = 0;
+ return NULL;
+}
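+
+/* Example: with sorted children "Allocated", "Reserved", "VM", a lookup of
+ * "Balloon" returns NULL and leaves *piChild pointing at a neighbouring slot
+ * ("Allocated" here); stamR3LookupNewChild only uses it as a starting clue and
+ * scans to the exact insertion point itself. */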
+
+
+/**
+ * Find the next sample descriptor node.
+ *
+ * This is for use with insertion in the big list and pattern range lookups.
+ *
+ * @returns Pointer to the next sample descriptor. NULL if not found (i.e.
+ * we're at the end of the list).
+ * @param pLookup The current node.
+ */
+static PSTAMDESC stamR3LookupFindNextWithDesc(PSTAMLOOKUP pLookup)
+{
+ Assert(!pLookup->pDesc);
+ PSTAMLOOKUP pCur = pLookup;
+ uint32_t iCur = 0;
+ for (;;)
+ {
+ /*
+ * Check all children.
+ */
+ uint32_t cChildren = pCur->cChildren;
+ if (iCur < cChildren)
+ {
+ PSTAMLOOKUP *papChildren = pCur->papChildren;
+ do
+ {
+ PSTAMLOOKUP pChild = papChildren[iCur];
+ if (pChild->pDesc)
+ return pChild->pDesc;
+
+ if (pChild->cChildren > 0)
+ {
+ /* One level down. */
+ iCur = 0;
+ pCur = pChild;
+ break;
+ }
+ } while (++iCur < cChildren);
+ }
+ else
+ {
+ /*
+ * One level up, resuming after the current.
+ */
+ iCur = pCur->iParent + 1;
+ pCur = pCur->pParent;
+ if (!pCur)
+ return NULL;
+ }
+ }
+}
+
+
+/**
+ * Look up a sample descriptor by name.
+ *
+ * @returns Pointer to a sample descriptor.
+ * @param pRoot The root node.
+ * @param pszName The name to lookup.
+ */
+static PSTAMDESC stamR3LookupFindDesc(PSTAMLOOKUP pRoot, const char *pszName)
+{
+ Assert(!pRoot->pParent);
+ while (*pszName++ == '/')
+ {
+ const char *pszEnd = strchr(pszName, '/');
+ uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszName) : (uint32_t)strlen(pszName);
+ PSTAMLOOKUP pChild = stamR3LookupFindChild(pRoot, pszName, cch, NULL);
+ if (!pChild)
+ break;
+ if (!pszEnd)
+ return pChild->pDesc;
+ pszName = pszEnd;
+ pRoot = pChild;
+ }
+
+ return NULL;
+}
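+
+/* Example: "/GMM/VM/cPrivatePages" is resolved by walking root -> "GMM" -> "VM"
+ * -> "cPrivatePages" and returning the final node's descriptor; NULL is returned
+ * if any component is missing or the final node carries no descriptor. */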
+
+
+/**
+ * Finds the first sample descriptor for a given lookup range.
+ *
+ * This is for pattern range lookups.
+ *
+ * @returns Pointer to the first descriptor.
+ * @param pFirst The first node in the range.
+ * @param pLast The last node in the range.
+ */
+static PSTAMDESC stamR3LookupFindFirstDescForRange(PSTAMLOOKUP pFirst, PSTAMLOOKUP pLast)
+{
+ if (pFirst->pDesc)
+ return pFirst->pDesc;
+
+ PSTAMLOOKUP pCur = pFirst;
+ uint32_t iCur = 0;
+ for (;;)
+ {
+ uint32_t cChildren = pCur->cChildren;
+ if (iCur < pCur->cChildren)
+ {
+ /*
+ * Check all children.
+ */
+ PSTAMLOOKUP * const papChildren = pCur->papChildren;
+ do
+ {
+ PSTAMLOOKUP pChild = papChildren[iCur];
+ if (pChild->pDesc)
+ return pChild->pDesc;
+ if (pChild->cChildren > 0)
+ {
+ /* One level down. */
+ iCur = 0;
+ pCur = pChild;
+ break;
+ }
+ if (pChild == pLast)
+ return NULL;
+ } while (++iCur < cChildren);
+ }
+ else
+ {
+ /*
+ * One level up, checking current and its 'older' siblings.
+ */
+ if (pCur == pLast)
+ return NULL;
+ iCur = pCur->iParent + 1;
+ pCur = pCur->pParent;
+ if (!pCur)
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Finds the last sample descriptor for a given lookup range.
+ *
+ * This is for pattern range lookups.
+ *
+ * @returns Pointer to the last descriptor in the range.
+ * @param pFirst The first node in the range.
+ * @param pLast The last node in the range.
+ */
+static PSTAMDESC stamR3LookupFindLastDescForRange(PSTAMLOOKUP pFirst, PSTAMLOOKUP pLast)
+{
+ PSTAMLOOKUP pCur = pLast;
+ uint32_t iCur = pCur->cChildren - 1;
+ for (;;)
+ {
+ if (iCur < pCur->cChildren)
+ {
+ /*
+ * Check children backwards, depth first.
+ */
+ PSTAMLOOKUP * const papChildren = pCur->papChildren;
+ do
+ {
+ PSTAMLOOKUP pChild = papChildren[iCur];
+ if (pChild->cChildren > 0)
+ {
+ /* One level down. */
+ iCur = pChild->cChildren - 1;
+ pCur = pChild;
+ break;
+ }
+
+ if (pChild->pDesc)
+ return pChild->pDesc;
+ if (pChild == pFirst)
+ return NULL;
+ } while (iCur-- > 0); /* (underflow handled above) */
+ }
+ else
+ {
+ /*
+ * One level up, checking current and its 'older' siblings.
+ */
+ if (pCur->pDesc)
+ return pCur->pDesc;
+ if (pCur == pFirst)
+ return NULL;
+ iCur = pCur->iParent - 1; /* (underflow handled above) */
+ pCur = pCur->pParent;
+ if (!pCur)
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Look up the first and last descriptors for a (single) pattern expression.
+ *
+ * This is used to optimize pattern enumerations and doesn't have to return 100%
+ * accurate results if that costs too much.
+ *
+ * @returns Pointer to the first descriptor in the range.
+ * @param pRoot The root node.
+ * @param pList The descriptor list anchor.
+ * @param pszPat The name pattern to look up.
+ * @param ppLastDesc Where to store the address of the last
+ * descriptor (approximate).
+ */
+static PSTAMDESC stamR3LookupFindPatternDescRange(PSTAMLOOKUP pRoot, PRTLISTANCHOR pList, const char *pszPat,
+ PSTAMDESC *ppLastDesc)
+{
+ Assert(!pRoot->pParent);
+
+ /*
+ * If there is an early enough wildcard, the whole list needs to be searched.
+ */
+ if ( pszPat[0] == '*' || pszPat[0] == '?'
+ || pszPat[1] == '*' || pszPat[1] == '?')
+ {
+ *ppLastDesc = RTListGetLast(pList, STAMDESC, ListEntry);
+ return RTListGetFirst(pList, STAMDESC, ListEntry);
+ }
+
+ /*
+ * All statistics start with a slash.
+ */
+ while ( *pszPat++ == '/'
+ && pRoot->cDescsInTree > 0
+ && pRoot->cChildren > 0)
+ {
+ const char *pszEnd = strchr(pszPat, '/');
+ uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszPat) : (uint32_t)strlen(pszPat);
+ if (!cch)
+ break;
+
+ const char *pszPat1 = (const char *)memchr(pszPat, '*', cch);
+ const char *pszPat2 = (const char *)memchr(pszPat, '?', cch);
+ if (pszPat1 || pszPat2)
+ {
+ /* We've narrowed it down to a sub-tree now. */
+ PSTAMLOOKUP pFirst = pRoot->papChildren[0];
+ PSTAMLOOKUP pLast = pRoot->papChildren[pRoot->cChildren - 1];
+ /** @todo narrow the range further if both pszPat1/2 != pszPat. */
+
+ *ppLastDesc = stamR3LookupFindLastDescForRange(pFirst, pLast);
+ return stamR3LookupFindFirstDescForRange(pFirst, pLast);
+ }
+
+ PSTAMLOOKUP pChild = stamR3LookupFindChild(pRoot, pszPat, cch, NULL);
+ if (!pChild)
+ break;
+
+ /* Advance */
+ if (!pszEnd)
+ return *ppLastDesc = pChild->pDesc;
+ pszPat = pszEnd;
+ pRoot = pChild;
+ }
+
+ /* No match. */
+ *ppLastDesc = NULL;
+ return NULL;
+}
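+
+/* Example: for the pattern "/GMM/VM/cReq*" the walk descends "GMM" and "VM"
+ * before hitting the wildcard, so only the descriptors under "/GMM/VM" are
+ * scanned instead of the whole list; a '*' or '?' within the first two pattern
+ * characters forces the full-list scan. */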
+
+
+/**
+ * Look up the first and last descriptors for a starts-with (prefix) name string.
+ *
+ * This is used to optimize deletion.
+ *
+ * @returns Pointer to the first descriptor in the range.
+ * @param pRoot The root node.
+ * @param pchPrefix The name prefix.
+ * @param cchPrefix The name prefix length (can be shorter than the
+ * actual string).
+ * @param ppLastDesc Where to store the address of the last descriptor.
+ * @sa stamR3LookupFindPatternDescRange
+ */
+static PSTAMDESC stamR3LookupFindByPrefixRange(PSTAMLOOKUP pRoot, const char *pchPrefix, uint32_t cchPrefix,
+ PSTAMDESC *ppLastDesc)
+
+{
+ *ppLastDesc = NULL;
+ Assert(!pRoot->pParent);
+ AssertReturn(cchPrefix > 0, NULL);
+
+ /*
+ * We start with a root slash.
+ */
+ if (!cchPrefix || *pchPrefix != '/')
+ return NULL;
+
+ /*
+ * Walk through the prefix component by component, since that's how
+ * the lookup tree is organized.
+ */
+ while ( cchPrefix
+ && *pchPrefix == '/'
+ && pRoot->cDescsInTree > 0
+ && pRoot->cChildren > 0)
+ {
+ cchPrefix -= 1;
+ pchPrefix += 1;
+
+ const char *pszEnd = (const char *)memchr(pchPrefix, '/', cchPrefix);
+ if (!pszEnd)
+ {
+ /*
+ * We've narrowed it down to a sub-tree now. If we've no more prefix to work
+ * with now (e.g. '/Devices/'), the prefix matches all the children. Otherwise,
+ * traverse the children to find the ones matching the prefix.
+ */
+ if (!cchPrefix)
+ {
+ *ppLastDesc = stamR3LookupFindLastDescForRange(pRoot->papChildren[0], pRoot->papChildren[pRoot->cChildren - 1]);
+ return stamR3LookupFindFirstDescForRange(pRoot->papChildren[0], pRoot->papChildren[pRoot->cChildren - 1]);
+ }
+
+ size_t iEnd = pRoot->cChildren;
+ if (iEnd < 16)
+ {
+ /* Linear scan of the children: */
+ for (size_t i = 0; i < pRoot->cChildren; i++)
+ {
+ PSTAMLOOKUP pCur = pRoot->papChildren[i];
+ if (pCur->cch >= cchPrefix)
+ {
+ int iDiff = memcmp(pCur->szName, pchPrefix, cchPrefix);
+ if (iDiff == 0)
+ {
+ size_t iLast = i;
+ while (++iLast < pRoot->cChildren)
+ {
+ PSTAMLOOKUP pCur2 = pRoot->papChildren[iLast];
+ if ( pCur2->cch < cchPrefix
+ || memcmp(pCur2->szName, pchPrefix, cchPrefix) != 0)
+ break;
+ }
+ iLast--;
+
+ *ppLastDesc = stamR3LookupFindLastDescForRange(pCur, pRoot->papChildren[iLast]);
+ return stamR3LookupFindFirstDescForRange(pCur, pRoot->papChildren[iLast]);
+ }
+ if (iDiff > 0)
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Binary search to find something matching the prefix, followed
+ by a reverse scan to locate the first child: */
+ size_t iFirst = 0;
+ size_t i = iEnd / 2;
+ for (;;)
+ {
+ PSTAMLOOKUP pCur = pRoot->papChildren[i];
+ int iDiff;
+ if (pCur->cch >= cchPrefix)
+ iDiff = memcmp(pCur->szName, pchPrefix, cchPrefix);
+ else
+ {
+ iDiff = memcmp(pCur->szName, pchPrefix, pCur->cch);
+ if (!iDiff)
+ iDiff = 1;
+ }
+ if (iDiff > 0)
+ {
+ if (iFirst < i)
+ iEnd = i;
+ else
+ return NULL;
+ }
+ else if (iDiff < 0)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ return NULL;
+ }
+ else
+ {
+ /* Match. Reverse scan to find the first. */
+ iFirst = i;
+ while ( iFirst > 0
+ && (pCur = pRoot->papChildren[iFirst - 1])->cch >= cchPrefix
+ && memcmp(pCur->szName, pchPrefix, cchPrefix) == 0)
+ iFirst--;
+
+ /* Forward scan to find the last. */
+ size_t iLast = i;
+ while (++iLast < pRoot->cChildren)
+ {
+ pCur = pRoot->papChildren[iLast];
+ if ( pCur->cch < cchPrefix
+ || memcmp(pCur->szName, pchPrefix, cchPrefix) != 0)
+ break;
+ }
+ iLast--;
+
+ *ppLastDesc = stamR3LookupFindLastDescForRange(pRoot->papChildren[iFirst], pRoot->papChildren[iLast]);
+ return stamR3LookupFindFirstDescForRange(pRoot->papChildren[iFirst], pRoot->papChildren[iLast]);
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+ }
+ break;
+ }
+
+ /* Find child matching the path component: */
+ uint32_t cchChild = (uint32_t)(pszEnd - pchPrefix);
+ PSTAMLOOKUP pChild = stamR3LookupFindChild(pRoot, pchPrefix, cchChild, NULL);
+ if (!pChild)
+ break;
+
+ /* Advance: */
+ cchPrefix -= cchChild;
+ pchPrefix = pszEnd;
+ pRoot = pChild;
+ }
+ return NULL;
+}
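+
+/* Example: the prefix "/GMM/VM/cReq" descends to the "VM" node and then scans
+ * its sorted children for names starting with "cReq", yielding the contiguous
+ * range cReqActuallyBalloonedPages .. cReqDeflatePages from the table above. */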
+
+
+/**
+ * Increments the cDescsInTree member of the given node and all its ancestors.
+ *
+ * @param pLookup The lookup node.
+ */
+static void stamR3LookupIncUsage(PSTAMLOOKUP pLookup)
+{
+ Assert(pLookup->pDesc);
+
+ PSTAMLOOKUP pCur = pLookup;
+ while (pCur != NULL)
+ {
+ pCur->cDescsInTree++;
+ pCur = pCur->pParent;
+ }
+}
+
+
+/**
+ * Decrements the cDescsInTree member of the given node and all its ancestors.
+ *
+ * @param pLookup The lookup node.
+ */
+static void stamR3LookupDecUsage(PSTAMLOOKUP pLookup)
+{
+ Assert(!pLookup->pDesc);
+
+ PSTAMLOOKUP pCur = pLookup;
+ while (pCur != NULL)
+ {
+ Assert(pCur->cDescsInTree > 0);
+ pCur->cDescsInTree--;
+ pCur = pCur->pParent;
+ }
+}
+
+
+/**
+ * Frees empty lookup nodes if it's worth it.
+ *
+ * @param pLookup The lookup node.
+ */
+static void stamR3LookupMaybeFree(PSTAMLOOKUP pLookup)
+{
+ Assert(!pLookup->pDesc);
+
+ /*
+ * Free between two and three levels of nodes. Freeing more than that is
+ * most likely wasted effort, since we're either going to repopulate the
+ * tree or quit the whole thing.
+ */
+ if (pLookup->cDescsInTree > 0)
+ return;
+
+ PSTAMLOOKUP pCur = pLookup->pParent;
+ if (!pCur)
+ return;
+ if (pCur->cDescsInTree > 0)
+ return;
+ PSTAMLOOKUP pParent = pCur->pParent;
+ if (!pParent)
+ return;
+
+ if (pParent->cDescsInTree == 0 && pParent->pParent)
+ {
+ pCur = pParent;
+ pParent = pCur->pParent;
+ }
+
+ /*
+ * Remove pCur from pParent.
+ */
+ PSTAMLOOKUP *papChildren = pParent->papChildren;
+ uint32_t cChildren = --pParent->cChildren;
+ for (uint32_t i = pCur->iParent; i < cChildren; i++)
+ {
+ PSTAMLOOKUP pChild = papChildren[i + 1];
+ pChild->iParent = i;
+ papChildren[i] = pChild;
+ }
+ pCur->pParent = NULL;
+ pCur->iParent = UINT16_MAX;
+
+ /*
+ * Destroy pCur.
+ */
+ stamR3LookupDestroyTree(pCur);
+}
+
+
+/**
+ * Destroys a lookup tree.
+ *
+ * This is used by STAMR3TermUVM as well as stamR3LookupMaybeFree.
+ *
+ * @param pRoot The root of the tree (must have no parent).
+ */
+static void stamR3LookupDestroyTree(PSTAMLOOKUP pRoot)
+{
+ Assert(pRoot); Assert(!pRoot->pParent);
+ PSTAMLOOKUP pCur = pRoot;
+ for (;;)
+ {
+ uint32_t i = pCur->cChildren;
+ if (i > 0)
+ {
+ /*
+ * Push child (with leaf optimization).
+ */
+ PSTAMLOOKUP pChild = pCur->papChildren[--i];
+ if (pChild->cChildren != 0)
+ pCur = pChild;
+ else
+ {
+ /* free leaves. */
+ for (;;)
+ {
+ if (pChild->papChildren)
+ {
+ RTMemFree(pChild->papChildren);
+ pChild->papChildren = NULL;
+ }
+ RTMemFree(pChild);
+ pCur->papChildren[i] = NULL;
+
+ /* next */
+ if (i == 0)
+ {
+ pCur->cChildren = 0;
+ break;
+ }
+ pChild = pCur->papChildren[--i];
+ if (pChild->cChildren != 0)
+ {
+ pCur->cChildren = i + 1;
+ pCur = pChild;
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Pop and free current.
+ */
+ Assert(!pCur->pDesc);
+
+ PSTAMLOOKUP pParent = pCur->pParent;
+ Assert(pCur->iParent == (pParent ? pParent->cChildren - 1 : UINT16_MAX));
+
+ RTMemFree(pCur->papChildren);
+ pCur->papChildren = NULL;
+ RTMemFree(pCur);
+
+ pCur = pParent;
+ if (!pCur)
+ break;
+ pCur->papChildren[--pCur->cChildren] = NULL;
+ }
+ }
+}
+
+
+/**
+ * Internal worker for the different register calls.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pvSample Pointer to the sample.
+ * @param pfnReset Callback for resetting the sample. NULL should be used if the sample can't be reset.
+ * @param pfnPrint Print the sample.
+ * @param enmType Sample type. This indicates what pvSample is pointing at.
+ * @param enmVisibility Visibility type specifying whether unused statistics should be visible or not.
+ * @param pszName The sample name (fully formatted).
+ * @param enmUnit Sample unit.
+ * @param pszDesc Sample description.
+ * @param iRefreshGrp The refresh group, STAM_REFRESH_GRP_XXX.
+ * @remark There is currently no device or driver variant of this API. Add one if it should become necessary!
+ */
+static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint,
+ STAMTYPE enmType, STAMVISIBILITY enmVisibility,
+ const char *pszName, STAMUNIT enmUnit, const char *pszDesc, uint8_t iRefreshGrp)
+{
+ AssertReturn(pszName[0] == '/', VERR_INVALID_NAME);
+ AssertReturn(pszName[1] != '/' && pszName[1], VERR_INVALID_NAME);
+ uint32_t const cchName = (uint32_t)strlen(pszName);
+ AssertReturn(cchName <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+ AssertReturn(pszName[cchName - 1] != '/', VERR_INVALID_NAME);
+ AssertReturn(memchr(pszName, '\\', cchName) == NULL, VERR_INVALID_NAME);
+ AssertReturn(iRefreshGrp == STAM_REFRESH_GRP_NONE || iRefreshGrp < 64, VERR_INVALID_PARAMETER);
+
+ STAM_LOCK_WR(pUVM);
+
+ /*
+ * Look up the tree location, populating the lookup tree as we walk it.
+ */
+ PSTAMLOOKUP pLookup = pUVM->stam.s.pRoot; Assert(pLookup);
+ uint32_t offName = 1;
+ for (;;)
+ {
+ /* Get the next part of the path. */
+ const char *pszStart = &pszName[offName];
+ const char *pszEnd = strchr(pszStart, '/');
+ uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszStart) : cchName - offName;
+ if (cch == 0)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ AssertMsgFailed(("No double or trailing slashes are allowed: '%s'\n", pszName));
+ return VERR_INVALID_NAME;
+ }
+
+ /* Do the looking up. */
+ uint32_t iChild = 0;
+ PSTAMLOOKUP pChild = stamR3LookupFindChild(pLookup, pszStart, cch, &iChild);
+ if (!pChild)
+ {
+ pChild = stamR3LookupNewChild(pLookup, pszStart, cch, offName, iChild);
+ if (!pChild)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ return VERR_NO_MEMORY;
+ }
+ }
+
+ /* Advance. */
+ pLookup = pChild;
+ if (!pszEnd)
+ break;
+ offName += cch + 1;
+ }
+ if (pLookup->pDesc)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ AssertMsgFailed(("Duplicate sample name: %s\n", pszName));
+ return VERR_ALREADY_EXISTS;
+ }
+
+ PSTAMDESC pCur = stamR3LookupFindNextWithDesc(pLookup);
+
+ /*
+ * Check that the name doesn't screw up sorting order when taking
+ * slashes into account. The Qt GUI makes some assumptions.
+ * Problematic chars are: !"#$%&'()*+,-.
+ */
+#ifdef VBOX_STRICT
+ Assert(pszName[0] == '/');
+ PSTAMDESC pPrev = pCur
+ ? RTListGetPrev(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ : RTListGetLast(&pUVM->stam.s.List, STAMDESC, ListEntry);
+ Assert(!pPrev || strcmp(pszName, pPrev->pszName) > 0);
+ Assert(!pCur || strcmp(pszName, pCur->pszName) < 0);
+ Assert(!pPrev || stamR3SlashCompare(pPrev->pszName, pszName) < 0);
+ Assert(!pCur || stamR3SlashCompare(pCur->pszName, pszName) > 0);
+
+ /*
+ * Check alignment requirements.
+ */
+ switch (enmType)
+ {
+ /* 8 byte / 64-bit */
+ case STAMTYPE_U64:
+ case STAMTYPE_U64_RESET:
+ case STAMTYPE_X64:
+ case STAMTYPE_X64_RESET:
+ case STAMTYPE_COUNTER:
+ case STAMTYPE_PROFILE:
+ case STAMTYPE_PROFILE_ADV:
+ AssertMsg(!((uintptr_t)pvSample & 7), ("%p - %s\n", pvSample, pszName));
+ break;
+
+ /* 4 byte / 32-bit */
+ case STAMTYPE_RATIO_U32:
+ case STAMTYPE_RATIO_U32_RESET:
+ case STAMTYPE_U32:
+ case STAMTYPE_U32_RESET:
+ case STAMTYPE_X32:
+ case STAMTYPE_X32_RESET:
+ AssertMsg(!((uintptr_t)pvSample & 3), ("%p - %s\n", pvSample, pszName));
+ break;
+
+ /* 2 byte / 16-bit */
+ case STAMTYPE_U16:
+ case STAMTYPE_U16_RESET:
+ case STAMTYPE_X16:
+ case STAMTYPE_X16_RESET:
+ AssertMsg(!((uintptr_t)pvSample & 1), ("%p - %s\n", pvSample, pszName));
+ break;
+
+ /* 1 byte / 8-bit / unaligned */
+ case STAMTYPE_U8:
+ case STAMTYPE_U8_RESET:
+ case STAMTYPE_X8:
+ case STAMTYPE_X8_RESET:
+ case STAMTYPE_BOOL:
+ case STAMTYPE_BOOL_RESET:
+ case STAMTYPE_CALLBACK:
+ break;
+
+ default:
+ AssertMsgFailed(("%d\n", enmType));
+ break;
+ }
+#endif /* VBOX_STRICT */
+
+ /*
+ * Create a new node and insert it at the current location.
+ */
+ int rc;
+ size_t cbDesc = pszDesc ? strlen(pszDesc) + 1 : 0;
+ PSTAMDESC pNew = (PSTAMDESC)RTMemAlloc(sizeof(*pNew) + cchName + 1 + cbDesc);
+ if (pNew)
+ {
+ pNew->pszName = (char *)memcpy((char *)(pNew + 1), pszName, cchName + 1);
+ pNew->enmType = enmType;
+ pNew->enmVisibility = enmVisibility;
+ if (enmType != STAMTYPE_CALLBACK)
+ pNew->u.pv = pvSample;
+ else
+ {
+ pNew->u.Callback.pvSample = pvSample;
+ pNew->u.Callback.pfnReset = pfnReset;
+ pNew->u.Callback.pfnPrint = pfnPrint;
+ }
+ pNew->enmUnit = enmUnit;
+ pNew->iRefreshGroup = iRefreshGrp;
+ pNew->pszDesc = NULL;
+ if (pszDesc)
+ pNew->pszDesc = (char *)memcpy((char *)(pNew + 1) + cchName + 1, pszDesc, cbDesc);
+
+ if (pCur)
+ RTListNodeInsertBefore(&pCur->ListEntry, &pNew->ListEntry);
+ else
+ RTListAppend(&pUVM->stam.s.List, &pNew->ListEntry);
+
+ pNew->pLookup = pLookup;
+ pLookup->pDesc = pNew;
+ stamR3LookupIncUsage(pLookup);
+
+ stamR3ResetOne(pNew, pUVM->pVM);
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
+
+
+/**
+ * Destroys the statistics descriptor, unlinking it and freeing all resources.
+ *
+ * @returns VINF_SUCCESS
+ * @param pCur The descriptor to destroy.
+ */
+static int stamR3DestroyDesc(PSTAMDESC pCur)
+{
+ RTListNodeRemove(&pCur->ListEntry);
+ pCur->pLookup->pDesc = NULL; /** @todo free lookup nodes once it's working. */
+ stamR3LookupDecUsage(pCur->pLookup);
+ stamR3LookupMaybeFree(pCur->pLookup);
+ RTMemFree(pCur);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a sample previously registered by STAMR3Register() given its
+ * address.
+ *
+ * This is intended for use with devices which can be unplugged and with
+ * temporary samples.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pvSample Pointer to the sample registered with STAMR3Register().
+ */
+VMMR3DECL(int) STAMR3DeregisterByAddr(PUVM pUVM, void *pvSample)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* This is a complete waste of time when shutting down. */
+ VMSTATE enmState = VMR3GetStateU(pUVM);
+ if (enmState >= VMSTATE_DESTROYING)
+ return VINF_SUCCESS;
+
+ STAM_LOCK_WR(pUVM);
+
+ /*
+ * Search for it.
+ */
+ int rc = VERR_INVALID_HANDLE;
+ PSTAMDESC pCur, pNext;
+ RTListForEachSafe(&pUVM->stam.s.List, pCur, pNext, STAMDESC, ListEntry)
+ {
+ if (pCur->u.pv == pvSample)
+ rc = stamR3DestroyDesc(pCur);
+ }
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
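+
+/* Typical use when a device instance is unplugged (the state pointer and
+ * member are hypothetical):
+ *
+ *      STAMR3DeregisterByAddr(pUVM, &pThis->StatIrqs);
+ */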
+
+
+/**
+ * Worker for STAMR3Deregister, STAMR3DeregisterV and STAMR3DeregisterF.
+ *
+ * @returns VBox status code.
+ * @retval VWRN_NOT_FOUND if no matching names found.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name pattern.
+ */
+static int stamR3DeregisterByPattern(PUVM pUVM, const char *pszPat)
+{
+ Assert(!strchr(pszPat, '|')); /* single pattern! */
+
+ int rc = VWRN_NOT_FOUND;
+ STAM_LOCK_WR(pUVM);
+
+ PSTAMDESC pLast;
+ PSTAMDESC pCur = stamR3LookupFindPatternDescRange(pUVM->stam.s.pRoot, &pUVM->stam.s.List, pszPat, &pLast);
+ if (pCur)
+ {
+ for (;;)
+ {
+ PSTAMDESC pNext = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
+
+ if (RTStrSimplePatternMatch(pszPat, pCur->pszName))
+ rc = stamR3DestroyDesc(pCur);
+
+ /* advance. */
+ if (pCur == pLast)
+ break;
+ pCur = pNext;
+ }
+ Assert(pLast);
+ }
+ else
+ Assert(!pLast);
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
+
+
+/**
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name pattern.
+ * @sa STAMR3DeregisterF, STAMR3DeregisterV
+ */
+VMMR3DECL(int) STAMR3Deregister(PUVM pUVM, const char *pszPat)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* This is a complete waste of time when shutting down. */
+ VMSTATE enmState = VMR3GetStateU(pUVM);
+ if (enmState >= VMSTATE_DESTROYING)
+ return VINF_SUCCESS;
+
+ return stamR3DeregisterByPattern(pUVM, pszPat);
+}
+
+
+/**
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPatFmt The name pattern format string.
+ * @param ... Format string arguments.
+ * @sa STAMR3Deregister, STAMR3DeregisterV
+ */
+VMMR3DECL(int) STAMR3DeregisterF(PUVM pUVM, const char *pszPatFmt, ...)
+{
+ va_list va;
+ va_start(va, pszPatFmt);
+ int rc = STAMR3DeregisterV(pUVM, pszPatFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPatFmt The name pattern format string.
+ * @param va Format string arguments.
+ * @sa STAMR3Deregister, STAMR3DeregisterF
+ */
+VMMR3DECL(int) STAMR3DeregisterV(PUVM pUVM, const char *pszPatFmt, va_list va)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* This is a complete waste of time when shutting down. */
+ VMSTATE enmState = VMR3GetStateU(pUVM);
+ if (enmState >= VMSTATE_DESTROYING)
+ return VINF_SUCCESS;
+
+ char szPat[STAM_MAX_NAME_LEN + 8];
+ size_t cchPat = RTStrPrintfV(szPat, sizeof(szPat), pszPatFmt, va);
+ AssertReturn(cchPat <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+
+ return stamR3DeregisterByPattern(pUVM, szPat);
+}
+
+
+/**
+ * Deregister zero or more samples given their name prefix.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPrefix The name prefix of the samples to remove.
+ * @sa STAMR3Deregister, STAMR3DeregisterF, STAMR3DeregisterV
+ */
+VMMR3DECL(int) STAMR3DeregisterByPrefix(PUVM pUVM, const char *pszPrefix)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* This is a complete waste of time when shutting down. */
+ VMSTATE enmState = VMR3GetStateU(pUVM);
+ if (enmState >= VMSTATE_DESTROYING)
+ return VINF_SUCCESS;
+
+ size_t const cchPrefix = strlen(pszPrefix);
+ int rc = VWRN_NOT_FOUND;
+ STAM_LOCK_WR(pUVM);
+
+ PSTAMDESC pLast;
+ PSTAMDESC pCur = stamR3LookupFindByPrefixRange(pUVM->stam.s.pRoot, pszPrefix, (uint32_t)cchPrefix, &pLast);
+ if (pCur)
+ for (;;)
+ {
+ PSTAMDESC const pNext = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
+ Assert(strncmp(pCur->pszName, pszPrefix, cchPrefix) == 0);
+
+ rc = stamR3DestroyDesc(pCur);
+
+ /* advance. */
+ if (pCur == pLast)
+ break;
+ pCur = pNext;
+ }
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
+
+
+/**
+ * Resets statistics for the specified VM.
+ * It's possible to select a subset of the samples.
+ *
+ * @returns VBox status code. (Basically, it cannot fail.)
+ * @param pUVM The user mode VM handle.
+ * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
+ * If NULL all samples are reset.
+ * @remarks Don't confuse this with the other 'XYZR3Reset' methods, it's not called at VM reset.
+ */
+VMMR3DECL(int) STAMR3Reset(PUVM pUVM, const char *pszPat)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ int rc = VINF_SUCCESS;
+
+ /* ring-0 */
+ GVMMRESETSTATISTICSSREQ GVMMReq;
+ GMMRESETSTATISTICSSREQ GMMReq;
+ bool fGVMMMatched = (!pszPat || !*pszPat) && !SUPR3IsDriverless();
+ bool fGMMMatched = fGVMMMatched;
+ if (fGVMMMatched)
+ {
+ memset(&GVMMReq.Stats, 0xff, sizeof(GVMMReq.Stats));
+ memset(&GMMReq.Stats, 0xff, sizeof(GMMReq.Stats));
+ }
+ else
+ {
+ char *pszCopy;
+ unsigned cExpressions;
+ char **papszExpressions = stamR3SplitPattern(pszPat, &cExpressions, &pszCopy);
+ if (!papszExpressions)
+ return VERR_NO_MEMORY;
+
+ /* GVMM */
+ RT_ZERO(GVMMReq.Stats);
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aGVMMStats); i++)
+ if (stamR3MultiMatch(papszExpressions, cExpressions, NULL, g_aGVMMStats[i].pszName))
+ {
+ *((uint8_t *)&GVMMReq.Stats + g_aGVMMStats[i].offVar) = 0xff;
+ fGVMMMatched = true;
+ }
+ if (!fGVMMMatched)
+ {
+ /** @todo match cpu leaves some rainy day. */
+ }
+
+ /* GMM */
+ RT_ZERO(GMMReq.Stats);
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aGMMStats); i++)
+ if (stamR3MultiMatch(papszExpressions, cExpressions, NULL, g_aGMMStats[i].pszName))
+ {
+ *((uint8_t *)&GMMReq.Stats + g_aGMMStats[i].offVar) = 0xff;
+ fGMMMatched = true;
+ }
+
+ RTMemTmpFree(papszExpressions);
+ RTStrFree(pszCopy);
+ }
+
+ STAM_LOCK_WR(pUVM);
+
+ if (fGVMMMatched)
+ {
+ PVM pVM = pUVM->pVM;
+ GVMMReq.Hdr.cbReq = sizeof(GVMMReq);
+ GVMMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ GVMMReq.pSession = pVM->pSession;
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GVMM_RESET_STATISTICS, 0, &GVMMReq.Hdr);
+ }
+
+ if (fGMMMatched)
+ {
+ PVM pVM = pUVM->pVM;
+ GMMReq.Hdr.cbReq = sizeof(GMMReq);
+ GMMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ GMMReq.pSession = pVM->pSession;
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_RESET_STATISTICS, 0, &GMMReq.Hdr);
+ }
+
+ /* and the reset */
+ stamR3EnumU(pUVM, pszPat, false /* fUpdateRing0 */, stamR3ResetOne, pUVM->pVM);
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
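+
+/* Examples: STAMR3Reset(pUVM, NULL) resets everything, including the GVMM/GMM
+ * ring-0 statistics, while STAMR3Reset(pUVM, "/GMM/VM/*") only resets the GMM
+ * entries matched against g_aGMMStats plus the matching ring-3 samples. */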
+
+
+/**
+ * Resets one statistics sample.
+ * Callback for stamR3EnumU().
+ *
+ * @returns VINF_SUCCESS
+ * @param pDesc Pointer to the current descriptor.
+ * @param pvArg User argument - Pointer to the VM.
+ */
+static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg)
+{
+ switch (pDesc->enmType)
+ {
+ case STAMTYPE_COUNTER:
+ ASMAtomicXchgU64(&pDesc->u.pCounter->c, 0);
+ break;
+
+ case STAMTYPE_PROFILE:
+ case STAMTYPE_PROFILE_ADV:
+ ASMAtomicXchgU64(&pDesc->u.pProfile->cPeriods, 0);
+ ASMAtomicXchgU64(&pDesc->u.pProfile->cTicks, 0);
+ ASMAtomicXchgU64(&pDesc->u.pProfile->cTicksMax, 0);
+ ASMAtomicXchgU64(&pDesc->u.pProfile->cTicksMin, UINT64_MAX);
+ break;
+
+ case STAMTYPE_RATIO_U32_RESET:
+ ASMAtomicXchgU32(&pDesc->u.pRatioU32->u32A, 0);
+ ASMAtomicXchgU32(&pDesc->u.pRatioU32->u32B, 0);
+ break;
+
+ case STAMTYPE_CALLBACK:
+ if (pDesc->u.Callback.pfnReset)
+ pDesc->u.Callback.pfnReset((PVM)pvArg, pDesc->u.Callback.pvSample);
+ break;
+
+ case STAMTYPE_U8_RESET:
+ case STAMTYPE_X8_RESET:
+ ASMAtomicXchgU8(pDesc->u.pu8, 0);
+ break;
+
+ case STAMTYPE_U16_RESET:
+ case STAMTYPE_X16_RESET:
+ ASMAtomicXchgU16(pDesc->u.pu16, 0);
+ break;
+
+ case STAMTYPE_U32_RESET:
+ case STAMTYPE_X32_RESET:
+ ASMAtomicXchgU32(pDesc->u.pu32, 0);
+ break;
+
+ case STAMTYPE_U64_RESET:
+ case STAMTYPE_X64_RESET:
+ ASMAtomicXchgU64(pDesc->u.pu64, 0);
+ break;
+
+ case STAMTYPE_BOOL_RESET:
+ ASMAtomicXchgBool(pDesc->u.pf, false);
+ break;
+
+ /* These are custom and will not be touched. */
+ case STAMTYPE_U8:
+ case STAMTYPE_X8:
+ case STAMTYPE_U16:
+ case STAMTYPE_X16:
+ case STAMTYPE_U32:
+ case STAMTYPE_X32:
+ case STAMTYPE_U64:
+ case STAMTYPE_X64:
+ case STAMTYPE_RATIO_U32:
+ case STAMTYPE_BOOL:
+ break;
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", pDesc->enmType));
+ break;
+ }
+ NOREF(pvArg);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Get a snapshot of the statistics.
+ * It's possible to select a subset of the samples.
+ *
+ * @returns VBox status code. (Basically, it cannot fail.)
+ * @param pUVM The user mode VM handle.
+ * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
+ * If NULL all samples are included in the snapshot.
+ * @param fWithDesc Whether to include the descriptions.
+ * @param ppszSnapshot Where to store the pointer to the snapshot data.
+ * The snapshot is returned as an XML document (see the @todo
+ * in the code about making it proper & valid XML).
+ * The returned pointer must be freed by calling STAMR3SnapshotFree().
+ * @param pcchSnapshot Where to store the size of the snapshot data. (Excluding the trailing '\0')
+ */
+VMMR3DECL(int) STAMR3Snapshot(PUVM pUVM, const char *pszPat, char **ppszSnapshot, size_t *pcchSnapshot, bool fWithDesc)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ STAMR3SNAPSHOTONE State = { NULL, NULL, NULL, pUVM->pVM, 0, VINF_SUCCESS, fWithDesc };
+
+ /*
+ * Write the XML header.
+ */
+ /** @todo Make this proper & valid XML. */
+ stamR3SnapshotPrintf(&State, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n");
+
+ /*
+ * Write the content.
+ */
+ stamR3SnapshotPrintf(&State, "<Statistics>\n");
+ int rc = stamR3EnumU(pUVM, pszPat, true /* fUpdateRing0 */, stamR3SnapshotOne, &State);
+ stamR3SnapshotPrintf(&State, "</Statistics>\n");
+
+ if (RT_SUCCESS(rc))
+ rc = State.rc;
+ else
+ {
+ RTMemFree(State.pszStart);
+ State.pszStart = State.pszEnd = State.psz = NULL;
+ State.cbAllocated = 0;
+ }
+
+ /*
+ * Done.
+ */
+ *ppszSnapshot = State.pszStart;
+ if (pcchSnapshot)
+ *pcchSnapshot = State.psz - State.pszStart;
+ return rc;
+}
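+
+/* Usage sketch:
+ *
+ *      char  *pszXml = NULL;
+ *      size_t cchXml = 0;
+ *      int rc = STAMR3Snapshot(pUVM, "/GMM/*", &pszXml, &cchXml, true);  // with descriptions
+ *      if (RT_SUCCESS(rc))
+ *      {
+ *          // ... consume the cchXml byte XML snapshot in pszXml ...
+ *          STAMR3SnapshotFree(pUVM, pszXml);
+ *      }
+ */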
+
+
+/**
+ * stamR3EnumU callback employed by STAMR3Snapshot.
+ *
+ * @returns VBox status code, but it's interpreted as 0 == success / !0 == failure by stamR3EnumU.
+ * @param pDesc The sample.
+ * @param pvArg The snapshot status structure.
+ */
+static int stamR3SnapshotOne(PSTAMDESC pDesc, void *pvArg)
+{
+ PSTAMR3SNAPSHOTONE pThis = (PSTAMR3SNAPSHOTONE)pvArg;
+
+ switch (pDesc->enmType)
+ {
+ case STAMTYPE_COUNTER:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && pDesc->u.pCounter->c == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<Counter c=\"%lld\"", pDesc->u.pCounter->c);
+ break;
+
+ case STAMTYPE_PROFILE:
+ case STAMTYPE_PROFILE_ADV:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && pDesc->u.pProfile->cPeriods == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<Profile cPeriods=\"%lld\" cTicks=\"%lld\" cTicksMin=\"%lld\" cTicksMax=\"%lld\"",
+ pDesc->u.pProfile->cPeriods, pDesc->u.pProfile->cTicks, pDesc->u.pProfile->cTicksMin,
+ pDesc->u.pProfile->cTicksMax);
+ break;
+
+ case STAMTYPE_RATIO_U32:
+ case STAMTYPE_RATIO_U32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && !pDesc->u.pRatioU32->u32A && !pDesc->u.pRatioU32->u32B)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<Ratio32 u32A=\"%lld\" u32B=\"%lld\"",
+ pDesc->u.pRatioU32->u32A, pDesc->u.pRatioU32->u32B);
+ break;
+
+ case STAMTYPE_CALLBACK:
+ {
+ char szBuf[512];
+ pDesc->u.Callback.pfnPrint(pThis->pVM, pDesc->u.Callback.pvSample, szBuf, sizeof(szBuf));
+ stamR3SnapshotPrintf(pThis, "<Callback val=\"%s\"", szBuf);
+ break;
+ }
+
+ case STAMTYPE_U8:
+ case STAMTYPE_U8_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu8 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<U8 val=\"%u\"", *pDesc->u.pu8);
+ break;
+
+ case STAMTYPE_X8:
+ case STAMTYPE_X8_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu8 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<X8 val=\"%#x\"", *pDesc->u.pu8);
+ break;
+
+ case STAMTYPE_U16:
+ case STAMTYPE_U16_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu16 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<U16 val=\"%u\"", *pDesc->u.pu16);
+ break;
+
+ case STAMTYPE_X16:
+ case STAMTYPE_X16_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu16 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<X16 val=\"%#x\"", *pDesc->u.pu16);
+ break;
+
+ case STAMTYPE_U32:
+ case STAMTYPE_U32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu32 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<U32 val=\"%u\"", *pDesc->u.pu32);
+ break;
+
+ case STAMTYPE_X32:
+ case STAMTYPE_X32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu32 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<X32 val=\"%#x\"", *pDesc->u.pu32);
+ break;
+
+ case STAMTYPE_U64:
+ case STAMTYPE_U64_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu64 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<U64 val=\"%llu\"", *pDesc->u.pu64);
+ break;
+
+ case STAMTYPE_X64:
+ case STAMTYPE_X64_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu64 == 0)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<X64 val=\"%#llx\"", *pDesc->u.pu64);
+ break;
+
+ case STAMTYPE_BOOL:
+ case STAMTYPE_BOOL_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pf == false)
+ return VINF_SUCCESS;
+ stamR3SnapshotPrintf(pThis, "<BOOL val=\"%RTbool\"", *pDesc->u.pf);
+ break;
+
+ default:
+ AssertMsgFailed(("%d\n", pDesc->enmType));
+ return 0;
+ }
+
+ stamR3SnapshotPrintf(pThis, " unit=\"%s\"", STAMR3GetUnit(pDesc->enmUnit));
+
+ switch (pDesc->enmVisibility)
+ {
+ default:
+ case STAMVISIBILITY_ALWAYS:
+ break;
+ case STAMVISIBILITY_USED:
+ stamR3SnapshotPrintf(pThis, " vis=\"used\"");
+ break;
+ case STAMVISIBILITY_NOT_GUI:
+ stamR3SnapshotPrintf(pThis, " vis=\"not-gui\"");
+ break;
+ }
+
+ stamR3SnapshotPrintf(pThis, " name=\"%s\"", pDesc->pszName);
+
+ if (pThis->fWithDesc && pDesc->pszDesc)
+ {
+ /*
+ * The description is a bit tricky as it may include chars that
+ * xml requires to be escaped.
+ */
+ const char *pszBadChar = strpbrk(pDesc->pszDesc, "&<>\"'");
+ if (!pszBadChar)
+ return stamR3SnapshotPrintf(pThis, " desc=\"%s\"/>\n", pDesc->pszDesc);
+
+ stamR3SnapshotPrintf(pThis, " desc=\"");
+ const char *pszCur = pDesc->pszDesc;
+ do
+ {
+ stamR3SnapshotPrintf(pThis, "%.*s", pszBadChar - pszCur, pszCur);
+ switch (*pszBadChar)
+ {
+ case '&': stamR3SnapshotPrintf(pThis, "&amp;"); break;
+ case '<': stamR3SnapshotPrintf(pThis, "&lt;"); break;
+ case '>': stamR3SnapshotPrintf(pThis, "&gt;"); break;
+ case '"': stamR3SnapshotPrintf(pThis, "&quot;"); break;
+ case '\'': stamR3SnapshotPrintf(pThis, "&apos;"); break;
+ default: AssertMsgFailed(("%c", *pszBadChar)); break;
+ }
+ pszCur = pszBadChar + 1;
+ pszBadChar = strpbrk(pszCur, "&<>\"'");
+ } while (pszBadChar);
+ return stamR3SnapshotPrintf(pThis, "%s\"/>\n", pszCur);
+ }
+ return stamR3SnapshotPrintf(pThis, "/>\n");
+}
+
+
+/**
+ * Output callback for stamR3SnapshotPrintf.
+ *
+ * @returns number of bytes written.
+ * @param pvArg The snapshot status structure.
+ * @param pach Pointer to an array of characters (bytes).
+ * @param cch The number of chars (bytes) to write from the array.
+ */
+static DECLCALLBACK(size_t) stamR3SnapshotOutput(void *pvArg, const char *pach, size_t cch)
+{
+ PSTAMR3SNAPSHOTONE pThis = (PSTAMR3SNAPSHOTONE)pvArg;
+
+ /*
+ * Make sure we've got space for it.
+ */
+ if (RT_UNLIKELY((uintptr_t)pThis->pszEnd - (uintptr_t)pThis->psz < cch + 1))
+ {
+ if (RT_FAILURE(pThis->rc))
+ return 0;
+
+ size_t cbNewSize = pThis->cbAllocated;
+ if (cbNewSize > cch)
+ cbNewSize *= 2;
+ else
+ cbNewSize += RT_ALIGN(cch + 1, 0x1000);
+ char *pszNew = (char *)RTMemRealloc(pThis->pszStart, cbNewSize);
+ if (!pszNew)
+ {
+ /*
+ * Free up immediately; out-of-memory is bad news and this
+ * isn't an important allocation / API.
+ */
+ pThis->rc = VERR_NO_MEMORY;
+ RTMemFree(pThis->pszStart);
+ pThis->pszStart = pThis->pszEnd = pThis->psz = NULL;
+ pThis->cbAllocated = 0;
+ return 0;
+ }
+
+ pThis->psz = pszNew + (pThis->psz - pThis->pszStart);
+ pThis->pszStart = pszNew;
+ pThis->pszEnd = pszNew + cbNewSize;
+ pThis->cbAllocated = cbNewSize;
+ }
+
+ /*
+ * Copy the chars to the buffer and terminate it.
+ */
+ if (cch)
+ {
+ memcpy(pThis->psz, pach, cch);
+ pThis->psz += cch;
+ }
+ *pThis->psz = '\0';
+ return cch;
+}
+
+
+/**
+ * Wrapper around RTStrFormatV for use by the snapshot API.
+ *
+ * @returns VBox status code.
+ * @param pThis The snapshot status structure.
+ * @param pszFormat The format string.
+ * @param ... Optional arguments.
+ */
+static int stamR3SnapshotPrintf(PSTAMR3SNAPSHOTONE pThis, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ RTStrFormatV(stamR3SnapshotOutput, pThis, NULL, NULL, pszFormat, va);
+ va_end(va);
+ return pThis->rc;
+}
+
+
+/**
+ * Releases a statistics snapshot returned by STAMR3Snapshot().
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszSnapshot The snapshot data pointer returned by STAMR3Snapshot().
+ * NULL is allowed.
+ */
+VMMR3DECL(int) STAMR3SnapshotFree(PUVM pUVM, char *pszSnapshot)
+{
+ if (pszSnapshot)
+ RTMemFree(pszSnapshot);
+ NOREF(pUVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps the selected statistics to the log.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
+ * If NULL all samples are written to the log.
+ */
+VMMR3DECL(int) STAMR3Dump(PUVM pUVM, const char *pszPat)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ STAMR3PRINTONEARGS Args;
+ Args.pUVM = pUVM;
+ Args.pvArg = NULL;
+ Args.pfnPrintf = stamR3EnumLogPrintf;
+
+ stamR3EnumU(pUVM, pszPat, true /* fUpdateRing0 */, stamR3PrintOne, &Args);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints to the log.
+ *
+ * @param pArgs Pointer to the print one argument structure.
+ * @param pszFormat Format string.
+ * @param ... Format arguments.
+ */
+static DECLCALLBACK(void) stamR3EnumLogPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ RTLogPrintfV(pszFormat, va);
+ va_end(va);
+ NOREF(pArgs);
+}
+
+
+/**
+ * Dumps the selected statistics to the release log.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name matching pattern; see RTStrSimplePatternMultiMatch for the syntax.
+ * If NULL all samples are written to the log.
+ */
+VMMR3DECL(int) STAMR3DumpToReleaseLog(PUVM pUVM, const char *pszPat)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ STAMR3PRINTONEARGS Args;
+ Args.pUVM = pUVM;
+ Args.pvArg = NULL;
+ Args.pfnPrintf = stamR3EnumRelLogPrintf;
+
+ stamR3EnumU(pUVM, pszPat, true /* fUpdateRing0 */, stamR3PrintOne, &Args);
+ return VINF_SUCCESS;
+}
+
+/**
+ * Prints to the release log.
+ *
+ * @param pArgs Pointer to the print one argument structure.
+ * @param pszFormat Format string.
+ * @param ... Format arguments.
+ */
+static DECLCALLBACK(void) stamR3EnumRelLogPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ RTLogRelPrintfV(pszFormat, va);
+ va_end(va);
+ NOREF(pArgs);
+}
+
+
+/**
+ * Prints the selected statistics to standard out.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszPat The name matching pattern; see RTStrSimplePatternMultiMatch for the syntax.
+ * If NULL all samples are printed.
+ */
+VMMR3DECL(int) STAMR3Print(PUVM pUVM, const char *pszPat)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ STAMR3PRINTONEARGS Args;
+ Args.pUVM = pUVM;
+ Args.pvArg = NULL;
+ Args.pfnPrintf = stamR3EnumPrintf;
+
+ stamR3EnumU(pUVM, pszPat, true /* fUpdateRing0 */, stamR3PrintOne, &Args);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints to stdout.
+ *
+ * @param pArgs Pointer to the print one argument structure.
+ * @param pszFormat Format string.
+ * @param ... Format arguments.
+ */
+static DECLCALLBACK(void) stamR3EnumPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ RTPrintfV(pszFormat, va);
+ va_end(va);
+ NOREF(pArgs);
+}
+
+
+/**
+ * Prints one sample.
+ * Callback for stamR3EnumU().
+ *
+ * @returns VINF_SUCCESS
+ * @param pDesc Pointer to the current descriptor.
+ * @param pvArg User argument - STAMR3PRINTONEARGS.
+ */
+static int stamR3PrintOne(PSTAMDESC pDesc, void *pvArg)
+{
+ PSTAMR3PRINTONEARGS pArgs = (PSTAMR3PRINTONEARGS)pvArg;
+
+ switch (pDesc->enmType)
+ {
+ case STAMTYPE_COUNTER:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && pDesc->u.pCounter->c == 0)
+ return VINF_SUCCESS;
+
+ pArgs->pfnPrintf(pArgs, "%-32s %8llu %s\n", pDesc->pszName, pDesc->u.pCounter->c, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_PROFILE:
+ case STAMTYPE_PROFILE_ADV:
+ {
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && pDesc->u.pProfile->cPeriods == 0)
+ return VINF_SUCCESS;
+
+ uint64_t u64 = pDesc->u.pProfile->cPeriods ? pDesc->u.pProfile->cPeriods : 1;
+ pArgs->pfnPrintf(pArgs, "%-32s %8llu %s (%12llu %s, %7llu %s, max %9llu, min %7lld)\n", pDesc->pszName,
+ pDesc->u.pProfile->cTicks / u64, STAMR3GetUnit(pDesc->enmUnit),
+ pDesc->u.pProfile->cTicks, STAMR3GetUnit1(pDesc->enmUnit),
+ pDesc->u.pProfile->cPeriods, STAMR3GetUnit2(pDesc->enmUnit),
+ pDesc->u.pProfile->cTicksMax, pDesc->u.pProfile->cTicksMin);
+ break;
+ }
+
+ case STAMTYPE_RATIO_U32:
+ case STAMTYPE_RATIO_U32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && !pDesc->u.pRatioU32->u32A && !pDesc->u.pRatioU32->u32B)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8u:%-8u %s\n", pDesc->pszName,
+ pDesc->u.pRatioU32->u32A, pDesc->u.pRatioU32->u32B, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_CALLBACK:
+ {
+ char szBuf[512];
+ pDesc->u.Callback.pfnPrint(pArgs->pUVM->pVM, pDesc->u.Callback.pvSample, szBuf, sizeof(szBuf));
+ pArgs->pfnPrintf(pArgs, "%-32s %s %s\n", pDesc->pszName, szBuf, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+ }
+
+ case STAMTYPE_U8:
+ case STAMTYPE_U8_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu8 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8u %s\n", pDesc->pszName, *pDesc->u.pu8, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_X8:
+ case STAMTYPE_X8_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu8 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8x %s\n", pDesc->pszName, *pDesc->u.pu8, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_U16:
+ case STAMTYPE_U16_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu16 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8u %s\n", pDesc->pszName, *pDesc->u.pu16, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_X16:
+ case STAMTYPE_X16_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu16 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8x %s\n", pDesc->pszName, *pDesc->u.pu16, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_U32:
+ case STAMTYPE_U32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu32 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8u %s\n", pDesc->pszName, *pDesc->u.pu32, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_X32:
+ case STAMTYPE_X32_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu32 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8x %s\n", pDesc->pszName, *pDesc->u.pu32, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_U64:
+ case STAMTYPE_U64_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu64 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8llu %s\n", pDesc->pszName, *pDesc->u.pu64, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_X64:
+ case STAMTYPE_X64_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pu64 == 0)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %8llx %s\n", pDesc->pszName, *pDesc->u.pu64, STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ case STAMTYPE_BOOL:
+ case STAMTYPE_BOOL_RESET:
+ if (pDesc->enmVisibility == STAMVISIBILITY_USED && *pDesc->u.pf == false)
+ return VINF_SUCCESS;
+ pArgs->pfnPrintf(pArgs, "%-32s %s %s\n", pDesc->pszName, *pDesc->u.pf ? "true " : "false ", STAMR3GetUnit(pDesc->enmUnit));
+ break;
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", pDesc->enmType));
+ break;
+ }
+ NOREF(pvArg);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enumerate the statistics by the means of a callback function.
+ *
+ * @returns Whatever the callback returns.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pszPat The pattern to match samples.
+ * @param pfnEnum The callback function.
+ * @param pvUser The pvUser argument of the callback function.
+ */
+VMMR3DECL(int) STAMR3Enum(PUVM pUVM, const char *pszPat, PFNSTAMR3ENUM pfnEnum, void *pvUser)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
+ STAMR3ENUMONEARGS Args;
+ Args.pVM = pUVM->pVM;
+ Args.pfnEnum = pfnEnum;
+ Args.pvUser = pvUser;
+
+ return stamR3EnumU(pUVM, pszPat, true /* fUpdateRing0 */, stamR3EnumOne, &Args);
+}
+
+
+/**
+ * Callback function for STAMR3Enum().
+ *
+ * @returns whatever the callback returns.
+ * @param pDesc Pointer to the current descriptor.
+ * @param pvArg Points to a STAMR3ENUMONEARGS structure.
+ */
+static int stamR3EnumOne(PSTAMDESC pDesc, void *pvArg)
+{
+ PSTAMR3ENUMONEARGS pArgs = (PSTAMR3ENUMONEARGS)pvArg;
+ const char *pszUnit = STAMR3GetUnit(pDesc->enmUnit);
+ int rc;
+ if (pDesc->enmType == STAMTYPE_CALLBACK)
+ {
+ /* Give the enumerator something useful. */
+ char szBuf[512];
+ pDesc->u.Callback.pfnPrint(pArgs->pVM, pDesc->u.Callback.pvSample, szBuf, sizeof(szBuf));
+ rc = pArgs->pfnEnum(pDesc->pszName, pDesc->enmType, szBuf, pDesc->enmUnit, pszUnit,
+ pDesc->enmVisibility, pDesc->pszDesc, pArgs->pvUser);
+ }
+ else
+ rc = pArgs->pfnEnum(pDesc->pszName, pDesc->enmType, pDesc->u.pv, pDesc->enmUnit, pszUnit,
+ pDesc->enmVisibility, pDesc->pszDesc, pArgs->pvUser);
+ return rc;
+}
+
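+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0): a
+ * minimal enumeration callback matching the argument list stamR3EnumOne()
+ * passes above, counting non-zero counters. myCountNonZeroCb is hypothetical.
+ */
+#if 0 /* example only */
+static DECLCALLBACK(int) myCountNonZeroCb(const char *pszName, STAMTYPE enmType, void *pvSample,
+                                          STAMUNIT enmUnit, const char *pszUnit,
+                                          STAMVISIBILITY enmVisibility, const char *pszDesc, void *pvUser)
+{
+    RT_NOREF(pszName, enmUnit, pszUnit, enmVisibility, pszDesc);
+    if (enmType == STAMTYPE_COUNTER && ((PSTAMCOUNTER)pvSample)->c != 0)
+        *(uint32_t *)pvUser += 1;
+    return VINF_SUCCESS; /* any other status stops the enumeration */
+}
+/* Usage: uint32_t cHits = 0; STAMR3Enum(pUVM, "/TM/*", myCountNonZeroCb, &cHits); */
+#endif
+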
+static void stamR3RefreshGroup(PUVM pUVM, uint8_t iRefreshGroup, uint64_t *pbmRefreshedGroups)
+{
+ *pbmRefreshedGroups |= RT_BIT_64(iRefreshGroup);
+
+ PVM pVM = pUVM->pVM;
+ if (pVM && pVM->pSession)
+ {
+ switch (iRefreshGroup)
+ {
+ /*
+ * GVMM
+ */
+ case STAM_REFRESH_GRP_GVMM:
+ {
+ GVMMQUERYSTATISTICSSREQ Req;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.pSession = pVM->pSession;
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr);
+ if (RT_SUCCESS(rc))
+ {
+ pUVM->stam.s.GVMMStats = Req.Stats;
+
+ /*
+ * Check if the number of host CPUs has changed (it will have the
+ * first time around and normally never again).
+ */
+ if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus))
+ {
+ STAM_UNLOCK_RD(pUVM);
+ STAM_LOCK_WR(pUVM);
+ /* Recheck now that we hold the write lock (double-checked locking). */
+ if (RT_UNLIKELY(pUVM->stam.s.GVMMStats.cHostCpus > pUVM->stam.s.cRegisteredHostCpus))
+ {
+ uint32_t cCpus = pUVM->stam.s.GVMMStats.cHostCpus;
+ for (uint32_t iCpu = pUVM->stam.s.cRegisteredHostCpus; iCpu < cCpus; iCpu++)
+ {
+ char szName[120];
+ size_t cchBase = RTStrPrintf(szName, sizeof(szName), "/GVMM/HostCpus/%u", iCpu);
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idCpu, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE,
+ "Host CPU ID", STAM_REFRESH_GRP_GVMM);
+ strcpy(&szName[cchBase], "/idxCpuSet");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].idxCpuSet, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE,
+ "CPU Set index", STAM_REFRESH_GRP_GVMM);
+ strcpy(&szName[cchBase], "/DesiredHz");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uDesiredHz, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ,
+ "The desired frequency", STAM_REFRESH_GRP_GVMM);
+ strcpy(&szName[cchBase], "/CurTimerHz");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].uTimerHz, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_HZ,
+ "The current timer frequency", STAM_REFRESH_GRP_GVMM);
+ strcpy(&szName[cchBase], "/PPTChanges");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cChanges, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES,
+ "RTTimerChangeInterval calls", STAM_REFRESH_GRP_GVMM);
+ strcpy(&szName[cchBase], "/PPTStarts");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aHostCpus[iCpu].cStarts, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES,
+ "RTTimerStart calls", STAM_REFRESH_GRP_GVMM);
+ }
+ pUVM->stam.s.cRegisteredHostCpus = cCpus;
+ }
+ STAM_UNLOCK_WR(pUVM);
+ STAM_LOCK_RD(pUVM);
+ }
+ }
+ break;
+ }
+
+ /*
+ * GMM
+ */
+ case STAM_REFRESH_GRP_GMM:
+ {
+ GMMQUERYSTATISTICSSREQ Req;
+ Req.Hdr.cbReq = sizeof(Req);
+ Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ Req.pSession = pVM->pSession;
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_QUERY_STATISTICS, 0, &Req.Hdr);
+ if (RT_SUCCESS(rc))
+ pUVM->stam.s.GMMStats = Req.Stats;
+ break;
+ }
+
+ /*
+ * NEM.
+ */
+ case STAM_REFRESH_GRP_NEM:
+ SUPR3CallVMMR0(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_NEM_UPDATE_STATISTICS, NULL);
+ break;
+
+ default:
+ AssertMsgFailed(("iRefreshGroup=%d\n", iRefreshGroup));
+ }
+ }
+}
+
+
+/**
+ * Refreshes the statistics behind the given entry, if necessary.
+ *
+ * This helps implement fetching global ring-0 stats into ring-3 accessible
+ * storage. GVMM, GMM and NEM make use of this.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pCur The statistics descriptor which group to check
+ * and maybe update.
+ * @param pbmRefreshedGroups Bitmap tracking what has already been updated.
+ */
+DECLINLINE(void) stamR3Refresh(PUVM pUVM, PSTAMDESC pCur, uint64_t *pbmRefreshedGroups)
+{
+ uint8_t const iRefreshGroup = pCur->iRefreshGroup;
+ if (RT_LIKELY(iRefreshGroup == STAM_REFRESH_GRP_NONE))
+ { /* likely */ }
+ else if (!(*pbmRefreshedGroups & RT_BIT_64(iRefreshGroup)))
+ stamR3RefreshGroup(pUVM, iRefreshGroup, pbmRefreshedGroups);
+}
+
+
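+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * the once-per-enumeration refresh pattern used by stamR3Refresh() above,
+ * reduced to plain C with a 64-bit "already refreshed" bitmap. Hypothetical
+ * names throughout; myRefreshGroup() stands in for the expensive ring-0 call.
+ */
+#if 0 /* example only */
+# include <stdint.h>
+
+static void myRefreshGroup(uint8_t iGroup); /* e.g. a VMMR0 statistics query */
+
+static inline void myRefreshOnce(uint8_t iGroup, uint64_t *pbmDone)
+{
+    if (!(*pbmDone & ((uint64_t)1 << iGroup))) /* first sample of this group? */
+    {
+        *pbmDone |= (uint64_t)1 << iGroup;     /* later samples skip the call */
+        myRefreshGroup(iGroup);
+    }
+}
+#endif
+
+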
+/**
+ * Match a name against an array of patterns.
+ *
+ * @returns true if it matches, false if it doesn't match.
+ * @param papszExpressions The array of pattern expressions.
+ * @param cExpressions The number of array entries.
+ * @param piExpression Where to read/store the current skip index. Optional.
+ * @param pszName The name to match.
+ */
+static bool stamR3MultiMatch(const char * const *papszExpressions, unsigned cExpressions,
+ unsigned *piExpression, const char *pszName)
+{
+ for (unsigned i = piExpression ? *piExpression : 0; i < cExpressions; i++)
+ {
+ const char *pszPat = papszExpressions[i];
+ if (RTStrSimplePatternMatch(pszPat, pszName))
+ {
+ /* later:
+ if (piExpression && i > *piExpression)
+ {
+ Check if we can skip some expressions.
+ Requires the expressions to be sorted.
+ }*/
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/**
+ * Splits a multi pattern into single ones.
+ *
+ * @returns Pointer to an array of single patterns. Free it with RTMemTmpFree.
+ * @param pszPat The pattern to split.
+ * @param pcExpressions The number of array elements.
+ * @param ppszCopy The pattern copy to free using RTStrFree.
+ */
+static char **stamR3SplitPattern(const char *pszPat, unsigned *pcExpressions, char **ppszCopy)
+{
+ Assert(pszPat && *pszPat);
+
+ char *pszCopy = RTStrDup(pszPat);
+ if (!pszCopy)
+ return NULL;
+
+ /* count them & allocate array. */
+ char *psz = pszCopy;
+ unsigned cExpressions = 1;
+ while ((psz = strchr(psz, '|')) != NULL)
+ cExpressions++, psz++;
+
+ char **papszExpressions = (char **)RTMemTmpAllocZ((cExpressions + 1) * sizeof(char *));
+ if (!papszExpressions)
+ {
+ RTStrFree(pszCopy);
+ return NULL;
+ }
+
+ /* split */
+ psz = pszCopy;
+ for (unsigned i = 0;;)
+ {
+ papszExpressions[i] = psz;
+ if (++i >= cExpressions)
+ break;
+ psz = strchr(psz, '|');
+ *psz++ = '\0';
+ }
+
+ /* sort the array, putting '*' last. */
+ /** @todo sort it... */
+
+ *pcExpressions = cExpressions;
+ *ppszCopy = pszCopy;
+ return papszExpressions;
+}
+
+
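+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * the count-then-split-in-place technique from stamR3SplitPattern() in
+ * standalone C, turning "a|b*|c?" into an array of per-expression strings.
+ * mySplitPattern() is hypothetical; the caller frees both allocations.
+ */
+#if 0 /* example only */
+# include <stdlib.h>
+# include <string.h>
+
+static char **mySplitPattern(const char *pszPat, unsigned *pcExprs, char **ppszCopy)
+{
+    char *pszCopy = strdup(pszPat);
+    if (!pszCopy)
+        return NULL;
+
+    unsigned cExprs = 1; /* count the separators first */
+    for (char *psz = pszCopy; (psz = strchr(psz, '|')) != NULL; psz++)
+        cExprs++;
+
+    char **papszExprs = (char **)calloc(cExprs + 1, sizeof(char *));
+    if (!papszExprs)
+    {
+        free(pszCopy);
+        return NULL;
+    }
+
+    char *psz = pszCopy; /* then split in place */
+    for (unsigned i = 0;;)
+    {
+        papszExprs[i] = psz;
+        if (++i >= cExprs)
+            break;
+        psz = strchr(psz, '|');
+        *psz++ = '\0';
+    }
+
+    *pcExprs  = cExprs;
+    *ppszCopy = pszCopy;
+    return papszExprs;
+}
+#endif
+
+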
+/**
+ * Enumerates the nodes selected by a pattern or all nodes if no pattern
+ * is specified.
+ *
+ * The caller may lock STAM for writing before calling this function; however,
+ * do not lock it for reading as this function may need to write-lock STAM.
+ *
+ * @returns The rc from the callback.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat Pattern.
+ * @param fUpdateRing0 Update the stats residing in ring-0.
+ * @param pfnCallback Callback function which shall be called for matching nodes.
+ * If it returns anything but VINF_SUCCESS the enumeration is
+ * terminated and the status code returned to the caller.
+ * @param pvArg User parameter for the callback.
+ */
+static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0,
+ int (*pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg)
+{
+ size_t const cchPat = pszPat ? strlen(pszPat) : 0;
+ int rc = VINF_SUCCESS;
+ uint64_t bmRefreshedGroups = 0;
+ PSTAMDESC pCur;
+
+ /*
+ * All.
+ */
+ if ( cchPat < 1
+ || ( cchPat == 1
+ && *pszPat == '*'))
+ {
+ STAM_LOCK_RD(pUVM);
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ {
+ if (fUpdateRing0)
+ stamR3Refresh(pUVM, pCur, &bmRefreshedGroups);
+ rc = pfnCallback(pCur, pvArg);
+ if (rc)
+ break;
+ }
+ STAM_UNLOCK_RD(pUVM);
+ }
+
+ /*
+ * Single expression pattern.
+ */
+ else if (memchr(pszPat, '|', cchPat) == NULL)
+ {
+ const char *pszAsterisk = (const char *)memchr(pszPat, '*', cchPat);
+ const char *pszQuestion = (const char *)memchr(pszPat, '?', cchPat);
+
+ STAM_LOCK_RD(pUVM);
+ if (!pszAsterisk && !pszQuestion)
+ {
+ pCur = stamR3LookupFindDesc(pUVM->stam.s.pRoot, pszPat);
+ if (pCur)
+ {
+ if (fUpdateRing0)
+ stamR3Refresh(pUVM, pCur, &bmRefreshedGroups);
+ rc = pfnCallback(pCur, pvArg);
+ }
+ }
+ /* Is this a prefix expression where we can use the lookup tree to
+ efficiently figure out the exact range? */
+ else if ( pszAsterisk == &pszPat[cchPat - 1]
+ && pszPat[0] == '/'
+ && !pszQuestion)
+ {
+ PSTAMDESC pLast;
+ pCur = stamR3LookupFindByPrefixRange(pUVM->stam.s.pRoot, pszPat, (uint32_t)(cchPat - 1), &pLast);
+ if (pCur)
+ {
+ for (;;)
+ {
+ Assert(strncmp(pCur->pszName, pszPat, cchPat - 1) == 0);
+ if (fUpdateRing0)
+ stamR3Refresh(pUVM, pCur, &bmRefreshedGroups);
+ rc = pfnCallback(pCur, pvArg);
+ if (rc)
+ break;
+ if (pCur == pLast)
+ break;
+ pCur = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
+ }
+ Assert(pLast);
+ }
+ else
+ Assert(!pLast);
+ }
+ else
+ {
+ /* It's a more complicated pattern. Find the approximate range
+ and scan it for matches. */
+ PSTAMDESC pLast;
+ pCur = stamR3LookupFindPatternDescRange(pUVM->stam.s.pRoot, &pUVM->stam.s.List, pszPat, &pLast);
+ if (pCur)
+ {
+ for (;;)
+ {
+ if (RTStrSimplePatternMatch(pszPat, pCur->pszName))
+ {
+ if (fUpdateRing0)
+ stamR3Refresh(pUVM, pCur, &bmRefreshedGroups);
+ rc = pfnCallback(pCur, pvArg);
+ if (rc)
+ break;
+ }
+ if (pCur == pLast)
+ break;
+ pCur = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
+ }
+ Assert(pLast);
+ }
+ else
+ Assert(!pLast);
+ }
+ STAM_UNLOCK_RD(pUVM);
+ }
+
+ /*
+ * Multi expression pattern.
+ */
+ else
+ {
+ /*
+ * Split up the pattern first.
+ */
+ char *pszCopy;
+ unsigned cExpressions;
+ char **papszExpressions = stamR3SplitPattern(pszPat, &cExpressions, &pszCopy);
+ if (!papszExpressions)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Perform the enumeration.
+ */
+ STAM_LOCK_RD(pUVM);
+ unsigned iExpression = 0;
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ {
+ if (stamR3MultiMatch(papszExpressions, cExpressions, &iExpression, pCur->pszName))
+ {
+ if (fUpdateRing0)
+ stamR3Refresh(pUVM, pCur, &bmRefreshedGroups);
+ rc = pfnCallback(pCur, pvArg);
+ if (rc)
+ break;
+ }
+ }
+ STAM_UNLOCK_RD(pUVM);
+
+ RTMemTmpFree(papszExpressions);
+ RTStrFree(pszCopy);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Registers the ring-0 statistics.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+static void stamR3Ring0StatsRegisterU(PUVM pUVM)
+{
+ /* GVMM */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aGVMMStats); i++)
+ stamR3RegisterU(pUVM, (uint8_t *)&pUVM->stam.s.GVMMStats + g_aGVMMStats[i].offVar, NULL, NULL,
+ g_aGVMMStats[i].enmType, STAMVISIBILITY_ALWAYS, g_aGVMMStats[i].pszName,
+ g_aGVMMStats[i].enmUnit, g_aGVMMStats[i].pszDesc, STAM_REFRESH_GRP_GVMM);
+
+ for (unsigned i = 0; i < pUVM->cCpus; i++)
+ {
+ char szName[120];
+ size_t cchBase = RTStrPrintf(szName, sizeof(szName), pUVM->cCpus < 10 ? "/GVMM/VCpus/%u/" : "/GVMM/VCpus/%02u/", i);
+
+ strcpy(&szName[cchBase], "cWakeUpTimerHits");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].cWakeUpTimerHits, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "", STAM_REFRESH_GRP_GVMM);
+
+ strcpy(&szName[cchBase], "cWakeUpTimerMisses");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].cWakeUpTimerMisses, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "", STAM_REFRESH_GRP_GVMM);
+
+ strcpy(&szName[cchBase], "cWakeUpTimerCanceled");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].cWakeUpTimerCanceled, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "", STAM_REFRESH_GRP_GVMM);
+
+ strcpy(&szName[cchBase], "cWakeUpTimerSameCpu");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].cWakeUpTimerSameCpu, NULL, NULL,
+ STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "", STAM_REFRESH_GRP_GVMM);
+
+ strcpy(&szName[cchBase], "Start");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].Start, NULL, NULL,
+ STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_TICKS_PER_CALL, "", STAM_REFRESH_GRP_GVMM);
+
+ strcpy(&szName[cchBase], "Stop");
+ stamR3RegisterU(pUVM, &pUVM->stam.s.GVMMStats.aVCpus[i].Stop, NULL, NULL,
+ STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_TICKS_PER_CALL, "", STAM_REFRESH_GRP_GVMM);
+ }
+ pUVM->stam.s.cRegisteredHostCpus = 0;
+
+ /* GMM */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aGMMStats); i++)
+ stamR3RegisterU(pUVM, (uint8_t *)&pUVM->stam.s.GMMStats + g_aGMMStats[i].offVar, NULL, NULL,
+ g_aGMMStats[i].enmType, STAMVISIBILITY_ALWAYS, g_aGMMStats[i].pszName,
+ g_aGMMStats[i].enmUnit, g_aGMMStats[i].pszDesc, STAM_REFRESH_GRP_GMM);
+}
+
+
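+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * the shared-prefix naming trick used above, where the "/GVMM/VCpus/%u/"
+ * base is formatted once and only the leaf is swapped with strcpy().
+ * myBuildNames() is hypothetical.
+ */
+#if 0 /* example only */
+# include <stdio.h>
+# include <string.h>
+
+static void myBuildNames(unsigned iCpu)
+{
+    char   szName[120];
+    size_t cchBase = (size_t)snprintf(szName, sizeof(szName), "/GVMM/VCpus/%u/", iCpu);
+
+    strcpy(&szName[cchBase], "Start"); /* "/GVMM/VCpus/0/Start" for iCpu == 0 */
+    puts(szName);
+    strcpy(&szName[cchBase], "Stop");  /* reuse the prefix, swap the leaf */
+    puts(szName);
+}
+#endif
+
+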
+/**
+ * Get the unit string.
+ *
+ * @returns Pointer to read only unit string.
+ * @param enmUnit The unit.
+ */
+VMMR3DECL(const char *) STAMR3GetUnit(STAMUNIT enmUnit)
+{
+ switch (enmUnit)
+ {
+ case STAMUNIT_NONE: return "";
+ case STAMUNIT_CALLS: return "calls";
+ case STAMUNIT_COUNT: return "count";
+ case STAMUNIT_BYTES: return "bytes";
+ case STAMUNIT_BYTES_PER_CALL: return "bytes/call";
+ case STAMUNIT_PAGES: return "pages";
+ case STAMUNIT_ERRORS: return "errors";
+ case STAMUNIT_OCCURENCES: return "times";
+ case STAMUNIT_TICKS: return "ticks";
+ case STAMUNIT_TICKS_PER_CALL: return "ticks/call";
+ case STAMUNIT_TICKS_PER_OCCURENCE: return "ticks/time";
+ case STAMUNIT_GOOD_BAD: return "good:bad";
+ case STAMUNIT_MEGABYTES: return "megabytes";
+ case STAMUNIT_KILOBYTES: return "kilobytes";
+ case STAMUNIT_NS: return "ns";
+ case STAMUNIT_NS_PER_CALL: return "ns/call";
+ case STAMUNIT_NS_PER_OCCURENCE: return "ns/time";
+ case STAMUNIT_PCT: return "%";
+ case STAMUNIT_HZ: return "Hz";
+
+ default:
+ AssertMsgFailed(("Unknown unit %d\n", enmUnit));
+ return "(?unit?)";
+ }
+}
+
+
+/**
+ * For something per something-else unit, get the first something.
+ *
+ * @returns Pointer to read only unit string.
+ * @param enmUnit The unit.
+ */
+VMMR3DECL(const char *) STAMR3GetUnit1(STAMUNIT enmUnit)
+{
+ switch (enmUnit)
+ {
+ case STAMUNIT_NONE: return "";
+ case STAMUNIT_CALLS: return "calls";
+ case STAMUNIT_COUNT: return "count";
+ case STAMUNIT_BYTES: return "bytes";
+ case STAMUNIT_BYTES_PER_CALL: return "bytes";
+ case STAMUNIT_PAGES: return "pages";
+ case STAMUNIT_ERRORS: return "errors";
+ case STAMUNIT_OCCURENCES: return "times";
+ case STAMUNIT_TICKS: return "ticks";
+ case STAMUNIT_TICKS_PER_CALL: return "ticks";
+ case STAMUNIT_TICKS_PER_OCCURENCE: return "ticks";
+ case STAMUNIT_GOOD_BAD: return "good";
+ case STAMUNIT_MEGABYTES: return "megabytes";
+ case STAMUNIT_KILOBYTES: return "kilobytes";
+ case STAMUNIT_NS: return "ns";
+ case STAMUNIT_NS_PER_CALL: return "ns";
+ case STAMUNIT_NS_PER_OCCURENCE: return "ns";
+ case STAMUNIT_PCT: return "%";
+ case STAMUNIT_HZ: return "Hz";
+
+ default:
+ AssertMsgFailed(("Unknown unit %d\n", enmUnit));
+ return "(?unit?)";
+ }
+}
+
+
+/**
+ * For something per something-else unit, get the something-else.
+ *
+ * @returns Pointer to read only unit string.
+ * @param enmUnit The unit.
+ */
+VMMR3DECL(const char *) STAMR3GetUnit2(STAMUNIT enmUnit)
+{
+ switch (enmUnit)
+ {
+ case STAMUNIT_TICKS_PER_CALL: return "calls";
+ case STAMUNIT_NS_PER_CALL: return "calls";
+ case STAMUNIT_BYTES_PER_CALL: return "calls";
+ case STAMUNIT_TICKS_PER_OCCURENCE: return "times";
+ case STAMUNIT_NS_PER_OCCURENCE: return "times";
+ case STAMUNIT_NONE: return "times";
+ case STAMUNIT_GOOD_BAD: return "bad";
+ default:
+ AssertMsgFailed(("Wrong unit %d\n", enmUnit));
+ return "times";
+ }
+}
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.stats' command.}
+ */
+static DECLCALLBACK(int) stamR3CmdStats(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ if (RTListIsEmpty(&pUVM->stam.s.List))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "No statistics present");
+
+ /*
+ * Do the printing.
+ */
+ STAMR3PRINTONEARGS Args;
+ Args.pUVM = pUVM;
+ Args.pvArg = pCmdHlp;
+ Args.pfnPrintf = stamR3EnumDbgfPrintf;
+
+ return stamR3EnumU(pUVM, cArgs ? paArgs[0].u.pszString : NULL, true /* fUpdateRing0 */, stamR3PrintOne, &Args);
+}
+
+
+/**
+ * Display one sample in the debugger.
+ *
+ * @param pArgs Pointer to the print one argument structure.
+ * @param pszFormat Format string.
+ * @param ... Format arguments.
+ */
+static DECLCALLBACK(void) stamR3EnumDbgfPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...)
+{
+ PDBGCCMDHLP pCmdHlp = (PDBGCCMDHLP)pArgs->pvArg;
+
+ va_list va;
+ va_start(va, pszFormat);
+ pCmdHlp->pfnPrintfV(pCmdHlp, NULL, pszFormat, va);
+ va_end(va);
+ NOREF(pArgs);
+}
+
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.statsreset' command.}
+ */
+static DECLCALLBACK(int) stamR3CmdStatsReset(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ /*
+ * Validate input.
+ */
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ if (RTListIsEmpty(&pUVM->stam.s.List))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "No statistics present");
+
+ /*
+ * Execute reset.
+ */
+ int rc = STAMR3Reset(pUVM, cArgs ? paArgs[0].u.pszString : NULL);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "STAMR3Reset");
+ return DBGCCmdHlpPrintf(pCmdHlp, "Statistics have been reset.\n");
+}
+
+#endif /* VBOX_WITH_DEBUGGER */
+
diff --git a/src/VBox/VMM/VMMR3/TM.cpp b/src/VBox/VMM/VMMR3/TM.cpp
new file mode 100644
index 00000000..a5f516d1
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/TM.cpp
@@ -0,0 +1,4314 @@
+/* $Id: TM.cpp $ */
+/** @file
+ * TM - Time Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_tm TM - The Time Manager
+ *
+ * The Time Manager abstracts the CPU clocks and manages timers used by the VMM,
+ * device and drivers.
+ *
+ * @see grp_tm
+ *
+ *
+ * @section sec_tm_clocks Clocks
+ *
+ * There are currently 4 clocks:
+ * - Virtual (guest).
+ * - Synchronous virtual (guest).
+ * - CPU Tick (TSC) (guest). Only current use is rdtsc emulation. Usually a
+ * function of the virtual clock.
+ * - Real (host). This is only used for display updates at the moment.
+ *
+ * The most important clocks are the first three, and of these the second is
+ * the most interesting.
+ *
+ *
+ * The synchronous virtual clock is tied to the virtual clock except that it
+ * will take into account timer delivery lag caused by host scheduling. It will
+ * normally never advance beyond the head timer, and when lagging too far behind
+ * it will gradually speed up to catch up with the virtual clock. All devices
+ * implementing time sources accessible to and used by the guest are using this
+ * clock (for timers and other things). This ensures consistency between the
+ * time sources.
+ *
+ * The virtual clock is implemented as an offset to a monotonic, high
+ * resolution, wall clock. The current time source is using the RTTimeNanoTS()
+ * machinery based upon the Global Info Pages (GIP), that is, we're using TSC
+ * deltas (usually 10 ms) to fill the gaps between GIP updates. The result is
+ * a fairly high res clock that works in all contexts and on all hosts. The
+ * virtual clock is paused when the VM isn't in the running state.
+ *
+ * The CPU tick (TSC) is normally virtualized as a function of the synchronous
+ * virtual clock, where the frequency defaults to the host cpu frequency (as we
+ * measure it). In this mode it is possible to configure the frequency. Another
+ * (non-default) option is to use the raw unmodified host TSC values. And yet
+ * another, to tie it to time spent executing guest code. All these things are
+ * configurable should non-default behavior be desirable.
+ *
+ * The real clock is a monotonic clock (when available) with relatively low
+ * resolution, though this is a bit host specific. Note that we're currently
+ * not servicing timers using the real clock when the VM is not running; this
+ * is simply because it has not been needed yet and therefore not implemented.
+ *
+ *
+ * @subsection subsec_tm_timesync Guest Time Sync / UTC time
+ *
+ * Guest time syncing is primarily taken care of by the VMM device. The
+ * principle is very simple: the Guest Additions periodically ask the VMM
+ * device what the current UTC time is and make adjustments accordingly.
+ *
+ * A complicating factor is that the synchronous virtual clock might be doing
+ * catch-ups and the guest's perception is currently a little bit behind the world
+ * but it will (hopefully) be catching up soon as we're feeding timer interrupts
+ * at a slightly higher rate. Adjusting the guest clock to the current wall
+ * time in the real world would be a bad idea then because the guest will be
+ * advancing too fast and run ahead of world time (if the catchup works out).
+ * To solve this problem TM provides the VMM device with a UTC time source that
+ * gets adjusted with the current lag, so that when the guest eventually catches
+ * up the lag it will be showing correct real world time (see the sketch
+ * following this comment block).
+ *
+ *
+ * @section sec_tm_timers Timers
+ *
+ * The timers can use any of the TM clocks described in the previous section.
+ * Each clock has its own scheduling facility, or timer queue if you like.
+ * There are a few factors which make it a bit complex. First, there is the
+ * usual R0 vs R3 vs. RC thing. Then there are multiple threads, and then there
+ * is the timer thread that periodically checks whether any timers have expired
+ * without EMT noticing. On the API level, all but the create and save APIs
+ * must be multithreaded. EMT will always run the timers.
+ *
+ * The design uses a doubly linked list of active timers which is ordered
+ * by expire date. This list is only modified by the EMT thread. Updates to
+ * the list are batched in a singly linked list, which is then processed by the
+ * EMT thread at the first opportunity (immediately, next time EMT modifies a
+ * timer on that clock, or next timer timeout). Both lists are offset based and
+ * all the elements are therefore allocated from the hyper heap.
+ *
+ * For figuring out when there is a need to schedule and run timers TM will:
+ * - Poll whenever somebody queries the virtual clock.
+ * - Poll the virtual clocks from the EM and REM loops.
+ * - Poll the virtual clocks from trap exit path.
+ * - Poll the virtual clocks and calculate first timeout from the halt loop.
+ * - Employ a thread which periodically (100Hz) polls all the timer queues.
+ *
+ *
+ * @image html TMTIMER-Statechart-Diagram.gif
+ *
+ * @section sec_tm_timer Logging
+ *
+ * Level 2: Logs most of the timer state transitions and queue servicing.
+ * Level 3: Logs a few oddments.
+ * Level 4: Logs TMCLOCK_VIRTUAL_SYNC catch-up events.
+ *
+ */
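+
+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * the lag-adjusted UTC idea from the time sync subsection above, as plain
+ * arithmetic. While the virtual sync clock is catching up, the wall clock
+ * reported to the guest is pushed back by the current lag, so the guest
+ * shows correct real world time once the lag is gone. All names are
+ * hypothetical.
+ */
+#if 0 /* example only */
+# include <stdint.h>
+
+static int64_t myGuestUtcNs(int64_t nsHostUtc, int64_t nsConfiguredUtcOffset, uint64_t nsVirtualSyncLag)
+{
+    /* The configured offset shifts the guest in time; the catch-up lag is
+       subtracted so the clock is not set ahead of the guest's timer stream. */
+    return nsHostUtc + nsConfiguredUtcOffset - (int64_t)nsVirtualSyncLag;
+}
+#endif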
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_TM
+#ifdef DEBUG_bird
+# define DBGFTRACE_DISABLED /* annoying */
+#endif
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/dbgftrace.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/iom.h>
+#include "TMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/vmm/pdmdev.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#include <iprt/asm-math.h>
+#include <iprt/assert.h>
+#include <iprt/env.h>
+#include <iprt/file.h>
+#include <iprt/getopt.h>
+#include <iprt/mem.h>
+#include <iprt/rand.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+#include <iprt/timer.h>
+
+#include "TMInline.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The current saved state version.*/
+#define TM_SAVED_STATE_VERSION 3
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static bool tmR3HasFixedTSC(PVM pVM);
+static uint64_t tmR3CalibrateTSC(void);
+static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+#ifdef VBOX_WITH_STATISTICS
+static void tmR3TimerQueueRegisterStats(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cTimers);
+#endif
+static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent);
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+static DECLCALLBACK(void) tmR3CpuLoadTimer(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser);
+#endif
+static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static DECLCALLBACK(void) tmR3InfoCpuLoad(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs);
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpu, void *pvData);
+static const char *tmR3GetTSCModeName(PVM pVM);
+static const char *tmR3GetTSCModeNameEx(TMTSCMODE enmMode);
+static int tmR3TimerQueueGrow(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cNewTimers);
+
+
+/**
+ * Initializes the TM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMM_INT_DECL(int) TMR3Init(PVM pVM)
+{
+ LogFlow(("TMR3Init:\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, tm.s, 32);
+ AssertCompile(sizeof(pVM->tm.s) <= sizeof(pVM->tm.padding));
+ AssertCompileMemberAlignment(TM, VirtualSyncLock, 8);
+
+ /*
+ * Init the structure.
+ */
+ pVM->tm.s.idTimerCpu = pVM->cCpus - 1; /* The last CPU. */
+
+ int rc = PDMR3CritSectInit(pVM, &pVM->tm.s.VirtualSyncLock, RT_SRC_POS, "TM VirtualSync Lock");
+ AssertLogRelRCReturn(rc, rc);
+
+ strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].szName, "virtual");
+ strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].szName, "virtual_sync"); /* Underscore is for STAM ordering issue. */
+ strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_REAL].szName, "real");
+ strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_TSC].szName, "tsc");
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->tm.s.aTimerQueues); i++)
+ {
+ Assert(pVM->tm.s.aTimerQueues[i].szName[0] != '\0');
+ pVM->tm.s.aTimerQueues[i].enmClock = (TMCLOCK)i;
+ pVM->tm.s.aTimerQueues[i].u64Expire = INT64_MAX;
+ pVM->tm.s.aTimerQueues[i].idxActive = UINT32_MAX;
+ pVM->tm.s.aTimerQueues[i].idxSchedule = UINT32_MAX;
+ pVM->tm.s.aTimerQueues[i].idxFreeHint = 1;
+ pVM->tm.s.aTimerQueues[i].fBeingProcessed = false;
+ pVM->tm.s.aTimerQueues[i].fCannotGrow = false;
+ pVM->tm.s.aTimerQueues[i].hThread = NIL_RTTHREAD;
+ pVM->tm.s.aTimerQueues[i].hWorkerEvt = NIL_SUPSEMEVENT;
+
+ rc = PDMR3CritSectInit(pVM, &pVM->tm.s.aTimerQueues[i].TimerLock, RT_SRC_POS,
+ "TM %s queue timer lock", pVM->tm.s.aTimerQueues[i].szName);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = PDMR3CritSectRwInit(pVM, &pVM->tm.s.aTimerQueues[i].AllocLock, RT_SRC_POS,
+ "TM %s queue alloc lock", pVM->tm.s.aTimerQueues[i].szName);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * We directly use the GIP to calculate the virtual time. We map
+ * the GIP into the guest context so we can do this calculation there
+ * as well and save costly world switches.
+ */
+ PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+ if (pGip || !SUPR3IsDriverless())
+ {
+ pVM->tm.s.pvGIPR3 = (void *)pGip;
+ AssertMsgReturn(pVM->tm.s.pvGIPR3, ("GIP support is now required!\n"), VERR_TM_GIP_REQUIRED);
+ AssertMsgReturn((pGip->u32Version >> 16) == (SUPGLOBALINFOPAGE_VERSION >> 16),
+ ("Unsupported GIP version %#x! (expected=%#x)\n", pGip->u32Version, SUPGLOBALINFOPAGE_VERSION),
+ VERR_TM_GIP_VERSION);
+
+ /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
+ if ( pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
+ && pGip->u32UpdateIntervalNS >= 250000000 /* 0.25s */)
+ return VMSetError(pVM, VERR_TM_GIP_UPDATE_INTERVAL_TOO_BIG, RT_SRC_POS,
+ N_("The GIP update interval is too big. u32UpdateIntervalNS=%RU32 (u32UpdateHz=%RU32)"),
+ pGip->u32UpdateIntervalNS, pGip->u32UpdateHz);
+
+ /* Log GIP info that may come in handy. */
+ LogRel(("TM: GIP - u32Mode=%d (%s) u32UpdateHz=%u u32UpdateIntervalNS=%u enmUseTscDelta=%d (%s) fGetGipCpu=%#x cCpus=%d\n",
+ pGip->u32Mode, SUPGetGIPModeName(pGip), pGip->u32UpdateHz, pGip->u32UpdateIntervalNS,
+ pGip->enmUseTscDelta, SUPGetGIPTscDeltaModeName(pGip), pGip->fGetGipCpu, pGip->cCpus));
+ LogRel(("TM: GIP - u64CpuHz=%'RU64 (%#RX64) SUPGetCpuHzFromGip => %'RU64\n",
+ pGip->u64CpuHz, pGip->u64CpuHz, SUPGetCpuHzFromGip(pGip)));
+ for (uint32_t iCpuSet = 0; iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx); iCpuSet++)
+ {
+ uint16_t iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
+ if (iGipCpu != UINT16_MAX)
+ LogRel(("TM: GIP - CPU: iCpuSet=%#x idCpu=%#x idApic=%#x iGipCpu=%#x i64TSCDelta=%RI64 enmState=%d u64CpuHz=%RU64(*) cErrors=%u\n",
+ iCpuSet, pGip->aCPUs[iGipCpu].idCpu, pGip->aCPUs[iGipCpu].idApic, iGipCpu, pGip->aCPUs[iGipCpu].i64TSCDelta,
+ pGip->aCPUs[iGipCpu].enmState, pGip->aCPUs[iGipCpu].u64CpuHz, pGip->aCPUs[iGipCpu].cErrors));
+ }
+ }
+
+ /*
+ * Setup the VirtualGetRaw backend.
+ */
+ pVM->tm.s.pfnVirtualGetRaw = tmVirtualNanoTSRediscover;
+ pVM->tm.s.VirtualGetRawData.pfnRediscover = tmVirtualNanoTSRediscover;
+ pVM->tm.s.VirtualGetRawData.pfnBad = tmVirtualNanoTSBad;
+ pVM->tm.s.VirtualGetRawData.pfnBadCpuIndex = tmVirtualNanoTSBadCpuIndex;
+ pVM->tm.s.VirtualGetRawData.pu64Prev = &pVM->tm.s.u64VirtualRawPrev;
+
+ /*
+ * Get our CFGM node, create it if necessary.
+ */
+ PCFGMNODE pCfgHandle = CFGMR3GetChild(CFGMR3GetRoot(pVM), "TM");
+ if (!pCfgHandle)
+ {
+ rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "TM", &pCfgHandle);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Specific errors about some obsolete TM settings (remove after 2015-12-03).
+ */
+ if (CFGMR3Exists(pCfgHandle, "TSCVirtualized"))
+ return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS,
+ N_("Configuration error: TM setting \"TSCVirtualized\" is no longer supported. Use the \"TSCMode\" setting instead."));
+ if (CFGMR3Exists(pCfgHandle, "UseRealTSC"))
+ return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS,
+ N_("Configuration error: TM setting \"UseRealTSC\" is no longer supported. Use the \"TSCMode\" setting instead."));
+
+ if (CFGMR3Exists(pCfgHandle, "MaybeUseOffsettedHostTSC"))
+ return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS,
+ N_("Configuration error: TM setting \"MaybeUseOffsettedHostTSC\" is no longer supported. Use the \"TSCMode\" setting instead."));
+
+ /*
+ * Validate the rest of the TM settings.
+ */
+ rc = CFGMR3ValidateConfig(pCfgHandle, "/TM/",
+ "TSCMode|"
+ "TSCModeSwitchAllowed|"
+ "TSCTicksPerSecond|"
+ "TSCTiedToExecution|"
+ "TSCNotTiedToHalt|"
+ "ScheduleSlack|"
+ "CatchUpStopThreshold|"
+ "CatchUpGiveUpThreshold|"
+ "CatchUpStartThreshold0|CatchUpStartThreshold1|CatchUpStartThreshold2|CatchUpStartThreshold3|"
+ "CatchUpStartThreshold4|CatchUpStartThreshold5|CatchUpStartThreshold6|CatchUpStartThreshold7|"
+ "CatchUpStartThreshold8|CatchUpStartThreshold9|"
+ "CatchUpPrecentage0|CatchUpPrecentage1|CatchUpPrecentage2|CatchUpPrecentage3|"
+ "CatchUpPrecentage4|CatchUpPrecentage5|CatchUpPrecentage6|CatchUpPrecentage7|"
+ "CatchUpPrecentage8|CatchUpPrecentage9|"
+ "UTCOffset|"
+ "UTCTouchFileOnJump|"
+ "WarpDrivePercentage|"
+ "HostHzMax|"
+ "HostHzFudgeFactorTimerCpu|"
+ "HostHzFudgeFactorOtherCpu|"
+ "HostHzFudgeFactorCatchUp100|"
+ "HostHzFudgeFactorCatchUp200|"
+ "HostHzFudgeFactorCatchUp400|"
+ "TimerMillies"
+ ,
+ "",
+ "TM", 0);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Determine the TSC configuration and frequency.
+ */
+ /** @cfgm{/TM/TSCMode, string, Depends on the CPU and VM config}
+ * The name of the TSC mode to use: VirtTSCEmulated, RealTSCOffset or Dynamic.
+ * The default depends on the VM configuration and the capabilities of the
+ * host CPU. Other config options or runtime changes may override the TSC
+ * mode specified here.
+ */
+ char szTSCMode[32];
+ rc = CFGMR3QueryString(pCfgHandle, "TSCMode", szTSCMode, sizeof(szTSCMode));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
+ /** @todo Rainy-day/never: Dynamic mode isn't currently suitable for SMP VMs, so
+ * we fall back on the more expensive emulated mode. The current TSC handling
+ * (frequent switching between offsetted mode and taking VM exits, on all VCPUs
+ * without any kind of coordination) would lead to inconsistent TSC behavior with
+ * guest SMP, including TSC going backwards. */
+ pVM->tm.s.enmTSCMode = NEMR3NeedSpecialTscMode(pVM) ? TMTSCMODE_NATIVE_API
+ : pVM->cCpus == 1 && tmR3HasFixedTSC(pVM) ? TMTSCMODE_DYNAMIC : TMTSCMODE_VIRT_TSC_EMULATED;
+ }
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \"TSCMode\""));
+ else
+ {
+ if (!RTStrCmp(szTSCMode, "VirtTSCEmulated"))
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+ else if (!RTStrCmp(szTSCMode, "RealTSCOffset"))
+ pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
+ else if (!RTStrCmp(szTSCMode, "Dynamic"))
+ pVM->tm.s.enmTSCMode = TMTSCMODE_DYNAMIC;
+ else
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM TSC mode value \"%s\""), szTSCMode);
+ if (NEMR3NeedSpecialTscMode(pVM))
+ {
+ LogRel(("TM: NEM overrides the /TM/TSCMode=%s settings.\n", szTSCMode));
+ pVM->tm.s.enmTSCMode = TMTSCMODE_NATIVE_API;
+ }
+ }
+
+ /**
+ * @cfgm{/TM/TSCModeSwitchAllowed, bool, Whether TM TSC mode switch is allowed
+ * at runtime}
+ * When using paravirtualized guests, we dynamically switch TSC modes to a more
+ * optimal one for performance. This setting allows overriding this behaviour.
+ */
+ rc = CFGMR3QueryBool(pCfgHandle, "TSCModeSwitchAllowed", &pVM->tm.s.fTSCModeSwitchAllowed);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
+ /* This is finally determined in TMR3InitFinalize() as GIM isn't initialized yet. */
+ pVM->tm.s.fTSCModeSwitchAllowed = true;
+ }
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying bool value \"TSCModeSwitchAllowed\""));
+ if (pVM->tm.s.fTSCModeSwitchAllowed && pVM->tm.s.enmTSCMode == TMTSCMODE_NATIVE_API)
+ {
+ LogRel(("TM: NEM overrides the /TM/TSCModeSwitchAllowed setting.\n"));
+ pVM->tm.s.fTSCModeSwitchAllowed = false;
+ }
+
+ /** @cfgm{/TM/TSCTicksPerSecond, uint64_t, Current TSC frequency from GIP}
+ * The number of TSC ticks per second (i.e. the TSC frequency). This will
+ * override enmTSCMode.
+ */
+ pVM->tm.s.cTSCTicksPerSecondHost = tmR3CalibrateTSC();
+ rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
+ pVM->tm.s.cTSCTicksPerSecond = pVM->tm.s.cTSCTicksPerSecondHost;
+ if ( ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
+ || pVM->tm.s.enmTSCMode == TMTSCMODE_VIRT_TSC_EMULATED)
+ && pVM->tm.s.cTSCTicksPerSecond >= _4G)
+ {
+ pVM->tm.s.cTSCTicksPerSecond = _4G - 1; /* (A limitation of our math code) */
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+ }
+ }
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint64_t value \"TSCTicksPerSecond\""));
+ else if ( pVM->tm.s.cTSCTicksPerSecond < _1M
+ || pVM->tm.s.cTSCTicksPerSecond >= _4G)
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+ N_("Configuration error: \"TSCTicksPerSecond\" = %RI64 is not in the range 1MHz..4GHz-1"),
+ pVM->tm.s.cTSCTicksPerSecond);
+ else if (pVM->tm.s.enmTSCMode != TMTSCMODE_NATIVE_API)
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+ else
+ {
+ LogRel(("TM: NEM overrides the /TM/TSCTicksPerSecond=%RU64 setting.\n", pVM->tm.s.cTSCTicksPerSecond));
+ pVM->tm.s.cTSCTicksPerSecond = pVM->tm.s.cTSCTicksPerSecondHost;
+ }
+
+ /** @cfgm{/TM/TSCTiedToExecution, bool, false}
+ * Whether the TSC should be tied to execution. This will exclude most of the
+ * virtualization overhead, but will by default include the time spent in the
+ * halt state (see TM/TSCNotTiedToHalt). This setting will override all other
+ * TSC settings except for TSCTicksPerSecond and TSCNotTiedToHalt, and should
+ * be avoided or used with great care. Note that this will only work right
+ * together with VT-x or AMD-V, and with a single virtual CPU. */
+ rc = CFGMR3QueryBoolDef(pCfgHandle, "TSCTiedToExecution", &pVM->tm.s.fTSCTiedToExecution, false);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying bool value \"TSCTiedToExecution\""));
+ if (pVM->tm.s.fTSCTiedToExecution && pVM->tm.s.enmTSCMode == TMTSCMODE_NATIVE_API)
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("/TM/TSCTiedToExecution is not supported in NEM mode!"));
+ if (pVM->tm.s.fTSCTiedToExecution)
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+
+
+ /** @cfgm{/TM/TSCNotTiedToHalt, bool, false}
+ * This is used with /TM/TSCTiedToExecution to control how TSC operates
+ * across HLT instructions. When true HLT is considered execution time and
+ * TSC continues to run, while when false (default) TSC stops during halt. */
+ rc = CFGMR3QueryBoolDef(pCfgHandle, "TSCNotTiedToHalt", &pVM->tm.s.fTSCNotTiedToHalt, false);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying bool value \"TSCNotTiedToHalt\""));
+
+ /*
+ * Configure the timer synchronous virtual time.
+ */
+ /** @cfgm{/TM/ScheduleSlack, uint32_t, ns, 0, UINT32_MAX, 100000}
+ * Scheduling slack when processing timers. */
+ rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.u32VirtualSyncScheduleSlack = 100000; /* 0.100ms (ASSUMES virtual time is nanoseconds) */
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying 32-bit integer value \"ScheduleSlack\""));
+
+ /** @cfgm{/TM/CatchUpStopThreshold, uint64_t, ns, 0, UINT64_MAX, 500000}
+ * When to stop a catch-up, considering it successful. */
+ rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStopThreshold", &pVM->tm.s.u64VirtualSyncCatchUpStopThreshold);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.u64VirtualSyncCatchUpStopThreshold = 500000; /* 0.5ms */
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpStopThreshold\""));
+
+ /** @cfgm{/TM/CatchUpGiveUpThreshold, uint64_t, ns, 0, UINT64_MAX, 60000000000}
+ * When to give up a catch-up attempt. */
+ rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = UINT64_C(60000000000); /* 60 sec */
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpGiveUpThreshold\""));
+
+
+ /** @cfgm{/TM/CatchUpPrecentage[0..9], uint32_t, %, 1, 2000, various}
+ * The catch-up percent for a given period. */
+ /** @cfgm{/TM/CatchUpStartThreshold[0..9], uint64_t, ns, 0, UINT64_MAX}
+ * The catch-up period threshold, or if you like, when a period starts. */
+#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
+ do \
+ { \
+ uint64_t u64; \
+ rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
+ u64 = UINT64_C(DefStart); \
+ else if (RT_FAILURE(rc)) \
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpThreshold" #iPeriod "\"")); \
+ if ( (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
+ || u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %'RU64"), u64); \
+ pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
+ rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
+ pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage = (DefPct); \
+ else if (RT_FAILURE(rc)) \
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 32-bit integer value \"CatchUpPrecentage" #iPeriod "\"")); \
+ } while (0)
+ /* This needs more tuning. Not sure if we really need so many periods or need to be so gentle. */
+ TM_CFG_PERIOD(0, 750000, 5); /* 0.75ms at 1.05x */
+ TM_CFG_PERIOD(1, 1500000, 10); /* 1.50ms at 1.10x */
+ TM_CFG_PERIOD(2, 8000000, 25); /* 8ms at 1.25x */
+ TM_CFG_PERIOD(3, 30000000, 50); /* 30ms at 1.50x */
+ TM_CFG_PERIOD(4, 75000000, 75); /* 75ms at 1.75x */
+ TM_CFG_PERIOD(5, 175000000, 100); /* 175ms at 2x */
+ TM_CFG_PERIOD(6, 500000000, 200); /* 500ms at 3x */
+ TM_CFG_PERIOD(7, 3000000000, 300); /* 3s at 4x */
+ TM_CFG_PERIOD(8,30000000000, 400); /* 30s at 5x */
+ TM_CFG_PERIOD(9,55000000000, 500); /* 55s at 6x */
+ AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 10);
+#undef TM_CFG_PERIOD
+
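+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * how the period table configured above maps a virtual sync lag to a
+ * catch-up percentage. A 10ms lag passes the 8ms threshold of period #2 but
+ * not the 30ms one of period #3, so the clock runs at 125% until the lag
+ * drops below the stop threshold. MYPERIOD and myCatchUpPct() are
+ * hypothetical.
+ */
+#if 0 /* example only */
+# include <stdint.h>
+
+typedef struct MYPERIOD { uint64_t u64StartNs; uint32_t u32Pct; } MYPERIOD;
+
+static uint32_t myCatchUpPct(const MYPERIOD *paPeriods, unsigned cPeriods, uint64_t u64LagNs)
+{
+    uint32_t uPct = 0; /* below period #0 there is no catch-up */
+    for (unsigned i = 0; i < cPeriods && u64LagNs >= paPeriods[i].u64StartNs; i++)
+        uPct = paPeriods[i].u32Pct; /* take the highest threshold passed */
+    return uPct; /* effective clock speed = (100 + uPct) percent */
+}
+#endif
+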
+ /*
+ * Configure real world time (UTC).
+ */
+ /** @cfgm{/TM/UTCOffset, int64_t, ns, INT64_MIN, INT64_MAX, 0}
+ * The UTC offset. This is used to put the guest back or forwards in time. */
+ rc = CFGMR3QueryS64(pCfgHandle, "UTCOffset", &pVM->tm.s.offUTC);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.offUTC = 0; /* ns */
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying 64-bit integer value \"UTCOffset\""));
+
+ /** @cfgm{/TM/UTCTouchFileOnJump, string, none}
+ * File to be written to every time the host time jumps.
+ rc = CFGMR3QueryStringAlloc(pCfgHandle, "UTCTouchFileOnJump", &pVM->tm.s.pszUtcTouchFileOnJump);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.pszUtcTouchFileOnJump = NULL;
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying string value \"UTCTouchFileOnJump\""));
+
+ /*
+ * Setup the warp drive.
+ */
+ /** @cfgm{/TM/WarpDrivePercentage, uint32_t, %, 0, 20000, 100}
+ * The warp drive percentage, 100% is normal speed. This is used to speed up
+ * or slow down the virtual clock, which can be useful for fast-forwarding
+ * boring periods during tests. */
+ rc = CFGMR3QueryU32(pCfgHandle, "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage); /* legacy */
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ pVM->tm.s.u32VirtualWarpDrivePercentage = 100;
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"WarpDrivePercent\""));
+ else if ( pVM->tm.s.u32VirtualWarpDrivePercentage < 2
+ || pVM->tm.s.u32VirtualWarpDrivePercentage > 20000)
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+ N_("Configuration error: \"WarpDrivePercent\" = %RI32 is not in the range 2..20000"),
+ pVM->tm.s.u32VirtualWarpDrivePercentage);
+ pVM->tm.s.fVirtualWarpDrive = pVM->tm.s.u32VirtualWarpDrivePercentage != 100;
+ if (pVM->tm.s.fVirtualWarpDrive)
+ {
+ if (pVM->tm.s.enmTSCMode == TMTSCMODE_NATIVE_API)
+ LogRel(("TM: Warp-drive active, escept for TSC which is in NEM mode. u32VirtualWarpDrivePercentage=%RI32\n",
+ pVM->tm.s.u32VirtualWarpDrivePercentage));
+ else
+ {
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+ LogRel(("TM: Warp-drive active. u32VirtualWarpDrivePercentage=%RI32\n", pVM->tm.s.u32VirtualWarpDrivePercentage));
+ }
+ }
+
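+/*
+ * Illustrative sketch (not part of the original source, hence the #if 0):
+ * the warp drive percentage simply scales how fast virtual time advances
+ * relative to real time; 200 doubles the speed, 50 halves it.
+ * myWarpedElapsedNs() is hypothetical.
+ */
+#if 0 /* example only */
+# include <stdint.h>
+
+static uint64_t myWarpedElapsedNs(uint64_t cNsRealElapsed, uint32_t uWarpDrivePct)
+{
+    return cNsRealElapsed * uWarpDrivePct / 100; /* 100 = normal speed */
+}
+#endif
+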
+ /*
+ * Gather the Host Hz configuration values.
+ */
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzMax", &pVM->tm.s.cHostHzMax, 20000);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzMax\""));
+
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorTimerCpu", &pVM->tm.s.cPctHostHzFudgeFactorTimerCpu, 111);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorTimerCpu\""));
+
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorOtherCpu", &pVM->tm.s.cPctHostHzFudgeFactorOtherCpu, 110);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorOtherCpu\""));
+
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp100", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp100, 300);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp100\""));
+
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp200", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp200, 250);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp200\""));
+
+ rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp400", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp400, 200);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp400\""));
+
+ /*
+ * Finally, setup and report.
+ */
+ pVM->tm.s.enmOriginalTSCMode = pVM->tm.s.enmTSCMode;
+ CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
+ LogRel(("TM: cTSCTicksPerSecond=%'RU64 (%#RX64) enmTSCMode=%d (%s)\n"
+ "TM: cTSCTicksPerSecondHost=%'RU64 (%#RX64)\n"
+ "TM: TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n",
+ pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM),
+ pVM->tm.s.cTSCTicksPerSecondHost, pVM->tm.s.cTSCTicksPerSecondHost,
+ pVM->tm.s.fTSCTiedToExecution, pVM->tm.s.fTSCNotTiedToHalt));
+
+ /*
+ * Start the timer (guard against REM not yielding).
+ */
+ /** @cfgm{/TM/TimerMillies, uint32_t, ms, 1, 1000, 10}
+ * The watchdog timer interval. */
+ uint32_t u32Millies;
+ rc = CFGMR3QueryU32(pCfgHandle, "TimerMillies", &u32Millies);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ u32Millies = VM_IS_HM_ENABLED(pVM) ? 1000 : 10;
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Configuration error: Failed to query uint32_t value \"TimerMillies\""));
+ rc = RTTimerCreate(&pVM->tm.s.pTimer, u32Millies, tmR3TimerCallback, pVM);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("Failed to create timer, u32Millies=%d rc=%Rrc.\n", u32Millies, rc));
+ return rc;
+ }
+ Log(("TM: Created timer %p firing every %d milliseconds\n", pVM->tm.s.pTimer, u32Millies));
+ pVM->tm.s.u32TimerMillies = u32Millies;
+
+ /*
+ * Register saved state.
+ */
+ rc = SSMR3RegisterInternal(pVM, "tm", 1, TM_SAVED_STATE_VERSION, sizeof(uint64_t) * 8,
+ NULL, NULL, NULL,
+ NULL, tmR3Save, NULL,
+ NULL, tmR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Register statistics.
+ */
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawData.c1nsSteps,STAMTYPE_U32, "/TM/R3/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawData.cBadPrev, STAMTYPE_U32, "/TM/R3/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+#if 0 /** @todo retrieve from ring-0 */
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps,STAMTYPE_U32, "/TM/R0/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+#endif
+ STAM_REL_REG( pVM,(void*)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attempted caught up with.");
+ STAM_REL_REG( pVM,(void*)&pVM->tm.s.HzHint.s.uMax, STAMTYPE_U32, "/TM/MaxHzHint", STAMUNIT_HZ, "Max guest timer frequency hint.");
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->tm.s.aTimerQueues); i++)
+ {
+ rc = STAMR3RegisterF(pVM, (void *)&pVM->tm.s.aTimerQueues[i].uMaxHzHint, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_HZ,
+ "", "/TM/MaxHzHint/%s", pVM->tm.s.aTimerQueues[i].szName);
+ AssertRC(rc);
+ }
+
+#ifdef VBOX_WITH_STATISTICS
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawData.cExpired, STAMTYPE_U32, "/TM/R3/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawData.cUpdateRaces,STAMTYPE_U32, "/TM/R3/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
+# if 0 /** @todo retrieve from ring-0 */
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired, STAMTYPE_U32, "/TM/R0/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
+# endif
+ STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
+ STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].StatDo, STAMTYPE_PROFILE, "/TM/DoQueues/Virtual", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");
+ STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].StatDo,STAMTYPE_PROFILE,"/TM/DoQueues/VirtualSync", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual sync clock queue.");
+ STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL].StatDo, STAMTYPE_PROFILE, "/TM/DoQueues/Real", STAMUNIT_TICKS_PER_CALL, "Time spent on the real clock queue.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatPoll, STAMTYPE_COUNTER, "/TM/Poll", STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet, STAMTYPE_COUNTER, "/TM/Poll/AlreadySet", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollELoop, STAMTYPE_COUNTER, "/TM/Poll/ELoop", STAMUNIT_OCCURENCES, "Times TMTimerPoll has given up getting a consistent virtual sync data set.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollMiss, STAMTYPE_COUNTER, "/TM/Poll/Miss", STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollRunning, STAMTYPE_COUNTER, "/TM/Poll/Running", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollSimple, STAMTYPE_COUNTER, "/TM/Poll/Simple", STAMUNIT_OCCURENCES, "TMTimerPoll calls where we could take the simple path.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollVirtual, STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
+ STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync, STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatPostponedR3, STAMTYPE_COUNTER, "/TM/PostponedR3", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
+ STAM_REG(pVM, &pVM->tm.s.StatPostponedRZ, STAMTYPE_COUNTER, "/TM/PostponedRZ", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-0 / RC.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatScheduleOneR3, STAMTYPE_PROFILE, "/TM/ScheduleOneR3", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.");
+ STAM_REG(pVM, &pVM->tm.s.StatScheduleOneRZ, STAMTYPE_PROFILE, "/TM/ScheduleOneRZ", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.");
+ STAM_REG(pVM, &pVM->tm.s.StatScheduleSetFF, STAMTYPE_COUNTER, "/TM/ScheduleSetFF", STAMUNIT_OCCURENCES, "The number of times the timer FF was set instead of doing scheduling.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSet, STAMTYPE_COUNTER, "/TM/TimerSet", STAMUNIT_OCCURENCES, "Calls, except virtual sync timers");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetOpt, STAMTYPE_COUNTER, "/TM/TimerSet/Opt", STAMUNIT_OCCURENCES, "Optimized path taken.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetR3, STAMTYPE_PROFILE, "/TM/TimerSet/R3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-3.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRZ, STAMTYPE_PROFILE, "/TM/TimerSet/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-0 / RC.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStActive, STAMTYPE_COUNTER, "/TM/TimerSet/StActive", STAMUNIT_OCCURENCES, "ACTIVE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStExpDeliver, STAMTYPE_COUNTER, "/TM/TimerSet/StExpDeliver", STAMUNIT_OCCURENCES, "EXPIRED_DELIVER");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStOther, STAMTYPE_COUNTER, "/TM/TimerSet/StOther", STAMUNIT_OCCURENCES, "Other states");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStPendStop, STAMTYPE_COUNTER, "/TM/TimerSet/StPendStop", STAMUNIT_OCCURENCES, "PENDING_STOP");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStPendStopSched, STAMTYPE_COUNTER, "/TM/TimerSet/StPendStopSched", STAMUNIT_OCCURENCES, "PENDING_STOP_SCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStPendSched, STAMTYPE_COUNTER, "/TM/TimerSet/StPendSched", STAMUNIT_OCCURENCES, "PENDING_SCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStPendResched, STAMTYPE_COUNTER, "/TM/TimerSet/StPendResched", STAMUNIT_OCCURENCES, "PENDING_RESCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetStStopped, STAMTYPE_COUNTER, "/TM/TimerSet/StStopped", STAMUNIT_OCCURENCES, "STOPPED");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVs, STAMTYPE_COUNTER, "/TM/TimerSetVs", STAMUNIT_OCCURENCES, "TMTimerSet calls on virtual sync timers");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVsR3, STAMTYPE_PROFILE, "/TM/TimerSetVs/R3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-3 on virtual sync timers.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVsRZ, STAMTYPE_PROFILE, "/TM/TimerSetVs/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-0 / RC on virtual sync timers.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVsStActive, STAMTYPE_COUNTER, "/TM/TimerSetVs/StActive", STAMUNIT_OCCURENCES, "ACTIVE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVsStExpDeliver, STAMTYPE_COUNTER, "/TM/TimerSetVs/StExpDeliver", STAMUNIT_OCCURENCES, "EXPIRED_DELIVER");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetVsStStopped, STAMTYPE_COUNTER, "/TM/TimerSetVs/StStopped", STAMUNIT_OCCURENCES, "STOPPED");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelative, STAMTYPE_COUNTER, "/TM/TimerSetRelative", STAMUNIT_OCCURENCES, "Calls, except virtual sync timers");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeOpt, STAMTYPE_COUNTER, "/TM/TimerSetRelative/Opt", STAMUNIT_OCCURENCES, "Optimized path taken.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeR3, STAMTYPE_PROFILE, "/TM/TimerSetRelative/R3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSetRelative calls made in ring-3 (sans virtual sync).");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeRZ, STAMTYPE_PROFILE, "/TM/TimerSetRelative/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSetReltaive calls made in ring-0 / RC (sans virtual sync).");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStActive, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StActive", STAMUNIT_OCCURENCES, "ACTIVE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStExpDeliver, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StExpDeliver", STAMUNIT_OCCURENCES, "EXPIRED_DELIVER");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStOther, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StOther", STAMUNIT_OCCURENCES, "Other states");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStPendStop, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StPendStop", STAMUNIT_OCCURENCES, "PENDING_STOP");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStPendStopSched, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StPendStopSched",STAMUNIT_OCCURENCES, "PENDING_STOP_SCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStPendSched, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StPendSched", STAMUNIT_OCCURENCES, "PENDING_SCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStPendResched, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StPendResched", STAMUNIT_OCCURENCES, "PENDING_RESCHEDULE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeStStopped, STAMTYPE_COUNTER, "/TM/TimerSetRelative/StStopped", STAMUNIT_OCCURENCES, "STOPPED");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVs, STAMTYPE_COUNTER, "/TM/TimerSetRelativeVs", STAMUNIT_OCCURENCES, "TMTimerSetRelative calls on virtual sync timers");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVsR3, STAMTYPE_PROFILE, "/TM/TimerSetRelativeVs/R3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSetRelative calls made in ring-3 on virtual sync timers.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVsRZ, STAMTYPE_PROFILE, "/TM/TimerSetRelativeVs/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSetReltaive calls made in ring-0 / RC on virtual sync timers.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVsStActive, STAMTYPE_COUNTER, "/TM/TimerSetRelativeVs/StActive", STAMUNIT_OCCURENCES, "ACTIVE");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVsStExpDeliver, STAMTYPE_COUNTER, "/TM/TimerSetRelativeVs/StExpDeliver", STAMUNIT_OCCURENCES, "EXPIRED_DELIVER");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerSetRelativeVsStStopped, STAMTYPE_COUNTER, "/TM/TimerSetRelativeVs/StStopped", STAMUNIT_OCCURENCES, "STOPPED");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerStopR3, STAMTYPE_PROFILE, "/TM/TimerStopR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-3.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerStopRZ, STAMTYPE_PROFILE, "/TM/TimerStopRZ", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-0 / RC.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualGet, STAMTYPE_COUNTER, "/TM/VirtualGet", STAMUNIT_OCCURENCES, "The number of times TMTimerGet was called when the clock was running.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet, STAMTYPE_COUNTER, "/TM/VirtualSyncGet", STAMUNIT_OCCURENCES, "The number of times tmVirtualSyncGetEx was called.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetAdjLast, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/AdjLast", STAMUNIT_OCCURENCES, "Times we've adjusted against the last returned time stamp .");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx has given up getting a consistent virtual sync data set.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetExpired, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Expired", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx encountered an expired timer stopping the clock.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Locked", STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLockless, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Lockless", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx returned without needing to take the lock.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/SetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling tmVirtualSyncGetEx.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/VirtualPause", STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/VirtualResume", STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTimerCallbackSetFF, STAMTYPE_COUNTER, "/TM/CallbackSetFF", STAMUNIT_OCCURENCES, "The number of times the timer callback set FF.");
+ STAM_REG(pVM, &pVM->tm.s.StatTimerCallback, STAMTYPE_COUNTER, "/TM/Callback", STAMUNIT_OCCURENCES, "The number of times the timer callback is invoked.");
+
+ STAM_REG(pVM, &pVM->tm.s.StatTSCCatchupLE010, STAMTYPE_COUNTER, "/TM/TSC/Intercept/CatchupLE010", STAMUNIT_OCCURENCES, "In catch-up mode, 10% or lower.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCCatchupLE025, STAMTYPE_COUNTER, "/TM/TSC/Intercept/CatchupLE025", STAMUNIT_OCCURENCES, "In catch-up mode, 25%-11%.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCCatchupLE100, STAMTYPE_COUNTER, "/TM/TSC/Intercept/CatchupLE100", STAMUNIT_OCCURENCES, "In catch-up mode, 100%-26%.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCCatchupOther, STAMTYPE_COUNTER, "/TM/TSC/Intercept/CatchupOther", STAMUNIT_OCCURENCES, "In catch-up mode, > 100%.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCNotFixed, STAMTYPE_COUNTER, "/TM/TSC/Intercept/NotFixed", STAMUNIT_OCCURENCES, "TSC is not fixed, it may run at variable speed.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCNotTicking, STAMTYPE_COUNTER, "/TM/TSC/Intercept/NotTicking", STAMUNIT_OCCURENCES, "TSC is not ticking.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCSyncNotTicking, STAMTYPE_COUNTER, "/TM/TSC/Intercept/SyncNotTicking", STAMUNIT_OCCURENCES, "VirtualSync isn't ticking.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCWarp, STAMTYPE_COUNTER, "/TM/TSC/Intercept/Warp", STAMUNIT_OCCURENCES, "Warpdrive is active.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCSet, STAMTYPE_COUNTER, "/TM/TSC/Sets", STAMUNIT_OCCURENCES, "Calls to TMCpuTickSet.");
+ STAM_REG(pVM, &pVM->tm.s.StatTSCUnderflow, STAMTYPE_COUNTER, "/TM/TSC/Underflow", STAMUNIT_OCCURENCES, "TSC underflow; corrected with last seen value .");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/TSC/Pause", STAMUNIT_OCCURENCES, "The number of times the TSC was paused.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/TSC/Resume", STAMUNIT_OCCURENCES, "The number of times the TSC was resumed.");
+#endif /* VBOX_WITH_STATISTICS */
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.offTSCRawSrc, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS, "TSC offset relative the raw source", "/TM/TSC/offCPU%u", i);
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Resettable: Total CPU run time.", "/TM/CPU/%02u", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsExecuting, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent executing guest code.", "/TM/CPU/%02u/PrfExecuting", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsExecLong, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent executing guest code - long hauls.", "/TM/CPU/%02u/PrfExecLong", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsExecShort, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent executing guest code - short stretches.", "/TM/CPU/%02u/PrfExecShort", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsExecTiny, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent executing guest code - tiny bits.", "/TM/CPU/%02u/PrfExecTiny", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsHalted, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent halted.", "/TM/CPU/%02u/PrfHalted", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.StatNsOther, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE, "Resettable: Time spent in the VMM or preempted.", "/TM/CPU/%02u/PrfOther", i);
+# endif
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cNsTotalStat, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Total CPU run time.", "/TM/CPU/%02u/cNsTotal", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cNsExecuting, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Time spent executing guest code.", "/TM/CPU/%02u/cNsExecuting", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cNsHalted, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Time spent halted.", "/TM/CPU/%02u/cNsHalted", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cNsOtherStat, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Time spent in the VMM or preempted.", "/TM/CPU/%02u/cNsOther", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cPeriodsExecuting, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Times executed guest code.", "/TM/CPU/%02u/cPeriodsExecuting", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.cPeriodsHalted, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Times halted.", "/TM/CPU/%02u/cPeriodsHalted", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.CpuLoad.cPctExecuting, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent executing guest code recently.", "/TM/CPU/%02u/pctExecuting", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.CpuLoad.cPctHalted, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent halted recently.", "/TM/CPU/%02u/pctHalted", i);
+ STAMR3RegisterF(pVM, &pVCpu->tm.s.CpuLoad.cPctOther, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent in the VMM or preempted recently.", "/TM/CPU/%02u/pctOther", i);
+#endif
+ }
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ STAMR3RegisterF(pVM, &pVM->tm.s.CpuLoad.cPctExecuting, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent executing guest code recently.", "/TM/CPU/pctExecuting");
+ STAMR3RegisterF(pVM, &pVM->tm.s.CpuLoad.cPctHalted, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent halted recently.", "/TM/CPU/pctHalted");
+ STAMR3RegisterF(pVM, &pVM->tm.s.CpuLoad.cPctOther, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "Time spent in the VMM or preempted recently.", "/TM/CPU/pctOther");
+#endif
+
+#ifdef VBOX_WITH_STATISTICS
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup, STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp", STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
+ STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp, STAMTYPE_U8, "/TM/VirtualSync/CatchUpActive", STAMUNIT_NONE, "Catch-Up active indicator.");
+ STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage, STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage", STAMUNIT_PCT, "The catch-up percentage. (+100/100 to get clock multiplier)");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncFF, STAMTYPE_PROFILE, "/TM/VirtualSync/FF", STAMUNIT_TICKS_PER_OCCURENCE, "Time spent in TMR3VirtualSyncFF by all but the dedicate timer EMT.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting",STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun, STAMTYPE_COUNTER, "/TM/VirtualSync/Run", STAMUNIT_OCCURENCES, "Times the virtual sync timer queue was considered.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Restarts", STAMUNIT_OCCURENCES, "Times the clock was restarted after a run.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStop, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Stop", STAMUNIT_OCCURENCES, "Times the clock was stopped when calculating the current time before examining the timers.");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/StoppedAlready", STAMUNIT_OCCURENCES, "Times the clock was already stopped elsewhere (TMVirtualSyncGet).");
+ STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack, STAMTYPE_PROFILE, "/TM/VirtualSync/Run/Slack", STAMUNIT_NS_PER_OCCURENCE, "The scheduling slack. (Catch-up handed out when running timers.)");
+ for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++)
+ {
+ STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "The catch-up percentage.", "/TM/VirtualSync/Periods/%u", i);
+ STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/Periods/%u/Adjust", i);
+ STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.", "/TM/VirtualSync/Periods/%u/Initial", i);
+ STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Start of this period (lag).", "/TM/VirtualSync/Periods/%u/Start", i);
+ }
+#endif /* VBOX_WITH_STATISTICS */
+
+ /*
+ * Register info handlers.
+ */
+ DBGFR3InfoRegisterInternalEx(pVM, "timers", "Dumps all timers. No arguments.", tmR3TimerInfo, DBGFINFO_FLAGS_RUN_ON_EMT);
+ DBGFR3InfoRegisterInternalEx(pVM, "activetimers", "Dumps active all timers. No arguments.", tmR3TimerInfoActive, DBGFINFO_FLAGS_RUN_ON_EMT);
+ DBGFR3InfoRegisterInternalEx(pVM, "clocks", "Display the time of the various clocks.", tmR3InfoClocks, DBGFINFO_FLAGS_RUN_ON_EMT);
+ DBGFR3InfoRegisterInternalArgv(pVM, "cpuload", "Display the CPU load stats (--help for details).", tmR3InfoCpuLoad, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the host CPU has a fixed TSC frequency.
+ *
+ * @returns true if it has, false if it hasn't.
+ *
+ * @remarks This test doesn't bother with very old CPUs that don't do power
+ * management or any other stuff that might influence the TSC rate.
+ * This isn't currently relevant.
+ */
+static bool tmR3HasFixedTSC(PVM pVM)
+{
+ /*
+ * ASSUME that if the GIP is in invariant TSC mode, it's because the CPU
+ * actually has invariant TSC.
+ *
+ * In driverless mode we just assume sync TSC for now regardless of what
+ * the case actually is.
+ */
+ PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
+ SUPGIPMODE const enmGipMode = pGip ? (SUPGIPMODE)pGip->u32Mode : SUPGIPMODE_INVARIANT_TSC;
+ if (enmGipMode == SUPGIPMODE_INVARIANT_TSC)
+ return true;
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /*
+ * Go by features and model info from the CPUID instruction.
+ */
+ if (ASMHasCpuId())
+ {
+ uint32_t uEAX, uEBX, uECX, uEDX;
+
+ /*
+ * By feature. (Used to be AMD specific, Intel seems to have picked it up.)
+ */
+ ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
+ if (uEAX >= 0x80000007 && RTX86IsValidExtRange(uEAX))
+ {
+ ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
+ if ( (uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR) /* TscInvariant */
+ && enmGipMode != SUPGIPMODE_ASYNC_TSC) /* No fixed tsc if the gip timer is in async mode. */
+ return true;
+ }
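+ /* (The TscInvariant bit is EDX bit 8 of leaf 0x80000007; both AMD and
+ Intel document it as indicating an invariant TSC.) */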
+
+ /*
+ * By model.
+ */
+ if (CPUMGetHostCpuVendor(pVM) == CPUMCPUVENDOR_AMD)
+ {
+ /*
+ * AuthenticAMD - Check for APM support and that TscInvariant is set.
+ *
+ * This test isn't correct with respect to fixed/non-fixed TSC and
+ * older models, but this isn't relevant since the result is currently
+ * only used for making a decision on AMD-V models.
+ */
+# if 0 /* Promoted to generic */
+ ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
+ if (uEAX >= 0x80000007)
+ {
+ ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
+ if ( (uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR) /* TscInvariant */
+ && ( enmGipMode == SUPGIPMODE_SYNC_TSC /* No fixed tsc if the gip timer is in async mode. */
+ || enmGipMode == SUPGIPMODE_INVARIANT_TSC))
+ return true;
+ }
+# endif
+ }
+ else if (CPUMGetHostCpuVendor(pVM) == CPUMCPUVENDOR_INTEL)
+ {
+ /*
+ * GenuineIntel - Check the model number.
+ *
+ * This test is lacking in the same way and for the same reasons
+ * as the AMD test above.
+ */
+ /** @todo use RTX86GetCpuFamily() and RTX86GetCpuModel() here. */
+ ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
+ unsigned uModel = (uEAX >> 4) & 0x0f;
+ unsigned uFamily = (uEAX >> 8) & 0x0f;
+ if (uFamily == 0x0f)
+ uFamily += (uEAX >> 20) & 0xff;
+ if (uFamily >= 0x06)
+ uModel += ((uEAX >> 16) & 0x0f) << 4;
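+ /* Example: a leaf-1 EAX of 0x000306A9 decodes to uModel=0xA, uFamily=0x6;
+ as uFamily >= 6, the extended model 0x3 is folded in and uModel
+ becomes 0xA + (0x3 << 4) = 0x3A, which satisfies the check below. */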
+ if ( (uFamily == 0x0f /*P4*/ && uModel >= 0x03)
+ || (uFamily == 0x06 /*P2/P3*/ && uModel >= 0x0e))
+ return true;
+ }
+ else if (CPUMGetHostCpuVendor(pVM) == CPUMCPUVENDOR_VIA)
+ {
+ /*
+ * CentaurHauls - Check the model, family and stepping.
+ *
+ * This only checks for VIA CPU models Nano X2, Nano X3,
+ * Eden X2 and QuadCore.
+ */
+ /** @todo use RTX86GetCpuFamily() and RTX86GetCpuModel() here. */
+ ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
+ unsigned uStepping = (uEAX & 0x0f);
+ unsigned uModel = (uEAX >> 4) & 0x0f;
+ unsigned uFamily = (uEAX >> 8) & 0x0f;
+ if ( uFamily == 0x06
+ && uModel == 0x0f
+ && uStepping >= 0x0c
+ && uStepping <= 0x0f)
+ return true;
+ }
+ else if (CPUMGetHostCpuVendor(pVM) == CPUMCPUVENDOR_SHANGHAI)
+ {
+ /*
+ * Shanghai - Check the model, family and stepping.
+ */
+ /** @todo use RTX86GetCpuFamily() and RTX86GetCpuModel() here. */
+ ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
+ unsigned uFamily = (uEAX >> 8) & 0x0f;
+ if ( uFamily == 0x06
+ || uFamily == 0x07)
+ {
+ return true;
+ }
+ }
+ }
+
+#else /* !X86 && !AMD64 */
+ RT_NOREF_PV(pVM);
+#endif /* !X86 && !AMD64 */
+ return false;
+}
+
+
+/**
+ * Calibrate the CPU tick.
+ *
+ * @returns Number of ticks per second.
+ */
+static uint64_t tmR3CalibrateTSC(void)
+{
+ uint64_t u64Hz;
+
+ /*
+ * Use GIP when available. Prefer the nominal one; no need to wait for it.
+ */
+ PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+ if (pGip)
+ {
+ u64Hz = pGip->u64CpuHz;
+ if (u64Hz < _1T && u64Hz > _1M)
+ return u64Hz;
+ AssertFailed(); /* This shouldn't happen. */
+
+ u64Hz = SUPGetCpuHzFromGip(pGip);
+ if (u64Hz < _1T && u64Hz > _1M)
+ return u64Hz;
+
+ AssertFailed(); /* This shouldn't happen. */
+ }
+ else
+ Assert(SUPR3IsDriverless());
+
+ /* Call this once first to make sure it's initialized. */
+ RTTimeNanoTS();
+
+ /*
+ * Yield the CPU to increase our chances of getting a correct value.
+ */
+ RTThreadYield(); /* Try to avoid interruptions between TSC and NanoTS samplings. */
+ static const unsigned s_auSleep[5] = { 50, 30, 30, 40, 40 };
+ uint64_t au64Samples[5];
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(au64Samples); i++)
+ {
+ RTMSINTERVAL cMillies;
+ int cTries = 5;
+ uint64_t u64Start = ASMReadTSC();
+ uint64_t u64End;
+ uint64_t StartTS = RTTimeNanoTS();
+ uint64_t EndTS;
+ do
+ {
+ RTThreadSleep(s_auSleep[i]);
+ u64End = ASMReadTSC();
+ EndTS = RTTimeNanoTS();
+ cMillies = (RTMSINTERVAL)((EndTS - StartTS + 500000) / 1000000);
+ } while ( cMillies == 0 /* the sleep may be interrupted... */
+ || (cMillies < 20 && --cTries > 0));
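+ /* The +500000 rounds the elapsed nanoseconds to the nearest millisecond,
+ e.g. an actual sleep of 49.6 ms (49600000 ns) yields cMillies=50. */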
+ uint64_t u64Diff = u64End - u64Start;
+
+ au64Samples[i] = (u64Diff * 1000) / cMillies;
+ AssertMsg(cTries > 0, ("cMillies=%d i=%d\n", cMillies, i));
+ }
+
+ /*
+ * Discard the highest and lowest results and calculate the average.
+ */
+ unsigned iHigh = 0;
+ unsigned iLow = 0;
+ for (i = 1; i < RT_ELEMENTS(au64Samples); i++)
+ {
+ if (au64Samples[i] < au64Samples[iLow])
+ iLow = i;
+ if (au64Samples[i] > au64Samples[iHigh])
+ iHigh = i;
+ }
+ au64Samples[iLow] = 0;
+ au64Samples[iHigh] = 0;
+
+ u64Hz = au64Samples[0];
+ for (i = 1; i < RT_ELEMENTS(au64Samples); i++)
+ u64Hz += au64Samples[i];
+ u64Hz /= RT_ELEMENTS(au64Samples) - 2;
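+ /* Example: scaled-down samples of { 2990, 3010, 3000, 3500, 2500 } zero
+ the 2500 low and the 3500 high, so the remaining sum of 9000 divided
+ by the 3 surviving slots averages out to 3000. */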
+
+ return u64Hz;
+}
+
+
+/**
+ * Finalizes the TM initialization.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMM_INT_DECL(int) TMR3InitFinalize(PVM pVM)
+{
+ int rc;
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ /*
+ * Create a timer for refreshing the CPU load stats.
+ */
+ TMTIMERHANDLE hTimer;
+ rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, tmR3CpuLoadTimer, NULL, TMTIMER_FLAGS_NO_RING0, "CPU Load Timer", &hTimer);
+ if (RT_SUCCESS(rc))
+ rc = TMTimerSetMillies(pVM, hTimer, 1000);
+#endif
+
+ /*
+ * GIM is now initialized. Determine if TSC mode switching is allowed (respecting CFGM override).
+ */
+ pVM->tm.s.fTSCModeSwitchAllowed &= tmR3HasFixedTSC(pVM) && GIMIsEnabled(pVM);
+ LogRel(("TM: TMR3InitFinalize: fTSCModeSwitchAllowed=%RTbool\n", pVM->tm.s.fTSCModeSwitchAllowed));
+
+ /*
+ * Grow the virtual & real timer tables so we've got sufficient
+ * space for dynamically created timers. We cannot allocate more
+ * after ring-0 init completes.
+ */
+ static struct { uint32_t idxQueue, cExtra; } s_aExtra[] = { {TMCLOCK_VIRTUAL, 128}, {TMCLOCK_REAL, 32} };
+ for (uint32_t i = 0; i < RT_ELEMENTS(s_aExtra); i++)
+ {
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[s_aExtra[i].idxQueue];
+ PDMCritSectRwEnterExcl(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ if (s_aExtra[i].cExtra > pQueue->cTimersFree)
+ {
+ uint32_t cTimersAlloc = pQueue->cTimersAlloc + s_aExtra[i].cExtra - pQueue->cTimersFree;
+ rc = tmR3TimerQueueGrow(pVM, pQueue, cTimersAlloc);
+ AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc cTimersAlloc=%u %s\n", rc, cTimersAlloc, pQueue->szName), rc);
+ }
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+ }
+
+#ifdef VBOX_WITH_STATISTICS
+ /*
+ * Register timer statistics now that we've fixed the timer table sizes.
+ */
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ pVM->tm.s.aTimerQueues[idxQueue].fCannotGrow = true;
+ tmR3TimerQueueRegisterStats(pVM, &pVM->tm.s.aTimerQueues[idxQueue], UINT32_MAX);
+ }
+#endif
+
+ return rc;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMM_INT_DECL(void) TMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ LogFlow(("TMR3Relocate\n"));
+ RT_NOREF(pVM, offDelta);
+}
+
+
+/**
+ * Terminates the TM.
+ *
+ * Termination means cleaning up and freeing all resources;
+ * the VM itself is at this point powered off or suspended.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMM_INT_DECL(int) TMR3Term(PVM pVM)
+{
+ if (pVM->tm.s.pTimer)
+ {
+ int rc = RTTimerDestroy(pVM->tm.s.pTimer);
+ AssertRC(rc);
+ pVM->tm.s.pTimer = NULL;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the TM component this means that a rescheduling is performed and
+ * the FF is cleared, but without running the queues. We'll have to
+ * check whether this makes sense or not, but it seems like a good idea for now....
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMM_INT_DECL(void) TMR3Reset(PVM pVM)
+{
+ LogFlow(("TMR3Reset:\n"));
+ VM_ASSERT_EMT(pVM);
+
+ /*
+ * Abort any pending catch up.
+ * This isn't perfect...
+ */
+ if (pVM->tm.s.fVirtualSyncCatchUp)
+ {
+ const uint64_t offVirtualNow = TMVirtualGetNoCheck(pVM);
+ const uint64_t offVirtualSyncNow = TMVirtualSyncGetNoCheck(pVM);
+ if (pVM->tm.s.fVirtualSyncCatchUp)
+ {
+ STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+
+ const uint64_t offOld = pVM->tm.s.offVirtualSyncGivenUp;
+ const uint64_t offNew = offVirtualNow - offVirtualSyncNow;
+ Assert(offOld <= offNew);
+ ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+ ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+ LogRel(("TM: Aborting catch-up attempt on reset with a %'RU64 ns lag on reset; new total: %'RU64 ns\n", offNew - offOld, offNew));
+ }
+ }
+
+ /*
+ * Process the queues.
+ */
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+ tmTimerQueueSchedule(pVM, pQueue, pQueue);
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ }
+#ifdef VBOX_STRICT
+ tmTimerQueuesSanityChecks(pVM, "TMR3Reset");
+#endif
+
+ PVMCPU pVCpuDst = pVM->apCpusR3[pVM->tm.s.idTimerCpu];
+ VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /** @todo FIXME: this isn't right. */
+
+ /*
+ * Switch TM TSC mode back to the original mode after a reset for
+ * paravirtualized guests that alter the TM TSC mode during operation.
+ * We're already in an EMT rendezvous at this point.
+ */
+ if ( pVM->tm.s.fTSCModeSwitchAllowed
+ && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
+ {
+ VM_ASSERT_EMT0(pVM);
+ tmR3CpuTickParavirtDisable(pVM, pVM->apCpusR3[0], NULL /* pvData */);
+ }
+ Assert(!GIMIsParavirtTscEnabled(pVM));
+ pVM->tm.s.fParavirtTscEnabled = false;
+
+ /*
+ * Reset TSC to avoid a Windows 8+ bug (see @bugref{8926}). If Windows
+ * sees a TSC value beyond 0x40000000000 at startup, it will reset the
+ * TSC on the boot-up CPU only, causing confusion and mayhem with SMP.
+ */
+ VM_ASSERT_EMT0(pVM);
+ uint64_t offTscRawSrc;
+ switch (pVM->tm.s.enmTSCMode)
+ {
+ case TMTSCMODE_REAL_TSC_OFFSET:
+ offTscRawSrc = SUPReadTsc();
+ break;
+ case TMTSCMODE_DYNAMIC:
+ case TMTSCMODE_VIRT_TSC_EMULATED:
+ offTscRawSrc = TMVirtualSyncGetNoCheck(pVM);
+ offTscRawSrc = ASMMultU64ByU32DivByU32(offTscRawSrc, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
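+ /* E.g. 5 seconds of virtual sync time (5e9 ns) at a 2.5 GHz guest TSC
+ rate gives an offset of 5e9 * 2.5e9 / 1e9 = 12.5e9 ticks. */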
+ break;
+ case TMTSCMODE_NATIVE_API:
+ /** @todo NEM TSC reset on reset for Windows8+ bug workaround. */
+ offTscRawSrc = 0;
+ break;
+ default:
+ AssertFailedBreakStmt(offTscRawSrc = 0);
+ }
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ pVCpu->tm.s.offTSCRawSrc = offTscRawSrc;
+ pVCpu->tm.s.u64TSC = 0;
+ pVCpu->tm.s.u64TSCLastSeen = 0;
+ }
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("tmR3Save:\n"));
+#ifdef VBOX_STRICT
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ Assert(!pVCpu->tm.s.fTSCTicking);
+ }
+ Assert(!pVM->tm.s.cVirtualTicking);
+ Assert(!pVM->tm.s.fVirtualSyncTicking);
+ Assert(!pVM->tm.s.cTSCsTicking);
+#endif
+
+ /*
+ * Save the virtual clocks.
+ */
+ /* the virtual clock. */
+ SSMR3PutU64(pSSM, TMCLOCK_FREQ_VIRTUAL);
+ SSMR3PutU64(pSSM, pVM->tm.s.u64Virtual);
+
+ /* the virtual timer synchronous clock. */
+ SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSync);
+ SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSync);
+ SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSyncGivenUp);
+ SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncCatchUpPrev);
+ SSMR3PutBool(pSSM, pVM->tm.s.fVirtualSyncCatchUp);
+
+ /* real time clock */
+ SSMR3PutU64(pSSM, TMCLOCK_FREQ_REAL);
+
+ /* the cpu tick clock. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ SSMR3PutU64(pSSM, TMCpuTickGet(pVCpu));
+ }
+ return SSMR3PutU64(pSSM, pVM->tm.s.cTSCTicksPerSecond);
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ LogFlow(("tmR3Load:\n"));
+
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+#ifdef VBOX_STRICT
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ Assert(!pVCpu->tm.s.fTSCTicking);
+ }
+ Assert(!pVM->tm.s.cVirtualTicking);
+ Assert(!pVM->tm.s.fVirtualSyncTicking);
+ Assert(!pVM->tm.s.cTSCsTicking);
+#endif
+
+ /*
+ * Validate version.
+ */
+ if (uVersion != TM_SAVED_STATE_VERSION)
+ {
+ AssertMsgFailed(("tmR3Load: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ /*
+ * Load the virtual clock.
+ */
+ pVM->tm.s.cVirtualTicking = 0;
+ /* the virtual clock. */
+ uint64_t u64Hz;
+ int rc = SSMR3GetU64(pSSM, &u64Hz);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
+ {
+ AssertMsgFailed(("The virtual clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
+ u64Hz, TMCLOCK_FREQ_VIRTUAL));
+ return VERR_SSM_VIRTUAL_CLOCK_HZ;
+ }
+ SSMR3GetU64(pSSM, &pVM->tm.s.u64Virtual);
+ pVM->tm.s.u64VirtualOffset = 0;
+
+ /* the virtual timer synchronous clock. */
+ pVM->tm.s.fVirtualSyncTicking = false;
+ uint64_t u64;
+ SSMR3GetU64(pSSM, &u64);
+ pVM->tm.s.u64VirtualSync = u64;
+ SSMR3GetU64(pSSM, &u64);
+ pVM->tm.s.offVirtualSync = u64;
+ SSMR3GetU64(pSSM, &u64);
+ pVM->tm.s.offVirtualSyncGivenUp = u64;
+ SSMR3GetU64(pSSM, &u64);
+ pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
+ bool f;
+ SSMR3GetBool(pSSM, &f);
+ pVM->tm.s.fVirtualSyncCatchUp = f;
+
+ /* the real clock */
+ rc = SSMR3GetU64(pSSM, &u64Hz);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u64Hz != TMCLOCK_FREQ_REAL)
+ {
+ AssertMsgFailed(("The real clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
+ u64Hz, TMCLOCK_FREQ_REAL));
+ return VERR_SSM_VIRTUAL_CLOCK_HZ; /* misleading... */
+ }
+
+ /* the cpu tick clock. */
+ pVM->tm.s.cTSCsTicking = 0;
+ pVM->tm.s.offTSCPause = 0;
+ pVM->tm.s.u64LastPausedTSC = 0;
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+
+ pVCpu->tm.s.fTSCTicking = false;
+ SSMR3GetU64(pSSM, &pVCpu->tm.s.u64TSC);
+ if (pVM->tm.s.u64LastPausedTSC < pVCpu->tm.s.u64TSC)
+ pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
+
+ if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
+ pVCpu->tm.s.offTSCRawSrc = 0; /** @todo TSC restore stuff and HWACC. */
+ }
+
+ rc = SSMR3GetU64(pSSM, &u64Hz);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
+ pVM->tm.s.cTSCTicksPerSecond = u64Hz;
+
+ LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) enmTSCMode=%d (%s) (state load)\n",
+ pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
+
+ /* Disabled as this isn't tested; it's also unclear whether this should apply only when GIM is enabled, etc. */
+#if 0
+ /*
+ * If the current host TSC frequency is incompatible with what is in the
+ * saved state of the VM, fall back to emulating TSC and disallow TSC mode
+ * switches during VM runtime (e.g. by GIM).
+ */
+ if ( GIMIsEnabled(pVM)
+ || pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
+ {
+ uint64_t uGipCpuHz;
+ bool fRelax = RTSystemIsInsideVM();
+ bool fCompat = SUPIsTscFreqCompatible(pVM->tm.s.cTSCTicksPerSecond, &uGipCpuHz, fRelax);
+ if (!fCompat)
+ {
+ pVM->tm.s.enmTSCMode = TMTSCMODE_VIRT_TSC_EMULATED;
+ pVM->tm.s.fTSCModeSwitchAllowed = false;
+ if (g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_ASYNC_TSC)
+ {
+ LogRel(("TM: TSC frequency incompatible! uGipCpuHz=%#RX64 (%'RU64) enmTSCMode=%d (%s) fTSCModeSwitchAllowed=%RTbool (state load)\n",
+ uGipCpuHz, uGipCpuHz, pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM), pVM->tm.s.fTSCModeSwitchAllowed));
+ }
+ else
+ {
+ LogRel(("TM: GIP is async, enmTSCMode=%d (%s) fTSCModeSwitchAllowed=%RTbool (state load)\n",
+ uGipCpuHz, uGipCpuHz, pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM), pVM->tm.s.fTSCModeSwitchAllowed));
+ }
+ }
+ }
+#endif
+
+ /*
+ * Make sure timers get rescheduled immediately.
+ */
+ PVMCPU pVCpuDst = pVM->apCpusR3[pVM->tm.s.idTimerCpu];
+ VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+
+ return VINF_SUCCESS;
+}
+
+#ifdef VBOX_WITH_STATISTICS
+
+/**
+ * Register statistics for a timer.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue the timer belongs to.
+ * @param pTimer The timer to register statistics for.
+ */
+static void tmR3TimerRegisterStats(PVM pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
+{
+ STAMR3RegisterF(pVM, &pTimer->StatTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ pQueue->szName, "/TM/Timers/%s", pTimer->szName);
+ STAMR3RegisterF(pVM, &pTimer->StatCritSectEnter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "", "/TM/Timers/%s/CritSectEnter", pTimer->szName);
+ STAMR3RegisterF(pVM, &pTimer->StatGet, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "", "/TM/Timers/%s/Get", pTimer->szName);
+ STAMR3RegisterF(pVM, &pTimer->StatSetAbsolute, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "", "/TM/Timers/%s/SetAbsolute", pTimer->szName);
+ STAMR3RegisterF(pVM, &pTimer->StatSetRelative, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "", "/TM/Timers/%s/SetRelative", pTimer->szName);
+ STAMR3RegisterF(pVM, &pTimer->StatStop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
+ "", "/TM/Timers/%s/Stop", pTimer->szName);
+}
+
+
+/**
+ * Deregister the statistics for a timer.
+ */
+static void tmR3TimerDeregisterStats(PVM pVM, PTMTIMER pTimer)
+{
+ char szPrefix[128];
+ size_t cchPrefix = RTStrPrintf(szPrefix, sizeof(szPrefix), "/TM/Timers/%s/", pTimer->szName);
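+ /* E.g. a timer named "Foo" yields the prefix "/TM/Timers/Foo/": the
+ prefix call below removes the per-timer sub-items, and after the
+ trailing slash is cut off, the "/TM/Timers/Foo" node itself. */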
+ STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
+ szPrefix[cchPrefix - 1] = '\0';
+ STAMR3Deregister(pVM->pUVM, szPrefix);
+}
+
+
+/**
+ * Register statistics for all allocated timers in a queue.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue to register statistics for.
+ * @param cTimers Number of timers to consider (in growth scenario).
+ */
+static void tmR3TimerQueueRegisterStats(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cTimers)
+{
+ uint32_t idxTimer = RT_MIN(cTimers, pQueue->cTimersAlloc);
+ while (idxTimer-- > 0)
+ {
+ PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
+ TMTIMERSTATE enmState = pTimer->enmState;
+ if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
+ tmR3TimerRegisterStats(pVM, pQueue, pTimer);
+ }
+}
+
+#endif /* VBOX_WITH_STATISTICS */
+
+
+/**
+ * Grows a timer queue.
+ *
+ * @returns VBox status code (errors are LogRel'ed already).
+ * @param pVM The cross context VM structure.
+ * @param pQueue The timer queue to grow.
+ * @param cNewTimers The minimum number of timers after growing.
+ * @note Caller owns the queue's allocation lock.
+ */
+static int tmR3TimerQueueGrow(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cNewTimers)
+{
+ /*
+ * Validate input and state.
+ */
+ VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); /** @todo must do better than this! */
+ AssertReturn(!pQueue->fCannotGrow, VERR_TM_TIMER_QUEUE_CANNOT_GROW);
+
+ uint32_t const cOldEntries = pQueue->cTimersAlloc;
+ AssertReturn(cNewTimers > cOldEntries, VERR_TM_IPE_1);
+ AssertReturn(cNewTimers < _32K, VERR_TM_IPE_1);
+
+ /*
+ * Do the growing.
+ */
+ int rc;
+ if (!SUPR3IsDriverless())
+ {
+ rc = VMMR3CallR0Emt(pVM, VMMGetCpu(pVM), VMMR0_DO_TM_GROW_TIMER_QUEUE,
+ RT_MAKE_U64(cNewTimers, (uint64_t)(pQueue - &pVM->tm.s.aTimerQueues[0])), NULL);
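+ /* The u64 argument packs the request: the new timer count in the low
+ 32 bits and the queue index in the high ones; e.g. RT_MAKE_U64(192, 2)
+ would request 192 timers for the queue at index 2 (made-up numbers). */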
+ AssertLogRelRCReturn(rc, rc);
+ AssertReturn(pQueue->cTimersAlloc >= cNewTimers, VERR_TM_IPE_3);
+ }
+ else
+ {
+ AssertReturn(cNewTimers <= _32K && cOldEntries <= _32K, VERR_TM_TOO_MANY_TIMERS);
+ ASMCompilerBarrier();
+
+ /*
+ * Round up the request to the nearest page and do the allocation.
+ */
+ size_t cbNew = sizeof(TMTIMER) * cNewTimers;
+ cbNew = RT_ALIGN_Z(cbNew, HOST_PAGE_SIZE);
+ cNewTimers = (uint32_t)(cbNew / sizeof(TMTIMER));
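+ /* E.g. assuming a hypothetical 96 byte TMTIMER and 4 KiB host pages, a
+ request for 100 timers (9600 bytes) rounds up to 12288 bytes, so the
+ queue actually grows to 128 timers. */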
+
+ PTMTIMER paTimers = (PTMTIMER)RTMemPageAllocZ(cbNew);
+ if (paTimers)
+ {
+ /*
+ * Copy over the old timers, init the new free ones, then switch over
+ * and free the old ones.
+ */
+ PTMTIMER const paOldTimers = pQueue->paTimers;
+ tmHCTimerQueueGrowInit(paTimers, paOldTimers, cNewTimers, cOldEntries);
+
+ pQueue->paTimers = paTimers;
+ pQueue->cTimersAlloc = cNewTimers;
+ pQueue->cTimersFree += cNewTimers - (cOldEntries ? cOldEntries : 1);
+
+ RTMemPageFree(paOldTimers, RT_ALIGN_Z(sizeof(TMTIMER) * cOldEntries, HOST_PAGE_SIZE));
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NO_PAGE_MEMORY;
+ }
+ return rc;
+}
+
+
+/**
+ * Internal TMR3TimerCreate worker.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmClock The timer clock.
+ * @param fFlags TMTIMER_FLAGS_XXX.
+ * @param pszName The timer name.
+ * @param ppTimer Where to store the timer pointer on success.
+ */
+static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, uint32_t fFlags, const char *pszName, PPTMTIMERR3 ppTimer)
+{
+ PTMTIMER pTimer;
+
+ /*
+ * Validate input.
+ */
+ VM_ASSERT_EMT(pVM);
+
+ AssertReturn((fFlags & (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0)) != (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0),
+ VERR_INVALID_FLAGS);
+
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+ size_t const cchName = strlen(pszName);
+ AssertMsgReturn(cchName < sizeof(pTimer->szName), ("timer name too long: %s\n", pszName), VERR_INVALID_NAME);
+ AssertMsgReturn(cchName > 2, ("Too short timer name: %s\n", pszName), VERR_INVALID_NAME);
+
+ AssertMsgReturn(enmClock >= TMCLOCK_REAL && enmClock < TMCLOCK_MAX,
+ ("%d\n", enmClock), VERR_INVALID_PARAMETER);
+ AssertReturn(enmClock != TMCLOCK_TSC, VERR_NOT_SUPPORTED);
+ if (enmClock == TMCLOCK_VIRTUAL_SYNC)
+ VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER);
+
+ /*
+ * Exclusively lock the queue.
+ *
+ * Note! This means that it is not possible to allocate timers from a timer callback.
+ */
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[enmClock];
+ int rc = PDMCritSectRwEnterExcl(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Allocate the timer.
+ */
+ if (!pQueue->cTimersFree)
+ {
+ rc = tmR3TimerQueueGrow(pVM, pQueue, pQueue->cTimersAlloc + 64);
+ AssertRCReturnStmt(rc, PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock), rc);
+ }
+
+ /* Scan the array for free timers. */
+ pTimer = NULL;
+ PTMTIMER const paTimers = pQueue->paTimers;
+ uint32_t const cTimersAlloc = pQueue->cTimersAlloc;
+ uint32_t idxTimer = pQueue->idxFreeHint;
+ for (uint32_t iScan = 0; iScan < 2; iScan++)
+ {
+ while (idxTimer < cTimersAlloc)
+ {
+ if (paTimers[idxTimer].enmState == TMTIMERSTATE_FREE)
+ {
+ pTimer = &paTimers[idxTimer];
+ pQueue->idxFreeHint = idxTimer + 1;
+ break;
+ }
+ idxTimer++;
+ }
+ if (pTimer != NULL)
+ break;
+ idxTimer = 1;
+ }
+ AssertLogRelMsgReturnStmt(pTimer != NULL, ("cTimersFree=%u cTimersAlloc=%u enmClock=%s\n", pQueue->cTimersFree,
+ pQueue->cTimersAlloc, pQueue->szName),
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock), VERR_INTERNAL_ERROR_3);
+ pQueue->cTimersFree -= 1;
+
+ /*
+ * Initialize it.
+ */
+ Assert(idxTimer != 0);
+ Assert(idxTimer <= TMTIMERHANDLE_TIMER_IDX_MASK);
+ pTimer->hSelf = idxTimer
+ | ((uintptr_t)(pQueue - &pVM->tm.s.aTimerQueues[0]) << TMTIMERHANDLE_QUEUE_IDX_SHIFT);
+ Assert(!(pTimer->hSelf & TMTIMERHANDLE_RANDOM_MASK));
+ pTimer->hSelf |= (RTRandU64() & TMTIMERHANDLE_RANDOM_MASK);
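+ /* The handle thus packs the timer index into the low bits and the queue
+ index above it, with random bits on top; presumably so that stale or
+ recycled handles fail validation on lookup rather than aliasing. */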
+
+ pTimer->u64Expire = 0;
+ pTimer->enmState = TMTIMERSTATE_STOPPED;
+ pTimer->idxScheduleNext = UINT32_MAX;
+ pTimer->idxNext = UINT32_MAX;
+ pTimer->idxPrev = UINT32_MAX;
+ pTimer->fFlags = fFlags;
+ pTimer->uHzHint = 0;
+ pTimer->pvUser = NULL;
+ pTimer->pCritSect = NULL;
+ memcpy(pTimer->szName, pszName, cchName);
+ pTimer->szName[cchName] = '\0';
+
+#ifdef VBOX_STRICT
+ tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate");
+#endif
+
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+
+#ifdef VBOX_WITH_STATISTICS
+ /*
+ * Only register statistics if we're passed the no-realloc point.
+ */
+ if (pQueue->fCannotGrow)
+ tmR3TimerRegisterStats(pVM, pQueue, pTimer);
+#endif
+
+ *ppTimer = pTimer;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates a device timer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns Device instance.
+ * @param enmClock The clock to use on this timer.
+ * @param pfnCallback Callback function.
+ * @param pvUser The user argument to the callback.
+ * @param fFlags Timer creation flags, see grp_tm_timer_flags.
+ * @param pszName Timer name (will be copied). Max 31 chars.
+ * @param phTimer Where to store the timer handle on success.
+ */
+VMM_INT_DECL(int) TMR3TimerCreateDevice(PVM pVM, PPDMDEVINS pDevIns, TMCLOCK enmClock,
+ PFNTMTIMERDEV pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszName, PTMTIMERHANDLE phTimer)
+{
+ AssertReturn(!(fFlags & ~(TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0)),
+ VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and init stuff.
+ */
+ PTMTIMER pTimer;
+ int rc = tmr3TimerCreate(pVM, enmClock, fFlags, pszName, &pTimer);
+ if (RT_SUCCESS(rc))
+ {
+ pTimer->enmType = TMTIMERTYPE_DEV;
+ pTimer->u.Dev.pfnTimer = pfnCallback;
+ pTimer->u.Dev.pDevIns = pDevIns;
+ pTimer->pvUser = pvUser;
+ if (!(fFlags & TMTIMER_FLAGS_NO_CRIT_SECT))
+ pTimer->pCritSect = PDMR3DevGetCritSect(pVM, pDevIns);
+ *phTimer = pTimer->hSelf;
+ Log(("TM: Created device timer %p clock %d callback %p '%s'\n", phTimer, enmClock, pfnCallback, pszName));
+ }
+
+ return rc;
+}
+
+
+
+
+/**
+ * Creates a USB device timer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device instance.
+ * @param enmClock The clock to use on this timer.
+ * @param pfnCallback Callback function.
+ * @param pvUser The user argument to the callback.
+ * @param fFlags Timer creation flags, see grp_tm_timer_flags.
+ * @param pszName Timer name (will be copied). Max 31 chars.
+ * @param phTimer Where to store the timer handle on success.
+ */
+VMM_INT_DECL(int) TMR3TimerCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, TMCLOCK enmClock,
+ PFNTMTIMERUSB pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszName, PTMTIMERHANDLE phTimer)
+{
+ AssertReturn(!(fFlags & ~(TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_NO_RING0)), VERR_INVALID_PARAMETER);
+
+ /*
+ * Allocate and init stuff.
+ */
+ PTMTIMER pTimer;
+ int rc = tmr3TimerCreate(pVM, enmClock, fFlags, pszName, &pTimer);
+ if (RT_SUCCESS(rc))
+ {
+ pTimer->enmType = TMTIMERTYPE_USB;
+ pTimer->u.Usb.pfnTimer = pfnCallback;
+ pTimer->u.Usb.pUsbIns = pUsbIns;
+ pTimer->pvUser = pvUser;
+ //if (!(fFlags & TMTIMER_FLAGS_NO_CRIT_SECT))
+ //{
+ // if (pUsbIns->pCritSectR3)
+ // pTimer->pCritSect = pUsbIns->pCritSectR3;
+ // else
+ // pTimer->pCritSect = IOMR3GetCritSect(pVM);
+ //}
+ *phTimer = pTimer->hSelf;
+ Log(("TM: Created USB device timer %p clock %d callback %p '%s'\n", *phTimer, enmClock, pfnCallback, pszName));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Creates a driver timer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns Driver instance.
+ * @param enmClock The clock to use on this timer.
+ * @param pfnCallback Callback function.
+ * @param pvUser The user argument to the callback.
+ * @param fFlags Timer creation flags, see grp_tm_timer_flags.
+ * @param pszName Timer name (will be copied). Max 31 chars.
+ * @param phTimer Where to store the timer handle on success.
+ */
+VMM_INT_DECL(int) TMR3TimerCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, TMCLOCK enmClock, PFNTMTIMERDRV pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszName, PTMTIMERHANDLE phTimer)
+{
+ AssertReturn(!(fFlags & ~(TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0)),
+ VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and init stuff.
+ */
+ PTMTIMER pTimer;
+ int rc = tmr3TimerCreate(pVM, enmClock, fFlags, pszName, &pTimer);
+ if (RT_SUCCESS(rc))
+ {
+ pTimer->enmType = TMTIMERTYPE_DRV;
+ pTimer->u.Drv.pfnTimer = pfnCallback;
+ pTimer->u.Drv.pDrvIns = pDrvIns;
+ pTimer->pvUser = pvUser;
+ *phTimer = pTimer->hSelf;
+ Log(("TM: Created device timer %p clock %d callback %p '%s'\n", *phTimer, enmClock, pfnCallback, pszName));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Creates an internal timer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmClock The clock to use on this timer.
+ * @param pfnCallback Callback function.
+ * @param pvUser User argument to be passed to the callback.
+ * @param fFlags Timer creation flags, see grp_tm_timer_flags.
+ * @param pszName Timer name (will be copied). Max 31 chars.
+ * @param phTimer Where to store the timer handle on success.
+ */
+VMMR3DECL(int) TMR3TimerCreate(PVM pVM, TMCLOCK enmClock, PFNTMTIMERINT pfnCallback, void *pvUser,
+ uint32_t fFlags, const char *pszName, PTMTIMERHANDLE phTimer)
+{
+ AssertReturn(fFlags & (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0), VERR_INVALID_FLAGS);
+ AssertReturn((fFlags & (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0)) != (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0),
+ VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and init stuff.
+ */
+ PTMTIMER pTimer;
+ int rc = tmr3TimerCreate(pVM, enmClock, fFlags, pszName, &pTimer);
+ if (RT_SUCCESS(rc))
+ {
+ pTimer->enmType = TMTIMERTYPE_INTERNAL;
+ pTimer->u.Internal.pfnTimer = pfnCallback;
+ pTimer->pvUser = pvUser;
+ *phTimer = pTimer->hSelf;
+ Log(("TM: Created internal timer %p clock %d callback %p '%s'\n", pTimer, enmClock, pfnCallback, pszName));
+ }
+
+ return rc;
+}
+
+
+/**
+ * Destroy a timer
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue the timer is on.
+ * @param pTimer The timer to destroy.
+ */
+static int tmR3TimerDestroy(PVMCC pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
+{
+ bool fActive = false;
+ bool fPending = false;
+
+ AssertMsg( !pTimer->pCritSect
+ || VMR3GetState(pVM) != VMSTATE_RUNNING
+ || PDMCritSectIsOwner(pVM, pTimer->pCritSect), ("%s\n", pTimer->szName));
+
+ /*
+ * The rest of the game happens behind the lock, just
+ * like create does. All the work is done here.
+ */
+ PDMCritSectRwEnterExcl(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+
+ for (int cRetries = 1000;; cRetries--)
+ {
+ /*
+ * Change to the DESTROY state.
+ */
+ TMTIMERSTATE const enmState = pTimer->enmState;
+ Log2(("TMTimerDestroy: %p:{.enmState=%s, .szName='%s'} cRetries=%d\n",
+ pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
+ switch (enmState)
+ {
+ case TMTIMERSTATE_STOPPED:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
+ break;
+
+ case TMTIMERSTATE_ACTIVE:
+ fActive = true;
+ break;
+
+ case TMTIMERSTATE_PENDING_STOP:
+ case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
+ case TMTIMERSTATE_PENDING_RESCHEDULE:
+ fActive = true;
+ fPending = true;
+ break;
+
+ case TMTIMERSTATE_PENDING_SCHEDULE:
+ fPending = true;
+ break;
+
+ /*
+ * This shouldn't happen as the caller should make sure there are no races.
+ */
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
+ case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
+ AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName));
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+
+ AssertMsgReturn(cRetries > 0, ("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName),
+ VERR_TM_UNSTABLE_STATE);
+ if (!RTThreadYield())
+ RTThreadSleep(1);
+
+ PDMCritSectRwEnterExcl(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+ continue;
+
+ /*
+ * Invalid states.
+ */
+ case TMTIMERSTATE_FREE:
+ case TMTIMERSTATE_DESTROY:
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+ AssertLogRelMsgFailedReturn(("pTimer=%p %s\n", pTimer, tmTimerState(enmState)), VERR_TM_INVALID_STATE);
+
+ default:
+ AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+ return VERR_TM_UNKNOWN_STATE;
+ }
+
+ /*
+ * Try switch to the destroy state.
+ * This should always succeed as the caller should make sure there are no races.
+ */
+ bool fRc;
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_DESTROY, enmState, fRc);
+ if (fRc)
+ break;
+ AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName));
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+
+ AssertMsgReturn(cRetries > 0, ("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName),
+ VERR_TM_UNSTABLE_STATE);
+
+ PDMCritSectRwEnterExcl(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+ }
+
+ /*
+ * Unlink from the active list.
+ */
+ if (fActive)
+ {
+ const PTMTIMER pPrev = tmTimerGetPrev(pQueue, pTimer);
+ const PTMTIMER pNext = tmTimerGetNext(pQueue, pTimer);
+ if (pPrev)
+ tmTimerSetNext(pQueue, pPrev, pNext);
+ else
+ {
+ tmTimerQueueSetHead(pQueue, pQueue, pNext);
+ pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
+ }
+ if (pNext)
+ tmTimerSetPrev(pQueue, pNext, pPrev);
+ pTimer->idxNext = UINT32_MAX;
+ pTimer->idxPrev = UINT32_MAX;
+ }
+
+ /*
+ * Unlink from the schedule list by running it.
+ */
+ if (fPending)
+ {
+ Log3(("TMR3TimerDestroy: tmTimerQueueSchedule\n"));
+ STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
+ Assert(pQueue->idxSchedule < pQueue->cTimersAlloc);
+ tmTimerQueueSchedule(pVM, pQueue, pQueue);
+ STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
+ }
+
+#ifdef VBOX_WITH_STATISTICS
+ /*
+ * Deregister statistics.
+ */
+ tmR3TimerDeregisterStats(pVM, pTimer);
+#endif
+
+ /*
+ * Change it to free state and update the queue accordingly.
+ */
+ Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX); Assert(pTimer->idxScheduleNext == UINT32_MAX);
+
+ TM_SET_STATE(pTimer, TMTIMERSTATE_FREE);
+
+ pQueue->cTimersFree += 1;
+ uint32_t idxTimer = (uint32_t)(pTimer - pQueue->paTimers);
+ if (idxTimer < pQueue->idxFreeHint)
+ pQueue->idxFreeHint = idxTimer;
+
+#ifdef VBOX_STRICT
+ tmTimerQueuesSanityChecks(pVM, "TMR3TimerDestroy");
+#endif
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveExcl(pVM, &pQueue->AllocLock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a timer.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param hTimer Timer handle as returned by one of the create functions.
+ */
+VMMR3DECL(int) TMR3TimerDestroy(PVM pVM, TMTIMERHANDLE hTimer)
+{
+ /* We ignore NILs here. */
+ if (hTimer == NIL_TMTIMERHANDLE)
+ return VINF_SUCCESS;
+ TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
+ return tmR3TimerDestroy(pVM, pQueue, pTimer);
+}
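+
+
+/*
+ * Illustrative sketch, not part of the actual sources: the typical
+ * create/arm/destroy lifecycle of an internal timer. The names
+ * exampleTmCallback/exampleTmUse are hypothetical, and the public
+ * TMR3TimerCreate parameter order is assumed from the create worker above.
+ */
+#if 0 /* example only */
+static DECLCALLBACK(void) exampleTmCallback(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ NOREF(pvUser);
+ int rc = TMTimerSetMillies(pVM, hTimer, 100); /* re-arming here is fine, cf. tmR3CpuLoadTimer below */
+ AssertLogRelRC(rc);
+}
+
+static int exampleTmUse(PVM pVM)
+{
+ /* Exactly one of TMTIMER_FLAGS_RING0 / TMTIMER_FLAGS_NO_RING0 is required. */
+ TMTIMERHANDLE hTimer = NIL_TMTIMERHANDLE;
+ int rc = TMR3TimerCreate(pVM, TMCLOCK_VIRTUAL, exampleTmCallback, NULL /*pvUser*/,
+ TMTIMER_FLAGS_NO_RING0, "Example", &hTimer);
+ if (RT_SUCCESS(rc))
+ {
+ rc = TMTimerSetMillies(pVM, hTimer, 100); /* arm: fire in ~100 ms */
+ int rc2 = TMR3TimerDestroy(pVM, hTimer); /* NIL handles are quietly accepted */
+ AssertRC(rc2);
+ }
+ return rc;
+}
+#endif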
+
+
+/**
+ * Destroy all timers owned by a device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDevIns The device whose timers should be destroyed.
+ */
+VMM_INT_DECL(int) TMR3TimerDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
+{
+ LogFlow(("TMR3TimerDestroyDevice: pDevIns=%p\n", pDevIns));
+ if (!pDevIns)
+ return VERR_INVALID_PARAMETER;
+
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ uint32_t idxTimer = pQueue->cTimersAlloc;
+ while (idxTimer-- > 0)
+ {
+ PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
+ if ( pTimer->enmType == TMTIMERTYPE_DEV
+ && pTimer->u.Dev.pDevIns == pDevIns
+ && pTimer->enmState < TMTIMERSTATE_DESTROY)
+ {
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+
+ int rc = tmR3TimerDestroy(pVM, pQueue, pTimer);
+ AssertRC(rc);
+
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ }
+ }
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+ }
+
+ LogFlow(("TMR3TimerDestroyDevice: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroy all timers owned by a USB device.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUsbIns The USB device whose timers should be destroyed.
+ */
+VMM_INT_DECL(int) TMR3TimerDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns)
+{
+ LogFlow(("TMR3TimerDestroyUsb: pUsbIns=%p\n", pUsbIns));
+ if (!pUsbIns)
+ return VERR_INVALID_PARAMETER;
+
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ uint32_t idxTimer = pQueue->cTimersAlloc;
+ while (idxTimer-- > 0)
+ {
+ PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
+ if ( pTimer->enmType == TMTIMERTYPE_USB
+ && pTimer->u.Usb.pUsbIns == pUsbIns
+ && pTimer->enmState < TMTIMERSTATE_DESTROY)
+ {
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+
+ int rc = tmR3TimerDestroy(pVM, pQueue, pTimer);
+ AssertRC(rc);
+
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ }
+ }
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+ }
+
+ LogFlow(("TMR3TimerDestroyUsb: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroy all timers owned by a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDrvIns The driver whose timers should be destroyed.
+ */
+VMM_INT_DECL(int) TMR3TimerDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
+{
+ LogFlow(("TMR3TimerDestroyDriver: pDrvIns=%p\n", pDrvIns));
+ if (!pDrvIns)
+ return VERR_INVALID_PARAMETER;
+
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ uint32_t idxTimer = pQueue->cTimersAlloc;
+ while (idxTimer-- > 0)
+ {
+ PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
+ if ( pTimer->enmType == TMTIMERTYPE_DRV
+ && pTimer->u.Drv.pDrvIns == pDrvIns
+ && pTimer->enmState < TMTIMERSTATE_DESTROY)
+ {
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+
+ int rc = tmR3TimerDestroy(pVM, pQueue, pTimer);
+ AssertRC(rc);
+
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ }
+ }
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+ }
+
+ LogFlow(("TMR3TimerDestroyDriver: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal function for getting the clock time.
+ *
+ * @returns clock time.
+ * @param pVM The cross context VM structure.
+ * @param enmClock The clock.
+ */
+DECLINLINE(uint64_t) tmClock(PVM pVM, TMCLOCK enmClock)
+{
+ switch (enmClock)
+ {
+ case TMCLOCK_VIRTUAL: return TMVirtualGet(pVM);
+ case TMCLOCK_VIRTUAL_SYNC: return TMVirtualSyncGet(pVM);
+ case TMCLOCK_REAL: return TMRealGet(pVM);
+ case TMCLOCK_TSC: return TMCpuTickGet(pVM->apCpusR3[0] /* just take VCPU 0 */);
+ default:
+ AssertMsgFailed(("enmClock=%d\n", enmClock));
+ return ~(uint64_t)0;
+ }
+}
+
+
+/**
+ * Checks if the given queue has one or more expired timers.
+ *
+ * @returns true / false.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmClock The clock, identifying the queue to check.
+ */
+DECLINLINE(bool) tmR3HasExpiredTimer(PVM pVM, TMCLOCK enmClock)
+{
+ const uint64_t u64Expire = pVM->tm.s.aTimerQueues[enmClock].u64Expire;
+ return u64Expire != INT64_MAX && u64Expire <= tmClock(pVM, enmClock);
+}
+
+
+/**
+ * Checks for expired timers in all the queues.
+ *
+ * @returns true / false.
+ * @param pVM The cross context VM structure.
+ */
+DECLINLINE(bool) tmR3AnyExpiredTimers(PVM pVM)
+{
+ /*
+ * Combine the time calculation for the first two since we're not on EMT;
+ * TMVirtualSyncGet only permits EMT.
+ */
+ uint64_t u64Now = TMVirtualGetNoCheck(pVM);
+ if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
+ return true;
+ u64Now = pVM->tm.s.fVirtualSyncTicking
+ ? u64Now - pVM->tm.s.offVirtualSync
+ : pVM->tm.s.u64VirtualSync;
+ if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)
+ return true;
+
+ /*
+ * The remaining timers.
+ */
+ if (tmR3HasExpiredTimer(pVM, TMCLOCK_REAL))
+ return true;
+ if (tmR3HasExpiredTimer(pVM, TMCLOCK_TSC))
+ return true;
+ return false;
+}
+
+
+/**
+ * Schedule timer callback.
+ *
+ * @param pTimer Timer handle.
+ * @param pvUser Pointer to the VM.
+ * @thread Timer thread.
+ *
+ * @remark We cannot do the scheduling and queues running from a timer handler
+ * since it's not executing in EMT, and even if it was it would be async
+ * and we wouldn't know the state of affairs.
+ * So, we'll just raise the timer FF and force any REM execution to exit.
+ */
+static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t /*iTick*/)
+{
+ PVM pVM = (PVM)pvUser;
+ PVMCPU pVCpuDst = pVM->apCpusR3[pVM->tm.s.idTimerCpu];
+ NOREF(pTimer);
+
+ AssertCompile(TMCLOCK_MAX == 4);
+ STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallback);
+
+#ifdef DEBUG_Sander /* very annoying, keep it private. */
+ if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
+ Log(("tmR3TimerCallback: timer event still pending!!\n"));
+#endif
+ if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
+ && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule != UINT32_MAX /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
+ || pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].idxSchedule != UINT32_MAX
+ || pVM->tm.s.aTimerQueues[TMCLOCK_REAL].idxSchedule != UINT32_MAX
+ || pVM->tm.s.aTimerQueues[TMCLOCK_TSC].idxSchedule != UINT32_MAX
+ || tmR3AnyExpiredTimers(pVM)
+ )
+ && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
+ && !pVM->tm.s.fRunningQueues
+ )
+ {
+ Log5(("TM(%u): FF: 0 -> 1\n", __LINE__));
+ VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+ VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
+ STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
+ }
+}
+
+
+/**
+ * Worker for tmR3TimerQueueDoOne that runs pending timers on the specified
+ * non-empty timer queue.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue to run.
+ * @param pTimer The head timer. The caller has already checked
+ * that this is not NULL.
+ */
+static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
+{
+ VM_ASSERT_EMT(pVM); /** @todo relax this */
+
+ /*
+ * Run timers.
+ *
+ * We check the clock once and run all timers which are ACTIVE
+ * and have an expire time less than or equal to the time we read.
+ *
+ * N.B. A generic unlink must be applied since other threads
+ * are allowed to mess with any active timer at any time.
+ *
+ * However, we only allow EMT to handle EXPIRED_PENDING
+ * timers, thus enabling the timer handler function to
+ * arm the timer again.
+ */
+/** @todo the above 'however' is outdated. */
+ const uint64_t u64Now = tmClock(pVM, pQueue->enmClock);
+ while (pTimer->u64Expire <= u64Now)
+ {
+ PTMTIMER const pNext = tmTimerGetNext(pQueue, pTimer);
+ PPDMCRITSECT pCritSect = pTimer->pCritSect;
+ if (pCritSect)
+ {
+ STAM_PROFILE_START(&pTimer->StatCritSectEnter, Locking);
+ PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+ STAM_PROFILE_STOP(&pTimer->StatCritSectEnter, Locking);
+ }
+ Log2(("tmR3TimerQueueRun: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .szName='%s'}\n",
+ pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->szName));
+ bool fRc;
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
+ if (fRc)
+ {
+ Assert(pTimer->idxScheduleNext == UINT32_MAX); /* this can trigger falsely */
+
+ /* unlink */
+ const PTMTIMER pPrev = tmTimerGetPrev(pQueue, pTimer);
+ if (pPrev)
+ tmTimerSetNext(pQueue, pPrev, pNext);
+ else
+ {
+ tmTimerQueueSetHead(pQueue, pQueue, pNext);
+ pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
+ }
+ if (pNext)
+ tmTimerSetPrev(pQueue, pNext, pPrev);
+ pTimer->idxNext = UINT32_MAX;
+ pTimer->idxPrev = UINT32_MAX;
+
+ /* fire */
+ TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
+ STAM_PROFILE_START(&pTimer->StatTimer, PrfTimer);
+ switch (pTimer->enmType)
+ {
+ case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_USB: pTimer->u.Usb.pfnTimer(pTimer->u.Usb.pUsbIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer->hSelf, pTimer->pvUser); break;
+ default:
+ AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->szName));
+ break;
+ }
+ STAM_PROFILE_STOP(&pTimer->StatTimer, PrfTimer);
+
+ /* change the state if it wasn't changed already in the handler. */
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
+ Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
+ }
+ if (pCritSect)
+ PDMCritSectLeave(pVM, pCritSect);
+
+ /* Advance? */
+ pTimer = pNext;
+ if (!pTimer)
+ break;
+ } /* run loop */
+}
+
+
+/**
+ * Service one regular timer queue.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pQueue The queue.
+ */
+static void tmR3TimerQueueDoOne(PVM pVM, PTMTIMERQUEUE pQueue)
+{
+ Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
+
+ /*
+ * Only one thread should be "doing" the queue.
+ */
+ if (ASMAtomicCmpXchgBool(&pQueue->fBeingProcessed, true, false))
+ {
+ STAM_PROFILE_START(&pQueue->StatDo, s);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+
+ if (pQueue->idxSchedule != UINT32_MAX)
+ tmTimerQueueSchedule(pVM, pQueue, pQueue);
+
+ PTMTIMER pHead = tmTimerQueueGetHead(pQueue, pQueue);
+ if (pHead)
+ tmR3TimerQueueRun(pVM, pQueue, pHead);
+
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ STAM_PROFILE_STOP(&pQueue->StatDo, s);
+ ASMAtomicWriteBool(&pQueue->fBeingProcessed, false);
+ }
+}
+
+
+/**
+ * Schedules and runs any pending timers in the timer queue for the
+ * synchronous virtual clock.
+ *
+ * This scheduling is a bit different from the other queues as it needs
+ * to implement the special requirements of the timer synchronous virtual
+ * clock, thus this 2nd queue run function.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @remarks The caller must own the Virtual Sync lock. Owning the TM lock is no
+ * longer important.
+ */
+static void tmR3TimerQueueRunVirtualSync(PVM pVM)
+{
+ PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
+ VM_ASSERT_EMT(pVM);
+ Assert(PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock));
+
+ /*
+ * Any timers?
+ */
+ PTMTIMER pNext = tmTimerQueueGetHead(pQueue, pQueue);
+ if (RT_UNLIKELY(!pNext))
+ {
+ Assert(pVM->tm.s.fVirtualSyncTicking || !pVM->tm.s.cVirtualTicking);
+ return;
+ }
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRun);
+
+ /*
+ * Calculate the time frame for which we will dispatch timers.
+ *
+ * We use a time frame ranging from the current sync time (which is most likely the
+ * same as the head timer) up to some configurable period (100000 ns) towards the
+ * current virtual time. This period might also need to be restricted by the catch-up
+ * rate so that frequent calls to this function won't accelerate the time too much;
+ * however, this will be implemented at a later point if necessary.
+ *
+ * Without this frame we would 1) have to run timers much more frequently
+ * and 2) lag behind at a steady rate.
+ */
+ const uint64_t u64VirtualNow = TMVirtualGetNoCheck(pVM);
+ uint64_t const offSyncGivenUp = pVM->tm.s.offVirtualSyncGivenUp;
+ uint64_t u64Now;
+ if (!pVM->tm.s.fVirtualSyncTicking)
+ {
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
+ u64Now = pVM->tm.s.u64VirtualSync;
+ Assert(u64Now <= pNext->u64Expire);
+ }
+ else
+ {
+ /* Calc 'now'. */
+ bool fStopCatchup = false;
+ bool fUpdateStuff = false;
+ uint64_t off = pVM->tm.s.offVirtualSync;
+ if (pVM->tm.s.fVirtualSyncCatchUp)
+ {
+ uint64_t u64Delta = u64VirtualNow - pVM->tm.s.u64VirtualSyncCatchUpPrev;
+ if (RT_LIKELY(!(u64Delta >> 32)))
+ {
+ uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
+ if (off > u64Sub + offSyncGivenUp)
+ {
+ off -= u64Sub;
+ Log4(("TM: %'RU64/-%'8RU64: sub %'RU64 [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
+ }
+ else
+ {
+ STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+ fStopCatchup = true;
+ off = offSyncGivenUp;
+ }
+ fUpdateStuff = true;
+ }
+ }
+ u64Now = u64VirtualNow - off;
+
+ /* Adjust against last returned time. */
+ uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
+ if (u64Last > u64Now)
+ {
+ u64Now = u64Last + 1;
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
+ }
+
+ /* Check if stopped by expired timer. */
+ uint64_t const u64Expire = pNext->u64Expire;
+ if (u64Now >= u64Expire)
+ {
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
+ u64Now = u64Expire;
+ ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr [tmR3TimerQueueRunVirtualSync]\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
+ }
+ else
+ {
+ ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
+ if (fUpdateStuff)
+ {
+ ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
+ ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
+ ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
+ if (fStopCatchup)
+ {
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+ Log4(("TM: %'RU64/0: caught up [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow));
+ }
+ }
+ }
+ }
+
+ /* calc end of frame. */
+ uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
+ if (u64Max > u64VirtualNow - offSyncGivenUp)
+ u64Max = u64VirtualNow - offSyncGivenUp;
+
+ /* assert sanity */
+ Assert(u64Now <= u64VirtualNow - offSyncGivenUp);
+ Assert(u64Max <= u64VirtualNow - offSyncGivenUp);
+ Assert(u64Now <= u64Max);
+ Assert(offSyncGivenUp == pVM->tm.s.offVirtualSyncGivenUp);
+
+ /*
+ * Process the expired timers moving the clock along as we progress.
+ */
+#ifdef VBOX_STRICT
+ uint64_t u64Prev = u64Now; NOREF(u64Prev);
+#endif
+ while (pNext && pNext->u64Expire <= u64Max)
+ {
+ /* Advance */
+ PTMTIMER pTimer = pNext;
+ pNext = tmTimerGetNext(pQueue, pTimer);
+
+ /* Take the associated lock. */
+ PPDMCRITSECT pCritSect = pTimer->pCritSect;
+ if (pCritSect)
+ {
+ STAM_PROFILE_START(&pTimer->StatCritSectEnter, Locking);
+ PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+ STAM_PROFILE_STOP(&pTimer->StatCritSectEnter, Locking);
+ }
+
+ Log2(("tmR3TimerQueueRunVirtualSync: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .szName='%s'}\n",
+ pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->szName));
+
+ /* Advance the clock - don't permit timers to be out of order or armed
+ in the 'past'. */
+#ifdef VBOX_STRICT
+ AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->szName));
+ u64Prev = pTimer->u64Expire;
+#endif
+ ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+ /* Unlink it, change the state and do the callout. */
+ tmTimerQueueUnlinkActive(pVM, pQueue, pQueue, pTimer);
+ TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
+ STAM_PROFILE_START(&pTimer->StatTimer, PrfTimer);
+ switch (pTimer->enmType)
+ {
+ case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_USB: pTimer->u.Usb.pfnTimer(pTimer->u.Usb.pUsbIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer->hSelf, pTimer->pvUser); break;
+ case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer->hSelf, pTimer->pvUser); break;
+ default:
+ AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->szName));
+ break;
+ }
+ STAM_PROFILE_STOP(&pTimer->StatTimer, PrfTimer);
+
+ /* Change the state if it wasn't changed already in the handler.
+ Reset the Hz hint too since this is the same as TMTimerStop. */
+ bool fRc;
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
+ if (fRc && pTimer->uHzHint)
+ {
+ if (pTimer->uHzHint >= pQueue->uMaxHzHint)
+ ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
+ pTimer->uHzHint = 0;
+ }
+ Log2(("tmR3TimerQueueRunVirtualSync: new state %s\n", tmTimerState(pTimer->enmState)));
+
+ /* Leave the associated lock. */
+ if (pCritSect)
+ PDMCritSectLeave(pVM, pCritSect);
+ } /* run loop */
+
+
+ /*
+ * Restart the clock if it was stopped to serve any timers,
+ * and start/adjust catch-up if necessary.
+ */
+ if ( !pVM->tm.s.fVirtualSyncTicking
+ && pVM->tm.s.cVirtualTicking)
+ {
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart);
+
+ /* calc the slack we've handed out. */
+ const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
+ Assert(u64VirtualNow2 >= u64VirtualNow);
+ AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
+ const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
+ STAM_STATS({
+ if (offSlack)
+ {
+ PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack;
+ p->cPeriods++;
+ p->cTicks += offSlack;
+ if (p->cTicksMax < offSlack) p->cTicksMax = offSlack;
+ if (p->cTicksMin > offSlack) p->cTicksMin = offSlack;
+ }
+ });
+
+ /* Let the time run a little bit while we were busy running timers(?). */
+ uint64_t u64Elapsed;
+#define MAX_ELAPSED 30000U /* ns */
+ if (offSlack > MAX_ELAPSED)
+ u64Elapsed = 0;
+ else
+ {
+ u64Elapsed = u64VirtualNow2 - u64VirtualNow;
+ if (u64Elapsed > MAX_ELAPSED)
+ u64Elapsed = MAX_ELAPSED;
+ u64Elapsed = u64Elapsed > offSlack ? u64Elapsed - offSlack : 0;
+ }
+#undef MAX_ELAPSED
+
+ /* Calc the current offset. */
+ uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed;
+ Assert(!(offNew & RT_BIT_64(63)));
+ uint64_t offLag = offNew - pVM->tm.s.offVirtualSyncGivenUp;
+ Assert(!(offLag & RT_BIT_64(63)));
+
+ /*
+ * Deal with starting, adjusting and stopping catchup.
+ */
+ if (pVM->tm.s.fVirtualSyncCatchUp)
+ {
+ if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
+ {
+ /* stop */
+ STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+ Log4(("TM: %'RU64/-%'8RU64: caught up [pt]\n", u64VirtualNow2 - offNew, offLag));
+ }
+ else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
+ {
+ /* adjust */
+ unsigned i = 0;
+ while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
+ && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
+ i++;
+ if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage)
+ {
+ STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
+ ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+ Log4(("TM: %'RU64/%'8RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ }
+ pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
+ }
+ else
+ {
+ /* give up */
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
+ STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+ ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+ Log4(("TM: %'RU64/%'8RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ LogRel(("TM: Giving up catch-up attempt at a %'RU64 ns lag; new total: %'RU64 ns\n", offLag, offNew));
+ }
+ }
+ else if (offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
+ {
+ if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
+ {
+ /* start */
+ STAM_PROFILE_ADV_START(&pVM->tm.s.StatVirtualSyncCatchup, c);
+ unsigned i = 0;
+ while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
+ && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
+ i++;
+ STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
+ ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
+ Log4(("TM: %'RU64/%'8RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ }
+ else
+ {
+ /* don't bother */
+ STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
+ ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+ Log4(("TM: %'RU64/%'8RU64: give up\n", u64VirtualNow2 - offNew, offLag));
+ LogRel(("TM: Not bothering to attempt catching up a %'RU64 ns lag; new total: %'RU64\n", offLag, offNew));
+ }
+ }
+
+ /*
+ * Update the offset and restart the clock.
+ */
+ Assert(!(offNew & RT_BIT_64(63)));
+ ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, offNew);
+ ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
+ }
+}
+
+
+/**
+ * Deals with a stopped Virtual Sync clock.
+ *
+ * This is called by the forced action flag handling code in EM when it
+ * encounters the VM_FF_TM_VIRTUAL_SYNC flag. It is called by all VCPUs and they
+ * will block on the VirtualSyncLock until the pending timers have been executed
+ * and the clock restarted.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ *
+ * @thread EMTs
+ */
+VMMR3_INT_DECL(void) TMR3VirtualSyncFF(PVM pVM, PVMCPU pVCpu)
+{
+ Log2(("TMR3VirtualSyncFF:\n"));
+
+ /*
+ * The EMT doing the timers is diverted to them.
+ */
+ if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
+ TMR3TimerQueuesDo(pVM);
+ /*
+ * The other EMTs will block on the virtual sync lock and the first owner
+ * will run the queue and thus restarting the clock.
+ *
+ * Note! This is very suboptimal code wrt resuming execution when there
+ * are more than two Virtual CPUs, since they will all have to enter
+ * the critical section one by one. But it's a very simple solution
+ * which will have to do the job for now.
+ */
+ else
+ {
+/** @todo Optimize for SMP */
+ STAM_PROFILE_START(&pVM->tm.s.StatVirtualSyncFF, a);
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED);
+ if (pVM->tm.s.fVirtualSyncTicking)
+ {
+ STAM_PROFILE_STOP(&pVM->tm.s.StatVirtualSyncFF, a); /* before the unlock! */
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ Log2(("TMR3VirtualSyncFF: ticking\n"));
+ }
+ else
+ {
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+
+ /* try run it. */
+ PDMCritSectEnter(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].TimerLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED);
+ if (pVM->tm.s.fVirtualSyncTicking)
+ Log2(("TMR3VirtualSyncFF: ticking (2)\n"));
+ else
+ {
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, true);
+ Log2(("TMR3VirtualSyncFF: running queue\n"));
+
+ Assert(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule == UINT32_MAX);
+ tmR3TimerQueueRunVirtualSync(pVM);
+ if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
+ VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
+
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false);
+ }
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ STAM_PROFILE_STOP(&pVM->tm.s.StatVirtualSyncFF, a); /* before the unlock! */
+ PDMCritSectLeave(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].TimerLock);
+ }
+ }
+}
+
+
+/**
+ * Service the special virtual sync timer queue.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpuDst The destination VCpu.
+ */
+static void tmR3TimerQueueDoVirtualSync(PVM pVM, PVMCPU pVCpuDst)
+{
+ PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
+ if (ASMAtomicCmpXchgBool(&pQueue->fBeingProcessed, true, false))
+ {
+ STAM_PROFILE_START(&pQueue->StatDo, s1);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED);
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, true);
+ VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /* Clear the FF once we started working for real. */
+
+ Assert(pQueue->idxSchedule == UINT32_MAX);
+ tmR3TimerQueueRunVirtualSync(pVM);
+ if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
+ VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
+
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false);
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ STAM_PROFILE_STOP(&pQueue->StatDo, s1);
+ ASMAtomicWriteBool(&pQueue->fBeingProcessed, false);
+ }
+}
+
+
+/**
+ * Schedules and runs any pending timers.
+ *
+ * This is normally called from a forced action handler in EMT.
+ *
+ * @param pVM The cross context VM structure.
+ *
+ * @thread EMT (actually EMT0, but we fend off the others)
+ */
+VMMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
+{
+ /*
+ * Only the dedicated timer EMT should do stuff here.
+ * (fRunningQueues is only used as an indicator.)
+ */
+ Assert(pVM->tm.s.idTimerCpu < pVM->cCpus);
+ PVMCPU pVCpuDst = pVM->apCpusR3[pVM->tm.s.idTimerCpu];
+ if (VMMGetCpu(pVM) != pVCpuDst)
+ {
+ Assert(pVM->cCpus > 1);
+ return;
+ }
+ STAM_PROFILE_START(&pVM->tm.s.StatDoQueues, a);
+ Log2(("TMR3TimerQueuesDo:\n"));
+ Assert(!pVM->tm.s.fRunningQueues);
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, true);
+
+ /*
+ * Process the queues.
+ */
+ AssertCompile(TMCLOCK_MAX == 4);
+
+ /*
+ * TMCLOCK_VIRTUAL_SYNC (see also TMR3VirtualSyncFF)
+ */
+ tmR3TimerQueueDoVirtualSync(pVM, pVCpuDst);
+
+ /*
+ * TMCLOCK_VIRTUAL
+ */
+ tmR3TimerQueueDoOne(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]);
+
+ /*
+ * TMCLOCK_TSC
+ */
+ Assert(pVM->tm.s.aTimerQueues[TMCLOCK_TSC].idxActive == UINT32_MAX); /* not used */
+
+ /*
+ * TMCLOCK_REAL
+ */
+ tmR3TimerQueueDoOne(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]);
+
+#ifdef VBOX_STRICT
+ /* check that we didn't screw up. */
+ tmTimerQueuesSanityChecks(pVM, "TMR3TimerQueuesDo");
+#endif
+
+ /* done */
+ Log2(("TMR3TimerQueuesDo: returns void\n"));
+ ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, false);
+ STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a);
+}
+
+
+
+/** @name Saved state values
+ * @{ */
+#define TMTIMERSTATE_SAVED_PENDING_STOP 4
+#define TMTIMERSTATE_SAVED_PENDING_SCHEDULE 7
+/** @} */
+
+
+/**
+ * Saves the state of a timer to a saved state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param hTimer Timer to save.
+ * @param pSSM Save State Manager handle.
+ */
+VMMR3DECL(int) TMR3TimerSave(PVM pVM, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT(pVM);
+ TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
+ LogFlow(("TMR3TimerSave: %p:{enmState=%s, .szName='%s'} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->szName, pSSM));
+
+ switch (pTimer->enmState)
+ {
+ case TMTIMERSTATE_STOPPED:
+ case TMTIMERSTATE_PENDING_STOP:
+ case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
+ return SSMR3PutU8(pSSM, TMTIMERSTATE_SAVED_PENDING_STOP);
+
+ case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
+ case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
+ AssertMsgFailed(("u64Expire is being updated! (%s)\n", pTimer->szName));
+ if (!RTThreadYield())
+ RTThreadSleep(1);
+ RT_FALL_THRU();
+ case TMTIMERSTATE_ACTIVE:
+ case TMTIMERSTATE_PENDING_SCHEDULE:
+ case TMTIMERSTATE_PENDING_RESCHEDULE:
+ SSMR3PutU8(pSSM, TMTIMERSTATE_SAVED_PENDING_SCHEDULE);
+ return SSMR3PutU64(pSSM, pTimer->u64Expire);
+
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
+ case TMTIMERSTATE_DESTROY:
+ case TMTIMERSTATE_FREE:
+ case TMTIMERSTATE_INVALID:
+ AssertMsgFailed(("Invalid timer state %d %s (%s)\n", pTimer->enmState, tmTimerState(pTimer->enmState), pTimer->szName));
+ return SSMR3HandleSetStatus(pSSM, VERR_TM_INVALID_STATE);
+ }
+
+ AssertMsgFailed(("Unknown timer state %d (%s)\n", pTimer->enmState, pTimer->szName));
+ return SSMR3HandleSetStatus(pSSM, VERR_TM_UNKNOWN_STATE);
+}
+
+
+/**
+ * Loads the state of a timer from a saved state.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param hTimer Handle of the timer to restore.
+ * @param pSSM Save State Manager handle.
+ */
+VMMR3DECL(int) TMR3TimerLoad(PVM pVM, TMTIMERHANDLE hTimer, PSSMHANDLE pSSM)
+{
+ VM_ASSERT_EMT(pVM);
+ TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
+ Assert(pSSM);
+ LogFlow(("TMR3TimerLoad: %p:{enmState=%s, .szName='%s'} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->szName, pSSM));
+
+ /*
+ * Load the state and validate it.
+ */
+ uint8_t u8State;
+ int rc = SSMR3GetU8(pSSM, &u8State);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* TMTIMERSTATE_SAVED_XXX: Workaround for accidental state shift in r47786 (2009-05-26 19:12:12). */
+ if ( u8State == TMTIMERSTATE_SAVED_PENDING_STOP + 1
+ || u8State == TMTIMERSTATE_SAVED_PENDING_SCHEDULE + 1)
+ u8State--;
+
+ if ( u8State != TMTIMERSTATE_SAVED_PENDING_STOP
+ && u8State != TMTIMERSTATE_SAVED_PENDING_SCHEDULE)
+ {
+ AssertLogRelMsgFailed(("u8State=%d\n", u8State));
+ return SSMR3HandleSetStatus(pSSM, VERR_TM_LOAD_STATE);
+ }
+
+ /* Enter the critical sections to make TMTimerSet/Stop happy. */
+ if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED);
+ PPDMCRITSECT pCritSect = pTimer->pCritSect;
+ if (pCritSect)
+ PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
+
+ if (u8State == TMTIMERSTATE_SAVED_PENDING_SCHEDULE)
+ {
+ /*
+ * Load the expire time.
+ */
+ uint64_t u64Expire;
+ rc = SSMR3GetU64(pSSM, &u64Expire);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Set it.
+ */
+ Log(("u8State=%d u64Expire=%llu\n", u8State, u64Expire));
+ rc = TMTimerSet(pVM, hTimer, u64Expire);
+ }
+ else
+ {
+ /*
+ * Stop it.
+ */
+ Log(("u8State=%d\n", u8State));
+ rc = TMTimerStop(pVM, hTimer);
+ }
+
+ if (pCritSect)
+ PDMCritSectLeave(pVM, pCritSect);
+ if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+
+ /*
+ * On failure set SSM status.
+ */
+ if (RT_FAILURE(rc))
+ rc = SSMR3HandleSetStatus(pSSM, rc);
+ return rc;
+}
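+
+
+/*
+ * Illustrative sketch, not part of the actual sources: hypothetical
+ * saved-state callbacks of a subsystem delegating a timer to the two
+ * functions above. TMR3TimerSave writes one state byte plus, for armed
+ * timers, the expire time; TMR3TimerLoad reads them back and re-arms or
+ * stops the timer accordingly.
+ */
+#if 0 /* example only */
+static int exampleSaveExec(PVM pVM, PSSMHANDLE pSSM, TMTIMERHANDLE hExampleTimer)
+{
+ return TMR3TimerSave(pVM, hExampleTimer, pSSM);
+}
+
+static int exampleLoadExec(PVM pVM, PSSMHANDLE pSSM, TMTIMERHANDLE hExampleTimer)
+{
+ return TMR3TimerLoad(pVM, hExampleTimer, pSSM);
+}
+#endif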
+
+
+/**
+ * Skips the state of a timer in a given saved state.
+ *
+ * @returns VBox status code.
+ * @param pSSM Save State Manager handle.
+ * @param pfActive Where to store whether the timer was active
+ * when the state was saved.
+ */
+VMMR3DECL(int) TMR3TimerSkip(PSSMHANDLE pSSM, bool *pfActive)
+{
+ Assert(pSSM); AssertPtr(pfActive);
+ LogFlow(("TMR3TimerSkip: pSSM=%p pfActive=%p\n", pSSM, pfActive));
+
+ /*
+ * Load the state and validate it.
+ */
+ uint8_t u8State;
+ int rc = SSMR3GetU8(pSSM, &u8State);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* TMTIMERSTATE_SAVED_XXX: Workaround for accidental state shift in r47786 (2009-05-26 19:12:12). */
+ if ( u8State == TMTIMERSTATE_SAVED_PENDING_STOP + 1
+ || u8State == TMTIMERSTATE_SAVED_PENDING_SCHEDULE + 1)
+ u8State--;
+
+ if ( u8State != TMTIMERSTATE_SAVED_PENDING_STOP
+ && u8State != TMTIMERSTATE_SAVED_PENDING_SCHEDULE)
+ {
+ AssertLogRelMsgFailed(("u8State=%d\n", u8State));
+ return SSMR3HandleSetStatus(pSSM, VERR_TM_LOAD_STATE);
+ }
+
+ *pfActive = (u8State == TMTIMERSTATE_SAVED_PENDING_SCHEDULE);
+ if (*pfActive)
+ {
+ /*
+ * Load the expire time.
+ */
+ uint64_t u64Expire;
+ rc = SSMR3GetU64(pSSM, &u64Expire);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Associates a critical section with a timer.
+ *
+ * The critical section will be entered prior to doing the timer callback, thus
+ * avoiding potential races between the timer thread and other threads trying to
+ * stop or adjust the timer expiration while it's being delivered. The timer
+ * thread will leave the critical section when the timer callback returns.
+ *
+ * In strict builds, ownership of the critical section will be asserted by
+ * TMTimerSet, TMTimerStop, TMTimerGetExpire and TMTimerDestroy (when called at
+ * runtime).
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_INVALID_HANDLE if the timer handle is NULL or invalid
+ * (asserted).
+ * @retval VERR_INVALID_PARAMETER if pCritSect is NULL or has an invalid magic
+ * (asserted).
+ * @retval VERR_ALREADY_EXISTS if a critical section was already associated
+ * with the timer (asserted).
+ * @retval VERR_INVALID_STATE if the timer isn't stopped.
+ *
+ * @param pVM The cross context VM structure.
+ * @param hTimer The timer handle.
+ * @param pCritSect The critical section. The caller must make sure this
+ * is around for the life time of the timer.
+ *
+ * @thread Any, but the caller is responsible for making sure the timer is not
+ * active.
+ */
+VMMR3DECL(int) TMR3TimerSetCritSect(PVM pVM, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
+ AssertPtrReturn(pCritSect, VERR_INVALID_PARAMETER);
+ const char *pszName = PDMR3CritSectName(pCritSect); /* exploited for validation */
+ AssertReturn(pszName, VERR_INVALID_PARAMETER);
+ AssertReturn(!pTimer->pCritSect, VERR_ALREADY_EXISTS);
+ AssertReturn(pTimer->enmState == TMTIMERSTATE_STOPPED, VERR_INVALID_STATE);
+ AssertReturn( pTimer->enmType == TMTIMERTYPE_DEV
+ || pTimer->enmType == TMTIMERTYPE_DRV
+ || pTimer->enmType == TMTIMERTYPE_USB,
+ VERR_NOT_SUPPORTED); /* Not supported on internal timers, see tmRZTimerGetCritSect. */
+ LogFlow(("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, pCritSect, pszName));
+
+ pTimer->pCritSect = pCritSect;
+ return VINF_SUCCESS;
+}
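+
+
+/*
+ * Illustrative sketch, not part of the actual sources: associating a
+ * critical section with a freshly created device timer so the callback is
+ * serialized with the device's other work. The timer must be a DEV/DRV/USB
+ * timer and still stopped, see the assertions above.
+ */
+#if 0 /* example only */
+static int exampleLinkCritSect(PVM pVM, TMTIMERHANDLE hTimer, PPDMCRITSECT pCritSect)
+{
+ int rc = TMR3TimerSetCritSect(pVM, hTimer, pCritSect);
+ AssertRCReturn(rc, rc);
+ /* From now on TM enters pCritSect before invoking the timer callback. */
+ return VINF_SUCCESS;
+}
+#endif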
+
+
+/**
+ * Get the real world UTC time adjusted for VM lag.
+ *
+ * @returns pTime.
+ * @param pVM The cross context VM structure.
+ * @param pTime Where to store the time.
+ */
+VMMR3_INT_DECL(PRTTIMESPEC) TMR3UtcNow(PVM pVM, PRTTIMESPEC pTime)
+{
+ /*
+ * Get a stable set of VirtualSync parameters and calc the lag.
+ */
+ uint64_t offVirtualSync;
+ uint64_t offVirtualSyncGivenUp;
+ do
+ {
+ offVirtualSync = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+ offVirtualSyncGivenUp = ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp);
+ } while (ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) != offVirtualSync);
+
+ Assert(offVirtualSync >= offVirtualSyncGivenUp);
+ uint64_t const offLag = offVirtualSync - offVirtualSyncGivenUp;
+
+ /*
+ * Get current time and adjust for virtual sync lag and do time displacement.
+ */
+ RTTimeNow(pTime);
+ RTTimeSpecSubNano(pTime, offLag);
+ RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
+
+ /*
+ * Log details if the time changed radically (also triggers on first call).
+ */
+ int64_t nsPrev = ASMAtomicXchgS64(&pVM->tm.s.nsLastUtcNow, RTTimeSpecGetNano(pTime));
+ int64_t cNsDelta = RTTimeSpecGetNano(pTime) - nsPrev;
+ if ((uint64_t)RT_ABS(cNsDelta) > RT_NS_1HOUR / 2)
+ {
+ RTTIMESPEC NowAgain;
+ RTTimeNow(&NowAgain);
+ LogRel(("TMR3UtcNow: nsNow=%'RI64 nsPrev=%'RI64 -> cNsDelta=%'RI64 (offLag=%'RI64 offVirtualSync=%'RU64 offVirtualSyncGivenUp=%'RU64, NowAgain=%'RI64)\n",
+ RTTimeSpecGetNano(pTime), nsPrev, cNsDelta, offLag, offVirtualSync, offVirtualSyncGivenUp, RTTimeSpecGetNano(&NowAgain)));
+ if (pVM->tm.s.pszUtcTouchFileOnJump && nsPrev != 0)
+ {
+ RTFILE hFile;
+ int rc = RTFileOpen(&hFile, pVM->tm.s.pszUtcTouchFileOnJump,
+ RTFILE_O_WRITE | RTFILE_O_APPEND | RTFILE_O_OPEN_CREATE | RTFILE_O_DENY_NONE);
+ if (RT_SUCCESS(rc))
+ {
+ char szMsg[256];
+ size_t cch;
+ cch = RTStrPrintf(szMsg, sizeof(szMsg),
+ "TMR3UtcNow: nsNow=%'RI64 nsPrev=%'RI64 -> cNsDelta=%'RI64 (offLag=%'RI64 offVirtualSync=%'RU64 offVirtualSyncGivenUp=%'RU64, NowAgain=%'RI64)\n",
+ RTTimeSpecGetNano(pTime), nsPrev, cNsDelta, offLag, offVirtualSync, offVirtualSyncGivenUp, RTTimeSpecGetNano(&NowAgain));
+ RTFileWrite(hFile, szMsg, cch, NULL);
+ RTFileClose(hFile);
+ }
+ }
+ }
+
+ return pTime;
+}
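+
+
+/*
+ * Illustrative sketch, not part of the actual sources: reading the
+ * lag-adjusted UTC time, e.g. for feeding a guest RTC.
+ */
+#if 0 /* example only */
+static int64_t exampleGuestUtcNano(PVM pVM)
+{
+ RTTIMESPEC Time;
+ TMR3UtcNow(pVM, &Time); /* host UTC minus virtual sync lag, plus offUTC */
+ return RTTimeSpecGetNano(&Time);
+}
+#endif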
+
+
+/**
+ * Pauses all clocks except TMCLOCK_REAL.
+ *
+ * @returns VBox status code, all errors are asserted.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @thread EMT corresponding to the VMCPU.
+ */
+VMMR3DECL(int) TMR3NotifySuspend(PVM pVM, PVMCPU pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */
+
+ /*
+ * The shared virtual clock (includes virtual sync which is tied to it).
+ */
+ int rc = tmVirtualPauseLocked(pVM);
+ AssertRCReturnStmt(rc, PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock), rc);
+
+ /*
+ * Pause the TSC last since it is normally linked to the virtual
+ * sync clock, so the above code may actually stop both clocks.
+ */
+ if (!pVM->tm.s.fTSCTiedToExecution)
+ {
+ rc = tmCpuTickPauseLocked(pVM, pVCpu);
+ AssertRCReturnStmt(rc, PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock), rc);
+ }
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ /*
+ * Update cNsTotal and stats.
+ */
+ Assert(!pVCpu->tm.s.fSuspended);
+ uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
+ uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - pVCpu->tm.s.cNsHalted;
+
+# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
+ STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
+ int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
+ if (cNsOtherNewDelta > 0)
+ STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
+# endif
+
+ uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
+ pVCpu->tm.s.nsStartTotal = cNsTotalNew;
+ pVCpu->tm.s.fSuspended = true;
+ pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
+ pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
+ ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
+#endif
+
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resumes all clocks except TMCLOCK_REAL.
+ *
+ * @returns VBox status code, all errors are asserted.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @thread EMT corresponding to the VMCPU.
+ */
+VMMR3DECL(int) TMR3NotifyResume(PVM pVM, PVMCPU pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ /*
+ * Set nsStartTotal. There is no need to back this out if either of
+ * the two calls below fails.
+ */
+ uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
+ pVCpu->tm.s.nsStartTotal = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
+ pVCpu->tm.s.fSuspended = false;
+ ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
+#endif
+
+ /*
+ * Resume the TSC first since it is normally linked to the virtual sync
+ * clock, so it may actually not be resumed until we've executed the code
+ * below.
+ */
+ if (!pVM->tm.s.fTSCTiedToExecution)
+ {
+ int rc = tmCpuTickResumeLocked(pVM, pVCpu);
+ AssertRCReturnStmt(rc, PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock), rc);
+ }
+
+ /*
+ * The shared virtual clock (includes virtual sync which is tied to it).
+ */
+ int rc = tmVirtualResumeLocked(pVM);
+
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ return rc;
+}
+
+
+/**
+ * Sets the warp drive percent of the virtual time.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param u32Percent The new percentage. 100 means normal operation.
+ */
+VMMDECL(int) TMR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
+{
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)tmR3SetWarpDrive, 2, pUVM, u32Percent);
+}
+
+
+/**
+ * EMT worker for TMR3SetWarpDrive.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param u32Percent See TMR3SetWarpDrive().
+ * @internal
+ */
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
+{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+
+ /*
+ * Validate it.
+ */
+ AssertMsgReturn(u32Percent >= 2 && u32Percent <= 20000,
+ ("%RX32 is not between 2 and 20000 (inclusive).\n", u32Percent),
+ VERR_INVALID_PARAMETER);
+
+/** @todo This isn't a feature specific to virtual time, move the variables to
+ * TM level and make it affect TMR3UTCNow as well! */
+
+ PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */
+
+ /*
+ * If the time is running we'll have to pause it before we can change
+ * the warp drive settings.
+ */
+ bool fPaused = !!pVM->tm.s.cVirtualTicking;
+ if (fPaused) /** @todo this isn't really working, but wtf. */
+ TMR3NotifySuspend(pVM, pVCpu);
+
+ /** @todo Should switch TM mode to virt-tsc-emulated if it isn't already! */
+ pVM->tm.s.u32VirtualWarpDrivePercentage = u32Percent;
+ pVM->tm.s.fVirtualWarpDrive = u32Percent != 100;
+ LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32 fVirtualWarpDrive=%RTbool\n",
+ pVM->tm.s.u32VirtualWarpDrivePercentage, pVM->tm.s.fVirtualWarpDrive));
+
+ if (fPaused)
+ TMR3NotifyResume(pVM, pVCpu);
+
+ PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
+ return VINF_SUCCESS;
+}
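+
+
+/*
+ * Illustrative sketch, not part of the actual sources: requesting a warp
+ * drive factor. 100 is normal speed, 200 makes virtual time run twice as
+ * fast as real time, 50 at half speed; anything outside 2..20000 is
+ * rejected by the worker above.
+ */
+#if 0 /* example only */
+static int exampleDoubleTimeSpeed(PUVM pUVM)
+{
+ return TMR3SetWarpDrive(pUVM, 200 /* percent */);
+}
+#endif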
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time without checking
+ * timers or anything.
+ *
+ * @returns The timestamp.
+ * @param pUVM The user mode VM structure.
+ *
+ * @remarks See TMVirtualGetNoCheck.
+ */
+VMMR3DECL(uint64_t) TMR3TimeVirtGet(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT64_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT64_MAX);
+ return TMVirtualGetNoCheck(pVM);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time in milliseconds without checking
+ * timers or anything.
+ *
+ * @returns The timestamp in milliseconds.
+ * @param pUVM The user mode VM structure.
+ *
+ * @remarks See TMVirtualGetNoCheck.
+ */
+VMMR3DECL(uint64_t) TMR3TimeVirtGetMilli(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT64_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT64_MAX);
+ return TMVirtualToMilli(pVM, TMVirtualGetNoCheck(pVM));
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time in microseconds without checking
+ * timers or anything.
+ *
+ * @returns The timestamp in microseconds.
+ * @param pUVM The user mode VM structure.
+ *
+ * @remarks See TMVirtualGetNoCheck.
+ */
+VMMR3DECL(uint64_t) TMR3TimeVirtGetMicro(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT64_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT64_MAX);
+ return TMVirtualToMicro(pVM, TMVirtualGetNoCheck(pVM));
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time in nanoseconds without checking
+ * timers or anything.
+ *
+ * @returns The timestamp in nanoseconds.
+ * @param pUVM The user mode VM structure.
+ *
+ * @remarks See TMVirtualGetNoCheck.
+ */
+VMMR3DECL(uint64_t) TMR3TimeVirtGetNano(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT64_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT64_MAX);
+ return TMVirtualToNano(pVM, TMVirtualGetNoCheck(pVM));
+}
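+
+
+/*
+ * Illustrative sketch, not part of the actual sources: the unchecked
+ * virtual clock getters above differ only in the unit conversion applied
+ * to the same TMVirtualGetNoCheck reading.
+ */
+#if 0 /* example only */
+static void exampleLogVirtualTime(PUVM pUVM)
+{
+ uint64_t const cNs = TMR3TimeVirtGetNano(pUVM);
+ uint64_t const cUs = TMR3TimeVirtGetMicro(pUVM);
+ uint64_t const cMs = TMR3TimeVirtGetMilli(pUVM);
+ LogRel(("Virtual time: %'RU64 ns = %'RU64 us = %'RU64 ms\n", cNs, cUs, cMs));
+}
+#endif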
+
+
+/**
+ * Gets the current warp drive percent.
+ *
+ * @returns The warp drive percent.
+ * @param pUVM The user mode VM structure.
+ */
+VMMR3DECL(uint32_t) TMR3GetWarpDrive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+ return pVM->tm.s.u32VirtualWarpDrivePercentage;
+}
+
+
+#if 0 /* unused - needs a little updating after @bugref{9941} */
+/**
+ * Gets the performance information for one virtual CPU as seen by the VMM.
+ *
+ * The returned times cover the period where the VM is running and will be
+ * reset when restoring a previous VM state (at least for the time being).
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_NOT_IMPLEMENTED if not compiled in.
+ * @retval VERR_INVALID_STATE if the VM handle is bad.
+ * @retval VERR_INVALID_CPU_ID if idCpu is out of range.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu The ID of the virtual CPU which times to get.
+ * @param pcNsTotal Where to store the total run time (nanoseconds) of
+ * the CPU, i.e. the sum of the three other returns.
+ * Optional.
+ * @param pcNsExecuting Where to store the time (nanoseconds) spent
+ * executing guest code. Optional.
+ * @param pcNsHalted Where to store the time (nanoseconds) spent
+ * halted. Optional.
+ * @param pcNsOther Where to store the time (nanoseconds) spent
+ * preempted by the host scheduler, on virtualization
+ * overhead and on other tasks.
+ */
+VMMR3DECL(int) TMR3GetCpuLoadTimes(PVM pVM, VMCPUID idCpu, uint64_t *pcNsTotal, uint64_t *pcNsExecuting,
+ uint64_t *pcNsHalted, uint64_t *pcNsOther)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ /*
+ * Get a stable result set.
+ * This should be way quicker than an EMT request.
+ */
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ uint32_t uTimesGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
+ uint64_t cNsTotal = pVCpu->tm.s.cNsTotal;
+ uint64_t cNsExecuting = pVCpu->tm.s.cNsExecuting;
+ uint64_t cNsHalted = pVCpu->tm.s.cNsHalted;
+ uint64_t cNsOther = pVCpu->tm.s.cNsOther;
+ while ( (uTimesGen & 1) /* update in progress */
+ || uTimesGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen))
+ {
+ RTThreadYield();
+ uTimesGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
+ cNsTotal = pVCpu->tm.s.cNsTotal;
+ cNsExecuting = pVCpu->tm.s.cNsExecuting;
+ cNsHalted = pVCpu->tm.s.cNsHalted;
+ cNsOther = pVCpu->tm.s.cNsOther;
+ }
+
+ /*
+ * Fill in the return values.
+ */
+ if (pcNsTotal)
+ *pcNsTotal = cNsTotal;
+ if (pcNsExecuting)
+ *pcNsExecuting = cNsExecuting;
+ if (pcNsHalted)
+ *pcNsHalted = cNsHalted;
+ if (pcNsOther)
+ *pcNsOther = cNsOther;
+
+ return VINF_SUCCESS;
+
+#else
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
+#endif /* unused */
+
+
+/**
+ * Gets the performance information for one virtual CPU as seen by the VMM in
+ * percents.
+ *
+ * The returned times cover the period where the VM is running and will be
+ * reset when restoring a previous VM state (at least for the time being).
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_NOT_IMPLEMENTED if not compiled in.
+ * @retval VERR_INVALID_VM_HANDLE if the VM handle is bad.
+ * @retval VERR_INVALID_CPU_ID if idCpu is out of range.
+ *
+ * @param pUVM The usermode VM structure.
+ * @param idCpu The ID of the virtual CPU which times to get.
+ * @param pcMsInterval Where to store the interval of the percentages in
+ * milliseconds. Optional.
+ * @param pcPctExecuting Where to return the percentage of time spent
+ * executing guest code. Optional.
+ * @param pcPctHalted Where to return the percentage of time spent halted.
+ * Optional.
+ * @param pcPctOther Where to return the percentage of time spent
+ * preempted by the host scheduler, on virtualization
+ * overhead and on other tasks.
+ */
+VMMR3DECL(int) TMR3GetCpuLoadPercents(PUVM pUVM, VMCPUID idCpu, uint64_t *pcMsInterval, uint8_t *pcPctExecuting,
+ uint8_t *pcPctHalted, uint8_t *pcPctOther)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+ TMCPULOADSTATE volatile *pState;
+ if (idCpu == VMCPUID_ALL)
+ pState = &pVM->tm.s.CpuLoad;
+ else
+ pState = &pVM->apCpusR3[idCpu]->tm.s.CpuLoad;
+
+ if (pcMsInterval)
+ *pcMsInterval = RT_MS_1SEC;
+ if (pcPctExecuting)
+ *pcPctExecuting = pState->cPctExecuting;
+ if (pcPctHalted)
+ *pcPctHalted = pState->cPctHalted;
+ if (pcPctOther)
+ *pcPctOther = pState->cPctOther;
+
+ return VINF_SUCCESS;
+
+#else
+ RT_NOREF(pcMsInterval, pcPctExecuting, pcPctHalted, pcPctOther);
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
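+
+
+/*
+ * Illustrative sketch, not part of the actual sources: sampling the
+ * aggregated load percentages for all VCPUs.
+ */
+#if 0 /* example only */
+static void exampleLogCpuLoad(PUVM pUVM)
+{
+ uint64_t cMsInterval = 0;
+ uint8_t cPctExecuting = 0, cPctHalted = 0, cPctOther = 0;
+ int rc = TMR3GetCpuLoadPercents(pUVM, VMCPUID_ALL, &cMsInterval,
+ &cPctExecuting, &cPctHalted, &cPctOther);
+ if (RT_SUCCESS(rc))
+ LogRel(("Load: %u%% exec, %u%% halted, %u%% other (per %RU64 ms)\n",
+ cPctExecuting, cPctHalted, cPctOther, cMsInterval));
+}
+#endif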
+
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+
+/**
+ * Helper for tmR3CpuLoadTimer.
+ *
+ * @param pState The state to update.
+ * @param cNsTotal Total time.
+ * @param cNsExecuting Time executing.
+ * @param cNsHalted Time halted.
+ */
+DECLINLINE(void) tmR3CpuLoadTimerMakeUpdate(PTMCPULOADSTATE pState, uint64_t cNsTotal, uint64_t cNsExecuting, uint64_t cNsHalted)
+{
+ /* Calc & update deltas */
+ uint64_t cNsTotalDelta = cNsTotal - pState->cNsPrevTotal;
+ uint64_t cNsExecutingDelta = cNsExecuting - pState->cNsPrevExecuting;
+ uint64_t cNsHaltedDelta = cNsHalted - pState->cNsPrevHalted;
+
+ if (cNsExecutingDelta + cNsHaltedDelta <= cNsTotalDelta)
+ { /* likely */ }
+ else
+ {
+ /* Just adjust the executing and halted values down to match the total delta. */
+ uint64_t const cNsExecAndHalted = cNsExecutingDelta + cNsHaltedDelta;
+ uint64_t const cNsAdjust = cNsExecAndHalted - cNsTotalDelta + cNsTotalDelta / 64;
+ cNsExecutingDelta -= (cNsAdjust * cNsExecutingDelta + cNsExecAndHalted - 1) / cNsExecAndHalted;
+ cNsHaltedDelta -= (cNsAdjust * cNsHaltedDelta + cNsExecAndHalted - 1) / cNsExecAndHalted;
+ /*Assert(cNsExecutingDelta + cNsHaltedDelta <= cNsTotalDelta); - annoying when debugging */
+ }
+
+ pState->cNsPrevExecuting = cNsExecuting;
+ pState->cNsPrevHalted = cNsHalted;
+ pState->cNsPrevTotal = cNsTotal;
+
+ /* Calc pcts. */
+ uint8_t cPctExecuting, cPctHalted, cPctOther;
+ if (!cNsTotalDelta)
+ {
+ cPctExecuting = 0;
+ cPctHalted = 100;
+ cPctOther = 0;
+ }
+ else if (cNsTotalDelta < UINT64_MAX / 4)
+ {
+ cPctExecuting = (uint8_t)(cNsExecutingDelta * 100 / cNsTotalDelta);
+ cPctHalted = (uint8_t)(cNsHaltedDelta * 100 / cNsTotalDelta);
+ cPctOther = (uint8_t)((cNsTotalDelta - cNsExecutingDelta - cNsHaltedDelta) * 100 / cNsTotalDelta);
+ }
+ else
+ {
+ cPctExecuting = 0;
+ cPctHalted = 100;
+ cPctOther = 0;
+ }
+
+ /* Update percentages: */
+ size_t idxHistory = pState->idxHistory + 1;
+ if (idxHistory >= RT_ELEMENTS(pState->aHistory))
+ idxHistory = 0;
+
+ pState->cPctExecuting = cPctExecuting;
+ pState->cPctHalted = cPctHalted;
+ pState->cPctOther = cPctOther;
+
+ pState->aHistory[idxHistory].cPctExecuting = cPctExecuting;
+ pState->aHistory[idxHistory].cPctHalted = cPctHalted;
+ pState->aHistory[idxHistory].cPctOther = cPctOther;
+
+ pState->idxHistory = (uint16_t)idxHistory;
+ if (pState->cHistoryEntries < RT_ELEMENTS(pState->aHistory))
+ pState->cHistoryEntries++;
+}
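+
+/*
+ * Worked example for the percentage math above (illustrative figures, not
+ * from the sources): with cNsTotalDelta = 1 000 000 000, cNsExecutingDelta
+ * = 250 000 000 and cNsHaltedDelta = 600 000 000, the middle branch yields
+ * 25% executing, 60% halted and the remaining 15% other.
+ */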
+
+
+/**
+ * @callback_method_impl{FNTMTIMERINT,
+ * Timer callback that calculates the CPU load since the last
+ * time it was called.}
+ */
+static DECLCALLBACK(void) tmR3CpuLoadTimer(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ /*
+ * Re-arm the timer first.
+ */
+ int rc = TMTimerSetMillies(pVM, hTimer, 1000);
+ AssertLogRelRC(rc);
+ NOREF(pvUser);
+
+ /*
+ * Update the values for each CPU.
+ */
+ uint64_t cNsTotalAll = 0;
+ uint64_t cNsExecutingAll = 0;
+ uint64_t cNsHaltedAll = 0;
+ for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[iCpu];
+
+ /* Try get a stable data set. */
+ uint32_t cTries = 3;
+ uint64_t nsNow = RTTimeNanoTS();
+ uint32_t uTimesGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
+ bool fSuspended = pVCpu->tm.s.fSuspended;
+ uint64_t nsStartTotal = pVCpu->tm.s.nsStartTotal;
+ uint64_t cNsExecuting = pVCpu->tm.s.cNsExecuting;
+ uint64_t cNsHalted = pVCpu->tm.s.cNsHalted;
+ while (RT_UNLIKELY( (uTimesGen & 1) /* update in progress */
+ || uTimesGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen)))
+ {
+ if (!--cTries)
+ break;
+ ASMNopPause();
+ nsNow = RTTimeNanoTS();
+ uTimesGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
+ fSuspended = pVCpu->tm.s.fSuspended;
+ nsStartTotal = pVCpu->tm.s.nsStartTotal;
+ cNsExecuting = pVCpu->tm.s.cNsExecuting;
+ cNsHalted = pVCpu->tm.s.cNsHalted;
+ }
+
+ /* Totals */
+ uint64_t cNsTotal = fSuspended ? nsStartTotal : nsNow - nsStartTotal;
+ cNsTotalAll += cNsTotal;
+ cNsExecutingAll += cNsExecuting;
+ cNsHaltedAll += cNsHalted;
+
+ /* Calc the PCTs and update the state. */
+ tmR3CpuLoadTimerMakeUpdate(&pVCpu->tm.s.CpuLoad, cNsTotal, cNsExecuting, cNsHalted);
+
+ /* Tell the VCpu to update the other and total stat members. */
+ ASMAtomicWriteBool(&pVCpu->tm.s.fUpdateStats, true);
+ }
+
+ /*
+ * Update the value for all the CPUs.
+ */
+ tmR3CpuLoadTimerMakeUpdate(&pVM->tm.s.CpuLoad, cNsTotalAll, cNsExecutingAll, cNsHaltedAll);
+}
+
+#endif /* !VBOX_WITHOUT_NS_ACCOUNTING */
+
+
+/**
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ * Worker for TMR3CpuTickParavirtEnable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtEnable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+ AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt); NOREF(pvData);
+ Assert(pVM->tm.s.enmTSCMode != TMTSCMODE_NATIVE_API); /** @todo figure out NEM/win and paravirt */
+ Assert(tmR3HasFixedTSC(pVM));
+
+ if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
+ {
+ /*
+ * The return value of TMCpuTickGet() and the guest's TSC value for each
+ * CPU must remain constant across the TM TSC mode-switch. Thus we have
+ * the following equation (new/old signifies the new/old tsc modes):
+ * uNewTsc = uOldTsc
+ *
+ * Where (see tmCpuTickGetInternal):
+ * uOldTsc = uRawOldTsc - offTscRawSrcOld
+ * uNewTsc = uRawNewTsc - offTscRawSrcNew
+ *
+ * Solve it for offTscRawSrcNew without replacing uOldTsc:
+ * uRawNewTsc - offTscRawSrcNew = uOldTsc
+ * => -offTscRawSrcNew = uOldTsc - uRawNewTsc
+ * => offTscRawSrcNew = uRawNewTsc - uOldTsc
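+ *
+ * A quick sanity check with made-up numbers: if the guest last read
+ * uOldTsc = 900 and the new raw source reads uRawNewTsc = 5000, the new
+ * offset becomes 5000 - 900 = 4100, so right after the switch the guest
+ * again reads 5000 - 4100 = 900.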
+ */
+ uint64_t uRawOldTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+ uint64_t uRawNewTsc = SUPReadTsc();
+ uint32_t cCpus = pVM->cCpus;
+ for (uint32_t i = 0; i < cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+ pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+ Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
+ }
+
+ LogRel(("TM: Switching TSC mode from '%s' to '%s'\n", tmR3GetTSCModeNameEx(pVM->tm.s.enmTSCMode),
+ tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
+ pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Notify TM that the guest has enabled usage of a paravirtualized TSC.
+ *
+ * This may perform an EMT rendezvous and change the TSC virtualization mode.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) TMR3CpuTickParavirtEnable(PVM pVM)
+{
+ int rc = VINF_SUCCESS;
+ if (pVM->tm.s.fTSCModeSwitchAllowed)
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtEnable, NULL);
+ else
+ LogRel(("TM: Host/VM is not suitable for using TSC mode '%s', request to change TSC mode ignored\n",
+ tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
+ pVM->tm.s.fParavirtTscEnabled = true;
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ * Worker for TMR3CpuTickParavirtDisable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+ AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt);
+ RT_NOREF1(pvData);
+
+ if ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
+ && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
+ {
+ /*
+ * See tmR3CpuTickParavirtEnable for an explanation of the conversion math.
+ */
+ uint64_t uRawOldTsc = SUPReadTsc();
+ uint64_t uRawNewTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+ uint32_t cCpus = pVM->cCpus;
+ for (uint32_t i = 0; i < cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+ pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+ Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
+
+ /* Update the last-seen tick here as we haven't been updating it (as we don't
+ need it) while in pure TSC-offsetting mode. */
+ pVCpu->tm.s.u64TSCLastSeen = uOldTsc;
+ }
+
+ LogRel(("TM: Switching TSC mode from '%s' to '%s'\n", tmR3GetTSCModeNameEx(pVM->tm.s.enmTSCMode),
+ tmR3GetTSCModeNameEx(pVM->tm.s.enmOriginalTSCMode)));
+ pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Notify TM that the guest has disabled usage of a paravirtualized TSC.
+ *
+ * If TMR3CpuTickParavirtEnable() changed the TSC virtualization mode, this will
+ * perform an EMT rendezvous to revert those changes.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) TMR3CpuTickParavirtDisable(PVM pVM)
+{
+ int rc = VINF_SUCCESS;
+ if (pVM->tm.s.fTSCModeSwitchAllowed)
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtDisable, NULL);
+ pVM->tm.s.fParavirtTscEnabled = false;
+ return rc;
+}
+
+
+/**
+ * Check whether the guest can be presented a fixed rate & monotonic TSC.
+ *
+ * @returns true if TSC is stable, false otherwise.
+ * @param pVM The cross context VM structure.
+ * @param fWithParavirtEnabled Whether to answer for the case where the
+ * paravirtualized TSC is enabled rather than disabled.
+ *
+ * @remarks Must be called only after TMR3InitFinalize().
+ */
+VMMR3_INT_DECL(bool) TMR3CpuTickIsFixedRateMonotonic(PVM pVM, bool fWithParavirtEnabled)
+{
+ /** @todo figure out what exactly we want here later. */
+ NOREF(fWithParavirtEnabled);
+ PSUPGLOBALINFOPAGE pGip;
+ return tmR3HasFixedTSC(pVM) /* Host has fixed-rate TSC. */
+ && ( (pGip = g_pSUPGlobalInfoPage) == NULL /* Can be NULL in driverless mode. */
+ || (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)); /* GIP thinks it's monotonic. */
+}
+
+
+/**
+ * Gets the 5 char clock name for the info tables.
+ *
+ * @returns The name.
+ * @param enmClock The clock.
+ */
+DECLINLINE(const char *) tmR3Get5CharClockName(TMCLOCK enmClock)
+{
+ switch (enmClock)
+ {
+ case TMCLOCK_REAL: return "Real ";
+ case TMCLOCK_VIRTUAL: return "Virt ";
+ case TMCLOCK_VIRTUAL_SYNC: return "VrSy ";
+ case TMCLOCK_TSC: return "TSC ";
+ default: return "Bad ";
+ }
+}
+
+
+/**
+ * Display all timers.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ pHlp->pfnPrintf(pHlp,
+ "Timers (pVM=%p)\n"
+ "%.*s %.*s %.*s %.*s Clock %18s %18s %6s %-25s Description\n",
+ pVM,
+ sizeof(RTR3PTR) * 2, "pTimerR3 ",
+ sizeof(int32_t) * 2, "offNext ",
+ sizeof(int32_t) * 2, "offPrev ",
+ sizeof(int32_t) * 2, "offSched ",
+ "Time",
+ "Expire",
+ "HzHint",
+ "State");
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ const char * const pszClock = tmR3Get5CharClockName(pQueue->enmClock);
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ for (uint32_t idxTimer = 0; idxTimer < pQueue->cTimersAlloc; idxTimer++)
+ {
+ PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
+ TMTIMERSTATE enmState = pTimer->enmState;
+ if (enmState < TMTIMERSTATE_DESTROY && enmState > TMTIMERSTATE_INVALID)
+ pHlp->pfnPrintf(pHlp,
+ "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %6RU32 %-25s %s\n",
+ pTimer,
+ pTimer->idxNext,
+ pTimer->idxPrev,
+ pTimer->idxScheduleNext,
+ pszClock,
+ TMTimerGet(pVM, pTimer->hSelf),
+ pTimer->u64Expire,
+ pTimer->uHzHint,
+ tmTimerState(enmState),
+ pTimer->szName);
+ }
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+ }
+}
+
+
+/**
+ * Display all active timers.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ pHlp->pfnPrintf(pHlp,
+ "Active Timers (pVM=%p)\n"
+ "%.*s %.*s %.*s %.*s Clock %18s %18s %6s %-25s Description\n",
+ pVM,
+ sizeof(RTR3PTR) * 2, "pTimerR3 ",
+ sizeof(int32_t) * 2, "offNext ",
+ sizeof(int32_t) * 2, "offPrev ",
+ sizeof(int32_t) * 2, "offSched ",
+ "Time",
+ "Expire",
+ "HzHint",
+ "State");
+ for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
+ {
+ PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
+ const char * const pszClock = tmR3Get5CharClockName(pQueue->enmClock);
+ PDMCritSectRwEnterShared(pVM, &pQueue->AllocLock, VERR_IGNORED);
+ PDMCritSectEnter(pVM, &pQueue->TimerLock, VERR_IGNORED);
+
+ for (PTMTIMERR3 pTimer = tmTimerQueueGetHead(pQueue, pQueue);
+ pTimer;
+ pTimer = tmTimerGetNext(pQueue, pTimer))
+ {
+ pHlp->pfnPrintf(pHlp,
+ "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %6RU32 %-25s %s\n",
+ pTimer,
+ pTimer->idxNext,
+ pTimer->idxPrev,
+ pTimer->idxScheduleNext,
+ pszClock,
+ TMTimerGet(pVM, pTimer->hSelf),
+ pTimer->u64Expire,
+ pTimer->uHzHint,
+ tmTimerState(pTimer->enmState),
+ pTimer->szName);
+ }
+
+ PDMCritSectLeave(pVM, &pQueue->TimerLock);
+ PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
+ }
+}
+
+
+/**
+ * Display all clocks.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helpers.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+
+ /*
+ * Read the times first to avoid more than necessary time variation.
+ */
+ const uint64_t u64Virtual = TMVirtualGet(pVM);
+ const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM);
+ const uint64_t u64Real = TMRealGet(pVM);
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ uint64_t u64TSC = TMCpuTickGet(pVCpu);
+
+ /*
+ * TSC
+ */
+ pHlp->pfnPrintf(pHlp,
+ "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s - virtualized",
+ u64TSC, u64TSC, TMCpuTicksPerSecond(pVM),
+ pVCpu->tm.s.fTSCTicking ? "ticking" : "paused");
+ if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
+ {
+ pHlp->pfnPrintf(pHlp, " - real tsc offset");
+ if (pVCpu->tm.s.offTSCRawSrc)
+ pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVCpu->tm.s.offTSCRawSrc);
+ }
+ else if (pVM->tm.s.enmTSCMode == TMTSCMODE_NATIVE_API)
+ pHlp->pfnPrintf(pHlp, " - native api");
+ else
+ pHlp->pfnPrintf(pHlp, " - virtual clock");
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+
+ /*
+ * virtual
+ */
+ pHlp->pfnPrintf(pHlp,
+ " Virtual: %18RU64 (%#016RX64) %RU64Hz %s",
+ u64Virtual, u64Virtual, TMVirtualGetFreq(pVM),
+ pVM->tm.s.cVirtualTicking ? "ticking" : "paused");
+ if (pVM->tm.s.fVirtualWarpDrive)
+ pHlp->pfnPrintf(pHlp, " WarpDrive %RU32 %%", pVM->tm.s.u32VirtualWarpDrivePercentage);
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /*
+ * virtual sync
+ */
+ pHlp->pfnPrintf(pHlp,
+ "VirtSync: %18RU64 (%#016RX64) %s%s",
+ u64VirtualSync, u64VirtualSync,
+ pVM->tm.s.fVirtualSyncTicking ? "ticking" : "paused",
+ pVM->tm.s.fVirtualSyncCatchUp ? " - catchup" : "");
+ if (pVM->tm.s.offVirtualSync)
+ {
+ pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.offVirtualSync);
+ if (pVM->tm.s.u32VirtualSyncCatchUpPercentage)
+ pHlp->pfnPrintf(pHlp, " catch-up rate %u %%", pVM->tm.s.u32VirtualSyncCatchUpPercentage);
+ }
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /*
+ * real
+ */
+ pHlp->pfnPrintf(pHlp,
+ " Real: %18RU64 (%#016RX64) %RU64Hz\n",
+ u64Real, u64Real, TMRealGetFreq(pVM));
+}
+
+
+/**
+ * Helper for tmR3InfoCpuLoad that adjusts @a uPct to the given graph width.
+ */
+DECLINLINE(size_t) tmR3InfoCpuLoadAdjustWidth(size_t uPct, size_t cchWidth)
+{
+ if (cchWidth != 100)
+ uPct = (size_t)(((double)uPct + 0.5) * ((double)cchWidth / 100.0));
+ return uPct;
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFINFOARGVINT}
+ */
+static DECLCALLBACK(void) tmR3InfoCpuLoad(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
+{
+ char szTmp[1024];
+
+ /*
+ * Parse arguments.
+ */
+ PTMCPULOADSTATE pState = &pVM->tm.s.CpuLoad;
+ VMCPUID idCpu = 0;
+ bool fAllCpus = true;
+ bool fExpGraph = true;
+ uint32_t cchWidth = 80;
+ uint32_t cPeriods = RT_ELEMENTS(pState->aHistory);
+ uint32_t cRows = 60;
+
+ static const RTGETOPTDEF s_aOptions[] =
+ {
+ { "all", 'a', RTGETOPT_REQ_NOTHING },
+ { "cpu", 'c', RTGETOPT_REQ_UINT32 },
+ { "periods", 'p', RTGETOPT_REQ_UINT32 },
+ { "rows", 'r', RTGETOPT_REQ_UINT32 },
+ { "uni", 'u', RTGETOPT_REQ_NOTHING },
+ { "uniform", 'u', RTGETOPT_REQ_NOTHING },
+ { "width", 'w', RTGETOPT_REQ_UINT32 },
+ { "exp", 'x', RTGETOPT_REQ_NOTHING },
+ { "exponential", 'x', RTGETOPT_REQ_NOTHING },
+ };
+
+ RTGETOPTSTATE State;
+ int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0, 0 /*fFlags*/);
+ AssertRC(rc);
+
+ RTGETOPTUNION ValueUnion;
+ while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
+ {
+ switch (rc)
+ {
+ case 'a':
+ pState = &pVM->apCpusR3[0]->tm.s.CpuLoad;
+ idCpu = 0;
+ fAllCpus = true;
+ break;
+ case 'c':
+ if (ValueUnion.u32 < pVM->cCpus)
+ {
+ pState = &pVM->apCpusR3[ValueUnion.u32]->tm.s.CpuLoad;
+ idCpu = ValueUnion.u32;
+ }
+ else
+ {
+ pState = &pVM->tm.s.CpuLoad;
+ idCpu = VMCPUID_ALL;
+ }
+ fAllCpus = false;
+ break;
+ case 'p':
+ cPeriods = RT_MIN(RT_MAX(ValueUnion.u32, 1), RT_ELEMENTS(pState->aHistory));
+ break;
+ case 'r':
+ cRows = RT_MIN(RT_MAX(ValueUnion.u32, 5), RT_ELEMENTS(pState->aHistory));
+ break;
+ case 'w':
+ cchWidth = RT_MIN(RT_MAX(ValueUnion.u32, 10), sizeof(szTmp) - 32);
+ break;
+ case 'x':
+ fExpGraph = true;
+ break;
+ case 'u':
+ fExpGraph = false;
+ break;
+ case 'h':
+ pHlp->pfnPrintf(pHlp,
+ "Usage: cpuload [parameters]\n"
+ " all, -a\n"
+ " Show statistics for all CPUs. (default)\n"
+ " cpu=id, -c id\n"
+ " Show statistics for the specified CPU ID. Show combined stats if out of range.\n"
+ " periods=count, -p count\n"
+ " Number of periods to show. Default: all\n"
+ " rows=count, -r count\n"
+ " Number of rows in the graphs. Default: 60\n"
+ " width=count, -w count\n"
+ " Core graph width in characters. Default: 80\n"
+ " exp, exponential, -e\n"
+ " Do 1:1 for more recent half / 30 seconds of the graph, combine the\n"
+ " rest into increasinly larger chunks. Default.\n"
+ " uniform, uni, -u\n"
+ " Combine periods into rows in a uniform manner for the whole graph.\n");
+ return;
+ default:
+ pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
+ return;
+ }
+ }
+
+ /*
+ * Do the job.
+ */
+ for (;;)
+ {
+ uint32_t const cMaxPeriods = pState->cHistoryEntries;
+ if (cPeriods > cMaxPeriods)
+ cPeriods = cMaxPeriods;
+ if (cPeriods > 0)
+ {
+ if (fAllCpus)
+ {
+ if (idCpu > 0)
+ pHlp->pfnPrintf(pHlp, "\n");
+ pHlp->pfnPrintf(pHlp, " CPU load for virtual CPU %#04x\n"
+ " -------------------------------\n", idCpu);
+ }
+
+ /*
+ * Figure out the number of periods per chunk. We can either do this in a
+ * linear fashion or an exponential fashion that compresses old history more.
+ */
+ size_t cPerRowDecrement = 0;
+ size_t cPeriodsPerRow = 1;
+ if (cRows < cPeriods)
+ {
+ if (!fExpGraph)
+ cPeriodsPerRow = (cPeriods + cRows / 2) / cRows;
+ else
+ {
+ /* The last 30 seconds or half of the rows are 1:1, the rest is in
+ increasing period counts. The code is a little simplistic but seems
+ to do the job most of the time, which is all there is time for now. */
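+ /* Illustrative example (made-up numbers): with cRows=60 and cPeriods=250,
+ the 30 most recent periods map 1:1 to rows, while the remaining 220
+ periods are spread over the other 30 rows in chunks that shrink by
+ roughly one period per row towards the present. */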
+ size_t cPeriodsOneToOne = RT_MIN(30, cRows / 2);
+ size_t cRestRows = cRows - cPeriodsOneToOne;
+ size_t cRestPeriods = cPeriods - cPeriodsOneToOne;
+
+ size_t cPeriodsInWindow = 0;
+ for (cPeriodsPerRow = 0; cPeriodsPerRow <= cRestRows && cPeriodsInWindow < cRestPeriods; cPeriodsPerRow++)
+ cPeriodsInWindow += cPeriodsPerRow + 1;
+
+ size_t iLower = 1;
+ while (cPeriodsInWindow < cRestPeriods)
+ {
+ cPeriodsPerRow++;
+ cPeriodsInWindow += cPeriodsPerRow;
+ cPeriodsInWindow -= iLower;
+ iLower++;
+ }
+
+ cPerRowDecrement = 1;
+ }
+ }
+
+ /*
+ * Do the work.
+ */
+ size_t cPctExecuting = 0;
+ size_t cPctOther = 0;
+ size_t cPeriodsAccumulated = 0;
+
+ size_t cRowsLeft = cRows;
+ size_t iHistory = (pState->idxHistory - cPeriods) % RT_ELEMENTS(pState->aHistory);
+ while (cPeriods-- > 0)
+ {
+ iHistory++;
+ if (iHistory >= RT_ELEMENTS(pState->aHistory))
+ iHistory = 0;
+
+ cPctExecuting += pState->aHistory[iHistory].cPctExecuting;
+ cPctOther += pState->aHistory[iHistory].cPctOther;
+ cPeriodsAccumulated += 1;
+ if ( cPeriodsAccumulated >= cPeriodsPerRow
+ || cPeriods < cRowsLeft)
+ {
+ /*
+ * Format and output the line.
+ */
+ size_t offTmp = 0;
+ size_t i = tmR3InfoCpuLoadAdjustWidth(cPctExecuting / cPeriodsAccumulated, cchWidth);
+ while (i-- > 0)
+ szTmp[offTmp++] = '#';
+ i = tmR3InfoCpuLoadAdjustWidth(cPctOther / cPeriodsAccumulated, cchWidth);
+ while (i-- > 0)
+ szTmp[offTmp++] = 'O';
+ szTmp[offTmp] = '\0';
+
+ cRowsLeft--;
+ pHlp->pfnPrintf(pHlp, "%3zus: %s\n", cPeriods + cPeriodsAccumulated / 2, szTmp);
+
+ /* Reset the state: */
+ cPctExecuting = 0;
+ cPctOther = 0;
+ cPeriodsAccumulated = 0;
+ if (cPeriodsPerRow > cPerRowDecrement)
+ cPeriodsPerRow -= cPerRowDecrement;
+ }
+ }
+ pHlp->pfnPrintf(pHlp, " (#=guest, O=VMM overhead) idCpu=%#x\n", idCpu);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, "No load data.\n");
+
+ /*
+ * Next CPU if we're displaying all of them.
+ */
+ if (!fAllCpus)
+ break;
+ idCpu++;
+ if (idCpu >= pVM->cCpus)
+ break;
+ pState = &pVM->apCpusR3[idCpu]->tm.s.CpuLoad;
+ }
+}
+
+
+/**
+ * Gets the descriptive TM TSC mode name given the enum value.
+ *
+ * @returns The name.
+ * @param enmMode The mode to name.
+ */
+static const char *tmR3GetTSCModeNameEx(TMTSCMODE enmMode)
+{
+ switch (enmMode)
+ {
+ case TMTSCMODE_REAL_TSC_OFFSET: return "RealTSCOffset";
+ case TMTSCMODE_VIRT_TSC_EMULATED: return "VirtTSCEmulated";
+ case TMTSCMODE_DYNAMIC: return "Dynamic";
+ case TMTSCMODE_NATIVE_API: return "NativeApi";
+ default: return "???";
+ }
+}
+
+
+/**
+ * Gets the descriptive TM TSC mode name.
+ *
+ * @returns The name.
+ * @param pVM The cross context VM structure.
+ */
+static const char *tmR3GetTSCModeName(PVM pVM)
+{
+ Assert(pVM);
+ return tmR3GetTSCModeNameEx(pVM->tm.s.enmTSCMode);
+}
+
diff --git a/src/VBox/VMM/VMMR3/TRPM.cpp b/src/VBox/VMM/VMMR3/TRPM.cpp
new file mode 100644
index 00000000..ea4b87e3
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/TRPM.cpp
@@ -0,0 +1,471 @@
+/* $Id: TRPM.cpp $ */
+/** @file
+ * TRPM - The Trap Monitor.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_trpm TRPM - The Trap Monitor
+ *
+ * The Trap Monitor (TRPM) is responsible for all trap and interrupt handling in
+ * the VMM. It plays a major role in raw-mode execution and a lesser one in the
+ * hardware assisted mode.
+ *
+ * Note first, the following will use trap as a collective term for faults,
+ * aborts and traps.
+ *
+ * @see grp_trpm
+ *
+ *
+ * @section sec_trpm_rc Raw-Mode Context
+ *
+ * When executing in the raw-mode context, TRPM will be managing the IDT and
+ * processing all traps and interrupts. It will also monitor the guest IDT
+ * because CSAM wishes to know about changes to it (trap/interrupt/syscall
+ * handler patching) and TRPM needs to keep the \#BP gate in sync (ring-3
+ * considerations). See TRPMR3SyncIDT and CSAMR3CheckGates.
+ *
+ * External interrupts will be forwarded to the host context by the quickest
+ * possible route where they will be reasserted. The other events will be
+ * categorized into virtualization traps, genuine guest traps and hypervisor
+ * traps. The latter group may be recoverable depending on when they happen and
+ * whether there is a handler for them, otherwise they will cause a guru meditation.
+ *
+ * TRPM distinguishes between the first two (virt and guest traps) and the
+ * latter (hyper) by checking the CPL of the trapping code: if CPL == 0 then
+ * it's a hyper trap, otherwise it's a virt/guest trap. There are three trap
+ * dispatcher tables, one ad-hoc for one time traps registered via
+ * TRPMGCSetTempHandler(), one for hyper traps and one for virt/guest traps.
+ * The latter two live in TRPMGCHandlersA.asm, the former in the VM structure.
+ *
+ * The raw-mode context trap handlers, found in TRPMGCHandlers.cpp for the most
+ * part, will call up the other VMM sub-systems depending on what they think
+ * happened. The two most busy traps are page faults (\#PF) and general
+ * protection fault/trap (\#GP).
+ *
+ * Before resuming guest code after having taken a virtualization trap or
+ * injected a guest trap, TRPM will check for pending forced actions and
+ * every now and again let TM check for timed-out timers. This allows code that
+ * is being executed as part of virtualization traps to signal ring-3 exits,
+ * page table resyncs and similar without necessarily using the status code. It
+ * also makes sure we're more responsive to timers and requests from other
+ * threads (necessarily running on some different core/cpu in most cases).
+ *
+ *
+ * @section sec_trpm_all All Contexts
+ *
+ * TRPM will also dispatch / inject interrupts and traps to the guest, both when
+ * in raw-mode and when in hardware assisted mode. See TRPMInject().
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_TRPM
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/iem.h>
+#include "TRPMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/hm.h>
+
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <iprt/alloc.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** TRPM saved state version. */
+#define TRPM_SAVED_STATE_VERSION 10
+#define TRPM_SAVED_STATE_VERSION_PRE_ICEBP 9 /* INT1/ICEBP support bumped the version */
+#define TRPM_SAVED_STATE_VERSION_UNI 8 /* SMP support bumped the version */
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static DECLCALLBACK(void) trpmR3InfoEvent(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+/**
+ * Initializes the Trap Manager
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) TRPMR3Init(PVM pVM)
+{
+ LogFlow(("TRPMR3Init\n"));
+ int rc;
+
+ /*
+ * Assert sizes and alignments.
+ */
+ AssertRelease(sizeof(pVM->trpm.s) <= sizeof(pVM->trpm.padding));
+
+ /*
+ * Initialize members.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ pVCpu->trpm.s.uActiveVector = ~0U;
+ }
+
+ /*
+ * Register the saved state data unit.
+ */
+ rc = SSMR3RegisterInternal(pVM, "trpm", 1, TRPM_SAVED_STATE_VERSION, sizeof(TRPM),
+ NULL, NULL, NULL,
+ NULL, trpmR3Save, NULL,
+ NULL, trpmR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Register info handlers.
+ */
+ rc = DBGFR3InfoRegisterInternalEx(pVM, "trpmevent", "Dumps TRPM pending event.", trpmR3InfoEvent,
+ DBGFINFO_FLAGS_ALL_EMTS);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Statistics.
+ */
+ for (unsigned i = 0; i < 256; i++)
+ STAMR3RegisterF(pVM, &pVM->trpm.s.aStatForwardedIRQ[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Forwarded interrupts.", i < 0x20 ? "/TRPM/ForwardRaw/TRAP/%02X" : "/TRPM/ForwardRaw/IRQ/%02X", i);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this component.
+ *
+ * This function will be called at init and whenever the VMM needs
+ * to relocate itself inside the GC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3DECL(void) TRPMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ RT_NOREF(pVM, offDelta);
+}
+
+
+/**
+ * Terminates the Trap Manager
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(int) TRPMR3Term(PVM pVM)
+{
+ NOREF(pVM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used by TRPMR3Reset and CPU hot plugging.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3DECL(void) TRPMR3ResetCpu(PVMCPU pVCpu)
+{
+ pVCpu->trpm.s.uActiveVector = ~0U;
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the TRPM component this means that any IDT write monitors
+ * need to be removed, any pending trap cleared, and the IDT reset.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(void) TRPMR3Reset(PVM pVM)
+{
+ /*
+ * Reinitialize other members, calling the relocator to get things right.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ TRPMR3ResetCpu(pVM->apCpusR3[i]);
+ TRPMR3Relocate(pVM, 0);
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("trpmR3Save:\n"));
+
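+ /* Note! The field order below must be kept in sync with trpmR3Load. */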
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PCTRPMCPU pTrpmCpu = &pVM->apCpusR3[i]->trpm.s;
+ SSMR3PutUInt(pSSM, pTrpmCpu->uActiveVector);
+ SSMR3PutUInt(pSSM, pTrpmCpu->enmActiveType);
+ SSMR3PutU32(pSSM, pTrpmCpu->uActiveErrorCode);
+ SSMR3PutGCUIntPtr(pSSM, pTrpmCpu->uActiveCR2);
+ SSMR3PutU8(pSSM, pTrpmCpu->cbInstr);
+ SSMR3PutBool(pSSM, pTrpmCpu->fIcebp);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ LogFlow(("trpmR3Load:\n"));
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != TRPM_SAVED_STATE_VERSION
+ && uVersion != TRPM_SAVED_STATE_VERSION_PRE_ICEBP
+ && uVersion != TRPM_SAVED_STATE_VERSION_UNI)
+ {
+ AssertMsgFailed(("trpmR3Load: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ if (uVersion == TRPM_SAVED_STATE_VERSION)
+ {
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PTRPMCPU pTrpmCpu = &pVM->apCpusR3[i]->trpm.s;
+ SSMR3GetU32(pSSM, &pTrpmCpu->uActiveVector);
+ SSM_GET_ENUM32_RET(pSSM, pTrpmCpu->enmActiveType, TRPMEVENT);
+ SSMR3GetU32(pSSM, &pTrpmCpu->uActiveErrorCode);
+ SSMR3GetGCUIntPtr(pSSM, &pTrpmCpu->uActiveCR2);
+ SSMR3GetU8(pSSM, &pTrpmCpu->cbInstr);
+ SSMR3GetBool(pSSM, &pTrpmCpu->fIcebp);
+ }
+ }
+ else
+ {
+ /*
+ * Active and saved traps.
+ */
+ if (uVersion == TRPM_SAVED_STATE_VERSION_PRE_ICEBP)
+ {
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ RTGCUINT GCUIntErrCode;
+ PTRPMCPU pTrpmCpu = &pVM->apCpusR3[i]->trpm.s;
+ SSMR3GetU32(pSSM, &pTrpmCpu->uActiveVector);
+ SSM_GET_ENUM32_RET(pSSM, pTrpmCpu->enmActiveType, TRPMEVENT);
+ SSMR3GetGCUInt(pSSM, &GCUIntErrCode);
+ SSMR3GetGCUIntPtr(pSSM, &pTrpmCpu->uActiveCR2);
+ SSMR3Skip(pSSM, sizeof(RTGCUINT)); /* uSavedVector - No longer used. */
+ SSMR3Skip(pSSM, sizeof(RTUINT)); /* enmSavedType - No longer used. */
+ SSMR3Skip(pSSM, sizeof(RTGCUINT)); /* uSavedErrorCode - No longer used. */
+ SSMR3Skip(pSSM, sizeof(RTGCUINTPTR)); /* uSavedCR2 - No longer used. */
+ SSMR3Skip(pSSM, sizeof(RTGCUINT)); /* uPrevVector - No longer used. */
+
+ /*
+ * We lose the high 32 bits here (if RTGCUINT is 64-bit) by storing the
+ * active error code as 32 bits. However, for error codes even 16 bits
+ * should be sufficient. Despite this, we decided to use and keep it at
+ * 32 bits since VMX/SVM define these as 32-bit in their event fields and
+ * converting to/from those events is safer this way.
+ */
+ pTrpmCpu->uActiveErrorCode = GCUIntErrCode;
+ }
+ }
+ else
+ {
+ RTGCUINT GCUIntErrCode;
+ PTRPMCPU pTrpmCpu = &pVM->apCpusR3[0]->trpm.s;
+ SSMR3GetU32(pSSM, &pTrpmCpu->uActiveVector);
+ SSM_GET_ENUM32_RET(pSSM, pTrpmCpu->enmActiveType, TRPMEVENT);
+ SSMR3GetGCUInt(pSSM, &GCUIntErrCode);
+ SSMR3GetGCUIntPtr(pSSM, &pTrpmCpu->uActiveCR2);
+ pTrpmCpu->uActiveErrorCode = GCUIntErrCode;
+ }
+
+ /*
+ * Skip rest of TRPM saved-state unit involving IDT and trampoline gates.
+ * With the removal of raw-mode support, we no longer need these.
+ */
+ SSMR3SkipToEndOfUnit(pSSM);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Injects an event (such as an external IRQ or trap).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param enmEvent The TRPM event type.
+ * @param pfInjected Where to store whether the event was injected or not.
+ */
+VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent, bool *pfInjected)
+{
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ Assert(!CPUMIsInInterruptShadow(pCtx));
+ Assert(pfInjected);
+ *pfInjected = false;
+
+ /* Currently only useful for external hardware interrupts. */
+ Assert(enmEvent == TRPM_HARDWARE_INT);
+
+ RT_NOREF3(pVM, enmEvent, pCtx);
+ uint8_t u8Interrupt = 0;
+ int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+ Log(("TRPMR3InjectEvent: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
+ if (RT_SUCCESS(rc))
+ {
+ *pfInjected = true;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if ( CPUMIsGuestInVmxNonRootMode(pCtx)
+ && CPUMIsGuestVmxInterceptEvents(pCtx)
+ && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
+ {
+ VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
+ Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ return VBOXSTRICTRC_VAL(rcStrict);
+ }
+#endif
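+ /* With NEM on Windows the interrupt is handed straight to IEM below; in
+ all other setups it is asserted with TRPM and picked up when guest
+ execution resumes. */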
+#ifdef RT_OS_WINDOWS
+ if (!VM_IS_NEM_ENABLED(pVM))
+ {
+#endif
+ rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
+ AssertRC(rc);
+#ifdef RT_OS_WINDOWS
+ }
+ else
+ {
+ VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8Interrupt, enmEvent, 0, 0, 0);
+ /** @todo NSTVMX: NSTSVM: We don't support nested VMX or nested SVM with NEM yet.
+ * If so we should handle VINF_SVM_VMEXIT and VINF_VMX_VMEXIT codes here. */
+ if (rcStrict != VINF_SUCCESS)
+ return VBOXSTRICTRC_TODO(rcStrict);
+ }
+#endif
+ STAM_REL_COUNTER_INC(&pVM->trpm.s.aStatForwardedIRQ[u8Interrupt]);
+ }
+ else
+ {
+ /* Can happen if the interrupt is masked by TPR or APIC is disabled. */
+ AssertMsg(rc == VERR_APIC_INTR_MASKED_BY_TPR || rc == VERR_NO_DATA, ("PDMGetInterrupt failed. rc=%Rrc\n", rc));
+ }
+ return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
+ : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
+ : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
+}
+
+
+/**
+ * Displays the pending TRPM event.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The info helper functions.
+ * @param pszArgs Arguments, ignored.
+ */
+static DECLCALLBACK(void) trpmR3InfoEvent(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ NOREF(pszArgs);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ pVCpu = pVM->apCpusR3[0];
+
+ uint8_t uVector;
+ uint8_t cbInstr;
+ TRPMEVENT enmTrapEvent;
+ uint32_t uErrorCode;
+ RTGCUINTPTR uCR2;
+ bool fIcebp;
+ int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrapEvent, &uErrorCode, &uCR2, &cbInstr, &fIcebp);
+ if (RT_SUCCESS(rc))
+ {
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: TRPM event\n", pVCpu->idCpu);
+ static const char * const s_apszTrpmEventType[] =
+ {
+ "Trap",
+ "Hardware Int",
+ "Software Int"
+ };
+ if (RT_LIKELY((size_t)enmTrapEvent < RT_ELEMENTS(s_apszTrpmEventType)))
+ {
+ pHlp->pfnPrintf(pHlp, " Type = %s\n", s_apszTrpmEventType[enmTrapEvent]);
+ pHlp->pfnPrintf(pHlp, " uVector = %#x\n", uVector);
+ pHlp->pfnPrintf(pHlp, " uErrorCode = %#x\n", uErrorCode);
+ pHlp->pfnPrintf(pHlp, " uCR2 = %#RGp\n", uCR2);
+ pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", cbInstr);
+ pHlp->pfnPrintf(pHlp, " fIcebp = %RTbool\n", fIcebp);
+ }
+ else
+ pHlp->pfnPrintf(pHlp, " Type = %#x (Invalid!)\n", enmTrapEvent);
+ }
+ else if (rc == VERR_TRPM_NO_ACTIVE_TRAP)
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: TRPM event (None)\n", pVCpu->idCpu);
+ else
+ pHlp->pfnPrintf(pHlp, "CPU[%u]: TRPM event - Query failed! rc=%Rrc\n", pVCpu->idCpu, rc);
+}
+
diff --git a/src/VBox/VMM/VMMR3/VM.cpp b/src/VBox/VMM/VMMR3/VM.cpp
new file mode 100644
index 00000000..94c3f46f
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VM.cpp
@@ -0,0 +1,4458 @@
+/* $Id: VM.cpp $ */
+/** @file
+ * VM - Virtual Machine
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_vm VM API
+ *
+ * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
+ * use to create a VMM instance for running a guest in. It also provides
+ * facilities for queuing requests for execution in EMT (serialization purposes
+ * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
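+ *
+ * For instance, a function can be queued for execution on EMT roughly like
+ * this (an illustrative sketch; myWorker is a hypothetical worker function):
+ * @code
+ * int rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)myWorker, 2, pVM, 42);
+ * @endcode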
+ *
+ *
+ * @section sec_vm_design Design Critique / Things To Do
+ *
+ * In hindsight this component is a big design mistake; all this stuff really
+ * belongs in the VMM component. It just seemed like a kind of OK idea at a
+ * time when the VMM bit was kind of vague. 'VM' also happened to be the name
+ * of the per-VM instance structure (see vm.h), so it kind of made sense.
+ * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
+ * ring-3 is some minor functionality and some "routing" services.
+ *
+ * Fixing this is just a matter of some more or less straightforward
+ * refactoring; the question is just when someone will get to it. Moving the EMT
+ * would be a good start.
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VM
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/gvmm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmdev.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/gcm.h>
+#include "VMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/sup.h>
+#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
+# include <VBox/VBoxTpG.h>
+#endif
+#include <VBox/dbg.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/alloca.h>
+#include <iprt/asm.h>
+#include <iprt/env.h>
+#include <iprt/mem.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#ifdef RT_OS_DARWIN
+# include <iprt/system.h>
+#endif
+#include <iprt/time.h>
+#include <iprt/thread.h>
+#include <iprt/uuid.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
+static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
+static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
+static int vmR3InitRing3(PVM pVM, PUVM pUVM);
+static int vmR3InitRing0(PVM pVM);
+static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
+static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
+static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
+static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
+static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
+static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
+static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
+static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
+
+
+/**
+ * Creates a virtual machine by calling the supplied configuration constructor.
+ *
+ * On successful return the VM is created but not powered on; VMR3PowerOn()
+ * should be called to start execution.
+ *
+ * @returns 0 on success.
+ * @returns VBox error code on failure.
+ * @param cCpus Number of virtual CPUs for the new VM.
+ * @param pVmm2UserMethods An optional method table that the VMM can use
+ * to make the user perform various actions, like
+ * for instance state saving.
+ * @param fFlags VMCREATE_F_XXX
+ * @param pfnVMAtError Pointer to callback function for setting VM
+ * errors. This was added as an implicit call to
+ * VMR3AtErrorRegister() since there is no way the
+ * caller can get to the VM handle early enough to
+ * do this on its own.
+ * This is called in the context of an EMT.
+ * @param pvUserVM The user argument passed to pfnVMAtError.
+ * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
+ * This is called in the context of EMT(0).
+ * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
+ * @param ppVM Where to optionally store the 'handle' of the
+ * created VM.
+ * @param ppUVM Where to optionally store the user 'handle' of
+ * the created VM, this includes one reference as
+ * if VMR3RetainUVM() was called. The caller
+ * *MUST* remember to pass the returned value to
+ * VMR3ReleaseUVM() once done with the handle.
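+ *
+ * @remarks A minimal calling sketch (illustrative only; myCfgmConstructor is
+ * a hypothetical callback and error handling is elided):
+ * @code
+ * PUVM pUVM = NULL;
+ * int rc = VMR3Create(1, NULL, 0, NULL, NULL,
+ * myCfgmConstructor, NULL, NULL, &pUVM);
+ * if (RT_SUCCESS(rc))
+ * {
+ * // ... run the VM, then release the handle:
+ * VMR3ReleaseUVM(pUVM);
+ * }
+ * @endcode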
+ */
+VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, uint64_t fFlags,
+ PFNVMATERROR pfnVMAtError, void *pvUserVM,
+ PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
+ PVM *ppVM, PUVM *ppUVM)
+{
+ LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p fFlags=%#RX64 pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
+ cCpus, pVmm2UserMethods, fFlags, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));
+
+ if (pVmm2UserMethods)
+ {
+ AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
+ AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
+ AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
+ AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
+ }
+ AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
+ AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(fFlags & ~VMCREATE_F_DRIVERLESS), ("%#RX64\n", fFlags), VERR_INVALID_FLAGS);
+
+ /*
+ * Validate input.
+ */
+ AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
+
+ /*
+ * Create the UVM so we can register the at-error callback
+ * and consolidate a bit of cleanup code.
+ */
+ PUVM pUVM = NULL; /* shuts up gcc */
+ int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (pfnVMAtError)
+ rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the support library creating the session for this VM.
+ */
+ if (fFlags & VMCREATE_F_DRIVERLESS)
+ rc = SUPR3InitEx(SUPR3INIT_F_DRIVERLESS | SUPR3INIT_F_DRIVERLESS_IEM_ALLOWED, &pUVM->vm.s.pSession);
+ else
+ rc = SUPR3Init(&pUVM->vm.s.pSession);
+ if (RT_SUCCESS(rc))
+ {
+#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
+ /* Now that we've opened the device, we can register trace probes. */
+ static bool s_fRegisteredProbes = false;
+ if (!SUPR3IsDriverless() && ASMAtomicCmpXchgBool(&s_fRegisteredProbes, true, false))
+ SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
+ SUP_TRACER_UMOD_FLAGS_SHARED);
+#endif
+
+ /*
+ * Call vmR3CreateU in the EMT thread and wait for it to finish.
+ *
+ * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
+ * submitting a request to a specific VCPU without a pVM. So, to make
+ * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
+ * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
+ */
+ PVMREQ pReq;
+ rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
+ (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Success!
+ */
+ if (ppVM)
+ *ppVM = pUVM->pVM;
+ if (ppUVM)
+ {
+ VMR3RetainUVM(pUVM);
+ *ppUVM = pUVM;
+ }
+ LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
+
+ /*
+ * An error occurred during VM creation. Set the error message directly
+ * using the initial callback, as the callback list might not exist yet.
+ */
+ const char *pszError;
+ switch (rc)
+ {
+ case VERR_VMX_IN_VMX_ROOT_MODE:
+#ifdef RT_OS_LINUX
+ pszError = N_("VirtualBox can't operate in VMX root mode. "
+ "Please disable the KVM kernel extension, recompile your kernel and reboot");
+#else
+ pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
+#endif
+ break;
+
+#ifndef RT_OS_DARWIN
+ case VERR_HM_CONFIG_MISMATCH:
+ pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
+ "This hardware extension is required by the VM configuration");
+ break;
+#endif
+
+ case VERR_SVM_IN_USE:
+#ifdef RT_OS_LINUX
+ pszError = N_("VirtualBox can't enable the AMD-V extension. "
+ "Please disable the KVM kernel extension, recompile your kernel and reboot");
+#else
+ pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
+#endif
+ break;
+
+#ifdef RT_OS_LINUX
+ case VERR_SUPDRV_COMPONENT_NOT_FOUND:
+ pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
+ "that VirtualBox is correctly installed, and if you are using EFI "
+ "Secure Boot that the modules are signed if necessary in the right "
+ "way for your host system. Then try to recompile and reload the "
+ "kernel modules by executing "
+ "'/sbin/vboxconfig' as root");
+ break;
+#endif
+
+ case VERR_RAW_MODE_INVALID_SMP:
+ pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
+ "VirtualBox requires this hardware extension to emulate more than one "
+ "guest CPU");
+ break;
+
+ case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
+#ifdef RT_OS_LINUX
+ pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
+ "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
+ "the VT-x extension in the VM settings. Note that without VT-x you have "
+ "to reduce the number of guest CPUs to one");
+#else
+ pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
+ "extension. Either upgrade your kernel or disable the VT-x extension in the "
+ "VM settings. Note that without VT-x you have to reduce the number of guest "
+ "CPUs to one");
+#endif
+ break;
+
+ case VERR_PDM_DEVICE_NOT_FOUND:
+ pszError = N_("A virtual device is configured in the VM settings but the device "
+ "implementation is missing.\n"
+ "A possible reason for this error is a missing extension pack. Note "
+ "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
+ "support and remote desktop) are only available from an 'extension "
+ "pack' which must be downloaded and installed separately");
+ break;
+
+ case VERR_PCI_PASSTHROUGH_NO_HM:
+ pszError = N_("PCI passthrough requires VT-x/AMD-V");
+ break;
+
+ case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
+ pszError = N_("PCI passthrough requires nested paging");
+ break;
+
+ default:
+ if (VMR3GetErrorCount(pUVM) == 0)
+ {
+ pszError = (char *)alloca(1024);
+ RTErrQueryMsgFull(rc, (char *)pszError, 1024, false /*fFailIfUnknown*/);
+ }
+ else
+ pszError = NULL; /* already set. */
+ break;
+ }
+ if (pszError)
+ vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
+ }
+ else
+ {
+ /*
+ * An error occurred at support library initialization time (before the
+ * VM could be created). Set the error message directly using the
+ * initial callback, as the callback list doesn't exist yet.
+ */
+ const char *pszError;
+ switch (rc)
+ {
+ case VERR_VM_DRIVER_LOAD_ERROR:
+#ifdef RT_OS_LINUX
+ pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
+ "was either not loaded, /dev/vboxdrv is not set up properly, "
+ "or you are using EFI Secure Boot and the module is not signed "
+ "in the right way for your system. If necessary, try setting up "
+ "the kernel module again by executing "
+ "'/sbin/vboxconfig' as root");
+#else
+ pszError = N_("VirtualBox kernel driver not loaded");
+#endif
+ break;
+ case VERR_VM_DRIVER_OPEN_ERROR:
+ pszError = N_("VirtualBox kernel driver cannot be opened");
+ break;
+ case VERR_VM_DRIVER_NOT_ACCESSIBLE:
+#ifdef VBOX_WITH_HARDENING
+ /* This should only happen if the executable wasn't hardened - bad code/build. */
+ pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
+ "Re-install VirtualBox. If you are building it yourself, you "
+ "should make sure it installed correctly and that the setuid "
+ "bit is set on the executables calling VMR3Create.");
+#else
+ /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
+# if defined(RT_OS_DARWIN)
+ pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
+ "If you have built VirtualBox yourself, make sure that you do not "
+ "have the vboxdrv KEXT from a different build or installation loaded.");
+# elif defined(RT_OS_LINUX)
+ pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
+ "If you have built VirtualBox yourself, make sure that you do "
+ "not have the vboxdrv kernel module from a different build or "
+ "installation loaded. Also, make sure the vboxdrv udev rule gives "
+ "you the permission you need to access the device.");
+# elif defined(RT_OS_WINDOWS)
+ pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
+# else /* solaris, freebsd, ++. */
+ pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
+ "If you have built VirtualBox yourself, make sure that you do "
+ "not have the vboxdrv kernel module from a different install loaded.");
+# endif
+#endif
+ break;
+ case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
+ case VERR_VM_DRIVER_NOT_INSTALLED:
+#ifdef RT_OS_LINUX
+ pszError = N_("VirtualBox kernel driver not Installed. The vboxdrv kernel module "
+ "was either not loaded, /dev/vboxdrv is not set up properly, "
+ "or you are using EFI Secure Boot and the module is not signed "
+ "in the right way for your system. If necessary, try setting up "
+ "the kernel module again by executing "
+ "'/sbin/vboxconfig' as root");
+#else
+ pszError = N_("VirtualBox kernel driver not installed");
+#endif
+ break;
+ case VERR_NO_MEMORY:
+ pszError = N_("VirtualBox support library out of memory");
+ break;
+ case VERR_VERSION_MISMATCH:
+ case VERR_VM_DRIVER_VERSION_MISMATCH:
+ pszError = N_("The VirtualBox support driver which is running is from a different "
+ "version of VirtualBox. You can correct this by stopping all "
+ "running instances of VirtualBox and reinstalling the software.");
+ break;
+ default:
+ pszError = N_("Unknown error initializing kernel driver");
+ AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
+ }
+ vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
+ }
+ }
+
+ /* cleanup */
+ vmR3DestroyUVM(pUVM, 2000);
+ LogFlow(("VMR3Create: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Creates the UVM.
+ *
+ * Note that this does not initialize the support library, even though
+ * vmR3DestroyUVM will terminate it.
+ *
+ * @returns VBox status code.
+ * @param cCpus Number of virtual CPUs
+ * @param pVmm2UserMethods Pointer to the optional VMM -> User method
+ * table.
+ * @param ppUVM Where to store the UVM pointer.
+ */
+static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
+{
+ uint32_t i;
+
+ /*
+ * Create and initialize the UVM.
+ */
+ PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_UOFFSETOF_DYN(UVM, aCpus[cCpus]));
+ AssertReturn(pUVM, VERR_NO_MEMORY);
+ pUVM->u32Magic = UVM_MAGIC;
+ pUVM->cCpus = cCpus;
+ pUVM->pVmm2UserMethods = pVmm2UserMethods;
+
+ AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
+
+ pUVM->vm.s.cUvmRefs = 1;
+ pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
+ pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
+ pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
+
+ pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
+ RTUuidClear(&pUVM->vm.s.Uuid);
+
+ /* Initialize the VMCPU array in the UVM. */
+ for (i = 0; i < cCpus; i++)
+ {
+ pUVM->aCpus[i].pUVM = pUVM;
+ pUVM->aCpus[i].idCpu = i;
+ }
+
+ /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
+ int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
+ AssertRC(rc);
+ if (RT_SUCCESS(rc))
+ {
+ /* Allocate a halt method event semaphore for each VCPU. */
+ for (i = 0; i < cCpus; i++)
+ pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
+ for (i = 0; i < cCpus; i++)
+ {
+ rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
+ */
+ rc = PDMR3InitUVM(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = STAMR3InitUVM(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = MMR3InitUVM(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Start the emulation threads for all VMCPUs.
+ */
+ for (i = 0; i < cCpus; i++)
+ {
+ rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
+ _1M, RTTHREADTYPE_EMULATION,
+ RTTHREADFLAGS_WAITABLE | RTTHREADFLAGS_COM_MTA | RTTHREADFLAGS_NO_SIGNALS,
+ cCpus > 1 ? "EMT-%u" : "EMT", i);
+ if (RT_FAILURE(rc))
+ break;
+
+ pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ *ppUVM = pUVM;
+ return VINF_SUCCESS;
+ }
+
+ /* bail out. */
+ while (i-- > 0)
+ {
+ /** @todo rainy day: terminate the EMTs. */
+ }
+ MMR3TermUVM(pUVM);
+ }
+ STAMR3TermUVM(pUVM);
+ }
+ PDMR3TermUVM(pUVM);
+ }
+ RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
+ }
+ RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
+ }
+ }
+ for (i = 0; i < cCpus; i++)
+ {
+ RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
+ pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
+ }
+ RTTlsFree(pUVM->vm.s.idxTLS);
+ }
+ RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
+ return rc;
+}
+
+
+/**
+ * Creates and initializes the VM.
+ *
+ * @thread EMT
+ */
+static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
+{
+#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
+ /*
+ * Require SSE2 to be present (already checked for in supdrv, so we
+ * shouldn't ever really get here).
+ */
+ if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
+ {
+ LogRel(("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1)));
+ return VERR_UNSUPPORTED_CPU;
+ }
+#endif
+
+
+ /*
+ * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
+ */
+ if (!SUPR3IsDriverless())
+ {
+ int rc = PDMR3LdrLoadVMMR0U(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
+ * bird: what about moving the message down here? Main picks the first message, right? */
+ if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
+ return rc; /* proper error message set later on */
+ return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
+ }
+ }
+
+ /*
+ * Request GVMM to create a new VM for us.
+ */
+ RTR0PTR pVMR0;
+ int rc = GVMMR3CreateVM(pUVM, cCpus, pUVM->vm.s.pSession, &pUVM->pVM, &pVMR0);
+ if (RT_SUCCESS(rc))
+ {
+ PVM pVM = pUVM->pVM;
+ AssertReleaseMsg(RT_VALID_PTR(pVM), ("pVM=%p pVMR0=%p\n", pVM, pVMR0));
+ AssertRelease(pVM->pVMR0ForCall == pVMR0);
+ AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
+ AssertRelease(pVM->cCpus == cCpus);
+ AssertRelease(pVM->uCpuExecutionCap == 100);
+ AssertCompileMemberAlignment(VM, cpum, 64);
+ AssertCompileMemberAlignment(VM, tm, 64);
+
+ Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n", pUVM, pVM, pVMR0, pVM->hSelf, pVM->cCpus));
+
+ /*
+ * Initialize the VM structure and our internal data (VMINT).
+ */
+ pVM->pUVM = pUVM;
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ pVCpu->pUVCpu = &pUVM->aCpus[i];
+ pVCpu->idCpu = i;
+ pVCpu->hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
+ pVCpu->hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
+ Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
+ /* hNativeThreadR0 is initialized on EMT registration. */
+ pUVM->aCpus[i].pVCpu = pVCpu;
+ pUVM->aCpus[i].pVM = pVM;
+ }
+
+ /*
+ * Init the configuration.
+ */
+ rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Init the ring-3 components and ring-3 per cpu data, finishing it off
+ * by a relocation round (intermediate context finalization will do this).
+ */
+ rc = vmR3InitRing3(pVM, pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ LogFlow(("Ring-3 init succeeded\n"));
+
+ /*
+ * Init the Ring-0 components.
+ */
+ rc = vmR3InitRing0(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ /* Relocate again, because some switcher fixups depend on R0 init results. */
+ VMR3Relocate(pVM, 0 /* offDelta */);
+
+#ifdef VBOX_WITH_DEBUGGER
+ /*
+ * Init the tcp debugger console if we're building
+ * with debugger support.
+ */
+ void *pvUser = NULL;
+ rc = DBGCIoCreate(pUVM, &pvUser);
+ if ( RT_SUCCESS(rc)
+ || rc == VERR_NET_ADDRESS_IN_USE)
+ {
+ pUVM->vm.s.pvDBGC = pvUser;
+#endif
+ /*
+ * Now we can safely set the VM halt method to default.
+ */
+ rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Set the state and we're done.
+ */
+ vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
+ return VINF_SUCCESS;
+ }
+#ifdef VBOX_WITH_DEBUGGER
+ DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
+ pUVM->vm.s.pvDBGC = NULL;
+ }
+#endif
+ //..
+ }
+ vmR3Destroy(pVM);
+ }
+ }
+ //..
+
+ /* Clean CFGM. */
+ int rc2 = CFGMR3Term(pVM);
+ AssertRC(rc2);
+ }
+
+ /*
+ * Do automatic cleanups while the VM structure is still alive and all
+ * references to it are still working.
+ */
+ PDMR3CritSectBothTerm(pVM);
+
+ /*
+ * Drop all references to VM and the VMCPU structures, then
+ * tell GVMM to destroy the VM.
+ */
+ pUVM->pVM = NULL;
+ for (VMCPUID i = 0; i < pUVM->cCpus; i++)
+ {
+ pUVM->aCpus[i].pVM = NULL;
+ pUVM->aCpus[i].pVCpu = NULL;
+ }
+ Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
+
+ if (pUVM->cCpus > 1)
+ {
+ /* Poke the other EMTs since they may have stale pVM and pVCpu references
+ on the stack (see VMR3WaitU for instance) if they've been awakened after
+ VM creation. */
+ for (VMCPUID i = 1; i < pUVM->cCpus; i++)
+ VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
+ RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
+ }
+
+ int rc2 = GVMMR3DestroyVM(pUVM, pVM);
+ AssertRC(rc2);
+ }
+ else
+ vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
+
+ LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Reads the base configuration from CFGM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUVM The user mode VM structure.
+ * @param cCpus The CPU count given to VMR3Create.
+ */
+static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
+{
+ PCFGMNODE const pRoot = CFGMR3GetRoot(pVM);
+
+ /*
+ * Base EM and HM config properties.
+ */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ pVM->fHMEnabled = true;
+#else /* Other architectures must fall back on IEM for the time being: */
+ pVM->fHMEnabled = false;
+#endif
+
+ /*
+ * Make sure the CPU count in the config data matches.
+ */
+ uint32_t cCPUsCfg;
+ int rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
+ AssertLogRelMsgReturn(cCPUsCfg == cCpus,
+ ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
+ cCPUsCfg, cCpus),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Get the CPU execution cap.
+ */
+ rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
+
+ /*
+ * Get the VM name and UUID.
+ */
+ rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
+
+ rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = VINF_SUCCESS;
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
+
+ rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
+
+ return VINF_SUCCESS;
+}
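+
+
+/* A hedged sketch (not part of the original code): the CFGM keys consumed by
+ vmR3ReadBaseConfig above, as a VMR3Create configuration constructor would
+ typically provide them. Key names come from the queries above; the values,
+ the Uuid variable and the #if 0 wrapper are illustrative only, and the exact
+ PFNCFGMCONSTRUCTOR prototype varies between VirtualBox versions, so just the
+ key/value shape is shown. */
+#if 0
+ PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
+ RTUUID Uuid;
+ RTUuidCreate(&Uuid);
+ CFGMR3InsertInteger(pRoot, "NumCPUs", 2); /* must equal VMR3Create::cCpus */
+ CFGMR3InsertInteger(pRoot, "CpuExecutionCap", 100); /* percent */
+ CFGMR3InsertString(pRoot, "Name", "MyVM");
+ CFGMR3InsertBytes(pRoot, "UUID", &Uuid, sizeof(Uuid));
+ CFGMR3InsertInteger(pRoot, "PowerOffInsteadOfReset", 0); /* bool */
+#endif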
+
+
+/**
+ * Initializes all R3 components of the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUVM The user mode VM structure.
+ */
+static int vmR3InitRing3(PVM pVM, PUVM pUVM)
+{
+ int rc;
+
+ /*
+ * Register the other EMTs with GVM.
+ */
+ for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
+ {
+ rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)GVMMR3RegisterVCpu, 2, pVM, idCpu);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Register statistics.
+ */
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+ {
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
+ AssertRC(rc);
+ }
+
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
+
+ /* Statistics for ring-0 components: */
+ STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbHits, STAMTYPE_COUNTER, "/GMM/ChunkTlbHits", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL hits");
+ STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbMisses, STAMTYPE_COUNTER, "/GMM/ChunkTlbMisses", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL misses");
+
+ /*
+ * Init all R3 components; the order here might be important.
+ * NEM and HM shall be initialized first!
+ */
+ Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
+ rc = NEMR3InitConfig(pVM);
+ if (RT_SUCCESS(rc))
+ rc = HMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ ASMCompilerBarrier(); /* HMR3Init will have modified const member bMainExecutionEngine. */
+ Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
+ || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API
+ || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_IEM);
+ rc = MMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = CPUMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = NEMR3InitAfterCPUM(pVM);
+ if (RT_SUCCESS(rc))
+ rc = PGMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = MMR3InitPaging(pVM);
+ if (RT_SUCCESS(rc))
+ rc = TMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VMMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = SELMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = TRPMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = SSMR3RegisterStub(pVM, "CSAM", 0);
+ if (RT_SUCCESS(rc))
+ {
+ rc = SSMR3RegisterStub(pVM, "PATM", 0);
+ if (RT_SUCCESS(rc))
+ {
+ rc = IOMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = EMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = IEMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = DBGFR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ /* GIM must be init'd before PDM, as gimdevR3Construct()
+ requires the GIM provider to be set up. */
+ rc = GIMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = GCMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = PDMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = PGMR3InitFinalize(pVM);
+ if (RT_SUCCESS(rc))
+ rc = TMR3InitFinalize(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ PGMR3MemSetup(pVM, false /*fAtReset*/);
+ PDMR3MemSetup(pVM, false /*fAtReset*/);
+ }
+ if (RT_SUCCESS(rc))
+ rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
+ if (RT_SUCCESS(rc))
+ {
+ LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
+ return VINF_SUCCESS;
+ }
+
+ int rc2 = PDMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = GCMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = GIMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = DBGFR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = IEMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = EMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = IOMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ }
+ }
+ int rc2 = TRPMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = SELMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = VMMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = TMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ int rc2 = PGMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ //int rc2 = CPUMR3Term(pVM);
+ //AssertRC(rc2);
+ }
+ /* MMR3Term is not called here because it'll kill the heap. */
+ }
+ int rc2 = HMR3Term(pVM);
+ AssertRC(rc2);
+ }
+ NEMR3Term(pVM);
+
+ LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Initializes all R0 components of the VM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int vmR3InitRing0(PVM pVM)
+{
+ LogFlow(("vmR3InitRing0:\n"));
+
+ /*
+ * Check for FAKE suplib mode.
+ */
+ int rc = VINF_SUCCESS;
+ const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
+ if (!psz || strcmp(psz, "fake"))
+ {
+ /*
+ * Call the VMMR0 component and let it do the init.
+ */
+ rc = VMMR3InitR0(pVM);
+ }
+ else
+ Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
+
+ /*
+ * Do notifications and return.
+ */
+ if (RT_SUCCESS(rc))
+ rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
+ if (RT_SUCCESS(rc))
+ rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
+
+ LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
+ return rc;
+}
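+
+
+/* Illustrative sketch (not original code): per the check above, a test
+ harness that wants vmR3InitRing0 to skip VMMR3InitR0 could set the
+ environment variable before VM creation using the IPRT setter: */
+#if 0
+ RTEnvSet("VBOX_SUPLIB_FAKE", "fake"); /* vmR3InitRing0 then logs and skips ring-0 init */
+#endif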
+
+
+/**
+ * Do init completed notifications.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat What's completed.
+ */
+static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ int rc = VMMR3InitCompleted(pVM, enmWhat);
+ if (RT_SUCCESS(rc))
+ rc = HMR3InitCompleted(pVM, enmWhat);
+ if (RT_SUCCESS(rc))
+ rc = NEMR3InitCompleted(pVM, enmWhat);
+ if (RT_SUCCESS(rc))
+ rc = PGMR3InitCompleted(pVM, enmWhat);
+ if (RT_SUCCESS(rc))
+ rc = CPUMR3InitCompleted(pVM, enmWhat);
+ if (RT_SUCCESS(rc))
+ rc = EMR3InitCompleted(pVM, enmWhat);
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ {
+ if (RT_SUCCESS(rc))
+ rc = SSMR3RegisterStub(pVM, "rem", 1);
+ }
+ if (RT_SUCCESS(rc))
+ rc = PDMR3InitCompleted(pVM, enmWhat);
+
+ /* IOM *must* come after PDM, as devices (e.g. DevPcArch) may register some final
+ handlers in their init completion method. */
+ if (RT_SUCCESS(rc))
+ rc = IOMR3InitCompleted(pVM, enmWhat);
+ return rc;
+}
+
+
+/**
+ * Calls the relocation functions for all VMM components so they can update
+ * any GC pointers. When this function is called all the basic VM members
+ * have been updated and the actual memory relocation has been done
+ * by the PGM/MM.
+ *
+ * This is used both on init and on runtime relocations.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta Relocation delta relative to old location.
+ */
+VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
+
+ /*
+ * The order here is very important!
+ */
+ PGMR3Relocate(pVM, offDelta);
+ PDMR3LdrRelocateU(pVM->pUVM, offDelta);
+ PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
+ CPUMR3Relocate(pVM);
+ HMR3Relocate(pVM);
+ SELMR3Relocate(pVM);
+ VMMR3Relocate(pVM, offDelta);
+ SELMR3Relocate(pVM); /* !hack! fix stack! */
+ TRPMR3Relocate(pVM, offDelta);
+ IOMR3Relocate(pVM, offDelta);
+ EMR3Relocate(pVM);
+ TMR3Relocate(pVM, offDelta);
+ IEMR3Relocate(pVM);
+ DBGFR3Relocate(pVM, offDelta);
+ PDMR3Relocate(pVM, offDelta);
+ GIMR3Relocate(pVM, offDelta);
+ GCMR3Relocate(pVM, offDelta);
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3PowerOn.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
+ * code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Ignored.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+ Assert(!pvUser); NOREF(pvUser);
+
+ /*
+ * The first thread thru here tries to change the state. We shouldn't be
+ * called again if this fails.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+ /*
+ * All EMTs change their state to started.
+ */
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+
+ /*
+ * EMT(0) is last thru here and it will make the notification calls
+ * and advance the state.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ PDMR3PowerOn(pVM);
+ vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Powers on the virtual machine.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The VM to power on.
+ *
+ * @thread Any thread.
+ * @vmstate Created
+ * @vmstateto PoweringOn+Running
+ */
+VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
+{
+ LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Gather all the EMTs to reduce the init TSC drift and keep
+ * the state changing APIs a bit uniform.
+ */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3PowerOn, NULL);
+ LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
+ return rc;
+}
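+
+
+/* Hedged usage sketch (illustrative, not original code): the typical caller
+ sequence around VMR3PowerOn, assuming pUVM came from a successful VMR3Create
+ and the state transitions documented above; error handling is minimal. */
+#if 0
+ int rcPowerOn = VMR3PowerOn(pUVM); /* Created -> PoweringOn -> Running */
+ if (RT_FAILURE(rcPowerOn))
+ LogRel(("VMR3PowerOn failed: %Rrc\n", rcPowerOn));
+#endif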
+
+
+/**
+ * Does the suspend notifications.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT(0)
+ */
+static void vmR3SuspendDoWork(PVM pVM)
+{
+ PDMR3Suspend(pVM);
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3Suspend.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
+ * return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Ignored.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
+ LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
+
+ /*
+ * The first EMT switches the state to suspending. If this fails because
+ * something was racing us in one way or the other, there will be no more
+ * calls and thus the state assertion below is not going to annoy anyone.
+ *
+ * Note! Changes to the state transition here need to be reflected in the
+ * checks in vmR3SetRuntimeErrorCommon!
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
+ VMSTATE_SUSPENDING, VMSTATE_RUNNING,
+ VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->pUVM->vm.s.enmSuspendReason = enmReason;
+ }
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+ /*
+ * EMT(0) does the actual suspending *after* all the other CPUs have
+ * been thru here.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ vmR3SuspendDoWork(pVM);
+
+ int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
+ VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
+ VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
+ if (RT_FAILURE(rc))
+ return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
+ }
+
+ return VINF_EM_SUSPEND;
+}
+
+
+/**
+ * Suspends a running VM.
+ *
+ * @returns VBox status code. When called on EMT, this will be a strict status
+ * code that has to be propagated up the call stack.
+ *
+ * @param pUVM The VM to suspend.
+ * @param enmReason The reason for suspending.
+ *
+ * @thread Any thread.
+ * @vmstate Running or RunningLS
+ * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
+ */
+VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
+{
+ LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
+
+ /*
+ * Gather all the EMTs to make sure there are no races before
+ * changing the VM state.
+ */
+ int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3Suspend, (void *)(uintptr_t)enmReason);
+ LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Retrieves the reason for the most recent suspend.
+ *
+ * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
+ * or the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
+ return pUVM->vm.s.enmSuspendReason;
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3Resume.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
+ * return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Reason.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
+ LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
+
+ /*
+ * The first thread thru here tries to change the state. We shouldn't be
+ * called again if this fails.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->pUVM->vm.s.enmResumeReason = enmReason;
+ }
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+#if 0
+ /*
+ * All EMTs change their state to started.
+ */
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+#endif
+
+ /*
+ * EMT(0) is last thru here and it will make the notification calls
+ * and advance the state.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ PDMR3Resume(pVM);
+ vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
+ pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
+ }
+
+ return VINF_EM_RESUME;
+}
+
+
+/**
+ * Resume VM execution.
+ *
+ * @returns VBox status code. When called on EMT, this will be a strict status
+ * code that has to be propagated up the call stack.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param enmReason The reason we're resuming.
+ *
+ * @thread Any thread.
+ * @vmstate Suspended
+ * @vmstateto Running
+ */
+VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
+{
+ LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
+
+ /*
+ * Gather all the EMTs to make sure there are no races before
+ * changing the VM state.
+ */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3Resume, (void *)(uintptr_t)enmReason);
+ LogFlow(("VMR3Resume: returns %Rrc\n", rc));
+ return rc;
+}
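+
+
+/* Illustrative pairing of VMR3Suspend and VMR3Resume (not original code):
+ a user-initiated pause/unpause using the reason codes validated by the two
+ functions above; error handling is intentionally minimal. */
+#if 0
+ if (RT_SUCCESS(VMR3Suspend(pUVM, VMSUSPENDREASON_USER)))
+ {
+ Assert(VMR3GetSuspendReason(pUVM) == VMSUSPENDREASON_USER);
+ VMR3Resume(pUVM, VMRESUMEREASON_USER);
+ }
+#endif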
+
+
+/**
+ * Retrieves the reason for the most recent resume.
+ *
+ * @returns Resume reason. VMRESUMEREASON_INVALID if no resume has been
+ * done or the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
+ return pUVM->vm.s.enmResumeReason;
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
+ * after the live step has been completed.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
+ * return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+ bool *pfSuspended = (bool *)pvUser;
+
+ /*
+ * The first thread thru here tries to change the state. We shouldn't be
+ * called again if this fails.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1U)
+ {
+ PUVM pUVM = pVM->pUVM;
+ int rc;
+
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+ VMSTATE enmVMState = pVM->enmVMState;
+ switch (enmVMState)
+ {
+ case VMSTATE_RUNNING_LS:
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
+ rc = VINF_SUCCESS;
+ break;
+
+ case VMSTATE_SUSPENDED_EXT_LS:
+ case VMSTATE_SUSPENDED_LS: /* (via reset) */
+ rc = VINF_SUCCESS;
+ break;
+
+ case VMSTATE_DEBUGGING_LS:
+ rc = VERR_TRY_AGAIN;
+ break;
+
+ case VMSTATE_OFF_LS:
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
+ rc = VERR_SSM_LIVE_POWERED_OFF;
+ break;
+
+ case VMSTATE_FATAL_ERROR_LS:
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
+ rc = VERR_SSM_LIVE_FATAL_ERROR;
+ break;
+
+ case VMSTATE_GURU_MEDITATION_LS:
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
+ rc = VERR_SSM_LIVE_GURU_MEDITATION;
+ break;
+
+ case VMSTATE_POWERING_OFF_LS:
+ case VMSTATE_SUSPENDING_EXT_LS:
+ case VMSTATE_RESETTING_LS:
+ default:
+ AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
+ rc = VERR_VM_UNEXPECTED_VM_STATE;
+ break;
+ }
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ if (RT_FAILURE(rc))
+ {
+ LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
+ return rc;
+ }
+ }
+
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+ /*
+ * Only EMT(0) has work to do since it's last thru here.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ vmR3SuspendDoWork(pVM);
+ int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
+ VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
+ if (RT_FAILURE(rc))
+ return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
+
+ *pfSuspended = true;
+ }
+
+ return VINF_EM_SUSPEND;
+}
+
+
+/**
+ * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
+ * SSMR3LiveDoStep1 failure.
+ *
+ * Doing this as a rendezvous operation avoids all annoying transition
+ * states.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
+ * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+ bool *pfSuspended = (bool *)pvUser;
+ NOREF(pVCpu);
+
+ int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
+ VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
+ VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
+ VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
+ VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
+ VMSTATE_SUSPENDED, VMSTATE_SAVING,
+ VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
+ VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
+ VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
+ if (rc == 1)
+ rc = VERR_SSM_LIVE_POWERED_OFF;
+ else if (rc == 2)
+ rc = VERR_SSM_LIVE_FATAL_ERROR;
+ else if (rc == 3)
+ rc = VERR_SSM_LIVE_GURU_MEDITATION;
+ else if (rc == 4)
+ {
+ *pfSuspended = true;
+ rc = VINF_SUCCESS;
+ }
+ else if (rc > 0)
+ rc = VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSSM The handle of saved state operation.
+ *
+ * @thread EMT(0)
+ */
+static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
+ VM_ASSERT_EMT0(pVM);
+
+ /*
+ * Advance the state and mark if VMR3Suspend was called.
+ */
+ int rc = VINF_SUCCESS;
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ if (enmVMState == VMSTATE_SUSPENDED_LS)
+ vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
+ else
+ {
+ if (enmVMState != VMSTATE_SAVING)
+ vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
+ rc = VINF_SSM_LIVE_SUSPENDED;
+ }
+
+ /*
+ * Finish up and release the handle. Careful with the status codes.
+ */
+ int rc2 = SSMR3LiveDoStep2(pSSM);
+ if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
+ rc = rc2;
+
+ rc2 = SSMR3LiveDone(pSSM);
+ if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
+ rc = rc2;
+
+ /*
+ * Advance to the final state and return.
+ */
+ vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
+ Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
+ return rc;
+}
+
+
+/**
+ * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
+ * SSMR3LiveSave.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param cMsMaxDowntime The maximum downtime given as milliseconds.
+ * @param pszFilename The name of the file. NULL if pStreamOps is used.
+ * @param pStreamOps The stream methods. NULL if pszFilename is used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param enmAfter What to do afterwards.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param ppSSM Where to return the saved state handle in case of a
+ * live snapshot scenario.
+ *
+ * @thread EMT
+ */
+static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
+{
+ int rc = VINF_SUCCESS;
+
+ LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
+ pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrNull(pszFilename);
+ AssertPtrNull(pStreamOps);
+ AssertPtr(pVM);
+ Assert( enmAfter == SSMAFTER_DESTROY
+ || enmAfter == SSMAFTER_CONTINUE
+ || enmAfter == SSMAFTER_TELEPORT);
+ AssertPtr(ppSSM);
+ *ppSSM = NULL;
+
+ /*
+ * Change the state and perform/start the saving.
+ */
+ rc = vmR3TrySetState(pVM, "VMR3Save", 2,
+ VMSTATE_SAVING, VMSTATE_SUSPENDED,
+ VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
+ if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
+ {
+ rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
+ vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
+ }
+ else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
+ {
+ if (enmAfter == SSMAFTER_TELEPORT)
+ pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
+ rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
+ enmAfter, pfnProgress, pvProgressUser, ppSSM);
+ /* (We're not subject to cancellation just yet.) */
+ }
+ else
+ Assert(RT_FAILURE(rc));
+ return rc;
+}
+
+
+/**
+ * Common worker for VMR3Save and VMR3Teleport.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param cMsMaxDowntime The maximum downtime given as milliseconds.
+ * @param pszFilename The name of the file. NULL if pStreamOps is used.
+ * @param pStreamOps The stream methods. NULL if pszFilename is used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param enmAfter What to do afterwards.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param pfSuspended Set if we suspended the VM.
+ *
+ * @thread Non-EMT
+ */
+static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
+ const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
+{
+ /*
+ * Request the operation in EMT(0).
+ */
+ PSSMHANDLE pSSM;
+ int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
+ (PFNRT)vmR3Save, 9, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
+ enmAfter, pfnProgress, pvProgressUser, &pSSM);
+ if ( RT_SUCCESS(rc)
+ && pSSM)
+ {
+ /*
+ * Live snapshot.
+ *
+ * The state handling here is kind of tricky; doing it on EMT(0) helps
+ * a bit. See the VMSTATE diagram for details.
+ */
+ rc = SSMR3LiveDoStep1(pSSM);
+ if (RT_SUCCESS(rc))
+ {
+ if (VMR3GetState(pVM) != VMSTATE_SAVING)
+ for (;;)
+ {
+ /* Try to suspend the VM. */
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3LiveDoSuspend, pfSuspended);
+ if (rc != VERR_TRY_AGAIN)
+ break;
+
+ /* Wait for the state to change. */
+ RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
+ }
+ if (RT_SUCCESS(rc))
+ rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
+ else
+ {
+ int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
+ AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
+ }
+ }
+ else
+ {
+ int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
+ AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
+
+ rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
+ if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
+ rc = rc2;
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * Save current VM state.
+ *
+ * Can be used for both saving the state and creating snapshots.
+ *
+ * When called for a VM in the Running state, the saved state is created live
+ * and the VM is only suspended when the final part of the saving is performed.
+ * The VM state will not be restored to Running in this case and it's up to the
+ * caller to call VMR3Resume if this is desirable. (The rationale is that the
+ * caller probably wishes to reconfigure the disks before resuming the VM.)
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The VM which state should be saved.
+ * @param pszFilename The name of the save state file.
+ * @param pStreamOps The stream methods. NULL if pszFilename is used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param fContinueAfterwards Whether to continue execution afterwards or not.
+ * When in doubt, set this to true.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvUser User argument for the progress callback.
+ * @param pfSuspended Set if we suspended the VM.
+ *
+ * @thread Non-EMT.
+ * @vmstate Suspended or Running
+ * @vmstateto Saving+Suspended or
+ * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
+ */
+VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
+ bool *pfSuspended)
+{
+ LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
+ pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
+
+ /*
+ * Validate input.
+ */
+ AssertPtr(pfSuspended);
+ *pfSuspended = false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_OTHER_THREAD(pVM);
+ AssertReturn(pszFilename || pStreamOps, VERR_INVALID_POINTER);
+ AssertReturn( (!pStreamOps && *pszFilename)
+ || pStreamOps,
+ VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
+
+ /*
+ * Join paths with VMR3Teleport.
+ */
+ SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
+ int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
+ pszFilename, pStreamOps, pvStreamOpsUser,
+ enmAfter, pfnProgress, pvUser, pfSuspended);
+ LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
+ return rc;
+}
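+
+
+/* Illustrative sketch (not original code): save to a file and let the VM
+ continue afterwards. The file name is hypothetical; when the save was
+ performed live, *pfSuspended tells the caller a VMR3Resume is needed. */
+#if 0
+ bool fSuspended = false;
+ int rcSave = VMR3Save(pUVM, "/path/to/state.sav", NULL /*pStreamOps*/, NULL /*pvStreamOpsUser*/,
+ true /*fContinueAfterwards*/, NULL /*pfnProgress*/, NULL /*pvUser*/, &fSuspended);
+ if (RT_SUCCESS(rcSave) && fSuspended)
+ rcSave = VMR3Resume(pUVM, VMRESUMEREASON_STATE_SAVED);
+#endif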
+
+
+/**
+ * Teleport the VM (aka live migration).
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The VM which state should be saved.
+ * @param cMsMaxDowntime The maximum downtime given as milliseconds.
+ * @param pStreamOps The stream methods.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param pfSuspended Set if we suspended the VM.
+ *
+ * @thread Non-EMT.
+ * @vmstate Suspended or Running
+ * @vmstateto Saving+Suspended or
+ * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
+ */
+VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
+{
+ LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
+ pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtr(pfSuspended);
+ *pfSuspended = false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_OTHER_THREAD(pVM);
+ AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
+
+ /*
+ * Join paths with VMR3Save.
+ */
+ int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime, NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
+ SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended);
+ LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
+ return rc;
+}
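+
+
+/* Illustrative sketch (not original code): teleporting over a caller-supplied
+ stream. g_TcpStrmOps and pvConn are hypothetical; any PCSSMSTRMOPS
+ implementation (typically one backed by a TCP connection) will do. */
+#if 0
+ bool fSuspended = false;
+ int rcTele = VMR3Teleport(pUVM, 250 /*cMsMaxDowntime*/, &g_TcpStrmOps, pvConn,
+ NULL /*pfnProgress*/, NULL /*pvProgressUser*/, &fSuspended);
+#endif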
+
+
+
+/**
+ * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM Pointer to the VM.
+ * @param pszFilename The name of the file. NULL if pStreamOps is used.
+ * @param pStreamOps The stream methods. NULL if pszFilename is used.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param fTeleporting Indicates whether we're teleporting or not.
+ *
+ * @thread EMT.
+ */
+static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
+{
+ LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
+ pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
+
+ /*
+ * Validate input (paranoia).
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrNull(pszFilename);
+ AssertPtrNull(pStreamOps);
+ AssertPtrNull(pfnProgress);
+
+ /*
+ * Change the state and perform the load.
+ *
+ * Always perform a relocation round afterwards to make sure hypervisor
+ * selectors and such are correct.
+ */
+ int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
+ VMSTATE_LOADING, VMSTATE_CREATED,
+ VMSTATE_LOADING, VMSTATE_SUSPENDED);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
+
+ uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
+ rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
+ if (RT_SUCCESS(rc))
+ {
+ VMR3Relocate(pVM, 0 /*offDelta*/);
+ vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
+ }
+ else
+ {
+ pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
+ vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
+
+ if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
+ rc = VMSetError(pVM, rc, RT_SRC_POS,
+ N_("Unable to restore the virtual machine's saved state from '%s'. "
+ "It may be damaged or from an older version of VirtualBox. "
+ "Please discard the saved state before starting the virtual machine"),
+ pszFilename);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Loads a VM state into a newly created VM or one that is suspended.
+ *
+ * To restore a saved state on VM startup, call this function and then resume
+ * the VM instead of powering it on.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM structure.
+ * @param pszFilename The name of the save state file.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvUser User argument for the progress callback.
+ *
+ * @thread Any thread.
+ * @vmstate Created, Suspended
+ * @vmstateto Loading+Suspended
+ */
+VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
+{
+ LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
+ pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
+
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
+
+ /*
+ * Forward the request to EMT(0). No need to set up a rendezvous here
+ * since there is no execution taking place when this call is allowed.
+ */
+ int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
+ pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/,
+ pfnProgress, pvUser, false /*fTeleporting*/);
+ LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
+ return rc;
+}
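+
+
+/* Illustrative restore-at-startup sketch (not original code), following the
+ doc comment above: load the saved state into a freshly created VM and resume
+ instead of powering on. The file name is hypothetical. */
+#if 0
+ int rcLoad = VMR3LoadFromFile(pUVM, "/path/to/state.sav", NULL /*pfnProgress*/, NULL /*pvUser*/);
+ if (RT_SUCCESS(rcLoad))
+ rcLoad = VMR3Resume(pUVM, VMRESUMEREASON_STATE_RESTORED);
+#endif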
+
+
+/**
+ * VMR3LoadFromFile for arbitrary file streams.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM Pointer to the VM.
+ * @param pStreamOps The stream methods.
+ * @param pvStreamOpsUser The user argument to the stream methods.
+ * @param pfnProgress Progress callback. Optional.
+ * @param pvProgressUser User argument for the progress callback.
+ * @param fTeleporting Flag whether this call is part of a teleportation operation.
+ *
+ * @thread Any thread.
+ * @vmstate Created, Suspended
+ * @vmstateto Loading+Suspended
+ */
+VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+ PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
+{
+ LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
+ pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
+
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
+
+ /*
+ * Forward the request to EMT(0). No need to set up a rendezvous here
+ * since there is no execution taking place when this call is allowed.
+ */
+ int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
+ pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress,
+ pvProgressUser, fTeleporting);
+ LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3PowerOff.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
+ * return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Ignored.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
+ Assert(!pvUser); NOREF(pvUser);
+
+ /*
+ * The first EMT thru here will change the state to PoweringOff.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
+ VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
+ VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
+ VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
+ VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
+ VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
+ VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
+ VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
+ VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
+ VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
+ VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
+ VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
+ if (RT_FAILURE(rc))
+ return rc;
+ if (rc >= 7)
+ SSMR3Cancel(pVM->pUVM);
+ }
+
+ /*
+ * Check the state.
+ */
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
+ || enmVMState == VMSTATE_POWERING_OFF_LS,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_INVALID_VM_STATE);
+
+ /*
+ * EMT(0) does the actual power off work here *after* all the other EMTs
+ * have been thru and entered the STOPPED state.
+ */
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
+ if (pVCpu->idCpu == 0)
+ {
+ /*
+ * For debugging purposes, we will log a summary of the guest state at this point.
+ */
+ if (enmVMState != VMSTATE_GURU_MEDITATION)
+ {
+ /** @todo make the state dumping at VMR3PowerOff optional. */
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
+ /** @todo dump guest call stack. */
+ RTLogRelSetBuffering(fOldBuffered);
+ RTLogRelPrintf("************** End of Guest state at power off ***************\n");
+ }
+
+ /*
+ * Perform the power off notifications and advance the state to
+ * Off or OffLS.
+ */
+ PDMR3PowerOff(pVM);
+ DBGFR3PowerOff(pVM);
+
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+ enmVMState = pVM->enmVMState;
+ if (enmVMState == VMSTATE_POWERING_OFF_LS)
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
+ else
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF, false /*fSetRatherThanClearFF*/);
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ }
+ else if (enmVMState != VMSTATE_GURU_MEDITATION)
+ {
+ /** @todo make the state dumping at VMR3PowerOff optional. */
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
+ RTLogRelPrintf("***\n");
+ RTLogRelSetBuffering(fOldBuffered);
+ RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
+ }
+
+ return VINF_EM_OFF;
+}
+
+
+/**
+ * Power off the VM.
+ *
+ * @returns VBox status code. When called on EMT, this will be a strict status
+ * code that has to be propagated up the call stack.
+ *
+ * @param pUVM The handle of the VM to be powered off.
+ *
+ * @thread Any thread.
+ * @vmstate Suspended, Running, Guru Meditation, Load Failure
+ * @vmstateto Off or OffLS
+ */
+VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
+{
+ LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Gather all the EMTs to make sure there are no races before
+ * changing the VM state.
+ */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3PowerOff, NULL);
+ LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
+ * Destroys the VM.
+ *
+ * The VM must be powered off (or never really powered on) to call this
+ * function. The VM handle is destroyed and can no longer be used upon
+ * successful return.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ *
+ * @thread Any non-emulation thread.
+ * @vmstate Off, Created
+ * @vmstateto N/A
+ */
+VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
+{
+ LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
+
+ /*
+ * Validate input.
+ */
+ if (!pUVM)
+ return VERR_INVALID_VM_HANDLE;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
+
+ /*
+ * Change VM state to destroying and call vmR3Destroy on each of the EMTs,
+ * ending with EMT(0) doing the bulk of the cleanup.
+ */
+ int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
+ AssertLogRelRC(rc);
+
+ /*
+ * Wait for EMTs to quit and destroy the UVM.
+ */
+ vmR3DestroyUVM(pUVM, 30000);
+
+ LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
+ return VINF_SUCCESS;
+}
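+
+
+/* Illustrative teardown sketch (not original code): the documented
+ power-off-then-destroy order, executed from a non-EMT thread; errors are
+ ignored for brevity and the trailing release drops the caller's UVM ref. */
+#if 0
+ VMR3PowerOff(pUVM); /* Running/Suspended/... -> Off */
+ VMR3Destroy(pUVM); /* Off -> Destroying, EMTs wound down */
+ VMR3ReleaseUVM(pUVM); /* release the caller's UVM reference */
+#endif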
+
+
+/**
+ * Internal destruction worker.
+ *
+ * This is either called from VMR3Destroy via VMR3ReqCallU or from
+ * vmR3EmulationThreadWithId when EMT(0) terminates after having called
+ * VMR3Destroy().
+ *
+ * When called on EMT(0), it will perform the great bulk of the destruction.
+ * When called on the other EMTs, they will do nothing and the whole purpose is
+ * to return VINF_EM_TERMINATE so they break out of their run loops.
+ *
+ * @returns VINF_EM_TERMINATE.
+ * @param pVM The cross context VM structure.
+ */
+DECLCALLBACK(int) vmR3Destroy(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ Assert(pVCpu);
+ LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
+
+ /*
+ * Only VCPU 0 does the full cleanup (last).
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ /*
+ * Dump statistics to the log.
+ */
+#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
+ RTLogFlags(NULL, "nodisabled nobuffered");
+#endif
+//#ifdef VBOX_WITH_STATISTICS
+// STAMR3Dump(pUVM, "*");
+//#else
+ LogRel(("************************* Statistics *************************\n"));
+ STAMR3DumpToReleaseLog(pUVM, "*");
+ LogRel(("********************* End of statistics **********************\n"));
+//#endif
+
+ /*
+ * Destroy the VM components.
+ */
+ int rc = TMR3Term(pVM);
+ AssertRC(rc);
+#ifdef VBOX_WITH_DEBUGGER
+ rc = DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
+ pUVM->vm.s.pvDBGC = NULL;
+ AssertRC(rc);
+#endif
+ rc = PDMR3Term(pVM);
+ AssertRC(rc);
+ rc = GIMR3Term(pVM);
+ AssertRC(rc);
+ rc = DBGFR3Term(pVM);
+ AssertRC(rc);
+ rc = IEMR3Term(pVM);
+ AssertRC(rc);
+ rc = EMR3Term(pVM);
+ AssertRC(rc);
+ rc = IOMR3Term(pVM);
+ AssertRC(rc);
+ rc = TRPMR3Term(pVM);
+ AssertRC(rc);
+ rc = SELMR3Term(pVM);
+ AssertRC(rc);
+ rc = HMR3Term(pVM);
+ AssertRC(rc);
+ rc = NEMR3Term(pVM);
+ AssertRC(rc);
+ rc = PGMR3Term(pVM);
+ AssertRC(rc);
+ rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
+ AssertRC(rc);
+ rc = CPUMR3Term(pVM);
+ AssertRC(rc);
+ SSMR3Term(pVM);
+ rc = PDMR3CritSectBothTerm(pVM);
+ AssertRC(rc);
+ rc = MMR3Term(pVM);
+ AssertRC(rc);
+
+ /*
+ * We're done, tell the other EMTs to quit.
+ */
+ ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+ ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
+ LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
+ }
+
+ /*
+ * Decrement the active EMT count here.
+ */
+ PUVMCPU pUVCpu = &pUVM->aCpus[pVCpu->idCpu];
+ if (!pUVCpu->vm.s.fBeenThruVmDestroy)
+ {
+ pUVCpu->vm.s.fBeenThruVmDestroy = true;
+ ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
+ }
+ else
+ AssertFailed();
+
+ return VINF_EM_TERMINATE;
+}
+
+
+/**
+ * Destroys the UVM portion.
+ *
+ * This is called as the final step in the VM destruction or as the cleanup
+ * in case of a creation failure.
+ *
+ * @param pUVM The user mode VM structure.
+ * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
+ * threads.
+ */
+static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
+{
+ /*
+ * Signal termination of each of the emulation threads and
+ * wait for them to complete.
+ */
+ /* Signal them - in reverse order since EMT(0) waits for the others. */
+ ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+ if (pUVM->pVM)
+ VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
+ VMCPUID iCpu = pUVM->cCpus;
+ while (iCpu-- > 0)
+ {
+ VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+ RTSemEventSignal(pUVM->aCpus[iCpu].vm.s.EventSemWait);
+ }
+
+ /* Wait for EMT(0), it in turn waits for the rest. */
+ ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+
+ RTTHREAD const hSelf = RTThreadSelf();
+ RTTHREAD hThread = pUVM->aCpus[0].vm.s.ThreadEMT;
+ if ( hThread != NIL_RTTHREAD
+ && hThread != hSelf)
+ {
+ int rc2 = RTThreadWait(hThread, RT_MAX(cMilliesEMTWait, 2000), NULL);
+ if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
+ rc2 = RTThreadWait(hThread, 1000, NULL);
+ AssertLogRelMsgRC(rc2, ("iCpu=0 rc=%Rrc\n", rc2));
+ if (RT_SUCCESS(rc2))
+ pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
+ }
+
+ /* Just in case we're in a weird failure situation w/o EMT(0) to do the
+ waiting, wait for the other EMTs too. */
+ for (iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
+ {
+ ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
+ if (hThread != NIL_RTTHREAD)
+ {
+ if (hThread != hSelf)
+ {
+ int rc2 = RTThreadWait(hThread, 250 /*ms*/, NULL);
+ AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
+ if (RT_SUCCESS(rc2))
+ continue;
+ }
+ pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
+ }
+ }
+
+ /* Cleanup the semaphores. */
+ iCpu = pUVM->cCpus;
+ while (iCpu-- > 0)
+ {
+ RTSemEventDestroy(pUVM->aCpus[iCpu].vm.s.EventSemWait);
+ pUVM->aCpus[iCpu].vm.s.EventSemWait = NIL_RTSEMEVENT;
+ }
+
+ /*
+ * Free the event semaphores associated with the request packets.
+ */
+ unsigned cReqs = 0;
+ for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
+ {
+ PVMREQ pReq = pUVM->vm.s.apReqFree[i];
+ pUVM->vm.s.apReqFree[i] = NULL;
+ for (; pReq; pReq = pReq->pNext, cReqs++)
+ {
+ pReq->enmState = VMREQSTATE_INVALID;
+ RTSemEventDestroy(pReq->EventSem);
+ }
+ }
+ Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
+
+ /*
+ * Kill all queued requests. (There really shouldn't be any!)
+ */
+ for (unsigned i = 0; i < 10; i++)
+ {
+ PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
+ if (!pReqHead)
+ {
+ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
+ if (!pReqHead)
+ break;
+ }
+ AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
+
+ for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
+ {
+ ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
+ ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
+ RTSemEventSignal(pReq->EventSem);
+ RTThreadSleep(2);
+ RTSemEventDestroy(pReq->EventSem);
+ }
+ /* give them a chance to respond before we free the request memory. */
+ RTThreadSleep(32);
+ }
+
+ /*
+ * Now all queued VCPU requests (again, there shouldn't be any).
+ */
+ for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
+ {
+ PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
+
+ for (unsigned i = 0; i < 10; i++)
+ {
+ PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
+ if (!pReqHead)
+ {
+ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
+ if (!pReqHead)
+ break;
+ }
+ AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
+
+ for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
+ {
+ ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
+ ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
+ RTSemEventSignal(pReq->EventSem);
+ RTThreadSleep(2);
+ RTSemEventDestroy(pReq->EventSem);
+ }
+ /* give them a chance to respond before we free the request memory. */
+ RTThreadSleep(32);
+ }
+ }
+
+ /*
+ * Make sure the VMMR0.r0 module and whatever else is unloaded.
+ */
+ PDMR3TermUVM(pUVM);
+
+ RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
+ RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
+
+ /*
+ * Terminate the support library if initialized.
+ */
+ if (pUVM->vm.s.pSession)
+ {
+ int rc = SUPR3Term(false /*fForced*/);
+ AssertRC(rc);
+ pUVM->vm.s.pSession = NIL_RTR0PTR;
+ }
+
+ /*
+ * Release the UVM structure reference.
+ */
+ VMR3ReleaseUVM(pUVM);
+
+ /*
+ * Clean up and flush logs.
+ */
+ RTLogFlush(NULL);
+}
+
+
+/**
+ * Worker which checks integrity of some internal structures.
+ * This is yet another attempt to track down that AVL tree crash.
+ */
+static void vmR3CheckIntegrity(PVM pVM)
+{
+#ifdef VBOX_STRICT
+ int rc = PGMR3CheckIntegrity(pVM);
+ AssertReleaseRC(rc);
+#else
+ RT_NOREF_PV(pVM);
+#endif
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
+ *
+ * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
+ * (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The reset flags.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ uint32_t fResetFlags = *(uint32_t *)pvUser;
+
+
+ /*
+ * The first EMT will try to change the state to resetting. If this fails,
+ * we won't get called for the other EMTs.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
+ VMSTATE_SOFT_RESETTING, VMSTATE_RUNNING,
+ VMSTATE_SOFT_RESETTING, VMSTATE_SUSPENDED,
+ VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->vm.s.cResets++;
+ pVM->vm.s.cSoftResets++;
+ }
+
+ /*
+ * Check the state.
+ */
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertLogRelMsgReturn( enmVMState == VMSTATE_SOFT_RESETTING
+ || enmVMState == VMSTATE_SOFT_RESETTING_LS,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+ /*
+ * EMT(0) does the full cleanup *after* all the other EMTs have been
+ * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
+ *
+ * Because there are per-cpu reset routines and the order may be important,
+ * the following sequence looks a bit ugly...
+ */
+
+ /* Reset the VCpu state. */
+ VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
+
+ /*
+ * Soft reset the VM components.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ PDMR3SoftReset(pVM, fResetFlags);
+ TRPMR3Reset(pVM);
+ CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
+ EMR3Reset(pVM);
+ HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
+ NEMR3Reset(pVM);
+
+ /*
+ * Since EMT(0) is the last to go thru here, it will advance the state.
+ * (Unlike vmR3HardReset we won't be doing any suspending of live
+ * migration VMs here since memory is unchanged.)
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+ enmVMState = pVM->enmVMState;
+ if (enmVMState == VMSTATE_SOFT_RESETTING)
+ {
+ if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
+ else
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
+ }
+ else
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ }
+
+ return VINF_EM_RESCHEDULE;
+}
+
+
+/**
+ * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
+ *
+ * This is called by the emulation threads as a response to the reset request
+ * issued by VMR3Reset().
+ *
+ * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
+ * is a strict return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Ignored.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ Assert(!pvUser); NOREF(pvUser);
+
+ /*
+ * The first EMT will try to change the state to resetting. If this fails,
+ * we won't get called for the other EMTs.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
+ VMSTATE_RESETTING, VMSTATE_RUNNING,
+ VMSTATE_RESETTING, VMSTATE_SUSPENDED,
+ VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->vm.s.cResets++;
+ pVM->vm.s.cHardResets++;
+ }
+
+ /*
+ * Check the state.
+ */
+ VMSTATE enmVMState = VMR3GetState(pVM);
+ AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
+ || enmVMState == VMSTATE_RESETTING_LS,
+ ("%s\n", VMR3GetStateName(enmVMState)),
+ VERR_VM_UNEXPECTED_UNSTABLE_STATE);
+
+ /*
+ * EMT(0) does the full cleanup *after* all the other EMTs have been
+ * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
+ *
+ * Because there are per-cpu reset routines and their ordering is
+ * important, the following sequence looks a bit ugly...
+ */
+ if (pVCpu->idCpu == 0)
+ vmR3CheckIntegrity(pVM);
+
+ /* Reset the VCpu state. */
+ VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
+
+ /* Clear all pending forced actions. */
+ VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
+
+ /*
+ * Reset the VM components.
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ GIMR3Reset(pVM); /* This must come *before* PDM and TM. */
+ PDMR3Reset(pVM);
+ PGMR3Reset(pVM);
+ SELMR3Reset(pVM);
+ TRPMR3Reset(pVM);
+ IOMR3Reset(pVM);
+ CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
+ TMR3Reset(pVM);
+ EMR3Reset(pVM);
+ HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
+ NEMR3Reset(pVM);
+
+ /*
+ * Do memory setup.
+ */
+ PGMR3MemSetup(pVM, true /*fAtReset*/);
+ PDMR3MemSetup(pVM, true /*fAtReset*/);
+
+ /*
+ * Since EMT(0) is the last to go thru here, it will advance the state.
+ * When a live save is active, we will move on to SuspendingLS but
+ * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+ enmVMState = pVM->enmVMState;
+ if (enmVMState == VMSTATE_RESETTING)
+ {
+ if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
+ else
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
+ }
+ else
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+
+ vmR3CheckIntegrity(pVM);
+
+ /*
+ * Do the suspend bit as well.
+ * It only requires some EMT(0) work at present.
+ */
+ if (enmVMState != VMSTATE_RESETTING)
+ {
+ vmR3SuspendDoWork(pVM);
+ vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
+ }
+ }
+
+ return enmVMState == VMSTATE_RESETTING
+ ? VINF_EM_RESET
+ : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
+}
+
+
+/**
+ * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param fHardReset Whether it's a hard reset or not.
+ * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
+ */
+static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
+{
+ LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
+ int rc;
+ if (fHardReset)
+ {
+ /*
+ * Hard reset.
+ */
+ /* Check whether we're supposed to power off instead of resetting. */
+ if (pVM->vm.s.fPowerOffInsteadOfReset)
+ {
+ PUVM pUVM = pVM->pUVM;
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
+ pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
+ return VMR3PowerOff(pUVM);
+ }
+
+ /* Gather all the EMTs to make sure there are no races before changing
+ the VM state. */
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3HardReset, NULL);
+ }
+ else
+ {
+ /*
+ * Soft reset. Since we only support this with a single CPU active,
+ * we must be on EMT #0 here.
+ */
+ VM_ASSERT_EMT0(pVM);
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3SoftReset, &fResetFlags);
+ }
+
+ LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+
+/**
+ * Reset the current VM.
+ *
+ * @returns VBox status code.
+ * @param pUVM The VM to reset.
+ */
+VMMR3DECL(int) VMR3Reset(PUVM pUVM)
+{
+ LogFlow(("VMR3Reset:\n"));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
+}
+
+
+/**
+ * Handle the reset force flag or triple fault.
+ *
+ * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @thread EMT
+ *
+ * @remarks Caller is expected to clear the VM_FF_RESET force flag.
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
+{
+ LogFlow(("VMR3ResetFF:\n"));
+
+ /*
+ * First consult the firmware on whether this is a hard or soft reset.
+ */
+ uint32_t fResetFlags;
+ bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
+ return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
+}
+
+
+/**
+ * For handling a CPU reset on triple fault.
+ *
+ * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
+ * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
+ * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @thread EMT
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
+{
+ LogFlow(("VMR3ResetTripleFault:\n"));
+
+ /*
+ * First consult the firmware on whether this is a hard or soft reset.
+ */
+ uint32_t fResetFlags;
+ bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
+ return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
+}
+
+
+/**
+ * Gets the user mode VM structure pointer given a pointer to the VM.
+ *
+ * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
+ * invalid (asserted).
+ * @param pVM The cross context VM structure.
+ * @sa VMR3GetVM, VMR3RetainUVM
+ */
+VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
+ return pVM->pUVM;
+}
+
+
+/**
+ * Gets the shared VM structure pointer given the pointer to the user mode VM
+ * structure.
+ *
+ * @returns Pointer to the VM.
+ * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
+ * is currently associated with it.
+ * @param pUVM The user mode VM handle.
+ * @sa VMR3GetUVM
+ */
+VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ return pUVM->pVM;
+}
+
+
+/**
+ * Retain the user mode VM handle.
+ *
+ * @returns Reference count.
+ * UINT32_MAX if @a pUVM is invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @sa VMR3ReleaseUVM
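+ *
+ * Illustrative retain/release pairing (example only, not part of the
+ * original docs; pUVM is assumed to be a valid user mode VM handle):
+ * @code
+ *      uint32_t cRefs = VMR3RetainUVM(pUVM);
+ *      Assert(cRefs != UINT32_MAX);
+ *      // ... use the handle ...
+ *      VMR3ReleaseUVM(pUVM);
+ * @endcode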
+ */
+VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
+ uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
+ AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
+ return cRefs;
+}
+
+
+/**
+ * Does the final release of the UVM structure.
+ *
+ * @param pUVM The user mode VM handle.
+ */
+static void vmR3DoReleaseUVM(PUVM pUVM)
+{
+ /*
+ * Free the UVM.
+ */
+ Assert(!pUVM->pVM);
+
+ MMR3HeapFree(pUVM->vm.s.pszName);
+ pUVM->vm.s.pszName = NULL;
+
+ MMR3TermUVM(pUVM);
+ STAMR3TermUVM(pUVM);
+
+ ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
+ RTTlsFree(pUVM->vm.s.idxTLS);
+ RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
+}
+
+
+/**
+ * Releases a reference to the user mode VM handle.
+ *
+ * @returns The new reference count, 0 if destroyed.
+ * UINT32_MAX if @a pUVM is invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @sa VMR3RetainUVM
+ */
+VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
+{
+ if (!pUVM)
+ return 0;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
+ uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
+ if (!cRefs)
+ vmR3DoReleaseUVM(pUVM);
+ else
+ AssertMsg(cRefs < _64K, ("%u\n", cRefs));
+ return cRefs;
+}
+
+
+/**
+ * Gets the VM name.
+ *
+ * @returns Pointer to a read-only string containing the name. NULL if called
+ * too early.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ return pUVM->vm.s.pszName;
+}
+
+
+/**
+ * Gets the VM UUID.
+ *
+ * @returns pUuid on success, NULL on failure.
+ * @param pUVM The user mode VM handle.
+ * @param pUuid Where to store the UUID.
+ */
+VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ AssertPtrReturn(pUuid, NULL);
+
+ *pUuid = pUVM->vm.s.Uuid;
+ return pUuid;
+}
+
+
+/**
+ * Gets the current VM state.
+ *
+ * @returns The current VM state.
+ * @param pVM The cross context VM structure.
+ * @thread Any
+ */
+VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
+{
+ AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
+ VMSTATE enmVMState = pVM->enmVMState;
+ return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
+}
+
+
+/**
+ * Gets the current VM state.
+ *
+ * @returns The current VM state.
+ * @param pUVM The user-mode VM handle.
+ * @thread Any
+ */
+VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
+ if (RT_UNLIKELY(!pUVM->pVM))
+ return VMSTATE_TERMINATED;
+ return pUVM->pVM->enmVMState;
+}
+
+
+/**
+ * Gets the state name string for a VM state.
+ *
+ * @returns Pointer to the state name. (readonly)
+ * @param enmState The state.
+ */
+VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
+{
+ switch (enmState)
+ {
+ case VMSTATE_CREATING: return "CREATING";
+ case VMSTATE_CREATED: return "CREATED";
+ case VMSTATE_LOADING: return "LOADING";
+ case VMSTATE_POWERING_ON: return "POWERING_ON";
+ case VMSTATE_RESUMING: return "RESUMING";
+ case VMSTATE_RUNNING: return "RUNNING";
+ case VMSTATE_RUNNING_LS: return "RUNNING_LS";
+ case VMSTATE_RESETTING: return "RESETTING";
+ case VMSTATE_RESETTING_LS: return "RESETTING_LS";
+ case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
+ case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
+ case VMSTATE_SUSPENDED: return "SUSPENDED";
+ case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
+ case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
+ case VMSTATE_SUSPENDING: return "SUSPENDING";
+ case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
+ case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
+ case VMSTATE_SAVING: return "SAVING";
+ case VMSTATE_DEBUGGING: return "DEBUGGING";
+ case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
+ case VMSTATE_POWERING_OFF: return "POWERING_OFF";
+ case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
+ case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
+ case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
+ case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
+ case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
+ case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
+ case VMSTATE_OFF: return "OFF";
+ case VMSTATE_OFF_LS: return "OFF_LS";
+ case VMSTATE_DESTROYING: return "DESTROYING";
+ case VMSTATE_TERMINATED: return "TERMINATED";
+
+ default:
+ AssertMsgFailed(("Unknown state %d\n", enmState));
+ return "Unknown!\n";
+ }
+}
+
+
+/**
+ * Validates the state transition in strict builds.
+ *
+ * @returns true if valid, false if not.
+ *
+ * @param enmStateOld The old (current) state.
+ * @param enmStateNew The proposed new state.
+ *
+ * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
+ * diagram (under State Machine Diagram).
+ */
+static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
+{
+#ifndef VBOX_STRICT
+ RT_NOREF2(enmStateOld, enmStateNew);
+#else
+ switch (enmStateOld)
+ {
+ case VMSTATE_CREATING:
+ AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_CREATED:
+ AssertMsgReturn( enmStateNew == VMSTATE_LOADING
+ || enmStateNew == VMSTATE_POWERING_ON
+ || enmStateNew == VMSTATE_POWERING_OFF
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_LOADING:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
+ || enmStateNew == VMSTATE_LOAD_FAILURE
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_POWERING_ON:
+ AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
+ /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_RESUMING:
+ AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
+ /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_RUNNING:
+ AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
+ || enmStateNew == VMSTATE_SUSPENDING
+ || enmStateNew == VMSTATE_RESETTING
+ || enmStateNew == VMSTATE_SOFT_RESETTING
+ || enmStateNew == VMSTATE_RUNNING_LS
+ || enmStateNew == VMSTATE_DEBUGGING
+ || enmStateNew == VMSTATE_FATAL_ERROR
+ || enmStateNew == VMSTATE_GURU_MEDITATION
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_RUNNING_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
+ || enmStateNew == VMSTATE_SUSPENDING_LS
+ || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
+ || enmStateNew == VMSTATE_RESETTING_LS
+ || enmStateNew == VMSTATE_SOFT_RESETTING_LS
+ || enmStateNew == VMSTATE_RUNNING
+ || enmStateNew == VMSTATE_DEBUGGING_LS
+ || enmStateNew == VMSTATE_FATAL_ERROR_LS
+ || enmStateNew == VMSTATE_GURU_MEDITATION_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_RESETTING:
+ AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SOFT_RESETTING:
+ AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_RESETTING_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SOFT_RESETTING_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDING:
+ AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDING_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
+ || enmStateNew == VMSTATE_SUSPENDED_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDING_EXT_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
+ || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDED:
+ AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
+ || enmStateNew == VMSTATE_SAVING
+ || enmStateNew == VMSTATE_RESETTING
+ || enmStateNew == VMSTATE_SOFT_RESETTING
+ || enmStateNew == VMSTATE_RESUMING
+ || enmStateNew == VMSTATE_LOADING
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDED_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
+ || enmStateNew == VMSTATE_SAVING
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SUSPENDED_EXT_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
+ || enmStateNew == VMSTATE_SAVING
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_SAVING:
+ AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_DEBUGGING:
+ AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
+ || enmStateNew == VMSTATE_POWERING_OFF
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_DEBUGGING_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
+ || enmStateNew == VMSTATE_RUNNING_LS
+ || enmStateNew == VMSTATE_POWERING_OFF_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_POWERING_OFF:
+ AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_POWERING_OFF_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
+ || enmStateNew == VMSTATE_OFF_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_OFF:
+ AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_OFF_LS:
+ AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_FATAL_ERROR:
+ AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_FATAL_ERROR_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
+ || enmStateNew == VMSTATE_POWERING_OFF_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_GURU_MEDITATION:
+ AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
+ || enmStateNew == VMSTATE_POWERING_OFF
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_GURU_MEDITATION_LS:
+ AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
+ || enmStateNew == VMSTATE_DEBUGGING_LS
+ || enmStateNew == VMSTATE_POWERING_OFF_LS
+ , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_LOAD_FAILURE:
+ AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_DESTROYING:
+ AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+
+ case VMSTATE_TERMINATED:
+ default:
+ AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
+ break;
+ }
+#endif /* VBOX_STRICT */
+ return true;
+}
+
+
+/**
+ * Does the state change callouts.
+ *
+ * The caller owns the AtStateCritSect.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUVM The UVM handle.
+ * @param enmStateNew The new state.
+ * @param enmStateOld The old state.
+ */
+static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
+{
+ LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
+
+ for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
+ {
+ pCur->pfnAtState(pUVM, VMMR3GetVTable(), enmStateNew, enmStateOld, pCur->pvUser);
+ if ( enmStateNew != VMSTATE_DESTROYING
+ && pVM->enmVMState == VMSTATE_DESTROYING)
+ break;
+ AssertMsg(pVM->enmVMState == enmStateNew,
+ ("You are not allowed to change the state while in the change callback, except "
+ "from destroying the VM. There are restrictions in the way the state changes "
+ "are propagated up to the EM execution loop and it makes the program flow very "
+ "difficult to follow. (%s, expected %s, old %s)\n",
+ VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
+ VMR3GetStateName(enmStateOld)));
+ }
+}
+
+
+/**
+ * Sets the current VM state, with the AtStateCritSect already entered.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pUVM The UVM handle.
+ * @param enmStateNew The new state.
+ * @param enmStateOld The old state.
+ * @param fSetRatherThanClearFF The usual behavior is to clear the
+ * VM_FF_CHECK_VM_STATE force flag, but for
+ * some transitions (-> guru) we need to kick
+ * the other EMTs to stop what they're doing.
+ */
+static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
+{
+ vmR3ValidateStateTransition(enmStateOld, enmStateNew);
+
+ AssertMsg(pVM->enmVMState == enmStateOld,
+ ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
+
+ pUVM->vm.s.enmPrevVMState = enmStateOld;
+ pVM->enmVMState = enmStateNew;
+
+ if (!fSetRatherThanClearFF)
+ VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
+ else if (pVM->cCpus > 0)
+ VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
+
+ vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
+}
+
+
+/**
+ * Sets the current VM state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param enmStateNew The new state.
+ * @param enmStateOld The old state (for asserting only).
+ */
+static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
+{
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+
+ RT_NOREF_PV(enmStateOld);
+ AssertMsg(pVM->enmVMState == enmStateOld,
+ ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
+ vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
+
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+}
+
+
+/**
+ * Tries to perform a state transition.
+ *
+ * @returns The 1-based ordinal of the transition that succeeded.
+ * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pszWho Who is trying to change it.
+ * @param cTransitions The number of transitions in the ellipsis.
+ * @param ... Transition pairs; new, old.
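+ *
+ * Illustrative call (pairs are tried in order; each pair is new state,
+ * old state - mirrors the usage in vmR3HardReset above):
+ * @code
+ *      rc = vmR3TrySetState(pVM, "MyCaller", 2,
+ *                           VMSTATE_SUSPENDING,    VMSTATE_RUNNING,
+ *                           VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
+ * @endcode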
+ */
+static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
+{
+ va_list va;
+ VMSTATE enmStateNew = VMSTATE_CREATED;
+ VMSTATE enmStateOld = VMSTATE_CREATED;
+
+#ifdef VBOX_STRICT
+ /*
+ * Validate the input first.
+ */
+ va_start(va, cTransitions);
+ for (unsigned i = 0; i < cTransitions; i++)
+ {
+ enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ vmR3ValidateStateTransition(enmStateOld, enmStateNew);
+ }
+ va_end(va);
+#endif
+
+ /*
+ * Grab the lock and see if any of the proposed transitions works out.
+ */
+ va_start(va, cTransitions);
+ int rc = VERR_VM_INVALID_VM_STATE;
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+
+ VMSTATE enmStateCur = pVM->enmVMState;
+
+ for (unsigned i = 0; i < cTransitions; i++)
+ {
+ enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ if (enmStateCur == enmStateOld)
+ {
+ vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
+ rc = i + 1;
+ break;
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ /*
+ * Complain about it.
+ */
+ const char * const pszStateCur = VMR3GetStateName(enmStateCur);
+ if (cTransitions == 1)
+ {
+ LogRel(("%s: %s -> %s failed, because the VM state is actually %s!\n",
+ pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
+ VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS, N_("%s failed because the VM state is %s instead of %s"),
+ pszWho, pszStateCur, VMR3GetStateName(enmStateOld));
+ AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
+ pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
+ }
+ else
+ {
+ char szTransitions[4096];
+ size_t cchTransitions = 0;
+ szTransitions[0] = '\0';
+ va_end(va);
+ va_start(va, cTransitions);
+ for (unsigned i = 0; i < cTransitions; i++)
+ {
+ enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
+ const char * const pszStateNew = VMR3GetStateName(enmStateNew);
+ const char * const pszStateOld = VMR3GetStateName(enmStateOld);
+ LogRel(("%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew));
+ cchTransitions += RTStrPrintf(&szTransitions[cchTransitions], sizeof(szTransitions) - cchTransitions,
+ "%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew);
+ }
+ Assert(cchTransitions < sizeof(szTransitions) - 64);
+
+ LogRel(("%s: %s failed, because the VM state is actually %s!\n", pszWho, szTransitions, pszStateCur));
+ VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
+ N_("%s failed because the current VM state, %s, was not found in the state transition table (%s)"),
+ pszWho, pszStateCur, szTransitions);
+ AssertMsgFailed(("%s - state=%s, transitions: %s. Check the cTransitions passed us.\n",
+ pszWho, pszStateCur, szTransitions));
+ }
+ }
+
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ va_end(va);
+ Assert(rc > 0 || rc < 0);
+ return rc;
+}
+
+
+/**
+ * Interface used by EM to signal that it's entering the guru meditation state.
+ *
+ * This will notify the other threads.
+ *
+ * @returns true if the state changed to Guru, false if no state change.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+
+ VMSTATE enmStateCur = pVM->enmVMState;
+ bool fRc = true;
+ if (enmStateCur == VMSTATE_RUNNING)
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
+ else if (enmStateCur == VMSTATE_RUNNING_LS)
+ {
+ vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
+ SSMR3Cancel(pUVM);
+ }
+ else
+ fRc = false;
+
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ return fRc;
+}
+
+
+/**
+ * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void vmR3SetTerminated(PVM pVM)
+{
+ vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
+}
+
+
+/**
+ * Checks if the VM was teleported and hasn't been fully resumed yet.
+ *
+ * This applies to both sides of the teleportation since we may leave a working
+ * clone behind and the user is allowed to resume this...
+ *
+ * @returns true / false.
+ * @param pVM The cross context VM structure.
+ * @thread Any thread.
+ */
+VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
+{
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
+}
+
+
+/**
+ * Registers a VM state change callback.
+ *
+ * You are not allowed to call any function which changes the VM state from a
+ * state callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The VM handle.
+ * @param pfnAtState Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
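+ *
+ * Illustrative registration with a hypothetical callback (not from the
+ * original docs; the signature mirrors the invocation in vmR3DoAtState):
+ * @code
+ *      static DECLCALLBACK(void) myAtState(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
+ *                                          VMSTATE enmOldState, void *pvUser)
+ *      {
+ *          LogRel(("myAtState: %s -> %s\n",
+ *                  VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
+ *      }
+ *      ...
+ *      rc = VMR3AtStateRegister(pUVM, myAtState, NULL /*pvUser*/);
+ * @endcode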
+ */
+VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
+{
+ LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Allocate a new record.
+ */
+ PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
+ if (!pNew)
+ return VERR_NO_MEMORY;
+
+ /* fill */
+ pNew->pfnAtState = pfnAtState;
+ pNew->pvUser = pvUser;
+
+ /* insert */
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+ pNew->pNext = *pUVM->vm.s.ppAtStateNext;
+ *pUVM->vm.s.ppAtStateNext = pNew;
+ pUVM->vm.s.ppAtStateNext = &pNew->pNext;
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a VM state change callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The VM handle.
+ * @param pfnAtState Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
+ */
+VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
+{
+ LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
+
+ /*
+ * Search the list for the entry.
+ */
+ PVMATSTATE pPrev = NULL;
+ PVMATSTATE pCur = pUVM->vm.s.pAtState;
+ while ( pCur
+ && ( pCur->pfnAtState != pfnAtState
+ || pCur->pvUser != pvUser))
+ {
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ if (!pCur)
+ {
+ AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+ return VERR_FILE_NOT_FOUND;
+ }
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ {
+ pPrev->pNext = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
+ }
+ else
+ {
+ pUVM->vm.s.pAtState = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
+ }
+
+ RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
+
+ /*
+ * Free it.
+ */
+ pCur->pfnAtState = NULL;
+ pCur->pNext = NULL;
+ MMR3HeapFree(pCur);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Registers a VM error callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The VM handle.
+ * @param pfnAtError Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
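+ *
+ * Illustrative registration with a hypothetical callback (not from the
+ * original docs; the signature mirrors the invocation in vmR3SetErrorUV):
+ * @code
+ *      static DECLCALLBACK(void) myAtError(PUVM pUVM, void *pvUser, int rc, RT_SRC_POS_DECL,
+ *                                          const char *pszFormat, va_list va)
+ *      {
+ *          LogRel(("myAtError: %s(%d) rc=%Rrc\n", pszFile, iLine, rc));
+ *      }
+ *      ...
+ *      rc = VMR3AtErrorRegister(pUVM, myAtError, NULL /*pvUser*/);
+ * @endcode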
+ */
+VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
+{
+ LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Allocate a new record.
+ */
+ PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
+ if (!pNew)
+ return VERR_NO_MEMORY;
+
+ /* fill */
+ pNew->pfnAtError = pfnAtError;
+ pNew->pvUser = pvUser;
+
+ /* insert */
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+ pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
+ *pUVM->vm.s.ppAtErrorNext = pNew;
+ pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a VM error callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The VM handle.
+ * @param pfnAtError Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
+ */
+VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
+{
+ LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+
+ /*
+ * Search the list for the entry.
+ */
+ PVMATERROR pPrev = NULL;
+ PVMATERROR pCur = pUVM->vm.s.pAtError;
+ while ( pCur
+ && ( pCur->pfnAtError != pfnAtError
+ || pCur->pvUser != pvUser))
+ {
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ if (!pCur)
+ {
+ AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+ return VERR_FILE_NOT_FOUND;
+ }
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ {
+ pPrev->pNext = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
+ }
+ else
+ {
+ pUVM->vm.s.pAtError = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
+ }
+
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+
+ /*
+ * Free it.
+ */
+ pCur->pfnAtError = NULL;
+ pCur->pNext = NULL;
+ MMR3HeapFree(pCur);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Ellipsis to va_list wrapper for calling pfnAtError.
+ */
+static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
+ va_end(va);
+}
+
+
+/**
+ * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
+ * The message is found in VMINT.
+ *
+ * @param pVM The cross context VM structure.
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
+{
+ VM_ASSERT_EMT(pVM);
+ AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
+
+ /*
+ * Unpack the error (if we managed to format one).
+ */
+ PVMERROR pErr = pVM->vm.s.pErrorR3;
+ const char *pszFile = NULL;
+ const char *pszFunction = NULL;
+ uint32_t iLine = 0;
+ const char *pszMessage;
+ int32_t rc = VERR_MM_HYPER_NO_MEMORY;
+ if (pErr)
+ {
+ AssertCompile(sizeof(const char) == sizeof(uint8_t));
+ if (pErr->offFile)
+ pszFile = (const char *)pErr + pErr->offFile;
+ iLine = pErr->iLine;
+ if (pErr->offFunction)
+ pszFunction = (const char *)pErr + pErr->offFunction;
+ if (pErr->offMessage)
+ pszMessage = (const char *)pErr + pErr->offMessage;
+ else
+ pszMessage = "No message!";
+ }
+ else
+ pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
+
+ /*
+ * Call the at error callbacks.
+ */
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+ ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
+ for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
+ vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+}
+
+
+/**
+ * Gets the number of errors raised via VMSetError.
+ *
+ * This can be used to avoid duplicate error messages.
+ *
+ * @returns The error count.
+ * @param pUVM The VM handle.
+ */
+VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
+{
+ AssertPtrReturn(pUVM, 0);
+ AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
+ return pUVM->vm.s.cErrors;
+}
+
+
+/**
+ * Creation time wrapper for vmR3SetErrorUV.
+ *
+ * @returns rc.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param rc The VBox status code.
+ * @param SRC_POS The source position of this error.
+ * @param pszFormat Format string.
+ * @param ... The arguments.
+ * @thread Any thread.
+ */
+static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Worker which calls everyone listening to the VM error messages.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param rc The VBox status code.
+ * @param SRC_POS The source position of this error.
+ * @param pszFormat Format string.
+ * @param pArgs Pointer to the format arguments.
+ * @thread EMT
+ */
+DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
+{
+ /*
+ * Log the error.
+ */
+ va_list va3;
+ va_copy(va3, *pArgs);
+ RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
+ "VMSetError: %N\n",
+ pszFile, iLine, pszFunction, rc,
+ pszFormat, &va3);
+ va_end(va3);
+
+#ifdef LOG_ENABLED
+ va_copy(va3, *pArgs);
+ RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
+ "%N\n",
+ pszFile, iLine, pszFunction, rc,
+ pszFormat, &va3);
+ va_end(va3);
+#endif
+
+ /*
+ * Make a copy of the message.
+ */
+ if (pUVM->pVM)
+ vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
+
+ /*
+ * Call the at error callbacks.
+ */
+ bool fCalledSomeone = false;
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+ ASMAtomicIncU32(&pUVM->vm.s.cErrors);
+ for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
+ {
+ va_list va2;
+ va_copy(va2, *pArgs);
+ pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
+ va_end(va2);
+ fCalledSomeone = true;
+ }
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+}
+
+
+/**
+ * Sets the error message.
+ *
+ * @returns rc. Meaning you can do:
+ * @code
+ * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
+ * @endcode
+ * @param pUVM The user mode VM handle.
+ * @param rc VBox status code.
+ * @param SRC_POS Use RT_SRC_POS.
+ * @param pszFormat Error message format string.
+ * @param ... Error message arguments.
+ * @thread Any
+ */
+VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
+ va_end(va);
+ return rcRet;
+}
+
+
+/**
+ * Sets the error message.
+ *
+ * @returns rc. Meaning you can do:
+ * @code
+ * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
+ * @endcode
+ * @param pUVM The user mode VM handle.
+ * @param rc VBox status code.
+ * @param SRC_POS Use RT_SRC_POS.
+ * @param pszFormat Error message format string.
+ * @param va Error message arguments.
+ * @thread Any
+ */
+VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
+ if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
+ {
+ va_list vaCopy;
+ va_copy(vaCopy, va);
+ vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
+ va_end(vaCopy);
+ return rc;
+ }
+
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
+}
+
+
+
+/**
+ * Registers a VM runtime error callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param pfnAtRuntimeError Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
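+ *
+ * Illustrative registration with a hypothetical callback (not from the
+ * original docs; the signature mirrors the callback round in
+ * vmR3SetRuntimeErrorCommon):
+ * @code
+ *      static DECLCALLBACK(void) myAtRuntimeError(PUVM pUVM, void *pvUser, uint32_t fFlags,
+ *                                                 const char *pszErrorId,
+ *                                                 const char *pszFormat, va_list va)
+ *      {
+ *          LogRel(("myAtRuntimeError: %s (fFlags=%#x)\n", pszErrorId, fFlags));
+ *      }
+ *      ...
+ *      rc = VMR3AtRuntimeErrorRegister(pUVM, myAtRuntimeError, NULL /*pvUser*/);
+ * @endcode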
+ */
+VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
+{
+ LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /*
+ * Allocate a new record.
+ */
+ PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
+ if (!pNew)
+ return VERR_NO_MEMORY;
+
+ /* fill */
+ pNew->pfnAtRuntimeError = pfnAtRuntimeError;
+ pNew->pvUser = pvUser;
+
+ /* insert */
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+ pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
+ *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
+ pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a VM runtime error callback.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pfnAtRuntimeError Pointer to callback.
+ * @param pvUser User argument.
+ * @thread Any.
+ */
+VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
+{
+ LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+
+ /*
+ * Search the list for the entry.
+ */
+ PVMATRUNTIMEERROR pPrev = NULL;
+ PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
+ while ( pCur
+ && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
+ || pCur->pvUser != pvUser))
+ {
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ if (!pCur)
+ {
+ AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+ return VERR_FILE_NOT_FOUND;
+ }
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ {
+ pPrev->pNext = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
+ }
+ else
+ {
+ pUVM->vm.s.pAtRuntimeError = pCur->pNext;
+ if (!pCur->pNext)
+ pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
+ }
+
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+
+ /*
+ * Free it.
+ */
+ pCur->pfnAtRuntimeError = NULL;
+ pCur->pNext = NULL;
+ MMR3HeapFree(pCur);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
+ * the state to FatalError(LS).
+ *
+ * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
+ * return code, see FNVMMEMTRENDEZVOUS.)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser Ignored.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ Assert(!pvUser); NOREF(pvUser);
+
+ /*
+ * The first EMT thru here changes the state.
+ */
+ if (pVCpu->idCpu == pVM->cCpus - 1)
+ {
+ int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
+ VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
+ VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (rc == 2)
+ SSMR3Cancel(pVM->pUVM);
+
+ VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
+ }
+
+ /* This'll make sure we get out of wherever we are (e.g. REM). */
+ return VINF_EM_SUSPEND;
+}
+
+
+/**
+ * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
+ *
+ * This does the common parts after the error has been saved / retrieved.
+ *
+ * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFlags The error flags.
+ * @param pszErrorId Error ID string.
+ * @param pszFormat Format string.
+ * @param pVa Pointer to the format arguments.
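+ *
+ * Errors like this typically originate from a VMSetRuntimeError call in
+ * device or driver code, along these lines (hypothetical ID and message):
+ * @code
+ *      VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostIoError",
+ *                        N_("A host I/O error occurred; the VM will be suspended"));
+ * @endcode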
+ */
+static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
+{
+ LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
+ PUVM pUVM = pVM->pUVM;
+
+ /*
+ * Take actions before the call.
+ */
+ int rc;
+ if (fFlags & VMSETRTERR_FLAGS_FATAL)
+ rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3SetRuntimeErrorChangeState, NULL);
+ else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
+ {
+ /* Make sure we don't call VMR3Suspend when we shouldn't. As seen in
+ @bugref{10111} multiple runtime errors may be flagged when we run out
+ of disk space or similar, so don't freak out VMR3Suspend by calling
+ it in an invalid VM state. */
+ VMSTATE enmStateCur = pVM->enmVMState;
+ if (enmStateCur == VMSTATE_RUNNING || enmStateCur == VMSTATE_RUNNING_LS)
+ rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
+ else
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VINF_SUCCESS;
+
+ /*
+ * Do the callback round.
+ */
+ RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
+ ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
+ for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
+ {
+ va_list va;
+ va_copy(va, *pVa);
+ pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
+ va_end(va);
+ }
+ RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
+
+ return rc;
+}
+
+
+/**
+ * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
+ */
+static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError
+ * and VMSetRuntimeErrorV.
+ *
+ * The message is found in VMINT.
+ *
+ * @returns VBox status code, see VMSetRuntimeError.
+ * @param pVM The cross context VM structure.
+ * @thread EMT.
+ */
+VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
+{
+ VM_ASSERT_EMT(pVM);
+ AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
+
+ /*
+ * Unpack the error (if we managed to format one).
+ */
+ const char *pszErrorId = "SetRuntimeError";
+ const char *pszMessage = "No message!";
+ uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
+ PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
+ if (pErr)
+ {
+ AssertCompile(sizeof(const char) == sizeof(uint8_t));
+ if (pErr->offErrorId)
+ pszErrorId = (const char *)pErr + pErr->offErrorId;
+ if (pErr->offMessage)
+ pszMessage = (const char *)pErr + pErr->offMessage;
+ fFlags = pErr->fFlags;
+ }
+
+ /*
+ * Join paths with vmR3SetRuntimeErrorV.
+ */
+ return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
+}
+
+
+/**
+ * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
+ *
+ * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFlags The error flags.
+ * @param pszErrorId Error ID string.
+ * @param pszMessage The error message residing on the MM heap.
+ *
+ * @thread EMT
+ */
+DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
+{
+#if 0 /** @todo make copy of the error msg. */
+ /*
+ * Make a copy of the message.
+ */
+ va_list va2;
+ va_copy(va2, *pVa);
+ vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
+ va_end(va2);
+#endif
+
+ /*
+ * Join paths with VMR3SetRuntimeErrorWorker.
+ */
+ int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
+ MMR3HeapFree(pszMessage);
+ return rc;
+}
+
+
+/**
+ * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
+ *
+ * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFlags The error flags.
+ * @param pszErrorId Error ID string.
+ * @param pszFormat Format string.
+ * @param pVa Pointer to the format arguments.
+ *
+ * @thread EMT
+ */
+DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
+{
+ /*
+ * Make a copy of the message.
+ */
+ va_list va2;
+ va_copy(va2, *pVa);
+ vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
+ va_end(va2);
+
+ /*
+ * Join paths with VMR3SetRuntimeErrorWorker.
+ */
+ return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
+}
+
+
+/**
+ * Gets the number of runtime errors raised via VMR3SetRuntimeError.
+ *
+ * This can be used to avoid duplicate error messages.
+ *
+ * @returns The runtime error count.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
+{
+ return pUVM->vm.s.cRuntimeErrors;
+}
+
+
+/**
+ * Gets the ID of the virtual CPU associated with the calling thread.
+ *
+ * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
+{
+ PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
+ return pUVCpu
+ ? pUVCpu->idCpu
+ : NIL_VMCPUID;
+}
+
+
+/**
+ * Checks if the VM is long-mode (64-bit) capable or not.
+ *
+ * @returns true if VM can operate in long-mode, false otherwise.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
+{
+ switch (pVM->bMainExecutionEngine)
+ {
+ case VM_EXEC_ENGINE_HW_VIRT:
+ return HMIsLongModeAllowed(pVM);
+
+ case VM_EXEC_ENGINE_NATIVE_API:
+ return NEMHCIsLongModeAllowed(pVM);
+
+ case VM_EXEC_ENGINE_NOT_SET:
+ AssertFailed();
+ RT_FALL_THRU();
+ default:
+ return false;
+ }
+}
+
+
+/**
+ * Returns the native ID of the current EMT VMCPU thread.
+ *
+ * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
+ * @param pVM The cross context VM structure.
+ * @thread EMT
+ */
+VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
+{
+ PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
+
+ if (!pUVCpu)
+ return NIL_RTNATIVETHREAD;
+
+ return pUVCpu->vm.s.NativeThreadEMT;
+}
+
+
+/**
+ * Returns the native ID of the current EMT VMCPU thread.
+ *
+ * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
+ * @param pUVM The user mode VM structure.
+ * @thread EMT
+ */
+VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
+{
+ PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+
+ if (!pUVCpu)
+ return NIL_RTNATIVETHREAD;
+
+ return pUVCpu->vm.s.NativeThreadEMT;
+}
+
+
+/**
+ * Returns the handle of the current EMT VMCPU thread.
+ *
+ * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise.
+ * @param pUVM The user mode VM handle.
+ * @thread EMT
+ */
+VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
+{
+ PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+
+ if (!pUVCpu)
+ return NIL_RTTHREAD;
+
+ return pUVCpu->vm.s.ThreadEMT;
+}
+
+
+/**
+ * Returns the handle of the current EMT VMCPU thread.
+ *
+ * @returns The IPRT thread handle.
+ * @param pUVCpu The user mode CPU handle.
+ * @thread EMT
+ */
+VMMR3_INT_DECL(RTTHREAD) VMR3GetThreadHandle(PUVMCPU pUVCpu)
+{
+ return pUVCpu->vm.s.ThreadEMT;
+}
+
+
+/**
+ * Return the package and core ID of a CPU.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu Virtual CPU to get the ID from.
+ * @param pidCpuCore Where to store the core ID of the virtual CPU.
+ * @param pidCpuPackage Where to store the package ID of the virtual CPU.
+ *
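+ * With VBOX_WITH_MULTI_CORE defined, each virtual CPU maps to a core of a
+ * single package; otherwise each virtual CPU is its own single-core package
+ * (see the conditional block in the function body). Illustrative call:
+ * @code
+ *      uint32_t idCore, idPackage;
+ *      rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pUVM, 1 /*idCpu*/, &idCore, &idPackage);
+ * @endcode
+ *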
+ */
+VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
+ AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
+ if (idCpu >= pVM->cCpus)
+ return VERR_INVALID_CPU_ID;
+
+ /*
+ * Set return values.
+ */
+#ifdef VBOX_WITH_MULTI_CORE
+ *pidCpuCore = idCpu;
+ *pidCpuPackage = 0;
+#else
+ *pidCpuCore = 0;
+ *pidCpuPackage = idCpu;
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for VMR3HotUnplugCpu.
+ *
+ * @returns VINF_EM_WAIT_SIPI (strict status code).
+ * @param pVM The cross context VM structure.
+ * @param idCpu The current CPU.
+ */
+static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
+{
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * Reset per CPU resources.
+ *
+ * Actually only needed for VT-x because the CPU seems to be still in some
+ * paged mode and startup fails after a new hot plug event. SVM works fine
+ * even without this.
+ */
+ Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
+ PGMR3ResetCpu(pVM, pVCpu);
+ PDMR3ResetCpu(pVCpu);
+ TRPMR3ResetCpu(pVCpu);
+ CPUMR3ResetCpu(pVM, pVCpu);
+ EMR3ResetCpu(pVCpu);
+ HMR3ResetCpu(pVCpu);
+ NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
+ return VINF_EM_WAIT_SIPI;
+}
+
+
+/**
+ * Hot-unplugs a CPU from the guest.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu Virtual CPU to perform the hot unplugging operation on.
+ */
+VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
+ * broadcast requests. Just note down somewhere that the CPU is
+ * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
+ * it out of the EM loops when offline. */
+ return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
+}
+
+
+/**
+ * Hot-plugs a CPU on the guest.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu Virtual CPU to perform the hot plugging operation on.
+ */
+VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Changes the VMM execution cap.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM structure.
+ * @param uCpuExecutionCap New CPU execution cap in percent, 1-100, where
+ * 100 is max performance (the default).
+ */
+VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
+
+ Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
+ /* Note: not called from EMT. */
+ pVM->uCpuExecutionCap = uCpuExecutionCap;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Control whether the VM should power off when resetting.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
+ * resetting.
+ */
+VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /* Note: not called from EMT. */
+ pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMEmt.cpp b/src/VBox/VMM/VMMR3/VMEmt.cpp
new file mode 100644
index 00000000..bbd12fd5
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMEmt.cpp
@@ -0,0 +1,1462 @@
+/* $Id: VMEmt.cpp $ */
+/** @file
+ * VM - Virtual Machine, The Emulation Thread.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VM
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/gvmm.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/tm.h>
+#include "VMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/asm-math.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);
+
+
+/**
+ * The emulation thread main function.
+ *
+ * @returns Thread exit code.
+ * @param hThreadSelf The handle to the executing thread.
+ * @param pvArgs Pointer to the user mode per-VCpu structure (UVMCPU).
+ */
+DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD hThreadSelf, void *pvArgs)
+{
+ PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
+ return vmR3EmulationThreadWithId(hThreadSelf, pUVCpu, pUVCpu->idCpu);
+}
+
+
+/**
+ * The emulation thread main function, with Virtual CPU ID for debugging.
+ *
+ * @returns Thread exit code.
+ * @param hThreadSelf The handle to the executing thread.
+ * @param pUVCpu Pointer to the user mode per-VCpu structure.
+ * @param idCpu The virtual CPU ID, for backtrace purposes.
+ */
+int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
+{
+ PUVM pUVM = pUVCpu->pUVM;
+ int rc;
+ RT_NOREF_PV(hThreadSelf);
+
+ AssertReleaseMsg(RT_VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
+ ("Invalid arguments to the emulation thread!\n"));
+
+ rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
+ AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
+
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
+ pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
+
+ /*
+ * The request loop.
+ */
+ rc = VINF_SUCCESS;
+ Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", hThreadSelf, pUVM));
+ VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
+ ASMAtomicIncU32(&pUVM->vm.s.cActiveEmts);
+ for (;;)
+ {
+ /*
+ * During early init there is no pVM and/or pVCpu, so make a special path
+ * for that to keep things clearly separate.
+ */
+ PVM pVM = pUVM->pVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ if (!pVCpu || !pVM)
+ {
+ /*
+ * Check for termination first.
+ */
+ if (pUVM->vm.s.fTerminateEMT)
+ {
+ rc = VINF_EM_TERMINATE;
+ break;
+ }
+
+ /*
+ * Only the first VCPU may initialize the VM during early init,
+ * and it must therefore service all VMCPUID_ANY requests.
+ * See also VMR3Create().
+ */
+ if ( (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
+ && pUVCpu->idCpu == 0)
+ {
+ /*
+ * Service 'execute in any EMT' requests.
+ */
+ rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
+ Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
+ }
+ else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
+ {
+ /*
+ * Service 'execute in specific EMT' requests.
+ */
+ rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
+ Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
+ }
+ else
+ {
+ /*
+ * Nothing important is pending, so wait for something.
+ */
+ rc = VMR3WaitU(pUVCpu);
+ if (RT_FAILURE(rc))
+ {
+ AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
+ break;
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Pending requests that need servicing?
+ *
+ * We check for state changes in addition to status codes when
+ * servicing requests. (Look below the ifs.)
+ */
+ enmBefore = pVM->enmVMState;
+ if (pUVM->vm.s.fTerminateEMT)
+ {
+ rc = VINF_EM_TERMINATE;
+ break;
+ }
+
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ rc = VMMR3EmtRendezvousFF(pVM, pVM->apCpusR3[idCpu]);
+ Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
+ }
+ else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
+ {
+ /*
+ * Service 'execute in any EMT' requests.
+ */
+ rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
+ Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
+ }
+ else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
+ {
+ /*
+ * Service 'execute in specific EMT' requests.
+ */
+ rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
+ Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
+ }
+ else if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
+ {
+ /*
+ * Service the debugger request.
+ */
+ rc = DBGFR3VMMForcedAction(pVM, pVCpu);
+ Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
+ }
+ else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
+ {
+ /*
+ * Service a delayed reset request.
+ */
+ rc = VBOXSTRICTRC_VAL(VMR3ResetFF(pVM));
+ VM_FF_CLEAR(pVM, VM_FF_RESET);
+ Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
+ }
+ else
+ {
+ /*
+ * Nothing important is pending, so wait for something.
+ */
+ rc = VMR3WaitU(pUVCpu);
+ if (RT_FAILURE(rc))
+ {
+ AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
+ break;
+ }
+ }
+
+ /*
+ * Check for termination requests, these have extremely high priority.
+ */
+ if ( rc == VINF_EM_TERMINATE
+ || pUVM->vm.s.fTerminateEMT)
+ break;
+ }
+
+ /*
+ * Some requests (both VMR3Req* and the DBGF) can potentially resume
+ * or start the VM; in that case we'll get a change in VM status
+ * indicating that we're now running.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ pVM = pUVM->pVM;
+ if (pVM)
+ {
+ pVCpu = pVM->apCpusR3[idCpu];
+ if ( pVM->enmVMState == VMSTATE_RUNNING
+ && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
+ {
+ rc = EMR3ExecuteVM(pVM, pVCpu);
+ Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
+ }
+ }
+ }
+
+ } /* forever */
+
+
+ /*
+ * Decrement the active EMT count if we haven't done it yet in vmR3Destroy.
+ */
+ if (!pUVCpu->vm.s.fBeenThruVmDestroy)
+ ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
+
+
+ /*
+ * Cleanup and exit.
+ * EMT0 does the VM destruction after all other EMTs have deregistered and terminated.
+ */
+ Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
+ hThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
+ PVM pVM;
+ if ( idCpu == 0
+ && (pVM = pUVM->pVM) != NULL)
+ {
+ /* Wait for any other EMTs to terminate before we destroy the VM (see vmR3DestroyVM). */
+ for (VMCPUID iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
+ {
+ RTTHREAD hThread;
+ ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
+ if (hThread != NIL_RTTHREAD)
+ {
+ int rc2 = RTThreadWait(hThread, 5 * RT_MS_1SEC, NULL);
+ AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
+ if (RT_FAILURE(rc2))
+ pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
+ }
+ }
+
+ /* Switch to the terminated state, clearing the VM pointer and finally destroy the VM. */
+ vmR3SetTerminated(pVM);
+
+ pUVM->pVM = NULL;
+ for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
+ {
+ pUVM->aCpus[iCpu].pVM = NULL;
+ pUVM->aCpus[iCpu].pVCpu = NULL;
+ }
+
+ int rc2 = GVMMR3DestroyVM(pUVM, pVM);
+ AssertLogRelRC(rc2);
+ }
+ /* Deregister the EMT with VMMR0. */
+ else if ( idCpu != 0
+ && (pVM = pUVM->pVM) != NULL)
+ {
+ int rc2 = GVMMR3DeregisterVCpu(pVM, idCpu);
+ AssertLogRelRC(rc2);
+ }
+
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
+ pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
+
+ pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
+ Log(("vmR3EmulationThread: EMT is terminated.\n"));
+ return rc;
+}
+
+
+/**
+ * Gets the name of a halt method.
+ *
+ * @returns Pointer to a read only string.
+ * @param enmMethod The method.
+ */
+static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
+{
+ switch (enmMethod)
+ {
+ case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
+ case VMHALTMETHOD_DEFAULT: return "default";
+ case VMHALTMETHOD_OLD: return "old";
+ case VMHALTMETHOD_1: return "method1";
+ //case VMHALTMETHOD_2: return "method2";
+ case VMHALTMETHOD_GLOBAL_1: return "global1";
+ default: return "unknown";
+ }
+}
+
+
+/**
+ * Signal a fatal wait error.
+ *
+ * @returns Fatal error code to be propagated up the call stack.
+ * @param pUVCpu The user mode per CPU structure of the calling
+ * EMT.
+ * @param pszFmt The error format with a single %Rrc in it.
+ * @param rcFmt The status code to format.
+ */
+static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
+{
+ /** @todo This is wrong ... raise a fatal error / guru meditation
+ * instead. */
+ AssertLogRelMsgFailed((pszFmt, rcFmt));
+ ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
+ if (pUVCpu->pVM)
+ VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
+ return VERR_VM_FATAL_WAIT_ERROR;
+}
+
+
+/**
+ * The old halt loop.
+ */
+static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now */)
+{
+ /*
+ * Halt loop.
+ */
+ PVM pVM = pUVCpu->pVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+
+ int rc = VINF_SUCCESS;
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+ //unsigned cLoops = 0;
+ for (;;)
+ {
+ /*
+ * Work the timers and check if we can exit.
+ * The poll call gives us the ticks left to the next event in
+ * addition to perhaps set an FF.
+ */
+ uint64_t const u64StartTimers = RTTimeNanoTS();
+ TMR3TimerQueuesDo(pVM);
+ uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+ uint64_t u64NanoTS;
+ TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ /*
+ * Wait for a while. Someone will wake us up or interrupt the call if
+ * anything needs our attention.
+ */
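+ /* A sketch of the tiers used below: under 50us we just spin, under 870us
+ * we yield the thread, under 2ms we sleep for 1ms, and anything longer
+ * sleeps for up to 15ms (clamped). */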
+ if (u64NanoTS < 50000)
+ {
+ //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
+ /* spin */;
+ }
+ else
+ {
+ VMMR3YieldStop(pVM);
+ //uint64_t u64Start = RTTimeNanoTS();
+ if (u64NanoTS < 870000) /* this is a bit speculative... works fine on Linux. */
+ {
+ //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
+ uint64_t const u64StartSchedYield = RTTimeNanoTS();
+ RTThreadYield(); /* this is the best we can do here */
+ uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
+ }
+ else if (u64NanoTS < 2000000)
+ {
+ //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
+ uint64_t const u64StartSchedHalt = RTTimeNanoTS();
+ rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
+ uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+ }
+ else
+ {
+ //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
+ uint64_t const u64StartSchedHalt = RTTimeNanoTS();
+ rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
+ uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+ }
+ //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
+ //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
+ }
+ if (rc == VERR_TIMEOUT)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
+ break;
+ }
+ }
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * Initialize the configuration of halt method 1 & 2.
+ *
+ * @return VBox status code. Failure on invalid CFGM data.
+ * @param pUVM The user mode VM structure.
+ */
+static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
+{
+ /*
+ * The defaults.
+ */
+#if 1 /* DEBUGGING STUFF - REMOVE LATER */
+ pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
+ pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
+ pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
+ pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
+ pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
+#else
+ pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
+ pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
+ pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
+ pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
+ pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
+#endif
+
+ /*
+ * Query overrides.
+ *
+ * I don't have time to bother with niceties such as invalid value checks
+ * here right now. Sorry.
+ */
+ PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
+ if (pCfg)
+ {
+ uint32_t u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
+ pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
+ pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
+ pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
+ pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
+ pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
+ LogRel(("VMEmt: HaltedMethod1 config: %d/%d/%d/%d/%d\n",
+ pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
+ pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
+ pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
+ pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
+ pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initialize halt method 1.
+ *
+ * @return VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
+{
+ return vmR3HaltMethod12ReadConfigU(pUVM);
+}
+
+
+/**
+ * Method 1 - Block whenever possible, and when lagging behind
+ * switch to spinning for 10-30ms with occasional blocking until
+ * the lag has been eliminated.
+ */
+static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
+{
+ PUVM pUVM = pUVCpu->pUVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ PVM pVM = pUVCpu->pVM;
+
+ /*
+ * To simplify things, we decide up-front whether we should switch to spinning or
+ * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
+ * and that it will generate interrupts or other events that will cause us to exit
+ * the halt loop.
+ */
+ bool fBlockOnce = false;
+ bool fSpinning = false;
+ uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
+ if (u32CatchUpPct /* non-zero if catching up */)
+ {
+ if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
+ {
+ fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
+ if (fSpinning)
+ {
+ uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
+ fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
+ > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
+ RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
+ pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
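+ /* Worked example with the debug defaults above (divisor 4, min 2ms,
+ * max 75ms): a 100ms lag gives RT_MAX(2ms, RT_MIN(100ms/4, 75ms)) = 25ms,
+ * so we only block once more if the last block was over 25ms ago. */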
+ }
+ else
+ {
+ //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
+ pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
+ }
+ }
+ else
+ {
+ fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
+ if (fSpinning)
+ pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
+ }
+ }
+ else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
+ {
+ //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
+ pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
+ }
+
+ /*
+ * Halt loop.
+ */
+ int rc = VINF_SUCCESS;
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+ unsigned cLoops = 0;
+ for (;; cLoops++)
+ {
+ /*
+ * Work the timers and check if we can exit.
+ */
+ uint64_t const u64StartTimers = RTTimeNanoTS();
+ TMR3TimerQueuesDo(pVM);
+ uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ /*
+ * Estimate time left to the next event.
+ */
+ uint64_t u64NanoTS;
+ TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ /*
+ * Block if we're not spinning and the interval isn't all that small.
+ */
+ if ( ( !fSpinning
+ || fBlockOnce)
+#if 1 /* DEBUGGING STUFF - REMOVE LATER */
+ && u64NanoTS >= 100000) /* 0.100 ms */
+#else
+ && u64NanoTS >= 250000) /* 0.250 ms */
+#endif
+ {
+ const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
+ VMMR3YieldStop(pVM);
+
+ uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
+ if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
+ cMilliSecs = 1;
+ else
+ cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
+
+ //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
+ uint64_t const u64StartSchedHalt = RTTimeNanoTS();
+ rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
+ uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+
+ if (rc == VERR_TIMEOUT)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
+ break;
+ }
+
+ /*
+ * Calc the statistics.
+ * Update averages every 16th time, and flush parts of the history every 64th time.
+ */
+ const uint64_t Elapsed = RTTimeNanoTS() - Start;
+ pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
+ if (Elapsed > u64NanoTS)
+ pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
+ pUVCpu->vm.s.Halt.Method12.cBlocks++;
+ if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
+ {
+ pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
+ if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
+ {
+ pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
+ pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
+ }
+ }
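+ /* The rebase above keeps the average intact while capping the history
+ * weight at 64 blocks, so older samples gradually decay. */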
+ //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
+
+ /*
+ * Clear the block once flag if we actually blocked.
+ */
+ if ( fBlockOnce
+ && Elapsed > 100000 /* 0.1 ms */)
+ fBlockOnce = false;
+ }
+ }
+ //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * Initialize the global 1 halt method.
+ *
+ * @return VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
+{
+ /*
+ * The defaults.
+ */
+ uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
+ if (cNsResolution > 5*RT_NS_100US)
+ pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
+ else if (cNsResolution > RT_NS_100US)
+ pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 4;
+ else
+ pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 2000;
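+ /* Illustration: a coarse 1ms semaphore resolution takes the first branch
+ * (spin below 50us, block otherwise), while a fine 50us resolution takes
+ * the last one (spin below 2us). */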
+
+ /*
+ * Query overrides.
+ *
+ * I don't have time to bother with niceties such as invalid value checks
+ * here right now. Sorry.
+ */
+ PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
+ if (pCfg)
+ {
+ uint32_t u32;
+ if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
+ pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
+ }
+ LogRel(("VMEmt: HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
+ pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * The global 1 halt method - Block in GVMM (ring-0) and let it
+ * try to take care of the global scheduling of EMT threads.
+ */
+static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
+{
+ PUVM pUVM = pUVCpu->pUVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ PVM pVM = pUVCpu->pVM;
+ Assert(VMMGetCpu(pVM) == pVCpu);
+ NOREF(u64Now);
+
+ /*
+ * Halt loop.
+ */
+ //uint64_t u64NowLog, u64Start;
+ //u64Start = u64NowLog = RTTimeNanoTS();
+ int rc = VINF_SUCCESS;
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+ unsigned cLoops = 0;
+ for (;; cLoops++)
+ {
+ /*
+ * Work the timers and check if we can exit.
+ */
+ uint64_t const u64StartTimers = RTTimeNanoTS();
+ TMR3TimerQueuesDo(pVM);
+ uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ /*
+ * Estimate time left to the next event.
+ */
+ //u64NowLog = RTTimeNanoTS();
+ uint64_t u64Delta;
+ uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ /*
+ * Block if we're not spinning and the interval isn't all that small.
+ */
+ if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
+ {
+ VMMR3YieldStop(pVM);
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ break;
+
+ //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
+ uint64_t const u64StartSchedHalt = RTTimeNanoTS();
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
+ uint64_t const u64EndSchedHalt = RTTimeNanoTS();
+ uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+
+ if (rc == VERR_INTERRUPTED)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Halt: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
+ break;
+ }
+ else
+ {
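+ /* Classify the wakeup accuracy: more than 50us late counts as
+ * overslept, more than 50us early as insomnia, else as on time. */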
+ int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
+ if (cNsOverslept > 50000)
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
+ else if (cNsOverslept < -50000)
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
+ else
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
+ }
+ }
+ /*
+ * When spinning, call upon the GVMM and do some wakeups once
+ * in a while (every 8192nd iteration); it's not like we're
+ * actually busy or anything.
+ */
+ else if (!(cLoops & 0x1fff))
+ {
+ uint64_t const u64StartSchedYield = RTTimeNanoTS();
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
+ uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
+ STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
+ }
+ }
+ //RTLogPrintf("*** %u loops %'llu; lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * The global 1 halt method - VMR3Wait() worker.
+ *
+ * @returns VBox status code.
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
+{
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+
+ PVM pVM = pUVCpu->pUVM->pVM;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ Assert(pVCpu->idCpu == pUVCpu->idCpu);
+
+ int rc = VINF_SUCCESS;
+ for (;;)
+ {
+ /*
+ * Check Relevant FFs.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
+ break;
+
+ /*
+ * Wait for a while. Someone will wake us up or interrupt the call if
+ * anything needs our attention.
+ */
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
+ if (rc == VERR_INTERRUPTED)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Wait: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
+ break;
+ }
+ }
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * The global 1 halt method - VMR3NotifyFF() worker.
+ *
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ */
+static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
+{
+ /*
+ * With ring-0 halting, the fWait flag isn't set, so we have to check the
+ * CPU state to figure out whether to do a wakeup call.
+ */
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ if (pVCpu)
+ {
+ VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
+ if (enmState == VMCPUSTATE_STARTED_HALTED || pUVCpu->vm.s.fWait)
+ {
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
+ AssertRC(rc);
+
+ }
+ else if ( (fFlags & VMNOTIFYFF_FLAGS_POKE)
+ || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
+ {
+ if (enmState == VMCPUSTATE_STARTED_EXEC)
+ {
+ if (fFlags & VMNOTIFYFF_FLAGS_POKE)
+ {
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
+ AssertRC(rc);
+ }
+ }
+ else if ( enmState == VMCPUSTATE_STARTED_EXEC_NEM
+ || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
+ NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
+ }
+ }
+ /* This probably makes little sense: */
+ else if (pUVCpu->vm.s.fWait)
+ {
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
+ AssertRC(rc);
+ }
+}
+
+
+/**
+ * Bootstrap VMR3Wait() worker.
+ *
+ * @returns VBox status code.
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
+{
+ PUVM pUVM = pUVCpu->pUVM;
+
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+
+ int rc = VINF_SUCCESS;
+ for (;;)
+ {
+ /*
+ * Check Relevant FFs.
+ */
+ if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs) /* global requests pending? */
+ break;
+ if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
+ break;
+
+ if ( pUVCpu->pVM
+ && ( VM_FF_IS_ANY_SET(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_ANY_SET(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+ )
+ )
+ break;
+ if (pUVM->vm.s.fTerminateEMT)
+ break;
+
+ /*
+ * Wait for a while. Someone will wake us up or interrupt the call if
+ * anything needs our attention.
+ */
+ rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
+ if (rc == VERR_TIMEOUT)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
+ break;
+ }
+ }
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * Bootstrap VMR3NotifyFF() worker.
+ *
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ */
+static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
+{
+ if (pUVCpu->vm.s.fWait)
+ {
+ int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
+ AssertRC(rc);
+ }
+ NOREF(fFlags);
+}
+
+
+/**
+ * Default VMR3Wait() worker.
+ *
+ * @returns VBox status code.
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
+{
+ ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+
+ PVM pVM = pUVCpu->pVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ int rc = VINF_SUCCESS;
+ for (;;)
+ {
+ /*
+ * Check Relevant FFs.
+ */
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
+ break;
+
+ /*
+ * Wait for a while. Someone will wake us up or interrupt the call if
+ * anything needs our attention.
+ */
+ rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
+ if (rc == VERR_TIMEOUT)
+ rc = VINF_SUCCESS;
+ else if (RT_FAILURE(rc))
+ {
+ rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc", rc);
+ break;
+ }
+ }
+
+ ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
+ return rc;
+}
+
+
+/**
+ * Default VMR3NotifyFF() worker.
+ *
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ */
+static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
+{
+ if (pUVCpu->vm.s.fWait)
+ {
+ int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
+ AssertRC(rc);
+ }
+ else
+ {
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+ if (pVCpu)
+ {
+ VMCPUSTATE enmState = pVCpu->enmState;
+ if ( enmState == VMCPUSTATE_STARTED_EXEC_NEM
+ || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
+ NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
+ }
+ }
+}
+
+
+/**
+ * Array with halt method descriptors.
+ * VMINT::iHaltMethod contains an index into this array.
+ */
+static const struct VMHALTMETHODDESC
+{
+ /** The halt method ID. */
+ VMHALTMETHOD enmHaltMethod;
+ /** Set if the method supports halting directly in ring-0. */
+ bool fMayHaltInRing0;
+ /** The init function for loading config and initialize variables. */
+ DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
+ /** The term function. */
+ DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
+ /** The VMR3WaitHaltedU function. */
+ DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
+ /** The VMR3WaitU function. */
+ DECLR3CALLBACKMEMBER(int, pfnWait,(PUVMCPU pUVCpu));
+ /** The VMR3NotifyCpuFFU function. */
+ DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
+ /** The VMR3NotifyGlobalFFU function. */
+ DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
+} g_aHaltMethods[] =
+{
+ { VMHALTMETHOD_BOOTSTRAP, false, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyCpuFF, NULL },
+ { VMHALTMETHOD_OLD, false, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
+ { VMHALTMETHOD_1, false, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
+ { VMHALTMETHOD_GLOBAL_1, true, vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
+};
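+/* All dispatching goes through VMINT::iHaltMethod, e.g.
+ * g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
+ * as done by VMR3WaitU() and the other wrappers below. */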
+
+
+/**
+ * Notify the emulation thread (EMT) about pending Forced Action (FF).
+ *
+ * This function is called by threads other than EMT to make
+ * sure EMT wakes up and promptly services an FF request.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ * @internal
+ */
+VMMR3_INT_DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
+{
+ LogFlow(("VMR3NotifyGlobalFFU:\n"));
+ uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;
+
+ if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
+ g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
+ else
+ for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
+ g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
+}
+
+
+/**
+ * Notify the emulation thread (EMT) about pending Forced Action (FF).
+ *
+ * This function is called by threads other than EMT to make
+ * sure EMT wakes up and promptly services an FF request.
+ *
+ * @param pUVCpu Pointer to the user mode per CPU VM structure.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ * @internal
+ */
+VMMR3_INT_DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
+{
+ PUVM pUVM = pUVCpu->pUVM;
+
+ LogFlow(("VMR3NotifyCpuFFU:\n"));
+ g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
+}
+
+
+/**
+ * Halted VM Wait.
+ * Any external event will unblock the thread.
+ *
+ * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
+ * case an appropriate status code is returned.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param fIgnoreInterrupts If set, the interrupt-related force flags are ignored.
+ * @thread The emulation thread.
+ * @remarks Made visible for implementing vmsvga sync register.
+ * @internal
+ */
+VMMR3_INT_DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
+{
+ LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
+
+ /*
+ * Check Relevant FFs.
+ */
+ const uint32_t fMask = !fIgnoreInterrupts
+ ? VMCPU_FF_EXTERNAL_HALTED_MASK
+ : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
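+ /* (fIgnoreInterrupts only drops the APIC/PIC interrupt bits from the
+ * wakeup mask; the rest of VMCPU_FF_EXTERNAL_HALTED_MASK still applies.) */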
+ if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
+ {
+ LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#RX64)\n", pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * The yielder is suspended while we're halting; TM might have clock(s) running
+ * only at certain times and needs to be notified.
+ */
+ if (pVCpu->idCpu == 0)
+ VMMR3YieldSuspend(pVM);
+ TMNotifyStartOfHalt(pVCpu);
+
+ /*
+ * Record halt averages for the last second.
+ */
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
+ uint64_t u64Now = RTTimeNanoTS();
+ int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
+ if (off > 1000000000)
+ {
+ if (off > _4G || !pUVCpu->vm.s.cHalts)
+ {
+ pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
+ pUVCpu->vm.s.HaltFrequency = 1;
+ }
+ else
+ {
+ pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
+ pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
+ }
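+ /* Example: 250 halts over the last 1.25s yield HaltInterval = 5ms and
+ * HaltFrequency = 200 halts/sec. */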
+ pUVCpu->vm.s.u64HaltsStartTS = u64Now;
+ pUVCpu->vm.s.cHalts = 0;
+ }
+ pUVCpu->vm.s.cHalts++;
+
+ /*
+ * Do the halt.
+ */
+ VMCPU_ASSERT_STATE_2(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
+ VMCPUSTATE enmStateOld = VMCPU_GET_STATE(pVCpu);
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
+ PUVM pUVM = pUVCpu->pUVM;
+ int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
+ VMCPU_SET_STATE(pVCpu, enmStateOld);
+
+ /*
+ * Notify TM and resume the yielder
+ */
+ TMNotifyEndOfHalt(pVCpu);
+ if (pVCpu->idCpu == 0)
+ VMMR3YieldResume(pVM);
+
+ LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
+ return rc;
+}
+
+
+/**
+ * Suspended VM Wait.
+ * Only a handful of forced actions will cause the function to
+ * return to the caller.
+ *
+ * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
+ * case an appropriate status code is returned.
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ * @thread The emulation thread.
+ * @internal
+ */
+VMMR3_INT_DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
+{
+ LogFlow(("VMR3WaitU:\n"));
+
+ /*
+ * Check Relevant FFs.
+ */
+ PVM pVM = pUVCpu->pVM;
+ PVMCPU pVCpu = pUVCpu->pVCpu;
+
+ if ( pVM
+ && ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+ )
+ )
+ {
+ LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Do waiting according to the halt method (so VMR3NotifyFF
+ * doesn't have to special case anything).
+ */
+ PUVM pUVM = pUVCpu->pUVM;
+ int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
+ LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pUVM->pVM ? pUVM->pVM->fGlobalForcedActions : 0));
+ return rc;
+}
+
+
+/**
+ * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
+ * for the handling of asynchronous notifications to complete.
+ *
+ * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
+ * case an appropriate status code is returned.
+ * @param pUVCpu Pointer to the user mode VMCPU structure.
+ * @thread The emulation thread.
+ */
+VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
+{
+ LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
+ return VMR3WaitU(pUVCpu);
+}
+
+
+/**
+ * Interface that the PDM asynchronous-notification-completed helper methods
+ * use for EMT0 when it is waiting inside VMR3AsyncPdmNotificationWaitU().
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ */
+VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
+{
+ LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
+ VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
+ g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
+}
+
+
+/**
+ * Rendezvous callback that will be called once.
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvUser The new g_aHaltMethods index.
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ uintptr_t i = (uintptr_t)pvUser;
+ Assert(i < RT_ELEMENTS(g_aHaltMethods));
+
+ /*
+ * Main job is done once on EMT0 (it goes through here first).
+ */
+ if (pVCpu->idCpu == 0)
+ {
+ /*
+ * Terminate the old one.
+ */
+ if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
+ && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
+ {
+ g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
+ pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
+ }
+
+ /* Assert that the failure fallback is where we expect. */
+ Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
+ Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);
+
+ /*
+ * Init the new one.
+ */
+ memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
+ if (g_aHaltMethods[i].pfnInit)
+ {
+ rc = g_aHaltMethods[i].pfnInit(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ /* Fall back on the bootstrap method. This requires no
+ init/term (see assertion above), and will always work. */
+ AssertLogRelRC(rc);
+ i = 0;
+ }
+ }
+
+ /*
+ * Commit it.
+ */
+ pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
+ ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
+ }
+ else
+ i = pUVM->vm.s.iHaltMethod;
+
+ /*
+ * All EMTs must update their ring-0 halt configuration.
+ */
+ VMMR3SetMayHaltInRing0(pVCpu, g_aHaltMethods[i].fMayHaltInRing0,
+ g_aHaltMethods[i].enmHaltMethod == VMHALTMETHOD_GLOBAL_1
+ ? pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg : 0);
+
+ return rc;
+}
+
+
+/**
+ * Changes the halt method.
+ *
+ * @returns VBox status code.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param enmHaltMethod The new halt method.
+ * @thread EMT.
+ */
+int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
+{
+ PVM pVM = pUVM->pVM; Assert(pVM);
+ VM_ASSERT_EMT(pVM);
+ AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
+
+ /*
+ * Resolve default (can be overridden in the configuration).
+ */
+ if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
+ {
+ uint32_t u32;
+ int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
+ if (RT_SUCCESS(rc))
+ {
+ enmHaltMethod = (VMHALTMETHOD)u32;
+ if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
+ }
+ else if (rc != VERR_CFGM_VALUE_NOT_FOUND && rc != VERR_CFGM_CHILD_NOT_FOUND)
+ return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to query VM/HaltMethod as uint32_t"));
+ else /* value not present in the configuration, use the default */
+ enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
+ //enmHaltMethod = VMHALTMETHOD_1;
+ //enmHaltMethod = VMHALTMETHOD_OLD;
+ }
+
+ /*
+ * The global halt method doesn't work in driverless mode, so fall back on
+ * method #1 instead.
+ */
+ if (!SUPR3IsDriverless() || enmHaltMethod != VMHALTMETHOD_GLOBAL_1)
+ LogRel(("VMEmt: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
+ else
+ {
+ LogRel(("VMEmt: Halt method %s (%d) not available in driverless mode, using %s (%d) instead\n",
+ vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod, vmR3GetHaltMethodName(VMHALTMETHOD_1), VMHALTMETHOD_1));
+ enmHaltMethod = VMHALTMETHOD_1;
+ }
+
+
+ /*
+ * Find the descriptor.
+ */
+ unsigned i = 0;
+ while ( i < RT_ELEMENTS(g_aHaltMethods)
+ && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
+ i++;
+ AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
+
+ /*
+ * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
+ */
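+ /* The ASCENDING order makes EMT0 run the callback first, which
+ * vmR3SetHaltMethodCallback relies on for doing the main job once. */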
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
+}
+
+
+/**
+ * Special interface for implementing a HLT-like port on a device.
+ *
+ * This can be called directly from device code, provided the device is trusted
+ * to access the VMM directly. Since we may not have an accurate register set
+ * and the caller certainly shouldn't (device code does not access CPU
+ * registers), this function will return when interrupts are pending regardless
+ * of the actual EFLAGS.IF state.
+ *
+ * @returns VBox error status (never informational statuses).
+ * @param pVM The cross context VM structure.
+ * @param idCpu The id of the calling EMT.
+ */
+VMMR3DECL(int) VMR3WaitForDeviceReady(PVM pVM, VMCPUID idCpu)
+{
+ /*
+ * Validate caller and resolve the CPU ID.
+ */
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+ VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Tag along with the HLT mechanics for now.
+ */
+ int rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * Wakes up a CPU that has called VMR3WaitForDeviceReady.
+ *
+ * @returns VBox error status (never informational statuses).
+ * @param pVM The cross context VM structure.
+ * @param idCpu The id of the calling EMT.
+ */
+VMMR3DECL(int) VMR3NotifyCpuDeviceReady(PVM pVM, VMCPUID idCpu)
+{
+ /*
+ * Validate caller and resolve the CPU ID.
+ */
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+
+ /*
+ * Pretend it was an FF that got set since we've got logic for that already.
+ */
+ VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Returns the number of active EMTs.
+ *
+ * This is used by the rendezvous code during VM destruction to avoid waiting
+ * for EMTs that aren't around any more.
+ *
+ * @returns Number of active EMTs. 0 if invalid parameter.
+ * @param pUVM The user mode VM structure.
+ */
+VMMR3_INT_DECL(uint32_t) VMR3GetActiveEmts(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, 0);
+ return pUVM->vm.s.cActiveEmts;
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMM.cpp b/src/VBox/VMM/VMMR3/VMM.cpp
new file mode 100644
index 00000000..e235184c
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMM.cpp
@@ -0,0 +1,2582 @@
+/* $Id: VMM.cpp $ */
+/** @file
+ * VMM - The Virtual Machine Monitor Core.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+//#define NO_SUPCALLR0VMM
+
+/** @page pg_vmm VMM - The Virtual Machine Monitor
+ *
+ * The VMM component is two things at the moment: it's a component doing a few
+ * management and routing tasks, and it's the whole virtual machine monitor
+ * thing. For hysterical reasons, it is not doing all the management that one
+ * would expect; this is instead done by @ref pg_vm. We'll address this
+ * misdesign eventually, maybe.
+ *
+ * VMM is made up of these components:
+ * - @subpage pg_cfgm
+ * - @subpage pg_cpum
+ * - @subpage pg_dbgf
+ * - @subpage pg_em
+ * - @subpage pg_gim
+ * - @subpage pg_gmm
+ * - @subpage pg_gvmm
+ * - @subpage pg_hm
+ * - @subpage pg_iem
+ * - @subpage pg_iom
+ * - @subpage pg_mm
+ * - @subpage pg_nem
+ * - @subpage pg_pdm
+ * - @subpage pg_pgm
+ * - @subpage pg_selm
+ * - @subpage pg_ssm
+ * - @subpage pg_stam
+ * - @subpage pg_tm
+ * - @subpage pg_trpm
+ * - @subpage pg_vm
+ *
+ *
+ * @see @ref grp_vmm @ref grp_vm @subpage pg_vmm_guideline @subpage pg_raw
+ *
+ *
+ * @section sec_vmmstate VMM State
+ *
+ * @image html VM_Statechart_Diagram.gif
+ *
+ * To be written.
+ *
+ *
+ * @subsection subsec_vmm_init VMM Initialization
+ *
+ * To be written.
+ *
+ *
+ * @subsection subsec_vmm_term VMM Termination
+ *
+ * To be written.
+ *
+ *
+ * @section sec_vmm_limits VMM Limits
+ *
+ * There are various resource limits imposed by the VMM and its
+ * sub-components. We'll list some of them here.
+ *
+ * On 64-bit hosts:
+ * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
+ * can be increased up to 64K - 1.
+ * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
+ * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
+ * - A VM can be assigned all the memory we can use (16TB), however, the
+ * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
+ * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
+ *
+ * On 32-bit hosts:
+ * - Max 127 VMs. Imposed by GMM's per page structure.
+ * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
+ * ROM pages. The limit is imposed by the 28-bit page ID used
+ * internally in GMM. It is also limited by PAE.
+ * - A VM can be assigned all the memory GMM can allocate, however, the
+ * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
+ * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VMM
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/pdmqueue.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/nem.h>
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# include <VBox/vmm/iem.h>
+#endif
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/tm.h>
+#include "VMMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/version.h>
+#include <VBox/vmm/hm.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/time.h>
+#include <iprt/semaphore.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+#include <iprt/ctype.h>
+#include <iprt/x86.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The saved state version. */
+#define VMM_SAVED_STATE_VERSION 4
+/** The saved state version used by v3.0 and earlier. (Teleportation) */
+#define VMM_SAVED_STATE_VERSION_3_0 3
+
+/** Macro for flushing the ring-0 logging. */
+#define VMM_FLUSH_R0_LOG(a_pVM, a_pVCpu, a_pLogger, a_pR3Logger) \
+ do { \
+ size_t const idxBuf = (a_pLogger)->idxBuf % VMMLOGGER_BUFFER_COUNT; \
+ if ( (a_pLogger)->aBufs[idxBuf].AuxDesc.offBuf == 0 \
+ || (a_pLogger)->aBufs[idxBuf].AuxDesc.fFlushedIndicator) \
+ { /* likely? */ } \
+ else \
+ vmmR3LogReturnFlush(a_pVM, a_pVCpu, a_pLogger, idxBuf, a_pR3Logger); \
+ } while (0)
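+/* The flush macro is expected to be a no-op in the common case: it only calls
+ * vmmR3LogReturnFlush when the current buffer holds unflushed data. */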
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void vmmR3InitRegisterStats(PVM pVM);
+static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+#if 0 /* pointless when timers don't run on EMT */
+static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser);
+#endif
+static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
+ uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
+static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
+static FNRTTHREAD vmmR3LogFlusher;
+static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
+ PRTLOGGER pDstLogger);
+static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+
+
+/**
+ * Initializes the VMM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
+{
+ LogFlow(("VMMR3Init\n"));
+
+ /*
+ * Assert alignment, sizes and order.
+ */
+ AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
+ AssertCompile(RT_SIZEOFMEMB(VMCPU, vmm.s) <= RT_SIZEOFMEMB(VMCPU, vmm.padding));
+
+ /*
+ * Init basic VM VMM members.
+ */
+ pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
+ pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
+ pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
+ pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
+ pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
+ pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
+ pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
+ pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
+ pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
+ pVM->vmm.s.nsProgramStart = RTTimeProgramStartNanoTS();
+
+#if 0 /* pointless when timers don't run on EMT */
+ /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
+ * The EMT yield interval. The EMT yielding is a hack we employ to play a
+ * bit nicer with the rest of the system (like for instance the GUI).
+ */
+ int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
+ 23 /* Value arrived at after experimenting with the grub boot prompt. */);
+ AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
+#endif
+
+ /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
+ * Controls whether we employ per-cpu preemption timers to limit the time
+ * spent executing guest code. This option is not available on all
+ * platforms, in which case this setting is silently ignored. If we are
+ * running in VT-x mode, we will use the VMX-preemption timer instead of
+ * this one when possible.
+ */
+ PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
+ int rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
+ AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
+
+ /*
+ * Initialize the VMM rendezvous semaphores.
+ */
+ pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
+ if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
+ return VERR_NO_MEMORY;
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
+ AssertRCReturn(rc, rc);
+ }
+ rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPush);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPop);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
+ AssertRCReturn(rc, rc);
+ rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the saved state data unit.
+ */
+ rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
+ NULL, NULL, NULL,
+ NULL, vmmR3Save, NULL,
+ NULL, vmmR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Register the Ring-0 VM handle with the session for fast ioctl calls.
+ */
+ bool const fDriverless = SUPR3IsDriverless();
+ if (!fDriverless)
+ {
+ rc = SUPR3SetVMForFastIOCtl(VMCC_GET_VMR0_FOR_CALL(pVM));
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+#ifdef VBOX_WITH_NMI
+ /*
+ * Allocate mapping for the host APIC.
+ */
+ rc = MMR3HyperReserve(pVM, HOST_PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
+ AssertRC(rc);
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Start the log flusher thread.
+ */
+ if (!fDriverless)
+ rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
+ RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
+ if (RT_SUCCESS(rc))
+ {
+
+ /*
+ * Debug info and statistics.
+ */
+ DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
+ vmmR3InitRegisterStats(pVM);
+ vmmInitFormatTypes();
+
+ return VINF_SUCCESS;
+ }
+ }
+ /** @todo Need failure cleanup? */
+
+ return rc;
+}
+
+
+/**
+ * VMMR3Init worker that registers the statistics with STAM.
+ *
+ * @param pVM The cross context VM structure.
+ */
+static void vmmR3InitRegisterStats(PVM pVM)
+{
+ RT_NOREF_PV(pVM);
+
+ /* Nothing to do here in driverless mode. */
+ if (SUPR3IsDriverless())
+ return;
+
+ /*
+ * Statistics.
+ */
+ STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_COMMIT_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_COMMIT_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Total, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns without responsible force flag.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3FF, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TO_R3.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_TM_VIRTUAL_SYNC.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PGM_NEED_HANDY_PAGES.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_QUEUES.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_EMT_RENDEZVOUS.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TIMER.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_DMA.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_PDM_CRITSECT.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iem, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IEM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IEM.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iom, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IOM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IOM.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
+
+ STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes", STAMUNIT_OCCURENCES, "Total number of buffer flushes");
+ STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherNoWakeUp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-NoWakups", STAMUNIT_OCCURENCES, "Times the flusher thread didn't need waking up.");
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlock", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOnTime", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOverslept", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockInsomnia", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExec, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromSpin", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromBlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromBlock", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3FromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/FromSpin", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3Other, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/Other", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PendingFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PendingFF", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3SmallDelta, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/SmallDelta", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostNoInt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitNoInt", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostPendingFF,STAMTYPE_COUNTER,STAMVISIBILITY_ALWAYS,STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitPendingFF", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0Halts, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryCounter", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i);
+ STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i);
+
+ STAMR3RegisterF(pVM, &pVCpu->cEmtHashCollisions, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/VMM/EmtHashCollisions/Emt%02u", i);
+
+ PVMMR3CPULOGGER pShared = &pVCpu->vmm.s.u.s.Logger;
+ STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg", i);
+ STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/CannotBlock", i);
+ STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Wait", i);
+ STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Races", i);
+ STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/RacesToR0", i);
+ STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbDropped", i);
+ STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbBuf", i);
+ STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/idxBuf", i);
+
+ pShared = &pVCpu->vmm.s.u.s.RelLogger;
+ STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel", i);
+ STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/CannotBlock", i);
+ STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Wait", i);
+ STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Races", i);
+ STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/RacesToR0", i);
+ STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbDropped", i);
+ STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbBuf", i);
+ STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/idxBuf", i);
+ }
+}
+
+
+/**
+ * Worker for VMMR3InitR0 that calls ring-0 to do EMT-specific initialization.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context per CPU structure.
+ * @thread EMT(pVCpu)
+ */
+static DECLCALLBACK(int) vmmR3InitR0Emt(PVM pVM, PVMCPU pVCpu)
+{
+ return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL);
+}
+
+
+/**
+ * Initializes the R0 VMM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
+{
+ int rc;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ Assert(pVCpu && pVCpu->idCpu == 0);
+
+ /*
+ * Nothing to do here in driverless mode.
+ */
+ if (SUPR3IsDriverless())
+ return VINF_SUCCESS;
+
+ /*
+ * Make sure the ring-0 loggers are up to date.
+ */
+ rc = VMMR3UpdateLoggers(pVM);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Call Ring-0 entry with init code.
+ */
+#ifdef NO_SUPCALLR0VMM
+ //rc = VERR_GENERAL_FAILURE;
+ rc = VINF_SUCCESS;
+#else
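+ /* The 64-bit argument packs the SVN revision in the low dword and the
+ build type in the high dword, so ring-0 can check that it matches the
+ ring-3 side. */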
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
+#endif
+
+ /*
+ * Flush the logs & deal with assertions.
+ */
+#ifdef LOG_ENABLED
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+ if (rc == VERR_VMM_RING0_ASSERTION)
+ rc = vmmR3HandleRing0Assert(pVM, pVCpu);
+ if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
+ {
+ LogRel(("VMM: R0 init failed, rc=%Rra\n", rc));
+ if (RT_SUCCESS(rc))
+ rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
+ }
+
+ /*
+ * Log stuff we learned in ring-0.
+ */
+ /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
+ if (pVM->vmm.s.fIsUsingContextHooks)
+ LogRel(("VMM: Enabled thread-context hooks\n"));
+ else
+ LogRel(("VMM: Thread-context hooks unavailable\n"));
+
+ /* Log RTThreadPreemptIsPendingTrusty() and RTThreadPreemptIsPossible() results. */
+ if (pVM->vmm.s.fIsPreemptPendingApiTrusty)
+ LogRel(("VMM: RTThreadPreemptIsPending() can be trusted\n"));
+ else
+ LogRel(("VMM: Warning! RTThreadPreemptIsPending() cannot be trusted! Need to update kernel info?\n"));
+ if (pVM->vmm.s.fIsPreemptPossible)
+ LogRel(("VMM: Kernel preemption is possible\n"));
+ else
+ LogRel(("VMM: Kernel preemption does not seem to be possible\n"));
+
+ /*
+ * Send all EMTs to ring-0 to get their logger initialized.
+ */
+ for (VMCPUID idCpu = 0; RT_SUCCESS(rc) && idCpu < pVM->cCpus; idCpu++)
+ rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmmR3InitR0Emt, 2, pVM, pVM->apCpusR3[idCpu]);
+
+ return rc;
+}
+
+
+/**
+ * Called when an init phase completes.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param enmWhat Which init phase.
+ */
+VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ int rc = VINF_SUCCESS;
+
+ switch (enmWhat)
+ {
+ case VMINITCOMPLETED_RING3:
+ {
+#if 0 /* pointless when timers don't run on EMT */
+ /*
+ * Create the EMT yield timer.
+ */
+ rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, TMTIMER_FLAGS_NO_RING0,
+ "EMT Yielder", &pVM->vmm.s.hYieldTimer);
+ AssertRCReturn(rc, rc);
+
+ rc = TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldEveryMillies);
+ AssertRCReturn(rc, rc);
+#endif
+ break;
+ }
+
+ case VMINITCOMPLETED_HM:
+ {
+ /*
+ * Disable the periodic preemption timers if we can use the
+ * VMX-preemption timer instead.
+ */
+ if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
+ && HMR3IsVmxPreemptionTimerUsed(pVM))
+ pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
+ LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
+
+ /*
+ * Last chance for GIM to update its CPUID leaves if it requires
+ * knowledge/information from HM initialization.
+ */
+/** @todo r=bird: This shouldn't be done from here, but rather from VM.cpp. There is no dependency on VMM here. */
+ rc = GIMR3InitCompleted(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * CPUM's post-initialization (print CPUIDs).
+ */
+ CPUMR3LogCpuIdAndMsrFeatures(pVM);
+ break;
+ }
+
+ default: /* shuts up gcc */
+ break;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Terminate the VMM bits.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ Assert(pVCpu && pVCpu->idCpu == 0);
+
+ /*
+ * Call Ring-0 entry with termination code.
+ */
+ int rc = VINF_SUCCESS;
+ if (!SUPR3IsDriverless())
+ {
+#ifndef NO_SUPCALLR0VMM
+ rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
+#endif
+ }
+
+ /*
+ * Flush the logs & deal with assertions.
+ */
+#ifdef LOG_ENABLED
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+ if (rc == VERR_VMM_RING0_ASSERTION)
+ rc = vmmR3HandleRing0Assert(pVM, pVCpu);
+ if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
+ {
+ LogRel(("VMM: VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
+ if (RT_SUCCESS(rc))
+ rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
+ }
+
+ /*
+ * Do clean ups.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
+ pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
+ }
+ RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
+ pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
+ RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
+ pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
+ RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
+ pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
+ RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
+ pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
+ RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
+ pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
+ RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
+ pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
+ RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
+ pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
+ RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
+ pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
+
+ vmmTermFormatTypes();
+
+ /*
+ * Wait for the log flusher thread to complete.
+ */
+ if (pVM->vmm.s.hLogFlusherThread != NIL_RTTHREAD)
+ {
+ int rc2 = RTThreadWait(pVM->vmm.s.hLogFlusherThread, RT_MS_30SEC, NULL);
+ AssertLogRelRC(rc2);
+ if (RT_SUCCESS(rc2))
+ pVM->vmm.s.hLogFlusherThread = NIL_RTTHREAD;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM needs to relocate itself inside the GC.
+ *
+ * The VMM will need to apply relocations to the core code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offDelta The relocation delta.
+ */
+VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+{
+ LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
+ RT_NOREF(offDelta);
+
+ /*
+ * Update the logger.
+ */
+ VMMR3UpdateLoggers(pVM);
+}
+
+
+/**
+ * Worker for VMMR3UpdateLoggers.
+ */
+static int vmmR3UpdateLoggersWorker(PVM pVM, PVMCPU pVCpu, PRTLOGGER pSrcLogger, bool fReleaseLogger)
+{
+ /*
+ * Get the group count.
+ */
+ uint32_t uGroupsCrc32 = 0;
+ uint32_t cGroups = 0;
+ uint64_t fFlags = 0;
+ int rc = RTLogQueryBulk(pSrcLogger, &fFlags, &uGroupsCrc32, &cGroups, NULL);
+ Assert(rc == VERR_BUFFER_OVERFLOW);
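+ /* We passed a NULL group array, so a buffer overflow status is the
+ expected way of getting the group count back. */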
+
+ /*
+ * Allocate the request of the right size.
+ */
+ uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
+ PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZVar(cbReq);
+ if (pReq)
+ {
+ pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+ pReq->Hdr.cbReq = cbReq;
+ pReq->cGroups = cGroups;
+ rc = RTLogQueryBulk(pSrcLogger, &pReq->fFlags, &pReq->uGroupCrc32, &pReq->cGroups, pReq->afGroups);
+ AssertRC(rc);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * The 64-bit value argument.
+ */
+ uint64_t fExtraArg = fReleaseLogger;
+
+ /* Only outputting to the parent VMM's logs? Enable ring-0 to flush directly. */
+ uint32_t fDst = RTLogGetDestinations(pSrcLogger);
+ fDst &= ~(RTLOGDEST_DUMMY | RTLOGDEST_F_NO_DENY | RTLOGDEST_F_DELAY_FILE | RTLOGDEST_FIXED_FILE | RTLOGDEST_FIXED_DIR);
+ if ( (fDst & (RTLOGDEST_VMM | RTLOGDEST_VMM_REL))
+ && !(fDst & ~(RTLOGDEST_VMM | RTLOGDEST_VMM_REL)))
+ fExtraArg |= (fDst & RTLOGDEST_VMM ? VMMR0UPDATELOGGER_F_TO_PARENT_VMM_DBG : 0)
+ | (fDst & RTLOGDEST_VMM_REL ? VMMR0UPDATELOGGER_F_TO_PARENT_VMM_REL : 0);
+
+ rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_UPDATE_LOGGERS, fExtraArg, &pReq->Hdr);
+ }
+
+ RTMemFree(pReq);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Updates the settings for the RC and R0 loggers.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @thread EMT
+ */
+VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
+{
+ /* Nothing to do here if we're in driverless mode: */
+ if (SUPR3IsDriverless())
+ return VINF_SUCCESS;
+
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+
+ /*
+ * Each EMT has its own logger instance.
+ */
+ /* Debug logging. */
+ int rcDebug = VINF_SUCCESS;
+#ifdef LOG_ENABLED
+ PRTLOGGER const pDefault = RTLogDefaultInstance();
+ if (pDefault)
+ rcDebug = vmmR3UpdateLoggersWorker(pVM, pVCpu, pDefault, false /*fReleaseLogger*/);
+#else
+ RT_NOREF(pVM);
+#endif
+
+ /* Release logging. */
+ int rcRelease = VINF_SUCCESS;
+ PRTLOGGER const pRelease = RTLogRelGetDefaultInstance();
+ if (pRelease)
+ rcRelease = vmmR3UpdateLoggersWorker(pVM, pVCpu, pRelease, true /*fReleaseLogger*/);
+
+ return RT_SUCCESS(rcDebug) ? rcRelease : rcDebug;
+}
+
+
+/**
+ * @callback_method_impl{FNRTTHREAD, Ring-0 log flusher thread.}
+ */
+static DECLCALLBACK(int) vmmR3LogFlusher(RTTHREAD hThreadSelf, void *pvUser)
+{
+ PVM const pVM = (PVM)pvUser;
+ RT_NOREF(hThreadSelf);
+
+ /* Reset the flusher state before we start: */
+ pVM->vmm.s.LogFlusherItem.u32 = UINT32_MAX;
+
+ /*
+ * The work loop.
+ */
+ for (;;)
+ {
+ /*
+ * Wait for work.
+ */
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_VMMR0_LOG_FLUSHER, 0, NULL);
+ if (RT_SUCCESS(rc))
+ {
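+ /* Ring-0 parked us until a buffer needed flushing; the work item
+ describing it (CPU, logger and buffer indexes) was passed back in
+ pVM->vmm.s.LogFlusherItem. */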
+ /* Paranoia: Make another copy of the request, to make sure the validated data can't be changed. */
+ VMMLOGFLUSHERENTRY Item;
+ Item.u32 = pVM->vmm.s.LogFlusherItem.u32;
+ if ( Item.s.idCpu < pVM->cCpus
+ && Item.s.idxLogger < VMMLOGGER_IDX_MAX
+ && Item.s.idxBuffer < VMMLOGGER_BUFFER_COUNT)
+ {
+ /*
+ * Verify the request.
+ */
+ PVMCPU const pVCpu = pVM->apCpusR3[Item.s.idCpu];
+ PVMMR3CPULOGGER const pShared = &pVCpu->vmm.s.u.aLoggers[Item.s.idxLogger];
+ uint32_t const cbToFlush = pShared->aBufs[Item.s.idxBuffer].AuxDesc.offBuf;
+ if (cbToFlush > 0)
+ {
+ if (cbToFlush <= pShared->cbBuf)
+ {
+ char * const pchBufR3 = pShared->aBufs[Item.s.idxBuffer].pchBufR3;
+ if (pchBufR3)
+ {
+ /*
+ * Do the flushing.
+ */
+ PRTLOGGER const pLogger = Item.s.idxLogger == VMMLOGGER_IDX_REGULAR
+ ? RTLogGetDefaultInstance() : RTLogRelGetDefaultInstance();
+ if (pLogger)
+ {
+ char szBefore[128];
+ RTStrPrintf(szBefore, sizeof(szBefore),
+ "*FLUSH* idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x fFlushed=%RTbool cbDropped=%#x\n",
+ Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush,
+ pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator, pShared->cbDropped);
+ RTLogBulkWrite(pLogger, szBefore, pchBufR3, cbToFlush, "*FLUSH DONE*\n");
+ }
+ }
+ else
+ Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! No ring-3 buffer pointer!\n",
+ Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
+ }
+ else
+ Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Exceeds %#x bytes buffer size!\n",
+ Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush, pShared->cbBuf));
+ }
+ else
+ Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Zero bytes to flush!\n",
+ Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
+
+ /*
+ * Mark the descriptor as flushed and set the request flag for same.
+ */
+ pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator = true;
+ }
+ else
+ {
+ Assert(Item.s.idCpu == UINT16_MAX);
+ Assert(Item.s.idxLogger == UINT8_MAX);
+ Assert(Item.s.idxBuffer == UINT8_MAX);
+ }
+ }
+ /*
+ * Getting interrupted can happen; just ignore it.
+ */
+ else if (rc == VERR_INTERRUPTED)
+ { /* ignore */ }
+ /*
+ * The ring-0 termination code will set the shutdown flag and wake us
+ * up, and we should return with the object destroyed. In case there is
+ * some kind of race, we might also see the semaphore destroyed.
+ */
+ else if ( rc == VERR_OBJECT_DESTROYED
+ || rc == VERR_SEM_DESTROYED
+ || rc == VERR_INVALID_HANDLE)
+ {
+ LogRel(("vmmR3LogFlusher: Terminating (%Rrc)\n", rc));
+ return VINF_SUCCESS;
+ }
+ /*
+ * There shouldn't be any other errors...
+ */
+ else
+ {
+ LogRelMax(64, ("vmmR3LogFlusher: VMMR0_DO_VMMR0_LOG_FLUSHER -> %Rrc\n", rc));
+ AssertRC(rc);
+ RTThreadSleep(1);
+ }
+ }
+}
+
+
+/**
+ * Helper for VMM_FLUSH_R0_LOG that does the flushing.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling
+ * EMT.
+ * @param pShared The shared logger data.
+ * @param idxBuf The buffer to flush.
+ * @param pDstLogger The destination IPRT logger.
+ */
+static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf, PRTLOGGER pDstLogger)
+{
+ uint32_t const cbToFlush = pShared->aBufs[idxBuf].AuxDesc.offBuf;
+ const char *pszBefore = cbToFlush < 256 ? NULL : "*FLUSH*\n";
+ const char *pszAfter = cbToFlush < 256 ? NULL : "*END*\n";
+
+#if VMMLOGGER_BUFFER_COUNT > 1
+ /*
+ * When we have more than one log buffer, the flusher thread may still be
+ * working on the previous buffer when we get here.
+ */
+ char szBefore[64];
+ if (pShared->cFlushing > 0)
+ {
+ STAM_REL_PROFILE_START(&pShared->StatRaces, a);
+ uint64_t const nsStart = RTTimeNanoTS();
+
+ /* A no-op, but it takes the lock and the hope is that we end up waiting
+ on the flusher to finish up. */
+ RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
+ if (pShared->cFlushing != 0)
+ {
+ RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
+
+ /* If no luck, go to ring-0 and do proper waiting. */
+ if (pShared->cFlushing != 0)
+ {
+ STAM_REL_COUNTER_INC(&pShared->StatRacesToR0);
+ SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED, 0, NULL);
+ }
+ }
+
+ RTStrPrintf(szBefore, sizeof(szBefore), "*%sFLUSH* waited %'RU64 ns\n",
+ pShared->cFlushing == 0 ? "" : " MISORDERED", RTTimeNanoTS() - nsStart);
+ pszBefore = szBefore;
+ STAM_REL_PROFILE_STOP(&pShared->StatRaces, a);
+ }
+#else
+ RT_NOREF(pVM, pVCpu);
+#endif
+
+ RTLogBulkWrite(pDstLogger, pszBefore, pShared->aBufs[idxBuf].pchBufR3, cbToFlush, pszAfter);
+ pShared->aBufs[idxBuf].AuxDesc.fFlushedIndicator = true;
+}
+
+
+/**
+ * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
+ *
+ * @returns Pointer to the buffer.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
+{
+ return pVM->vmm.s.szRing0AssertMsg1;
+}
+
+
+/**
+ * Returns the VMCPU of the specified virtual CPU.
+ *
+ * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the virtual CPU.
+ */
+VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ AssertReturn(idCpu < pUVM->cCpus, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ return pUVM->pVM->apCpusR3[idCpu];
+}
+
+
+/**
+ * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
+ *
+ * @returns Pointer to the buffer.
+ * @param pVM The cross context VM structure.
+ */
+VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
+{
+ return pVM->vmm.s.szRing0AssertMsg2;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ LogFlow(("vmmR3Save:\n"));
+
+ /*
+ * Save the started/stopped state of all CPUs except 0 as it will always
+ * be running. This avoids breaking the saved state version. :-)
+ */
+ for (VMCPUID i = 1; i < pVM->cCpus; i++)
+ SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVM->apCpusR3[i])));
+
+ return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ LogFlow(("vmmR3Load:\n"));
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != VMM_SAVED_STATE_VERSION
+ && uVersion != VMM_SAVED_STATE_VERSION_3_0)
+ {
+ AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+
+ if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
+ {
+ /* Ignore the stack bottom, stack pointer and stack bits. */
+ RTRCPTR RCPtrIgnored;
+ SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
+ SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
+#ifdef RT_OS_DARWIN
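+ /* Judging by the version checks below: some 3.0.x darwin.x86 builds
+ (r48858 and later) saved a 16 KB stack instead of the usual 8 KB,
+ so the amount to skip differs. */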
+ if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
+ && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
+ && SSMR3HandleRevision(pSSM) >= 48858
+ && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
+ || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
+ )
+ SSMR3Skip(pSSM, 16384);
+ else
+ SSMR3Skip(pSSM, 8192);
+#else
+ SSMR3Skip(pSSM, 8192);
+#endif
+ }
+
+ /*
+ * Restore the VMCPU states. VCPU 0 is always started.
+ */
+ VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
+ for (VMCPUID i = 1; i < pVM->cCpus; i++)
+ {
+ bool fStarted;
+ int rc = SSMR3GetBool(pSSM, &fStarted);
+ if (RT_FAILURE(rc))
+ return rc;
+ VMCPU_SET_STATE(pVM->apCpusR3[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
+ }
+
+ /* terminator */
+ uint32_t u32;
+ int rc = SSMR3GetU32(pSSM, &u32);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (u32 != UINT32_MAX)
+ {
+ AssertMsgFailed(("u32=%#x\n", u32));
+ return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Suspends the CPU yielder.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
+{
+#if 0 /* pointless when timers don't run on EMT */
+ VMCPU_ASSERT_EMT(pVM->apCpusR3[0]);
+ if (!pVM->vmm.s.cYieldResumeMillies)
+ {
+ uint64_t u64Now = TMTimerGet(pVM, pVM->vmm.s.hYieldTimer);
+ uint64_t u64Expire = TMTimerGetExpire(pVM, pVM->vmm.s.hYieldTimer);
+ if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
+ pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
+ else
+ pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM, pVM->vmm.s.hYieldTimer, u64Expire - u64Now);
+ TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
+ }
+ pVM->vmm.s.u64LastYield = RTTimeNanoTS();
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+/**
+ * Stops the CPU yielder.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
+{
+#if 0 /* pointless when timers don't run on EMT */
+ if (!pVM->vmm.s.cYieldResumeMillies)
+ TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
+ pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
+ pVM->vmm.s.u64LastYield = RTTimeNanoTS();
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+/**
+ * Resumes the CPU yielder when it has been suspended or stopped.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
+{
+#if 0 /* pointless when timers don't run on EMT */
+ if (pVM->vmm.s.cYieldResumeMillies)
+ {
+ TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldResumeMillies);
+ pVM->vmm.s.cYieldResumeMillies = 0;
+ }
+#else
+ RT_NOREF(pVM);
+#endif
+}
+
+
+#if 0 /* pointless when timers don't run on EMT */
+/**
+ * @callback_method_impl{FNTMTIMERINT, EMT yielder}
+ *
+ * @todo This is a UNI core/thread thing, really... Should be reconsidered.
+ */
+static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ NOREF(pvUser);
+
+ /*
+ * This really needs some careful tuning. While we shouldn't be too greedy since
+ * that'll cause the rest of the system to stop up, we shouldn't be too nice either
+ * because that'll cause us to stop up.
+ *
+ * The current logic is to use the default interval when there is no lag worth
+ * mentioning, but when we start accumulating lag we don't bother yielding at all.
+ *
+ * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
+ * so the lag is up to date.)
+ */
+ const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
+ if ( u64Lag < 50000000 /* 50ms */
+ || ( u64Lag < 1000000000 /* 1s */
+ && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
+ )
+ {
+ uint64_t u64Elapsed = RTTimeNanoTS();
+ pVM->vmm.s.u64LastYield = u64Elapsed;
+
+ RTThreadYield();
+
+#ifdef LOG_ENABLED
+ u64Elapsed = RTTimeNanoTS() - u64Elapsed;
+ Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
+#endif
+ }
+ TMTimerSetMillies(pVM, hTimer, pVM->vmm.s.cYieldEveryMillies);
+}
+#endif
+
+
+/**
+ * Executes guest code (Intel VT-x and AMD-V).
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
+{
+ Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+
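+ /* Stay in the ring-0 execution loop as long as we are only kicked out
+ by host interrupts (VINF_EM_RAW_INTERRUPT_HYPER); any other status
+ is for the caller to handle. */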
+ int rc;
+ do
+ {
+#ifdef NO_SUPCALLR0VMM
+ rc = VERR_GENERAL_FAILURE;
+#else
+ rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
+ if (RT_LIKELY(rc == VINF_SUCCESS))
+ rc = pVCpu->vmm.s.iLastGZRc;
+#endif
+ } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
+
+#if 0 /** @todo triggers too often */
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
+#endif
+
+ /*
+ * Flush the logs
+ */
+#ifdef LOG_ENABLED
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+ if (rc != VERR_VMM_RING0_ASSERTION)
+ {
+ Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+ return rc;
+ }
+ return vmmR3HandleRing0Assert(pVM, pVCpu);
+}
+
+
+/**
+ * Performs one of the fast I/O control VMMR0 operations.
+ *
+ * @returns VBox strict status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param enmOperation The operation to perform.
+ */
+VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
+{
+ VBOXSTRICTRC rcStrict;
+ do
+ {
+#ifdef NO_SUPCALLR0VMM
+ rcStrict = VERR_GENERAL_FAILURE;
+#else
+ rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
+ if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+ rcStrict = pVCpu->vmm.s.iLastGZRc;
+#endif
+ } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
+
+ /*
+ * Flush the logs
+ */
+#ifdef LOG_ENABLED
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+ if (rcStrict != VERR_VMM_RING0_ASSERTION)
+ return rcStrict;
+ return vmmR3HandleRing0Assert(pVM, pVCpu);
+}
+
+
+/**
+ * VCPU worker for VMMR3SendStartupIpi.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu Virtual CPU to perform SIPI on.
+ * @param uVector The SIPI vector.
+ */
+static DECLCALLBACK(int) vmmR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
+{
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ /*
+ * In the INIT state, the target CPU is only responsive to an SIPI.
+ * This is also true when the CPU is in VMX non-root mode.
+ *
+ * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)".
+ * See Intel spec. 26.6.2 "Activity State".
+ */
+ if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
+ return VINF_SUCCESS;
+
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if (CPUMIsGuestInVmxRootMode(pCtx))
+ {
+ /* If the CPU is in VMX non-root mode we must cause a VM-exit. */
+ if (CPUMIsGuestInVmxNonRootMode(pCtx))
+ return VBOXSTRICTRC_TODO(IEMExecVmxVmexitStartupIpi(pVCpu, uVector));
+
+ /* If the CPU is in VMX root mode (and not in VMX non-root mode) SIPIs are blocked. */
+ return VINF_SUCCESS;
+ }
+#endif
+
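+ /*
+ * Standard x86 SIPI semantics: the vector gives the real-mode code
+ * segment (selector = vector << 8, base = vector << 12) and execution
+ * starts at IP 0 within it.
+ */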
+ pCtx->cs.Sel = uVector << 8;
+ pCtx->cs.ValidSel = uVector << 8;
+ pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
+ pCtx->cs.u64Base = uVector << 12;
+ pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
+ pCtx->rip = 0;
+
+ Log(("vmmR3SendStartupIpi for VCPU %d with vector %x\n", idCpu, uVector));
+
+# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
+ EMSetState(pVCpu, EMSTATE_HALTED);
+ return VINF_EM_RESCHEDULE;
+# else /* And if we go the VMCPU::enmState way it can stay here. */
+ VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+ return VINF_SUCCESS;
+# endif
+}
+
+
+/**
+ * VCPU worker for VMMR3SendInitIpi.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param idCpu Virtual CPU to perform the INIT IPI on.
+ */
+static DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
+{
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ VMCPU_ASSERT_EMT(pVCpu);
+
+ Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
+
+ /** @todo r=ramshankar: We should probably block INIT signal when the CPU is in
+ * wait-for-SIPI state. Verify. */
+
+ /* If the CPU is in VMX non-root mode, INIT signals cause VM-exits. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ if (CPUMIsGuestInVmxNonRootMode(pCtx))
+ return VBOXSTRICTRC_TODO(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL, 0 /* uExitQual */));
+#endif
+
+ /** @todo Figure out how to handle a SVM nested-guest intercepts here for INIT
+ * IPI (e.g. SVM_EXIT_INIT). */
+
+ PGMR3ResetCpu(pVM, pVCpu);
+ PDMR3ResetCpu(pVCpu); /* Only clears pending interrupts force flags */
+ APICR3InitIpi(pVCpu);
+ TRPMR3ResetCpu(pVCpu);
+ CPUMR3ResetCpu(pVM, pVCpu);
+ EMR3ResetCpu(pVCpu);
+ HMR3ResetCpu(pVCpu);
+ NEMR3ResetCpu(pVCpu, true /*fInitIpi*/);
+
+ /* This will trickle up on the target EMT. */
+ return VINF_EM_WAIT_SIPI;
+}
+
+
+/**
+ * Sends a Startup IPI to the virtual CPU by setting CS:EIP into
+ * vector-dependent state and unhalting the processor.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu Virtual CPU to perform SIPI on.
+ * @param uVector SIPI vector.
+ */
+VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
+{
+ AssertReturnVoid(idCpu < pVM->cCpus);
+
+ int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendStartupIpi, 3, pVM, idCpu, uVector);
+ AssertRC(rc);
+}
+
+
+/**
+ * Sends init IPI to the virtual CPU.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu Virtual CPU to perform the INIT IPI on.
+ */
+VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
+{
+ AssertReturnVoid(idCpu < pVM->cCpus);
+
+ int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
+ AssertRC(rc);
+}
+
+
+/**
+ * Registers the guest memory range that can be used for patching.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ VM_ASSERT_EMT(pVM);
+ if (HMIsEnabled(pVM))
+ return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
+
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Deregisters the guest memory range that can be used for patching.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ if (HMIsEnabled(pVM))
+ return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Common recursion handler for the other EMTs.
+ *
+ * @returns Strict VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param rcStrict Current status code to be combined with the one
+ * from this recursion and returned.
+ */
+static VBOXSTRICTRC vmmR3EmtRendezvousCommonRecursion(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
+{
+ int rc2;
+
+ /*
+ * We wait here while the initiator of this recursion reconfigures
+ * everything. The last EMT to get in signals the initiator.
+ */
+ if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) == pVM->cCpus)
+ {
+ rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
+ AssertLogRelRC(rc2);
+ }
+
+ rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPush, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc2);
+
+ /*
+ * Do the normal rendezvous processing.
+ */
+ VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
+ pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
+
+ /*
+ * Wait for the initiator to restore everything.
+ */
+ rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPop, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc2);
+
+ /*
+ * Last thread out of here signals the initiator.
+ */
+ if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) == pVM->cCpus)
+ {
+ rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
+ AssertLogRelRC(rc2);
+ }
+
+ /*
+ * Merge status codes and return.
+ */
+ AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
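+ /* Lower non-success informational codes have higher EM scheduling
+ priority, so the merge keeps the smaller of the two. */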
+ if ( rcStrict2 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rcStrict2))
+ rcStrict = rcStrict2;
+ return rcStrict;
+}
+
+
+/**
+ * Count returns and have the last non-caller EMT wake up the caller.
+ *
+ * @returns VBox strict informational status code for EM scheduling. No failures
+ * will be returned here, those are for the caller only.
+ *
+ * @param pVM The cross context VM structure.
+ * @param rcStrict The current accumulated recursive status code,
+ * to be merged with i32RendezvousStatus and
+ * returned.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) vmmR3EmtRendezvousNonCallerReturn(PVM pVM, VBOXSTRICTRC rcStrict)
+{
+ VBOXSTRICTRC rcStrict2 = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
+
+ uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
+ if (cReturned == pVM->cCpus - 1U)
+ {
+ int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
+ AssertLogRelRC(rc);
+ }
+
+ /*
+ * Merge the status codes, ignoring error statuses in this code path.
+ */
+ AssertLogRelMsgReturn( rcStrict2 <= VINF_SUCCESS
+ || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
+ ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
+ VERR_IPE_UNEXPECTED_INFO_STATUS);
+
+ if (RT_SUCCESS(rcStrict2))
+ {
+ if ( rcStrict2 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rcStrict2))
+ rcStrict = rcStrict2;
+ }
+ return rcStrict;
+}
+
+
+/**
+ * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
+ *
+ * @returns VBox strict informational status code for EM scheduling. No failures
+ * will be returned here, those are for the caller only. When
+ * fIsCaller is set, VINF_SUCCESS is always returned.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
+ * not.
+ * @param fFlags The flags.
+ * @param pfnRendezvous The callback.
+ * @param pvUser The user argument for the callback.
+ */
+static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
+ uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
+{
+ int rc;
+ VBOXSTRICTRC rcStrictRecursion = VINF_SUCCESS;
+
+ /*
+ * Enter, the last EMT triggers the next callback phase.
+ */
+ uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
+ if (cEntered != pVM->cCpus)
+ {
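+ /* Not the last EMT to arrive: block according to the execution method
+ until it is our turn (one-by-one/ordered) or everyone is released
+ (all-at-once), recursing into the nested rendezvous whenever the
+ initiator pushes one. */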
+ if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
+ {
+ /* Wait for our turn. */
+ for (;;)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
+ }
+ }
+ else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
+ {
+ /* Wait for the last EMT to arrive and wake everyone up. */
+ rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ Assert(!pVM->vmm.s.fRendezvousRecursion);
+ }
+ else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
+ {
+ /* Wait for our turn. */
+ for (;;)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
+ }
+ }
+ else
+ {
+ Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
+
+ /*
+ * The execute once is handled specially to optimize the code flow.
+ *
+ * The last EMT to arrive will perform the callback and the other
+ * EMTs will wait on the Done/DoneCaller semaphores (instead of
+ * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
+ * returns, that EMT will initiate the normal return sequence.
+ */
+ if (!fIsCaller)
+ {
+ for (;;)
+ {
+ rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
+ }
+
+ return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
+ }
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ /*
+ * All EMTs are waiting, clear the FF and take action according to the
+ * execution method.
+ */
+ VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
+
+ if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
+ {
+ /* Wake up everyone. */
+ rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
+ AssertLogRelRC(rc);
+ }
+ else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
+ {
+ /* Figure out who to wake up and wake it up. If it's ourselves, it's
+ easy; otherwise wait for our turn. */
+ VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ ? 0
+ : pVM->cCpus - 1U;
+ if (pVCpu->idCpu != iFirst)
+ {
+ rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
+ AssertLogRelRC(rc);
+ for (;;)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
+ }
+ }
+ }
+ /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
+ }
+
+
+ /*
+ * Do the callback and update the status if necessary.
+ */
+ if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
+ || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
+ {
+ VBOXSTRICTRC rcStrict2 = pfnRendezvous(pVM, pVCpu, pvUser);
+ if (rcStrict2 != VINF_SUCCESS)
+ {
+ AssertLogRelMsg( rcStrict2 <= VINF_SUCCESS
+ || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
+ ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
+ int32_t i32RendezvousStatus;
+ do
+ {
+ i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
+ if ( rcStrict2 == i32RendezvousStatus
+ || RT_FAILURE(i32RendezvousStatus)
+ || ( i32RendezvousStatus != VINF_SUCCESS
+ && rcStrict2 > i32RendezvousStatus))
+ break;
+ } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict2), i32RendezvousStatus));
+ }
+ }
+
+ /*
+ * Increment the done counter and take action depending on whether we're
+ * the last to finish callback execution.
+ */
+ uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
+ if ( cDone != pVM->cCpus
+ && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
+ {
+ /* Signal the next EMT? */
+ if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
+ {
+ rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
+ AssertLogRelRC(rc);
+ }
+ else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
+ {
+ Assert(cDone == pVCpu->idCpu + 1U);
+ rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
+ AssertLogRelRC(rc);
+ }
+ else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
+ {
+ Assert(pVM->cCpus - cDone == pVCpu->idCpu);
+ rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
+ AssertLogRelRC(rc);
+ }
+
+ /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
+ if (!fIsCaller)
+ {
+ for (;;)
+ {
+ rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
+ }
+ }
+ }
+ else
+ {
+ /* Callback execution is all done, tell the rest to return. */
+ rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
+ AssertLogRelRC(rc);
+ }
+
+ if (!fIsCaller)
+ return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
+ return rcStrictRecursion;
+}
+
+
+/**
+ * Called in response to VM_FF_EMT_RENDEZVOUS.
+ *
+ * @returns VBox strict status code - EM scheduling. No errors will be returned
+ * here, nor will any non-EM scheduling status codes be returned.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ *
+ * @thread EMT
+ */
+VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
+{
+ Assert(!pVCpu->vmm.s.fInRendezvous);
+ Log(("VMMR3EmtRendezvousFF: EMT%#u\n", pVCpu->idCpu));
+ pVCpu->vmm.s.fInRendezvous = true;
+ VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
+ pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
+ pVCpu->vmm.s.fInRendezvous = false;
+ Log(("VMMR3EmtRendezvousFF: EMT%#u returns %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
+ return VBOXSTRICTRC_TODO(rcStrict);
+}
+
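+/* Illustrative usage sketch (not part of the original source): an initiating
+ * EMT typically enters the rendezvous machinery above via the public API,
+ * e.g.:
+ *
+ *     static DECLCALLBACK(VBOXSTRICTRC) myWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+ *     {
+ *         RT_NOREF(pVM, pVCpu, pvUser); // per-CPU work goes here
+ *         return VINF_SUCCESS;
+ *     }
+ *     ...
+ *     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myWorker, NULL);
+ *
+ * The call sets VM_FF_EMT_RENDEZVOUS, making the remaining EMTs arrive
+ * through VMMR3EmtRendezvousFF above. */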
+
+/**
+ * Helper for resetting a single wakeup event semaphore.
+ *
+ * @returns VERR_TIMEOUT on success, RTSemEventWait status otherwise.
+ * @param hEvt The event semaphore to reset.
+ */
+static int vmmR3HlpResetEvent(RTSEMEVENT hEvt)
+{
+ for (uint32_t cLoops = 0; ; cLoops++)
+ {
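+ /* Each VINF_SUCCESS here consumed a stray pending wakeup; VERR_TIMEOUT
+ means the semaphore is now fully reset. */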
+ int rc = RTSemEventWait(hEvt, 0 /*cMsTimeout*/);
+ if (rc != VINF_SUCCESS || cLoops > _4K)
+ return rc;
+ }
+}
+
+
+/**
+ * Worker for VMMR3EmtRendezvous that handles recursion.
+ *
+ * @returns VBox strict status code. This will be the first error,
+ * VINF_SUCCESS, or an EM scheduling status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param fFlags Flags indicating execution methods. See
+ * grp_VMMR3EmtRendezvous_fFlags.
+ * @param pfnRendezvous The callback.
+ * @param pvUser User argument for the callback.
+ *
+ * @thread EMT(pVCpu)
+ */
+static VBOXSTRICTRC vmmR3EmtRendezvousRecursive(PVM pVM, PVMCPU pVCpu, uint32_t fFlags,
+ PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
+{
+ Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d\n", fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions));
+ AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
+ Assert(pVCpu->vmm.s.fInRendezvous);
+
+ /*
+ * Save the current state.
+ */
+ uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
+ uint32_t const cParentDone = pVM->vmm.s.cRendezvousEmtsDone;
+ int32_t const iParentStatus = pVM->vmm.s.i32RendezvousStatus;
+ PFNVMMEMTRENDEZVOUS const pfnParent = pVM->vmm.s.pfnRendezvous;
+ void * const pvParentUser = pVM->vmm.s.pvRendezvousUser;
+
+ /*
+ * Check preconditions.
+ */
+ AssertReturn( (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
+ || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
+ || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
+ VERR_INTERNAL_ERROR);
+ AssertReturn(pVM->vmm.s.cRendezvousEmtsEntered == pVM->cCpus, VERR_INTERNAL_ERROR_2);
+ AssertReturn(pVM->vmm.s.cRendezvousEmtsReturned == 0, VERR_INTERNAL_ERROR_3);
+
+ /*
+ * Reset the recursion push and pop semaphores.
+ */
+ int rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
+ AssertLogRelRCReturn(rc, rc);
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
+ AssertLogRelRCReturn(rc, rc);
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
+ AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
+ AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
+
+ /*
+ * Usher the other threads into the recursion routine.
+ */
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush, 0);
+ ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, true);
+
+ uint32_t cLeft = pVM->cCpus - (cParentDone + 1U);
+ if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
+ while (cLeft-- > 0)
+ {
+ rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
+ AssertLogRelRC(rc);
+ }
+ else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
+ {
+ Assert(cLeft == pVM->cCpus - (pVCpu->idCpu + 1U));
+ for (VMCPUID iCpu = pVCpu->idCpu + 1U; iCpu < pVM->cCpus; iCpu++)
+ {
+ rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu]);
+ AssertLogRelRC(rc);
+ }
+ }
+ else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
+ {
+ Assert(cLeft == pVCpu->idCpu);
+ for (VMCPUID iCpu = pVCpu->idCpu; iCpu > 0; iCpu--)
+ {
+ rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu - 1U]);
+ AssertLogRelRC(rc);
+ }
+ }
+ else
+ AssertLogRelReturn((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
+ VERR_INTERNAL_ERROR_4);
+
+ rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
+ AssertLogRelRC(rc);
+ rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
+ AssertLogRelRC(rc);
+
+
+ /*
+ * Wait for the EMTs to wake up and get out of the parent rendezvous code.
+ */
+ if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) != pVM->cCpus)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPushCaller, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ }
+
+ ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, false);
+
+ /*
+ * Clear the slate and setup the new rendezvous.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
+ AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+ }
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
+ ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
+ ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
+ ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
+ ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
+ ASMAtomicIncU32(&pVM->vmm.s.cRendezvousRecursions);
+
+ /*
+ * We're ready to go now, do normal rendezvous processing.
+ */
+ rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
+ AssertLogRelRC(rc);
+
+ VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /*fIsCaller*/, fFlags, pfnRendezvous, pvUser);
+
+ /*
+ * The caller waits for the other EMTs to be done, return and start waiting
+ * on the pop semaphore.
+ */
+ for (;;)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrict = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict);
+ }
+
+ /*
+ * Get the return code and merge it with the above recursion status.
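+ * (The lowest non-VINF_SUCCESS status wins: errors are negative and thus
+ * take precedence over EM scheduling statuses.)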
+ */
+ VBOXSTRICTRC rcStrict2 = pVM->vmm.s.i32RendezvousStatus;
+ if ( rcStrict2 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rcStrict2))
+ rcStrict = rcStrict2;
+
+ /*
+ * Restore the parent rendezvous state.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
+ AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+ }
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, pVM->cCpus);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, cParentDone);
+ ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, iParentStatus);
+ ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fParentFlags);
+ ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvParentUser);
+ ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnParent);
+
+ /*
+ * Usher the other EMTs back to their parent recursion routine, waiting
+ * for them to all get there before we return (makes sure they've been
+ * scheduled and are past the pop event sem, see below).
+ */
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop, 0);
+ rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
+ AssertLogRelRC(rc);
+
+ if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) != pVM->cCpus)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPopCaller, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ }
+
+ /*
+ * We must reset the pop semaphore on the way out (doing the pop caller too,
+ * just in case). The parent may be another recursion.
+ */
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop); AssertLogRelRC(rc);
+ rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
+
+ ASMAtomicDecU32(&pVM->vmm.s.cRendezvousRecursions);
+
+ Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d returns %Rrc\n",
+ fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions, VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+}
+
+
+/**
+ * EMT rendezvous.
+ *
+ * Gathers all the EMTs and executes some code on each of them, either in a
+ * one-by-one fashion or all at once.
+ *
+ * @returns VBox strict status code. This will be the first error,
+ * VINF_SUCCESS, or an EM scheduling status code.
+ *
+ * @retval VERR_DEADLOCK if recursion is attempted using a rendezvous type that
+ * doesn't support it or if the recursion is too deep.
+ *
+ * @param pVM The cross context VM structure.
+ * @param fFlags Flags indicating execution methods. See
+ * grp_VMMR3EmtRendezvous_fFlags. The one-by-one,
+ * once, descending and ascending rendezvous types
+ * support recursion from inside @a pfnRendezvous.
+ * @param pfnRendezvous The callback.
+ * @param pvUser User argument for the callback.
+ *
+ * @thread Any.
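+ *
+ * @par Example
+ * A minimal sketch (the worker callback is hypothetical) that runs code on
+ * each EMT in turn:
+ * @code
+ * static DECLCALLBACK(VBOXSTRICTRC) myWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+ * {
+ *     RT_NOREF(pVM, pvUser);
+ *     LogRel(("myWorker: running on EMT#%u\n", pVCpu->idCpu));
+ *     return VINF_SUCCESS;
+ * }
+ * ...
+ * int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, myWorker, NULL);
+ * @endcode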
+ */
+VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
+{
+ /*
+ * Validate input.
+ */
+ AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
+ AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
+ && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
+ && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
+ AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
+ || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
+ && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
+ ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
+
+ VBOXSTRICTRC rcStrict;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ if (!pVCpu)
+ {
+ /*
+ * Forward the request to an EMT thread.
+ */
+ Log(("VMMR3EmtRendezvous: %#x non-EMT\n", fFlags));
+ if (!(fFlags & VMMEMTRENDEZVOUS_FLAGS_PRIORITY))
+ rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
+ else
+ rcStrict = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
+ Log(("VMMR3EmtRendezvous: %#x non-EMT returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ else if ( pVM->cCpus == 1
+ || ( pVM->enmVMState == VMSTATE_DESTROYING
+ && VMR3GetActiveEmts(pVM->pUVM) < pVM->cCpus ) )
+ {
+ /*
+ * Shortcut for the single EMT case.
+ *
+ * We also end up here if EMT(0) (or another EMT) tries to issue a rendezvous
+ * during vmR3Destroy after other emulation threads have started terminating.
+ */
+ if (!pVCpu->vmm.s.fInRendezvous)
+ {
+ Log(("VMMR3EmtRendezvous: %#x EMT (uni)\n", fFlags));
+ pVCpu->vmm.s.fInRendezvous = true;
+ pVM->vmm.s.fRendezvousFlags = fFlags;
+ rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
+ pVCpu->vmm.s.fInRendezvous = false;
+ }
+ else
+ {
+ /* Recursion. Do the same checks as in the SMP case. */
+ Log(("VMMR3EmtRendezvous: %#x EMT (uni), recursion depth=%d\n", fFlags, pVM->vmm.s.cRendezvousRecursions));
+ uint32_t fType = pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK;
+ AssertLogRelReturn( !pVCpu->vmm.s.fInRendezvous
+ || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
+ || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
+ || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
+ , VERR_DEADLOCK);
+
+ AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
+ pVM->vmm.s.cRendezvousRecursions++;
+ uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
+ pVM->vmm.s.fRendezvousFlags = fFlags;
+
+ rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
+
+ pVM->vmm.s.fRendezvousFlags = fParentFlags;
+ pVM->vmm.s.cRendezvousRecursions--;
+ }
+ Log(("VMMR3EmtRendezvous: %#x EMT (uni) returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ else
+ {
+ /*
+ * Spin lock. If busy, check for recursion; if not recursing, wait for
+ * the other EMT to finish while keeping a lookout for the RENDEZVOUS FF.
+ */
+ int rc;
+ rcStrict = VINF_SUCCESS;
+ if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
+ {
+ /* Allow recursion in some cases. */
+ if ( pVCpu->vmm.s.fInRendezvous
+ && ( (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
+ || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
+ || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
+ || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
+ ))
+ return VBOXSTRICTRC_TODO(vmmR3EmtRendezvousRecursive(pVM, pVCpu, fFlags, pfnRendezvous, pvUser));
+
+ AssertLogRelMsgReturn(!pVCpu->vmm.s.fInRendezvous, ("fRendezvousFlags=%#x\n", pVM->vmm.s.fRendezvousFlags),
+ VERR_DEADLOCK);
+
+ Log(("VMMR3EmtRendezvous: %#x EMT#%u, waiting for lock...\n", fFlags, pVCpu->idCpu));
+ while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
+ {
+ if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
+ {
+ rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
+ if ( rc != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rc))
+ rcStrict = rc;
+ /** @todo Perhaps deal with termination here? */
+ }
+ ASMNopPause();
+ }
+ }
+
+ Log(("VMMR3EmtRendezvous: %#x EMT#%u\n", fFlags, pVCpu->idCpu));
+ Assert(!VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS));
+ Assert(!pVCpu->vmm.s.fInRendezvous);
+ pVCpu->vmm.s.fInRendezvous = true;
+
+ /*
+ * Clear the slate and setup the rendezvous. This is a semaphore ping-pong orgy. :-)
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
+ AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
+ }
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
+ rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
+ ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
+ ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
+ ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
+ ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
+ ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
+
+ /*
+ * Set the FF and poke the other EMTs.
+ */
+ VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
+ VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
+
+ /*
+ * Do the same ourselves.
+ */
+ VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
+
+ /*
+ * The caller waits for the other EMTs to be done and return before doing
+ * the cleanup. This does away with the wakeup / reset races we would otherwise
+ * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
+ */
+ for (;;)
+ {
+ rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
+ AssertLogRelRC(rc);
+ if (!pVM->vmm.s.fRendezvousRecursion)
+ break;
+ rcStrict2 = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict2);
+ }
+
+ /*
+ * Get the return code and clean up a little bit.
+ */
+ VBOXSTRICTRC rcStrict3 = pVM->vmm.s.i32RendezvousStatus;
+ ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
+
+ ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
+ pVCpu->vmm.s.fInRendezvous = false;
+
+ /*
+ * Merge rcStrict, rcStrict2 and rcStrict3.
+ */
+ AssertRC(VBOXSTRICTRC_VAL(rcStrict));
+ AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
+ if ( rcStrict2 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rcStrict2))
+ rcStrict = rcStrict2;
+ if ( rcStrict3 != VINF_SUCCESS
+ && ( rcStrict == VINF_SUCCESS
+ || rcStrict > rcStrict3))
+ rcStrict = rcStrict3;
+ Log(("VMMR3EmtRendezvous: %#x EMT#%u returns %Rrc\n", fFlags, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+
+ AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
+ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
+ ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
+ VERR_IPE_UNEXPECTED_INFO_STATUS);
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * Interface for vmR3SetHaltMethodU.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling EMT.
+ * @param fMayHaltInRing0 The new state.
+ * @param cNsSpinBlockThreshold The spin-vs-blocking threshold.
+ * @thread EMT(pVCpu)
+ *
+ * @todo Move the EMT handling to VMM (or EM). I soooooo regret that VM
+ * component.
+ */
+VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold)
+{
+ LogFlow(("VMMR3SetMayHaltInRing0(#%u, %d, %u)\n", pVCpu->idCpu, fMayHaltInRing0, cNsSpinBlockThreshold));
+ pVCpu->vmm.s.fMayHaltInRing0 = fMayHaltInRing0;
+ pVCpu->vmm.s.cNsSpinBlockThreshold = cNsSpinBlockThreshold;
+}
+
+
+/**
+ * Read from the ring 0 jump buffer stack.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param R0Addr Where to start reading.
+ * @param pvBuf Where to store the data we've read.
+ * @param cbRead The number of bytes to read.
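+ *
+ * @par Example
+ * Illustrative sketch only; @c idCpu and @c uR0Sp are assumed inputs, the
+ * latter typically taken from the assertion jump buffer (UnwindSp):
+ * @code
+ * uint8_t abBuf[64];
+ * int rc = VMMR3ReadR0Stack(pVM, idCpu, uR0Sp, abBuf, sizeof(abBuf));
+ * @endcode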
+ */
+VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
+{
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
+ AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER);
+
+ /*
+ * Hopefully we've got all the requested bits. If not, supply what we
+ * can and zero the remaining stuff.
+ */
+ RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
+ if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid)
+ {
+ size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off;
+ if (cbRead <= cbValid)
+ {
+ memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead);
+ return VINF_SUCCESS;
+ }
+
+ memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid);
+ RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid);
+ }
+ else
+ RT_BZERO(pvBuf, cbRead);
+
+ /*
+ * Supply the setjmp return RIP/EIP if requested.
+ */
+ if ( pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
+ && pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation < R0Addr + cbRead)
+ {
+ uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue;
+ size_t cbSrc = sizeof(pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue);
+ size_t offDst = 0;
+ if (R0Addr < pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
+ offDst = pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation - R0Addr;
+ else if (R0Addr > pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
+ {
+ size_t offSrc = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation;
+ Assert(offSrc < cbSrc);
+ pbSrc -= offSrc;
+ cbSrc -= offSrc;
+ }
+ if (cbSrc > cbRead - offDst)
+ cbSrc = cbRead - offDst;
+ memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc);
+
+ //if (cbSrc == cbRead)
+ // rc = VINF_SUCCESS;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Used by the DBGF stack unwinder to initialize the register state.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU being unwound.
+ * @param pState The unwind state to initialize.
+ */
+VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, struct RTDBGUNWINDSTATE *pState)
+{
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
+ AssertReturnVoid(pVCpu);
+
+ /*
+ * This would be all we really need here if we had proper unwind info (win64 only)...
+ */
+ pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp;
+ pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
+ pState->uPc = pVCpu->vmm.s.AssertJmpBuf.UnwindPc;
+
+ /*
+ * Locate the resume point on the stack.
+ */
+#ifdef RT_ARCH_AMD64
+ /* This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm exactly. */
+ uintptr_t off = 0;
+# ifdef RT_OS_WINDOWS
+ off += 0xa0; /* XMM6 thru XMM15 */
+# endif
+ pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+# ifdef RT_OS_WINDOWS
+ pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+# endif
+ pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 8;
+ pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
+
+#elif defined(RT_ARCH_X86)
+ /* This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-x86.asm exactly. */
+ uintptr_t off = 0;
+ pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 4;
+ pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 4;
+ pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 4;
+ pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 4;
+ pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ off += 4;
+ pState->uPc = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
+ pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
+
+#elif defined(RT_ARCH_ARM64)
+ /** @todo PORTME: arm ring-0 */
+
+#else
+# error "Port me"
+#endif
+}
+
+
+/**
+ * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param uOperation Operation to execute.
+ * @param u64Arg Constant argument.
+ * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
+ * details.
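+ *
+ * @par Example
+ * A minimal sketch; VMMR0_DO_SOMETHING is a placeholder rather than a real
+ * operation, see VMMR0OPERATION for the actual values:
+ * @code
+ * int rc = VMMR3CallR0(pVM, VMMR0_DO_SOMETHING, 0, NULL); // no u64Arg, no request header
+ * @endcode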
+ */
+VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
+ return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
+}
+
+
+/**
+ * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param enmOperation Operation to execute.
+ * @param u64Arg Constant argument.
+ * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
+ * details.
+ */
+VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+{
+ /*
+ * Call ring-0.
+ */
+#ifdef NO_SUPCALLR0VMM
+ int rc = VERR_GENERAL_FAILURE;
+#else
+ int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
+#endif
+
+ /*
+ * Flush the logs and deal with ring-0 assertions.
+ */
+#ifdef LOG_ENABLED
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+ VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+ if (rc != VERR_VMM_RING0_ASSERTION)
+ {
+ AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
+ ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
+ VERR_IPE_UNEXPECTED_INFO_STATUS);
+ return rc;
+ }
+ return vmmR3HandleRing0Assert(pVM, pVCpu);
+}
+
+
+/**
+ * Logs a ring-0 assertion ASAP after returning to ring-3.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
+{
+ RT_NOREF(pVCpu);
+ LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
+ LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
+ return VERR_VMM_RING0_ASSERTION;
+}
+
+
+/**
+ * Displays the force action flags (FFs).
+ *
+ * @param pVM The cross context VM structure.
+ * @param pHlp The output helpers.
+ * @param pszArgs The additional arguments (ignored).
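+ *
+ * @par Example
+ * This is a DBGF info handler; a sketch of triggering it via DBGFR3Info,
+ * where the registration name used here is an assumption (check the
+ * DBGFR3InfoRegisterInternal call for the real one):
+ * @code
+ * DBGFR3Info(pVM->pUVM, "fflags", NULL, NULL); // NULL pHlp selects the default output
+ * @endcode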
+ */
+static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+ int c;
+ uint64_t f; /* Must be 64-bit since the per-CPU FFs are 64 bits wide. */
+ NOREF(pszArgs);
+
+#define PRINT_FLAG(prf,flag) do { \
+ if (f & (prf##flag)) \
+ { \
+ static const char *s_psz = #flag; \
+ if (!(c % 6)) \
+ pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
+ else \
+ pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
+ c++; \
+ f &= ~(prf##flag); \
+ } \
+ } while (0)
+
+#define PRINT_GROUP(prf,grp,sfx) do { \
+ if (f & (prf##grp##sfx)) \
+ { \
+ static const char *s_psz = #grp; \
+ if (!(c % 5)) \
+ pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
+ else \
+ pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
+ c++; \
+ } \
+ } while (0)
+
+ /*
+ * The global flags.
+ */
+ const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
+ pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
+
+ /* show the flag mnemonics */
+ c = 0;
+ f = fGlobalForcedActions;
+ PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
+ PRINT_FLAG(VM_FF_,PDM_QUEUES);
+ PRINT_FLAG(VM_FF_,PDM_DMA);
+ PRINT_FLAG(VM_FF_,DBGF);
+ PRINT_FLAG(VM_FF_,REQUEST);
+ PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
+ PRINT_FLAG(VM_FF_,RESET);
+ PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
+ PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
+ PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
+ PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
+ PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
+ if (f)
+ pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
+ else
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /* the groups */
+ c = 0;
+ f = fGlobalForcedActions;
+ PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
+ PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
+ PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
+ PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
+ PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
+ PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
+ PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
+ PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
+ if (c)
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /*
+ * Per CPU flags.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = pVM->apCpusR3[i];
+ const uint64_t fLocalForcedActions = pVCpu->fLocalForcedActions;
+ pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX64", i, fLocalForcedActions);
+
+ /* show the flag mnemonics */
+ c = 0;
+ f = fLocalForcedActions;
+ PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
+ PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
+ PRINT_FLAG(VMCPU_FF_,TIMER);
+ PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
+ PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
+ PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
+ PRINT_FLAG(VMCPU_FF_,UNHALT);
+ PRINT_FLAG(VMCPU_FF_,IEM);
+ PRINT_FLAG(VMCPU_FF_,UPDATE_APIC);
+ PRINT_FLAG(VMCPU_FF_,DBGF);
+ PRINT_FLAG(VMCPU_FF_,REQUEST);
+ PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
+ PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
+ PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
+ PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
+ PRINT_FLAG(VMCPU_FF_,TO_R3);
+ PRINT_FLAG(VMCPU_FF_,IOM);
+ if (f)
+ pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX64\n", c ? "," : "", f);
+ else
+ pHlp->pfnPrintf(pHlp, "\n");
+
+ /* the groups */
+ c = 0;
+ f = fLocalForcedActions;
+ PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
+ PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
+ PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
+ PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
+ PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
+ PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
+ PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
+ PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
+ PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
+ PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
+ if (c)
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+
+#undef PRINT_FLAG
+#undef PRINT_GROUP
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp b/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
new file mode 100644
index 00000000..bbd2c366
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
@@ -0,0 +1,701 @@
+/* $Id: VMMGuruMeditation.cpp $ */
+/** @file
+ * VMM - The Virtual Machine Monitor, Guru Meditation Code.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VMM
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/dbgf.h>
+#include "VMMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/em.h>
+
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/version.h>
+#include <VBox/vmm/hm.h>
+#include <iprt/assert.h>
+#include <iprt/dbg.h>
+#include <iprt/time.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Structure to pass to DBGFR3Info() and for doing all other
+ * output during a fatal dump.
+ */
+typedef struct VMMR3FATALDUMPINFOHLP
+{
+ /** The helper core. */
+ DBGFINFOHLP Core;
+ /** The release logger instance. */
+ PRTLOGGER pRelLogger;
+ /** The saved release logger flags. */
+ uint32_t fRelLoggerFlags;
+ /** The logger instance. */
+ PRTLOGGER pLogger;
+ /** The saved logger flags. */
+ uint32_t fLoggerFlags;
+ /** The saved logger destination flags. */
+ uint32_t fLoggerDestFlags;
+ /** Whether to output to stderr or not. */
+ bool fStdErr;
+ /** Whether we're still recording the summary or not. */
+ bool fRecSummary;
+ /** Buffer for the summary. */
+ char szSummary[4096 - 2];
+ /** The current summary offset. */
+ size_t offSummary;
+ /** Standard error buffer. */
+ char achStdErrBuf[4096 - 8];
+ /** Standard error buffer offset. */
+ size_t offStdErrBuf;
+} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
+/** Pointer to a VMMR3FATALDUMPINFOHLP structure. */
+typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
+
+
+/**
+ * Flushes the content of achStdErrBuf, setting offStdErrBuf to zero.
+ *
+ * @param pHlp The instance to flush.
+ */
+static void vmmR3FatalDumpInfoHlpFlushStdErr(PVMMR3FATALDUMPINFOHLP pHlp)
+{
+ size_t cch = pHlp->offStdErrBuf;
+ if (cch)
+ {
+ RTStrmWrite(g_pStdErr, pHlp->achStdErrBuf, cch);
+ pHlp->offStdErrBuf = 0;
+ }
+}
+
+/**
+ * @callback_method_impl{FNRTSTROUTPUT, For buffering stderr output.}
+ */
+static DECLCALLBACK(size_t) vmmR3FatalDumpInfoHlp_BufferedStdErrOutput(void *pvArg, const char *pachChars, size_t cbChars)
+{
+ PVMMR3FATALDUMPINFOHLP pHlp = (PVMMR3FATALDUMPINFOHLP)pvArg;
+ if (cbChars)
+ {
+ size_t offBuf = pHlp->offStdErrBuf;
+ if (cbChars < sizeof(pHlp->achStdErrBuf) - offBuf)
+ { /* likely */ }
+ else
+ {
+ vmmR3FatalDumpInfoHlpFlushStdErr(pHlp);
+ if (cbChars < sizeof(pHlp->achStdErrBuf))
+ offBuf = 0;
+ else
+ {
+ RTStrmWrite(g_pStdErr, pachChars, cbChars);
+ return cbChars;
+ }
+ }
+ memcpy(&pHlp->achStdErrBuf[offBuf], pachChars, cbChars);
+ pHlp->offStdErrBuf = offBuf + cbChars;
+ }
+ return cbChars;
+}
+
+
+/**
+ * Print formatted string.
+ *
+ * @param pHlp Pointer to this structure.
+ * @param pszFormat The format string.
+ * @param ... Arguments.
+ */
+static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ va_list args;
+ va_start(args, pszFormat);
+ pHlp->pfnPrintfV(pHlp, pszFormat, args);
+ va_end(args);
+}
+
+/**
+ * Print formatted string.
+ *
+ * @param pHlp Pointer to this structure.
+ * @param pszFormat The format string.
+ * @param args Argument list.
+ */
+static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
+{
+ PVMMR3FATALDUMPINFOHLP pMyHlp = (PVMMR3FATALDUMPINFOHLP)pHlp;
+
+ if (pMyHlp->pRelLogger)
+ {
+ va_list args2;
+ va_copy(args2, args);
+ RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
+ va_end(args2);
+ }
+ if (pMyHlp->pLogger)
+ {
+ va_list args2;
+ va_copy(args2, args);
+ RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2);
+ va_end(args2);
+ }
+ if (pMyHlp->fStdErr)
+ {
+ va_list args2;
+ va_copy(args2, args);
+ RTStrFormatV(vmmR3FatalDumpInfoHlp_BufferedStdErrOutput, pMyHlp, NULL, NULL, pszFormat, args2);
+ //RTStrmPrintfV(g_pStdErr, pszFormat, args2);
+ va_end(args2);
+ }
+ if (pMyHlp->fRecSummary)
+ {
+ size_t cchLeft = sizeof(pMyHlp->szSummary) - pMyHlp->offSummary;
+ if (cchLeft > 1)
+ {
+ va_list args2;
+ va_copy(args2, args);
+ size_t cch = RTStrPrintfV(&pMyHlp->szSummary[pMyHlp->offSummary], cchLeft, pszFormat, args2);
+ va_end(args2);
+ Assert(cch <= cchLeft);
+ pMyHlp->offSummary += cch;
+ }
+ }
+}
+
+
+/**
+ * Initializes the fatal dump output helper.
+ *
+ * @param pHlp The structure to initialize.
+ */
+static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
+{
+ RT_BZERO(pHlp, sizeof(*pHlp));
+
+ pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
+ pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
+ pHlp->Core.pfnGetOptError = DBGFR3InfoGenericGetOptError;
+
+ /*
+ * The loggers.
+ */
+ pHlp->pRelLogger = RTLogRelGetDefaultInstance();
+#ifdef LOG_ENABLED
+ pHlp->pLogger = RTLogDefaultInstance();
+#else
+ if (pHlp->pRelLogger)
+ pHlp->pLogger = RTLogGetDefaultInstance();
+ else
+ pHlp->pLogger = RTLogDefaultInstance();
+#endif
+
+ if (pHlp->pRelLogger)
+ {
+ pHlp->fRelLoggerFlags = RTLogGetFlags(pHlp->pRelLogger);
+ RTLogChangeFlags(pHlp->pRelLogger, RTLOGFLAGS_BUFFERED, RTLOGFLAGS_DISABLED);
+ }
+
+ if (pHlp->pLogger)
+ {
+ pHlp->fLoggerFlags = RTLogGetFlags(pHlp->pLogger);
+ pHlp->fLoggerDestFlags = RTLogGetDestinations(pHlp->pLogger);
+ RTLogChangeFlags(pHlp->pLogger, RTLOGFLAGS_BUFFERED, RTLOGFLAGS_DISABLED);
+#ifndef DEBUG_sandervl
+ RTLogChangeDestinations(pHlp->pLogger, RTLOGDEST_DEBUGGER, 0);
+#endif
+ }
+
+ /*
+ * Check if we need to write to stderr.
+ */
+ pHlp->fStdErr = (!pHlp->pRelLogger || !(RTLogGetDestinations(pHlp->pRelLogger) & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
+ && (!pHlp->pLogger || !(RTLogGetDestinations(pHlp->pLogger) & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
+#ifdef DEBUG_sandervl
+ pHlp->fStdErr = false; /* takes too long to display here */
+#endif
+ pHlp->offStdErrBuf = 0;
+
+ /*
+ * Init the summary recording.
+ */
+ pHlp->fRecSummary = true;
+ pHlp->offSummary = 0;
+ pHlp->szSummary[0] = '\0';
+}
+
+
+/**
+ * Deletes the fatal dump output helper.
+ *
+ * @param pHlp The structure to delete.
+ */
+static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
+{
+ if (pHlp->pRelLogger)
+ {
+ RTLogFlush(pHlp->pRelLogger);
+ RTLogChangeFlags(pHlp->pRelLogger,
+ pHlp->fRelLoggerFlags & RTLOGFLAGS_DISABLED,
+ pHlp->fRelLoggerFlags & RTLOGFLAGS_BUFFERED);
+ }
+
+ if (pHlp->pLogger)
+ {
+ RTLogFlush(pHlp->pLogger);
+ RTLogChangeFlags(pHlp->pLogger,
+ pHlp->fLoggerFlags & RTLOGFLAGS_DISABLED,
+ pHlp->fLoggerFlags & RTLOGFLAGS_BUFFERED);
+ RTLogChangeDestinations(pHlp->pLogger, 0, pHlp->fLoggerDestFlags & RTLOGDEST_DEBUGGER);
+ }
+
+ if (pHlp->fStdErr)
+ vmmR3FatalDumpInfoHlpFlushStdErr(pHlp);
+}
+
+
+/**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmmR3FatalDumpRendezvousDoneCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
+ RT_NOREF(pVCpu, pvUser);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Dumps the VM state on a fatal error.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rcErr VBox status code.
+ */
+VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
+{
+ /*
+ * Create our output helper and sync it with the log settings.
+ * This helper will be used for all the output.
+ */
+ VMMR3FATALDUMPINFOHLP Hlp;
+ PCDBGFINFOHLP pHlp = &Hlp.Core;
+ vmmR3FatalDumpInfoHlpInit(&Hlp);
+
+ /* Release owned locks to make sure other VCPUs can continue in case they were waiting for one. */
+ PDMR3CritSectLeaveAll(pVM);
+
+ /*
+ * Header.
+ */
+ pHlp->pfnPrintf(pHlp,
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ "!!\n"
+ "!! VCPU%u: Guru Meditation %d (%Rrc)\n"
+ "!!\n",
+ pVCpu->idCpu, rcErr, rcErr);
+
+ /*
+ * Continue according to context.
+ */
+ bool fDoneHyper = false;
+ bool fDoneImport = false;
+ switch (rcErr)
+ {
+ /*
+ * Hypervisor errors.
+ */
+ case VERR_VMM_RING0_ASSERTION:
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ case VERR_VMM_RING3_CALL_DISABLED:
+ case VERR_VMM_WRONG_HM_VMCPU_STATE:
+ case VERR_VMM_CONTEXT_HOOK_STILL_ENABLED:
+ {
+ const char *pszMsg1 = VMMR3GetRZAssertMsg1(pVM);
+ while (pszMsg1 && *pszMsg1 == '\n')
+ pszMsg1++;
+ const char *pszMsg2 = VMMR3GetRZAssertMsg2(pVM);
+ while (pszMsg2 && *pszMsg2 == '\n')
+ pszMsg2++;
+ pHlp->pfnPrintf(pHlp,
+ "%s"
+ "%s",
+ pszMsg1,
+ pszMsg2);
+ if ( !pszMsg2
+ || !*pszMsg2
+ || strchr(pszMsg2, '\0')[-1] != '\n')
+ pHlp->pfnPrintf(pHlp, "\n");
+ }
+ RT_FALL_THRU();
+ case VERR_TRPM_DONT_PANIC:
+ case VERR_TRPM_PANIC:
+ case VINF_EM_RAW_STALE_SELECTOR:
+ case VINF_EM_RAW_IRET_TRAP:
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_TRIPLE_FAULT:
+ case VERR_VMM_HYPER_CR3_MISMATCH:
+ case VERR_VMM_LONG_JMP_ERROR:
+ {
+ /*
+ * Active trap? This is only of partial interest when in hardware-
+ * assisted virtualization mode, thus the different messages.
+ */
+ TRPMEVENT enmType;
+ uint8_t u8TrapNo = 0xce;
+ uint32_t uErrorCode = 0xdeadface;
+ RTGCUINTPTR uCR2 = 0xdeadface;
+ uint8_t cbInstr = UINT8_MAX;
+ bool fIcebp = false;
+ int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2, &cbInstr, &fIcebp);
+ if (RT_SUCCESS(rc2))
+ pHlp->pfnPrintf(pHlp,
+ "!! ACTIVE TRAP=%02x ERRCD=%RX32 CR2=%RGv PC=%RGr Type=%d cbInstr=%02x fIcebp=%RTbool (Guest!)\n",
+ u8TrapNo, uErrorCode, uCR2, CPUMGetGuestRIP(pVCpu), enmType, cbInstr, fIcebp);
+
+ /*
+ * Dump the relevant hypervisor registers and stack.
+ */
+ if (rcErr == VERR_VMM_RING0_ASSERTION)
+ {
+ /* Dump the jmpbuf. */
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! AssertJmpBuf:\n"
+ "!!\n");
+ pHlp->pfnPrintf(pHlp,
+ "UnwindSp=%RHv UnwindRetSp=%RHv UnwindBp=%RHv UnwindPc=%RHv\n",
+ pVCpu->vmm.s.AssertJmpBuf.UnwindSp,
+ pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp,
+ pVCpu->vmm.s.AssertJmpBuf.UnwindBp,
+ pVCpu->vmm.s.AssertJmpBuf.UnwindPc);
+ pHlp->pfnPrintf(pHlp,
+ "UnwindRetPcValue=%RHv UnwindRetPcLocation=%RHv\n",
+ pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue,
+ pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation);
+ pHlp->pfnPrintf(pHlp,
+ "pfn=%RHv pvUser1=%RHv pvUser2=%RHv\n",
+ pVCpu->vmm.s.AssertJmpBuf.pfn,
+ pVCpu->vmm.s.AssertJmpBuf.pvUser1,
+ pVCpu->vmm.s.AssertJmpBuf.pvUser2);
+
+ /* Dump the resume register frame on the stack. */
+ PRTHCUINTPTR const pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.abAssertStack[ pVCpu->vmm.s.AssertJmpBuf.UnwindBp
+ - pVCpu->vmm.s.AssertJmpBuf.UnwindSp];
+#if HC_ARCH_BITS == 32
+ pHlp->pfnPrintf(pHlp,
+ "eax=volatile ebx=%08x ecx=volatile edx=volatile esi=%08x edi=%08x\n"
+ "eip=%08x esp=%08x ebp=%08x efl=%08x\n"
+ ,
+ pBP[-3], pBP[-2], pBP[-1],
+ pBP[1], pVCpu->vmm.s.AssertJmpBuf.SavedEbp - 8, pBP[0], pBP[-4]);
+#else
+# ifdef RT_OS_WINDOWS
+ pHlp->pfnPrintf(pHlp,
+ "rax=volatile rbx=%016RX64 rcx=volatile rdx=volatile\n"
+ "rsi=%016RX64 rdi=%016RX64 r8=volatile r9=volatile \n"
+ "r10=volatile r11=volatile r12=%016RX64 r13=%016RX64\n"
+ "r14=%016RX64 r15=%016RX64\n"
+ "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 rfl=%08RX64\n"
+ ,
+ pBP[-7],
+ pBP[-6], pBP[-5],
+ pBP[-4], pBP[-3],
+ pBP[-2], pBP[-1],
+ pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-8]);
+# else
+ pHlp->pfnPrintf(pHlp,
+ "rax=volatile rbx=%016RX64 rcx=volatile rdx=volatile\n"
+ "rsi=volatile rdi=volatile r8=volatile r9=volatile \n"
+ "r10=volatile r11=volatile r12=%016RX64 r13=%016RX64\n"
+ "r14=%016RX64 r15=%016RX64\n"
+ "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 rflags=%08RX64\n"
+ ,
+ pBP[-5],
+ pBP[-4], pBP[-3],
+ pBP[-2], pBP[-1],
+ pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-6]);
+# endif
+#endif
+
+ /* Callstack. */
+ DBGFADDRESS AddrPc, AddrBp, AddrSp;
+ PCDBGFSTACKFRAME pFirstFrame;
+ rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0,
+ DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.AssertJmpBuf.UnwindBp),
+ DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.AssertJmpBuf.UnwindSp),
+ DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.AssertJmpBuf.UnwindPc),
+ RTDBGRETURNTYPE_INVALID, &pFirstFrame);
+ if (RT_SUCCESS(rc2))
+ {
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! Call Stack:\n"
+ "!!\n");
+#if HC_ARCH_BITS == 32
+ pHlp->pfnPrintf(pHlp, "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
+#else
+ pHlp->pfnPrintf(pHlp, "RBP Ret RBP Ret RIP RIP Symbol [line]\n");
+#endif
+ for (PCDBGFSTACKFRAME pFrame = pFirstFrame;
+ pFrame;
+ pFrame = DBGFR3StackWalkNext(pFrame))
+ {
+#if HC_ARCH_BITS == 32
+ pHlp->pfnPrintf(pHlp,
+ "%RHv %RHv %04RX32:%RHv %RHv %RHv %RHv %RHv",
+ (RTHCUINTPTR)pFrame->AddrFrame.off,
+ (RTHCUINTPTR)pFrame->AddrReturnFrame.off,
+ (RTHCUINTPTR)pFrame->AddrReturnPC.Sel,
+ (RTHCUINTPTR)pFrame->AddrReturnPC.off,
+ pFrame->Args.au32[0],
+ pFrame->Args.au32[1],
+ pFrame->Args.au32[2],
+ pFrame->Args.au32[3]);
+ pHlp->pfnPrintf(pHlp, " %RTsel:%08RHv", pFrame->AddrPC.Sel, pFrame->AddrPC.off);
+#else
+ pHlp->pfnPrintf(pHlp,
+ "%RHv %RHv %RHv %RHv",
+ (RTHCUINTPTR)pFrame->AddrFrame.off,
+ (RTHCUINTPTR)pFrame->AddrReturnFrame.off,
+ (RTHCUINTPTR)pFrame->AddrReturnPC.off,
+ (RTHCUINTPTR)pFrame->AddrPC.off);
+#endif
+ if (pFrame->pSymPC)
+ {
+ RTGCINTPTR offDisp = pFrame->AddrPC.FlatPtr - pFrame->pSymPC->Value;
+ if (offDisp > 0)
+ pHlp->pfnPrintf(pHlp, " %s+%llx", pFrame->pSymPC->szName, (int64_t)offDisp);
+ else if (offDisp < 0)
+ pHlp->pfnPrintf(pHlp, " %s-%llx", pFrame->pSymPC->szName, -(int64_t)offDisp);
+ else
+ pHlp->pfnPrintf(pHlp, " %s", pFrame->pSymPC->szName);
+ }
+ if (pFrame->pLinePC)
+ pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", pFrame->pLinePC->szFilename, pFrame->pLinePC->uLineNo);
+ pHlp->pfnPrintf(pHlp, "\n");
+ for (uint32_t iReg = 0; iReg < pFrame->cSureRegs; iReg++)
+ {
+ const char *pszName = pFrame->paSureRegs[iReg].pszName;
+ if (!pszName)
+ pszName = DBGFR3RegCpuName(pVM->pUVM, pFrame->paSureRegs[iReg].enmReg,
+ pFrame->paSureRegs[iReg].enmType);
+ char szValue[1024];
+ szValue[0] = '\0';
+ DBGFR3RegFormatValue(szValue, sizeof(szValue), &pFrame->paSureRegs[iReg].Value,
+ pFrame->paSureRegs[iReg].enmType, false);
+ pHlp->pfnPrintf(pHlp, " %-3s=%s\n", pszName, szValue);
+ }
+ }
+ DBGFR3StackWalkEnd(pFirstFrame);
+ }
+
+ /* Symbols on the stack. */
+ uint32_t const cbRawStack = RT_MIN(pVCpu->vmm.s.AssertJmpBuf.cbStackValid, sizeof(pVCpu->vmm.s.abAssertStack));
+ uintptr_t const * const pauAddr = (uintptr_t const *)&pVCpu->vmm.s.abAssertStack[0];
+ uint32_t const iEnd = cbRawStack / sizeof(uintptr_t);
+ uint32_t iAddr = 0;
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! Addresses on the stack (iAddr=%#x, iEnd=%#x)\n"
+ "!!\n",
+ iAddr, iEnd);
+ while (iAddr < iEnd)
+ {
+ uintptr_t const uAddr = pauAddr[iAddr];
+ if (uAddr > X86_PAGE_SIZE)
+ {
+ DBGFADDRESS Addr;
+ DBGFR3AddrFromFlat(pVM->pUVM, &Addr, uAddr);
+ RTGCINTPTR offDisp = 0;
+ RTGCINTPTR offLineDisp = 0;
+ PRTDBGSYMBOL pSym = DBGFR3AsSymbolByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr,
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
+ | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
+ &offDisp, NULL);
+ PRTDBGLINE pLine = DBGFR3AsLineByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, &offLineDisp, NULL);
+ if (pLine || pSym)
+ {
+ pHlp->pfnPrintf(pHlp, "%#06x: %p =>", iAddr * sizeof(uintptr_t), uAddr);
+ if (pSym)
+ pHlp->pfnPrintf(pHlp, " %s + %#x", pSym->szName, (intptr_t)offDisp);
+ if (pLine)
+ pHlp->pfnPrintf(pHlp, " [%s:%u + %#x]\n", pLine->szFilename, pLine->uLineNo, offLineDisp);
+ else
+ pHlp->pfnPrintf(pHlp, "\n");
+ RTDbgSymbolFree(pSym);
+ RTDbgLineFree(pLine);
+ }
+ }
+ iAddr++;
+ }
+
+ /* raw stack */
+ Hlp.fRecSummary = false;
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! Raw stack (mind the direction).\n"
+ "!! pbEMTStackR0=%RHv cbRawStack=%#x\n"
+ "!! pbEmtStackR3=%p\n"
+ "!!\n"
+ "%.*Rhxd\n",
+ pVCpu->vmm.s.AssertJmpBuf.UnwindSp, cbRawStack,
+ &pVCpu->vmm.s.abAssertStack[0],
+ cbRawStack, &pVCpu->vmm.s.abAssertStack[0]);
+ }
+ else
+ {
+ pHlp->pfnPrintf(pHlp,
+ "!! Skipping ring-0 registers and stack, rcErr=%Rrc\n", rcErr);
+ }
+ break;
+ }
+
+ case VERR_IEM_INSTR_NOT_IMPLEMENTED:
+ case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
+ case VERR_PATM_IPE_TRAP_IN_PATCH_CODE:
+ case VERR_EM_GUEST_CPU_HANG:
+ {
+ CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ABSOLUTELY_ALL);
+ fDoneImport = true;
+
+ DBGFR3Info(pVM->pUVM, "cpumguest", NULL, pHlp);
+ DBGFR3Info(pVM->pUVM, "cpumguestinstr", NULL, pHlp);
+ DBGFR3Info(pVM->pUVM, "cpumguesthwvirt", NULL, pHlp);
+ break;
+ }
+
+ /*
+ * For some problems (e.g. VERR_INVALID_STATE in VMMR0.cpp), there could be
+ * additional details in the assertion messages.
+ */
+ default:
+ {
+ const char *pszMsg1 = VMMR3GetRZAssertMsg1(pVM);
+ while (pszMsg1 && *pszMsg1 == '\n')
+ pszMsg1++;
+ if (pszMsg1 && *pszMsg1 != '\0')
+ pHlp->pfnPrintf(pHlp, "AssertMsg1: %s\n", pszMsg1);
+
+ const char *pszMsg2 = VMMR3GetRZAssertMsg2(pVM);
+ while (pszMsg2 && *pszMsg2 == '\n')
+ pszMsg2++;
+ if (pszMsg2 && *pszMsg2 != '\0')
+ pHlp->pfnPrintf(pHlp, "AssertMsg2: %s\n", pszMsg2);
+ break;
+ }
+
+ } /* switch (rcErr) */
+ Hlp.fRecSummary = false;
+
+
+ /*
+ * Generic info dumper loop.
+ */
+ if (!fDoneImport)
+ CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ABSOLUTELY_ALL);
+ static struct
+ {
+ const char *pszInfo;
+ const char *pszArgs;
+ } const aInfo[] =
+ {
+ { "mappings", NULL },
+ { "hma", NULL },
+ { "cpumguest", "verbose" },
+ { "cpumguesthwvirt", "verbose" },
+ { "cpumguestinstr", "verbose" },
+ { "cpumhyper", "verbose" },
+ { "cpumhost", "verbose" },
+ { "mode", "all" },
+ { "cpuid", "verbose" },
+ { "handlers", "phys virt hyper stats" },
+ { "timers", NULL },
+ { "activetimers", NULL },
+ };
+ for (unsigned i = 0; i < RT_ELEMENTS(aInfo); i++)
+ {
+ if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
+ continue;
+ pHlp->pfnPrintf(pHlp,
+ "!!\n"
+ "!! {%s, %s}\n"
+ "!!\n",
+ aInfo[i].pszInfo, aInfo[i].pszArgs);
+ DBGFR3Info(pVM->pUVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
+ }
+
+ /* All other info items */
+ DBGFR3InfoMulti(pVM,
+ "*",
+ "mappings|hma|cpum|cpumguest|cpumguesthwvirt|cpumguestinstr|cpumhyper|cpumhost|mode|cpuid"
+ "|pgmpd|pgmcr3|timers|activetimers|handlers|help|exithistory",
+ "!!\n"
+ "!! {%s}\n"
+ "!!\n",
+ pHlp);
+
+
+ /* done */
+ pHlp->pfnPrintf(pHlp,
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
+
+
+ /*
+ * Repeat the summary to stderr so we don't have to scroll half a mile up.
+ */
+ vmmR3FatalDumpInfoHlpFlushStdErr(&Hlp);
+ if (Hlp.szSummary[0])
+ RTStrmPrintf(g_pStdErr,
+ "%s\n"
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
+ Hlp.szSummary);
+
+ /*
+ * Delete the output instance (flushing and restoring of flags).
+ */
+ vmmR3FatalDumpInfoHlpDelete(&Hlp);
+
+ /*
+ * Rendezvous with the other EMTs and clear the VM_FF_CHECK_VM_STATE so we can
+ * stop burning CPU cycles.
+ */
+ VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmmR3FatalDumpRendezvousDoneCallback, NULL);
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMMR3.def b/src/VBox/VMM/VMMR3/VMMR3.def
new file mode 100644
index 00000000..f22b2841
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMMR3.def
@@ -0,0 +1,520 @@
+; $Id: VMMR3.def $
+;; @file
+; VMM Ring-3 Context DLL - Definition file.
+
+;
+; Copyright (C) 2010-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+LIBRARY VBoxVMM.dll
+EXPORTS
+ ; data
+
+ ; code
+ CFGMR3GetRoot
+ CFGMR3GetFirstChild
+ CFGMR3GetNextChild
+ CFGMR3GetNameLen
+ CFGMR3GetFirstValue
+ CFGMR3GetNextValue
+ CFGMR3GetValueNameLen
+ CFGMR3GetValueType
+ CFGMR3Dump
+ CFGMR3CreateTree
+ CFGMR3DestroyTree
+ CFGMR3GetValueName
+ CFGMR3GetName
+ CFGMR3RemoveNode
+ CFGMR3InsertBytes
+ CFGMR3InsertStringFV
+ CFGMR3InsertStringF
+ CFGMR3InsertStringN
+ CFGMR3InsertString
+ CFGMR3InsertStringW
+ CFGMR3InsertInteger
+ CFGMR3InsertPasswordN
+ CFGMR3InsertPassword
+ CFGMR3QueryStringAllocDef
+ CFGMR3RemoveValue
+ CFGMR3QueryIntegerDef
+ CFGMR3QueryGCPtrSDef
+ CFGMR3QueryGCPtrUDef
+ CFGMR3QueryGCPtrDef
+ CFGMR3QueryBoolDef
+ CFGMR3QueryS8Def
+ CFGMR3QueryU8Def
+ CFGMR3QueryS16Def
+ CFGMR3QueryU16Def
+ CFGMR3QueryPortDef
+ CFGMR3QueryS32Def
+ CFGMR3QuerySIntDef
+ CFGMR3QueryU32Def
+ CFGMR3QueryUIntDef
+ CFGMR3QueryS64Def
+ CFGMR3QueryU64Def
+ CFGMR3QueryInteger
+ CFGMR3QueryGCPtrS
+ CFGMR3QueryGCPtrU
+ CFGMR3QueryGCPtr
+ CFGMR3QueryBool
+ CFGMR3QueryS8
+ CFGMR3QueryU8
+ CFGMR3QueryS16
+ CFGMR3QueryU16
+ CFGMR3QueryPort
+ CFGMR3QueryS32
+ CFGMR3QuerySInt
+ CFGMR3QueryU32
+ CFGMR3QueryUInt
+ CFGMR3QueryS64
+ CFGMR3QueryU64
+ CFGMR3QuerySize
+ CFGMR3QueryType
+ CFGMR3AreValuesValid
+ CFGMR3AreChildrenValid
+ CFGMR3GetChildFV
+ CFGMR3GetChildF
+ CFGMR3GetChild
+ CFGMR3InsertNode
+ CFGMR3InsertNodeFV
+ CFGMR3InsertNodeF
+ CFGMR3InsertSubTree
+ CFGMR3ValidateConfig
+ CFGMR3QueryBytes
+ CFGMR3QueryStringDef
+ CFGMR3QueryString
+ CFGMR3QueryStringAlloc
+ CFGMR3GetParent
+ CFGMR3GetRootU
+
+ CPUMGetHostMicroarch
+ CPUMGetGuestMicroarch
+
+ DBGCCreate
+
+ DBGFR3BpClear
+ DBGFR3BpDisable
+ DBGFR3BpEnable
+ DBGFR3BpOwnerCreate
+ DBGFR3BpOwnerDestroy
+ DBGFR3BpSetInt3
+ DBGFR3BpSetInt3Ex
+ DBGFR3BpSetMmio
+ DBGFR3BpSetMmioEx
+ DBGFR3BpSetPortIo
+ DBGFR3BpSetPortIoEx
+ DBGFR3BpSetReg
+ DBGFR3BpSetRegEx
+ DBGFR3BpSetREM
+ DBGFR3CoreWrite
+ DBGFR3Info
+ DBGFR3InfoRegisterExternal
+ DBGFR3InfoDeregisterExternal
+ DBGFR3InfoGenericGetOptError
+ DBGFR3InjectNMI
+ DBGFR3LogModifyDestinations
+ DBGFR3LogModifyFlags
+ DBGFR3LogModifyGroups
+ DBGFR3OSDetect
+ DBGFR3OSQueryNameAndVersion
+ DBGFR3RegCpuQueryU8
+ DBGFR3RegCpuQueryU16
+ DBGFR3RegCpuQueryU32
+ DBGFR3RegCpuQueryU64
+ DBGFR3RegCpuQueryXdtr
+ DBGFR3RegCpuQueryLrd
+ DBGFR3RegFormatValue
+ DBGFR3RegNmQuery
+ DBGFR3RegNmQueryAll
+ DBGFR3RegNmQueryAllCount
+ DBGFR3RegNmSetBatch
+ DBGFR3OSDeregister
+ DBGFR3OSRegister
+ DBGFR3OSQueryInterface
+ DBGFR3MemReadString
+ DBGFR3MemRead
+ DBGFR3MemScan
+ DBGFR3ModInMem
+ DBGFR3AddrFromFlat
+ DBGFR3AsSymbolByName
+ DBGFR3AsResolveAndRetain
+ DBGFR3AsSetAlias
+ DBGFR3AddrAdd
+ DBGFR3AddrSub
+ DBGFR3AsGetConfig
+ DBGFR3CpuGetCount
+ DBGFR3CpuGetMode
+ DBGFR3CpuGetState
+ DBGFR3AddrFromSelOff
+ DBGFR3FlowCreate
+ DBGFR3FlowRetain
+ DBGFR3FlowRelease
+ DBGFR3FlowQueryStartBb
+ DBGFR3FlowQueryBbByAddress
+ DBGFR3FlowQueryBranchTblByAddress
+ DBGFR3FlowGetBbCount
+ DBGFR3FlowGetBranchTblCount
+ DBGFR3FlowGetCallInsnCount
+ DBGFR3FlowBbRetain
+ DBGFR3FlowBbRelease
+ DBGFR3FlowBbGetStartAddress
+ DBGFR3FlowBbGetEndAddress
+ DBGFR3FlowBbGetBranchAddress
+ DBGFR3FlowBbGetFollowingAddress
+ DBGFR3FlowBbGetType
+ DBGFR3FlowBbGetInstrCount
+ DBGFR3FlowBbGetFlags
+ DBGFR3FlowBbQueryBranchTbl
+ DBGFR3FlowBbQueryError
+ DBGFR3FlowBbQueryInstr
+ DBGFR3FlowBbQuerySuccessors
+ DBGFR3FlowBbGetRefBbCount
+ DBGFR3FlowBbGetRefBb
+ DBGFR3FlowBranchTblRetain
+ DBGFR3FlowBranchTblRelease
+ DBGFR3FlowBranchTblGetSlots
+ DBGFR3FlowBranchTblGetStartAddress
+ DBGFR3FlowBranchTblGetAddrAtSlot
+ DBGFR3FlowBranchTblQueryAddresses
+ DBGFR3FlowItCreate
+ DBGFR3FlowItDestroy
+ DBGFR3FlowItNext
+ DBGFR3FlowItReset
+ DBGFR3FlowBranchTblItCreate
+ DBGFR3FlowBranchTblItDestroy
+ DBGFR3FlowBranchTblItNext
+ DBGFR3FlowBranchTblItReset
+ DBGFR3FlowTraceModCreate
+ DBGFR3FlowTraceModCreateFromFlowGraph
+ DBGFR3FlowTraceModRetain
+ DBGFR3FlowTraceModRelease
+ DBGFR3FlowTraceModEnable
+ DBGFR3FlowTraceModDisable
+ DBGFR3FlowTraceModQueryReport
+ DBGFR3FlowTraceModClear
+ DBGFR3FlowTraceModAddProbe
+ DBGFR3FlowTraceProbeCreate
+ DBGFR3FlowTraceProbeRetain
+ DBGFR3FlowTraceProbeRelease
+ DBGFR3FlowTraceProbeEntriesAdd
+ DBGFR3FlowTraceReportRetain
+ DBGFR3FlowTraceReportRelease
+ DBGFR3FlowTraceReportGetRecordCount
+ DBGFR3FlowTraceReportQueryRecord
+ DBGFR3FlowTraceReportQueryFiltered
+ DBGFR3FlowTraceReportEnumRecords
+ DBGFR3FlowTraceRecordRetain
+ DBGFR3FlowTraceRecordRelease
+ DBGFR3FlowTraceRecordGetSeqNo
+ DBGFR3FlowTraceRecordGetTimestamp
+ DBGFR3FlowTraceRecordGetAddr
+ DBGFR3FlowTraceRecordGetProbe
+ DBGFR3FlowTraceRecordGetValCount
+ DBGFR3FlowTraceRecordGetVals
+ DBGFR3FlowTraceRecordGetValsCommon
+ DBGFR3FlowTraceRecordGetCpuId
+ DBGFR3PlugInLoad
+ DBGFR3PlugInUnload
+ DBGFR3PlugInLoadAll
+ DBGFR3PlugInUnloadAll
+ DBGFR3SampleReportCreate
+ DBGFR3SampleReportRetain
+ DBGFR3SampleReportRelease
+ DBGFR3SampleReportStart
+ DBGFR3SampleReportStop
+ DBGFR3SampleReportDumpToFile
+ DBGFR3SelQueryInfo
+ DBGFR3StackWalkBegin
+ DBGFR3StackWalkNext
+ DBGFR3StackWalkEnd
+ DBGFR3TypeDeregister
+ DBGFR3TypeDumpEx
+ DBGFR3TypeQueryReg
+ DBGFR3TypeQuerySize
+ DBGFR3TypeQueryValByType
+ DBGFR3TypeRegister
+ DBGFR3TypeSetSize
+ DBGFR3TypeValFree
+ DBGFR3TypeValDumpEx
+
+ EMR3QueryExecutionPolicy
+ EMR3QueryMainExecutionEngine
+ EMR3SetExecutionPolicy
+
+ MMHyperR3ToR0
+ MMHyperR3ToRC
+
+ HMR3IsEnabled
+ HMR3IsNestedPagingActive
+ HMR3IsUXActive
+ HMR3IsVpidActive
+
+ MMR3HeapFree
+ MMR3HeapRealloc
+ MMR3HeapAllocU
+
+ MMR3HyperAllocOnceNoRel
+
+ PDMR3AsyncCompletionBwMgrSetMaxForFile
+ PDMR3DeviceAttach
+ PDMR3DeviceDetach
+ PDMR3DriverAttach
+ PDMR3DriverDetach
+ PDMR3NsBwGroupSetLimit
+ PDMR3QueryDeviceLun
+ PDMR3QueryDriverOnLun
+ PDMR3QueryLun
+
+ PDMCritSectEnter
+ PDMCritSectEnterDebug
+ PDMCritSectTryEnter
+ PDMCritSectTryEnterDebug
+ PDMR3CritSectEnterEx
+ PDMCritSectLeave
+ PDMCritSectIsOwner
+ PDMCritSectIsOwnerEx
+ PDMCritSectIsOwned
+ PDMCritSectIsInitialized
+ PDMCritSectHasWaiters
+ PDMCritSectGetRecursion
+ PDMR3CritSectYield
+ PDMR3CritSectName
+ PDMR3CritSectScheduleExitEvent
+ PDMR3CritSectDelete
+
+ PDMR3QueueDestroy
+ PDMQueueAlloc
+ PDMQueueInsert
+ PDMQueueInsertEx
+ PDMQueueR0Ptr
+ PDMQueueRCPtr
+
+ PDMR3ThreadDestroy
+ PDMR3ThreadIAmRunning
+ PDMR3ThreadIAmSuspending
+ PDMR3ThreadResume
+ PDMR3ThreadSleep
+ PDMR3ThreadSuspend
+
+ PDMR3UsbCreateEmulatedDevice
+ PDMR3UsbCreateProxyDevice
+ PDMR3UsbDetachDevice
+ PDMR3UsbHasHub
+ PDMR3UsbDriverAttach
+ PDMR3UsbDriverDetach
+ PDMR3UsbQueryLun
+ PDMR3UsbQueryDriverOnLun
+
+ PGMHandlerPhysicalPageTempOff
+ PGMPhysReadGCPtr
+ PGMPhysSimpleDirtyWriteGCPtr
+ PGMPhysSimpleReadGCPtr
+ PGMPhysSimpleWriteGCPhys
+ PGMPhysSimpleWriteGCPtr
+ PGMPhysWriteGCPtr
+ PGMShwMakePageWritable
+ PGMR3QueryGlobalMemoryStats
+ PGMR3QueryMemoryStats
+
+ SSMR3Close
+ SSMR3DeregisterExternal
+ SSMR3DeregisterInternal
+ SSMR3GetBool
+ SSMR3GetBoolV
+ SSMR3GetGCPhys
+ SSMR3GetGCPhysV
+ SSMR3GetGCPhys32
+ SSMR3GetGCPhys32V
+ SSMR3GetGCPhys64
+ SSMR3GetGCPhys64V
+ SSMR3GetGCPtr
+ SSMR3GetGCUInt
+ SSMR3GetGCUIntPtr
+ SSMR3GetGCUIntReg
+ SSMR3GetIOPort
+ SSMR3GetMem
+ SSMR3GetRCPtr
+ SSMR3GetS128
+ SSMR3GetS128V
+ SSMR3GetS16
+ SSMR3GetS16V
+ SSMR3GetS32
+ SSMR3GetS32V
+ SSMR3GetS64
+ SSMR3GetS64V
+ SSMR3GetS8
+ SSMR3GetS8V
+ SSMR3GetSInt
+ SSMR3GetSel
+ SSMR3GetStrZ
+ SSMR3GetStrZEx
+ SSMR3GetStruct
+ SSMR3GetStructEx
+ SSMR3GetU128
+ SSMR3GetU128V
+ SSMR3GetU16
+ SSMR3GetU16V
+ SSMR3GetU32
+ SSMR3GetU32V
+ SSMR3GetU64
+ SSMR3GetU64V
+ SSMR3GetU8
+ SSMR3GetU8V
+ SSMR3GetUInt
+ SSMR3HandleGetAfter
+ SSMR3HandleGetStatus
+ SSMR3HandleHostBits
+ SSMR3HandleHostOSAndArch
+ SSMR3HandleIsLiveSave
+ SSMR3HandleMaxDowntime
+ SSMR3HandleReportLivePercent
+ SSMR3HandleRevision
+ SSMR3HandleSetStatus
+ SSMR3HandleVersion
+ SSMR3Open
+ SSMR3PutBool
+ SSMR3PutGCPhys
+ SSMR3PutGCPhys32
+ SSMR3PutGCPhys64
+ SSMR3PutGCPtr
+ SSMR3PutGCUInt
+ SSMR3PutGCUIntPtr
+ SSMR3PutGCUIntReg
+ SSMR3PutIOPort
+ SSMR3PutMem
+ SSMR3PutRCPtr
+ SSMR3PutS128
+ SSMR3PutS16
+ SSMR3PutS32
+ SSMR3PutS64
+ SSMR3PutS8
+ SSMR3PutSInt
+ SSMR3PutSel
+ SSMR3PutStrZ
+ SSMR3PutStruct
+ SSMR3PutStructEx
+ SSMR3PutU128
+ SSMR3PutU16
+ SSMR3PutU32
+ SSMR3PutU64
+ SSMR3PutU8
+ SSMR3PutUInt
+ SSMR3Seek
+ SSMR3SetCfgError
+ SSMR3SetLoadError
+ SSMR3SetLoadErrorV
+ SSMR3Skip
+ SSMR3SkipToEndOfUnit
+ SSMR3ValidateFile
+ SSMR3Cancel
+ SSMR3RegisterExternal
+
+ STAMR3Dump
+ STAMR3Enum
+ STAMR3Reset
+ STAMR3Snapshot
+ STAMR3SnapshotFree
+ STAMR3GetUnit
+ STAMR3RegisterFU
+ STAMR3RegisterVU
+ STAMR3DeregisterF
+ STAMR3DeregisterV
+
+ TMR3GetCpuLoadPercents
+ TMR3TimerSetCritSect
+ TMR3TimerLoad
+ TMR3TimerSave
+ TMR3TimerSkip
+ TMR3TimerDestroy
+ TMTimerFromMicro
+ TMTimerFromMilli
+ TMTimerFromNano
+ TMTimerGet
+ TMTimerGetFreq
+ TMTimerGetMicro
+ TMTimerGetMilli
+ TMTimerGetNano
+ TMTimerIsActive
+ TMTimerIsLockOwner
+ TMTimerLock
+ TMTimerR0Ptr
+ TMTimerR3Ptr
+ TMTimerRCPtr
+ TMTimerSet
+ TMTimerSetFrequencyHint
+ TMTimerSetMicro
+ TMTimerSetMillies
+ TMTimerSetNano
+ TMTimerSetRelative
+ TMTimerStop
+ TMTimerToMicro
+ TMTimerToMilli
+ TMTimerToNano
+ TMTimerUnlock
+ TMR3GetWarpDrive
+ TMR3SetWarpDrive
+ TMR3TimeVirtGet
+ TMR3TimeVirtGetMicro
+ TMR3TimeVirtGetMilli
+ TMR3TimeVirtGetNano
+
+ VMMGetCpu
+
+ VMMGetSvnRev
+ VMSetError
+ VMSetErrorV
+ VMR3AtErrorDeregister
+ VMR3AtErrorRegister
+ VMR3AtRuntimeErrorRegister
+ VMR3AtStateRegister
+ VMR3Create
+ VMR3Destroy
+ VMR3GetCpuCoreAndPackageIdFromCpuId
+ VMR3GetStateName
+ VMR3GetStateU
+ VMR3GetSuspendReason
+ VMR3GetVM
+ VMR3HotPlugCpu
+ VMR3HotUnplugCpu
+ VMR3LoadFromFile
+ VMR3LoadFromStream
+ VMR3PowerOff
+ VMR3PowerOn
+ VMR3ReleaseUVM
+ VMR3ReqCallNoWaitU
+ VMR3ReqCallU
+ VMR3ReqCallVoidWaitU
+ VMR3ReqCallWaitU
+ VMR3ReqFree
+ VMR3ReqPriorityCallWaitU
+ VMR3ReqWait
+ VMR3Reset
+ VMR3Resume
+ VMR3RetainUVM
+ VMR3Save
+ VMR3SetCpuExecutionCap
+ VMR3SetError
+ VMR3SetPowerOffInsteadOfReset
+ VMR3Suspend
+ VMR3Teleport
+ VMR3AtStateDeregister
+ VMR3GetUVM
+
diff --git a/src/VBox/VMM/VMMR3/VMMR3VTable.cpp b/src/VBox/VMM/VMMR3/VMMR3VTable.cpp
new file mode 100644
index 00000000..66d69b67
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMMR3VTable.cpp
@@ -0,0 +1,82 @@
+/* $Id: VMMR3VTable.cpp $ */
+/** @file
+ * VM - The Virtual Machine Monitor, Ring-3 API VTable Definitions.
+ */
+
+/*
+ * Copyright (C) 2022-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RT_RELAXED_CALLBACKS_TYPES
+#define LOG_GROUP LOG_GROUP_VMM
+#include <VBox/vmm/vmmr3vtable.h>
+
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) vmmR3ReservedVTableEntry(void);
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static const VMMR3VTABLE g_VMMR3VTable =
+{
+ /* .uMagicVersion = */ VMMR3VTABLE_MAGIC_VERSION,
+ /* .fFlags = */ 0,
+ /* .pszDescription = */ "x86 & amd64",
+
+#define VTABLE_ENTRY(a_Api) a_Api,
+#define VTABLE_RESERVED(a_Name) vmmR3ReservedVTableEntry,
+
+#include <VBox/vmm/vmmr3vtable-def.h>
+
+#undef VTABLE_ENTRY
+#undef VTABLE_RESERVED
+
+ /* .uMagicVersionEnd = */ VMMR3VTABLE_MAGIC_VERSION,
+};
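+
+/*
+ * Illustrative note (not part of the original sources): vmmr3vtable-def.h is
+ * an X-macro list, so with the VTABLE_ENTRY / VTABLE_RESERVED mappings above
+ * a hypothetical fragment of the definition header like
+ *
+ * @code
+ *     VTABLE_ENTRY(CFGMR3GetRoot)
+ *     VTABLE_RESERVED(pfnReserved0)
+ * @endcode
+ *
+ * expands into the initializer entries
+ *
+ * @code
+ *     CFGMR3GetRoot,
+ *     vmmR3ReservedVTableEntry,
+ * @endcode
+ *
+ * keeping the table and its definition header in sync by construction.
+ */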
+
+
+/**
+ * Reserved VMM function table entry.
+ */
+static DECLCALLBACK(int) vmmR3ReservedVTableEntry(void)
+{
+ void * volatile pvCaller = ASMReturnAddress();
+    AssertLogRelMsgFailed(("Reserved VMM function table entry called from %p!\n", pvCaller));
+ return VERR_INTERNAL_ERROR;
+}
+
+
+VMMR3DECL(PCVMMR3VTABLE) VMMR3GetVTable(void)
+{
+ return &g_VMMR3VTable;
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMMTests.cpp b/src/VBox/VMM/VMMR3/VMMTests.cpp
new file mode 100644
index 00000000..23fbbc16
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMMTests.cpp
@@ -0,0 +1,197 @@
+/* $Id: VMMTests.cpp $ */
+/** @file
+ * VMM - The Virtual Machine Monitor Core, Tests.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+//#define NO_SUPCALLR0VMM
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VMM
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/dbg.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/selm.h>
+#include "VMMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <iprt/errcore.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/time.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/x86.h>
+
+
+#define SYNC_SEL(pHyperCtx, reg) \
+ if (pHyperCtx->reg.Sel) \
+ { \
+ DBGFSELINFO selInfo; \
+ int rc2 = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->reg.Sel, &selInfo); \
+ AssertRC(rc2); \
+ \
+ pHyperCtx->reg.u64Base = selInfo.GCPtrBase; \
+ pHyperCtx->reg.u32Limit = selInfo.cbLimit; \
+ pHyperCtx->reg.Attr.n.u1Present = selInfo.u.Raw.Gen.u1Present; \
+ pHyperCtx->reg.Attr.n.u1DefBig = selInfo.u.Raw.Gen.u1DefBig; \
+ pHyperCtx->reg.Attr.n.u1Granularity = selInfo.u.Raw.Gen.u1Granularity; \
+ pHyperCtx->reg.Attr.n.u4Type = selInfo.u.Raw.Gen.u4Type; \
+ pHyperCtx->reg.Attr.n.u2Dpl = selInfo.u.Raw.Gen.u2Dpl; \
+ pHyperCtx->reg.Attr.n.u1DescType = selInfo.u.Raw.Gen.u1DescType; \
+ pHyperCtx->reg.Attr.n.u1Long = selInfo.u.Raw.Gen.u1Long; \
+ }
+
+/* execute the switch. */
+VMMR3DECL(int) VMMDoHmTest(PVM pVM)
+{
+#if 1
+ RTPrintf("FIXME!\n");
+ RT_NOREF(pVM);
+ return 0;
+#else
+
+ uint32_t i;
+ int rc;
+ PCPUMCTX pHyperCtx, pGuestCtx;
+ RTGCPHYS CR3Phys = 0x0; /* fake address */
+ PVMCPU pVCpu = &pVM->aCpus[0];
+
+ if (!HMIsEnabled(pVM))
+ {
+ RTPrintf("VMM: Hardware accelerated test not available!\n");
+ return VERR_ACCESS_DENIED;
+ }
+
+ /* Enable mapping of the hypervisor into the shadow page table. */
+ uint32_t cb;
+ rc = PGMR3MappingsSize(pVM, &cb);
+ AssertRCReturn(rc, rc);
+
+    /* Pretend the mappings are now fixed, to force a refresh of the reserved PDEs. */
+ rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
+ AssertRCReturn(rc, rc);
+
+ pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
+
+ pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
+ pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT;
+ PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
+ PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
+
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
+ VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
+ VM_FF_CLEAR(pVM, VM_FF_REQUEST);
+
+ /*
+ * Setup stack for calling VMMRCEntry().
+ */
+ RTRCPTR RCPtrEP;
+ rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
+ if (RT_SUCCESS(rc))
+ {
+ RTPrintf("VMM: VMMRCEntry=%RRv\n", RCPtrEP);
+
+ pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
+
+ /* Fill in hidden selector registers for the hypervisor state. */
+ SYNC_SEL(pHyperCtx, cs);
+ SYNC_SEL(pHyperCtx, ds);
+ SYNC_SEL(pHyperCtx, es);
+ SYNC_SEL(pHyperCtx, fs);
+ SYNC_SEL(pHyperCtx, gs);
+ SYNC_SEL(pHyperCtx, ss);
+ SYNC_SEL(pHyperCtx, tr);
+
+ /*
+ * Profile switching.
+ */
+ RTPrintf("VMM: profiling switcher...\n");
+ Log(("VMM: profiling switcher...\n"));
+ uint64_t TickMin = UINT64_MAX;
+ uint64_t tsBegin = RTTimeNanoTS();
+ uint64_t TickStart = ASMReadTSC();
+ for (i = 0; i < 1000000; i++)
+ {
+ CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
+ CPUMPushHyper(pVCpu, 0);
+ CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_HM_NOP);
+ CPUMPushHyper(pVCpu, pVM->pVMRC);
+ CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
+ CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
+
+ pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
+ pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ /* Copy the hypervisor context to make sure we have a valid guest context. */
+ *pGuestCtx = *pHyperCtx;
+ pGuestCtx->cr3 = CR3Phys;
+
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
+ VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
+
+ uint64_t TickThisStart = ASMReadTSC();
+ rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
+ uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
+ if (RT_FAILURE(rc))
+ {
+ Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
+ VMMR3FatalDump(pVM, pVCpu, rc);
+ return rc;
+ }
+ if (TickThisElapsed < TickMin)
+ TickMin = TickThisElapsed;
+ }
+ uint64_t TickEnd = ASMReadTSC();
+ uint64_t tsEnd = RTTimeNanoTS();
+
+ uint64_t Elapsed = tsEnd - tsBegin;
+ uint64_t PerIteration = Elapsed / (uint64_t)i;
+ uint64_t cTicksElapsed = TickEnd - TickStart;
+ uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
+
+ RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
+ i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
+ Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
+ i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
+
+ rc = VINF_SUCCESS;
+ }
+ else
+        AssertMsgFailed(("Failed to resolve VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc));
+
+ return rc;
+#endif
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMReq.cpp b/src/VBox/VMM/VMMR3/VMReq.cpp
new file mode 100644
index 00000000..1ad1e7f0
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/VMReq.cpp
@@ -0,0 +1,1343 @@
+/* $Id: VMReq.cpp $ */
+/** @file
+ * VM - Virtual Machine
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VM
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vmm.h>
+#include "VMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#include <iprt/semaphore.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int vmR3ReqProcessOne(PVMREQ pReq);
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, and (3) that you wish to wait
+ * forever for it to return.
+ *
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ *          its status code is returned. Otherwise, the status of pfnFunction is
+ *          returned.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3_INT_DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, and (3) that you wish to wait
+ * forever for it to return.
+ *
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ *          its status code is returned. Otherwise, the status of pfnFunction is
+ *          returned.
+ *
+ * @param pUVM The user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ return rc;
+}
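+
+/*
+ * Usage sketch (illustrative only, not part of the original sources; the
+ * worker function and its second argument are hypothetical). Arguments are
+ * marshalled as uintptr_t sized values, so the worker sticks to pointer and
+ * register sized parameters:
+ *
+ * @code
+ *     static DECLCALLBACK(int) myR3Worker(PUVM pUVM, uintptr_t uArg)
+ *     {
+ *         NOREF(pUVM); NOREF(uArg);   // the actual work runs on the EMT here
+ *         return VINF_SUCCESS;
+ *     }
+ *     ...
+ *     // two uintptr_t sized arguments follow the argument count
+ *     int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)myR3Worker,
+ *                               2, pUVM, (uintptr_t)42);
+ * @endcode
+ */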
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code
+ * and that you do not wish to wait for it to complete.
+ *
+ * @returns VBox status code returned by VMR3ReqCallVU.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code
+ * and that you do not wish to wait for it to complete.
+ *
+ * @returns VBox status code returned by VMR3ReqCallVU.
+ *
+ * @param   pUVM            The user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns void, and (2) that
+ * you wish to wait forever for it to return.
+ *
+ * @returns VBox status code of VMR3ReqCallVU.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3_INT_DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns void, and (2) that
+ * you wish to wait forever for it to return.
+ *
+ * @returns VBox status code of VMR3ReqCallVU.
+ *
+ * @param   pUVM            The user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns void, and (2) that
+ * you do not wish to wait for it to complete.
+ *
+ * @returns VBox status code of VMR3ReqCallVU.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, (3) that you wish to wait
+ * forever for it to return, and (4) that it's a priority request that can
+ * safely be handled during async suspend and power off.
+ *
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ *          its status code is returned. Otherwise, the status of pfnFunction is
+ *          returned.
+ *
+ * @param pVM The cross context VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ * @internal
+ */
+VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, (3) that you wish to wait
+ * forever for it to return, and (4) that it's a priority request that can
+ * safely be handled during async suspend and power off.
+ *
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ *          its status code is returned. Otherwise, the status of pfnFunction is
+ *          returned.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqPriorityCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns void, (2) that you
+ * wish to wait forever for it to return, and (3) that it's a priority request
+ * that can safely be handled during async suspend and power off.
+ *
+ * @returns VBox status code of VMR3ReqCallVU.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqPriorityCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Allocate and queue a call request.
+ *
+ * If it's desired to poll on the completion of the request, set cMillies
+ * to 0 and use VMR3ReqWait() to check for completion. Otherwise use
+ * RT_INDEFINITE_WAIT.
+ * The returned request packet must be freed using VMR3ReqFree().
+ *
+ * @returns VBox status code.
+ * Will not return VERR_INTERRUPTED.
+ * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param ppReq Where to store the pointer to the request.
+ *                      This will be NULL or a valid request pointer no matter what happens, unless fFlags
+ *                      contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
+ * @param cMillies Number of milliseconds to wait for the request to
+ * be completed. Use RT_INDEFINITE_WAIT to only
+ * wait till it's completed.
+ * @param fFlags A combination of the VMREQFLAGS values.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
+ PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Allocate and queue a call request.
+ *
+ * If it's desired to poll on the completion of the request, set cMillies
+ * to 0 and use VMR3ReqWait() to check for completion. Otherwise use
+ * RT_INDEFINITE_WAIT.
+ * The returned request packet must be freed using VMR3ReqFree().
+ *
+ * @returns VBox status code.
+ * Will not return VERR_INTERRUPTED.
+ * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param ppReq Where to store the pointer to the request.
+ *                      This will be NULL or a valid request pointer no matter what happens, unless fFlags
+ *                      contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
+ * @param cMillies Number of milliseconds to wait for the request to
+ * be completed. Use RT_INDEFINITE_WAIT to only
+ * wait till it's completed.
+ * @param   fFlags          A combination of the VMREQFLAGS values.
+ * @param   pfnFunction     Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ *                      Arguments that differ in size from uintptr_t will cause trouble, so don't try!
+ * @param Args Argument vector.
+ *
+ * @remarks Caveats:
+ * - Do not pass anything which is larger than an uintptr_t.
+ * - 64-bit integers are larger than uintptr_t on 32-bit hosts.
+ * Pass integers > 32-bit by reference (pointers).
+ * - Don't use NULL since it should be the integer 0 in C++ and may
+ * therefore end up with garbage in the bits 63:32 on 64-bit
+ * hosts because 'int' is 32-bit.
+ * Use (void *)NULL or (uintptr_t)0 instead of NULL.
+ */
+VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
+ PFNRT pfnFunction, unsigned cArgs, va_list Args)
+{
+ LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)), VERR_INVALID_PARAMETER);
+ if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
+ {
+ AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
+ *ppReq = NULL;
+ }
+ PVMREQ pReq = NULL;
+ AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
+                    ("cArgs=%d\n", cArgs),
+ VERR_TOO_MUCH_DATA);
+
+ /*
+ * Allocate request
+ */
+ int rc = VMR3ReqAlloc(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Initialize the request data.
+ */
+ pReq->fFlags = fFlags;
+ pReq->u.Internal.pfn = pfnFunction;
+ pReq->u.Internal.cArgs = cArgs;
+ for (unsigned iArg = 0; iArg < cArgs; iArg++)
+ pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);
+
+ /*
+ * Queue the request and return.
+ */
+ rc = VMR3ReqQueue(pReq, cMillies);
+ if ( RT_FAILURE(rc)
+ && rc != VERR_TIMEOUT)
+ {
+ VMR3ReqFree(pReq);
+ pReq = NULL;
+ }
+ if (!(fFlags & VMREQFLAGS_NO_WAIT))
+ {
+ *ppReq = pReq;
+ LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
+ }
+ else
+ LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
+ Assert(rc != VERR_INTERRUPTED);
+ return rc;
+}
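+
+/*
+ * Sketch of the argument caveats above (illustrative only, not part of the
+ * original sources; the worker is hypothetical): integers wider than
+ * uintptr_t are passed by reference and a literal NULL is avoided in favour
+ * of (uintptr_t)0:
+ *
+ * @code
+ *     static DECLCALLBACK(int) myWideArgWorker(PVM pVM, uint64_t *pu64Arg, void *pvUser)
+ *     {
+ *         NOREF(pVM); NOREF(pvUser);
+ *         return *pu64Arg > UINT32_MAX ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
+ *     }
+ *     ...
+ *     uint64_t u64Arg = UINT64_C(0x123456789a);   // 64-bit value: by reference
+ *     int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)myWideArgWorker,
+ *                               3, pVM, &u64Arg, (uintptr_t)0);
+ * @endcode
+ */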
+
+
+/**
+ * Joins the list pList with whatever is linked up at *pHead.
+ */
+static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
+{
+ for (unsigned cIterations = 0;; cIterations++)
+ {
+ PVMREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PVMREQ);
+ if (!pHead)
+ return;
+ PVMREQ pTail = pHead;
+ while (pTail->pNext)
+ pTail = pTail->pNext;
+ ASMAtomicWritePtr(&pTail->pNext, pList);
+ ASMCompilerBarrier();
+ if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
+ return;
+ ASMAtomicWriteNullPtr(&pTail->pNext);
+ ASMCompilerBarrier();
+ if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
+ return;
+ pList = pHead;
+ Assert(cIterations != 32);
+ Assert(cIterations != 64);
+ }
+}
+
+
+/**
+ * Joins the list pList with the free lists of pVMInt, splitting it first if it is too long.
+ */
+static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
+{
+ /*
+ * Split the list if it's too long.
+ */
+ unsigned cReqs = 1;
+ PVMREQ pTail = pList;
+ while (pTail->pNext)
+ {
+ if (cReqs++ > 25)
+ {
+ const uint32_t i = pVMInt->iReqFree;
+ vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
+
+ pTail->pNext = NULL;
+            vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
+ return;
+ }
+ pTail = pTail->pNext;
+ }
+ vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
+}
+
+
+/**
+ * Allocates a request packet.
+ *
+ * The caller allocates a request packet, fills in the request data
+ * union and queues the request.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param ppReq Where to store the pointer to the allocated packet.
+ * @param enmType Package type.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ */
+VMMR3DECL(int) VMR3ReqAlloc(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
+{
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
+                    ("Invalid package type %d, valid range %d-%d inclusive.\n",
+ enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
+ VERR_VM_REQUEST_INVALID_TYPE);
+ AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
+ AssertMsgReturn( idDstCpu == VMCPUID_ANY
+ || idDstCpu == VMCPUID_ANY_QUEUE
+ || idDstCpu < pUVM->cCpus
+ || idDstCpu == VMCPUID_ALL
+ || idDstCpu == VMCPUID_ALL_REVERSE,
+ ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);
+
+ /*
+ * Try get a recycled packet.
+ * While this could all be solved with a single list with a lock, it's a sport
+ * of mine to avoid locks.
+ */
+ int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
+ while (--cTries >= 0)
+ {
+ PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
+#if 0 /* sad, but this won't work safely because of the reading of pReq->pNext. */
+ PVMREQ pNext = NULL;
+ PVMREQ pReq = *ppHead;
+ if ( pReq
+ && !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq)
+ && (pReq = *ppHead)
+ && !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq))
+ pReq = NULL;
+ if (pReq)
+ {
+ Assert(pReq->pNext == pNext); NOREF(pReq);
+#else
+ PVMREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PVMREQ);
+ if (pReq)
+ {
+ PVMREQ pNext = pReq->pNext;
+ if ( pNext
+ && !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
+ {
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
+ vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
+ }
+#endif
+ ASMAtomicDecU32(&pUVM->vm.s.cReqFree);
+
+ /*
+ * Make sure the event sem is not signaled.
+ */
+ if (!pReq->fEventSemClear)
+ {
+ int rc = RTSemEventWait(pReq->EventSem, 0);
+ if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
+ {
+ /*
+ * This shall not happen, but if it does we'll just destroy
+ * the semaphore and create a new one.
+ */
+ AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
+ RTSemEventDestroy(pReq->EventSem);
+ rc = RTSemEventCreate(&pReq->EventSem);
+ AssertRC(rc);
+ if (RT_FAILURE(rc))
+ return rc;
+#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
+ for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
+ RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
+#endif
+ }
+ pReq->fEventSemClear = true;
+ }
+ else
+ Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);
+
+ /*
+ * Initialize the packet and return it.
+ */
+ Assert(pReq->enmType == VMREQTYPE_INVALID);
+ Assert(pReq->enmState == VMREQSTATE_FREE);
+ Assert(pReq->pUVM == pUVM);
+ ASMAtomicWriteNullPtr(&pReq->pNext);
+ pReq->enmState = VMREQSTATE_ALLOCATED;
+ pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
+ pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
+ pReq->enmType = enmType;
+ pReq->idDstCpu = idDstCpu;
+
+ *ppReq = pReq;
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
+ LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
+ return VINF_SUCCESS;
+ }
+ }
+
+ /*
+ * Ok allocate one.
+ */
+ PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
+ if (!pReq)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Create the semaphore.
+ */
+ int rc = RTSemEventCreate(&pReq->EventSem);
+ AssertRC(rc);
+ if (RT_FAILURE(rc))
+ {
+ MMR3HeapFree(pReq);
+ return rc;
+ }
+#if 0 /// @todo @bugref{4725} - def RT_LOCK_STRICT
+ for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
+ RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
+#endif
+
+ /*
+ * Initialize the packet and return it.
+ */
+ pReq->pNext = NULL;
+ pReq->pUVM = pUVM;
+ pReq->enmState = VMREQSTATE_ALLOCATED;
+ pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
+ pReq->fEventSemClear = true;
+ pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
+ pReq->enmType = enmType;
+ pReq->idDstCpu = idDstCpu;
+
+ *ppReq = pReq;
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
+ LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Free a request packet.
+ *
+ * @returns VBox status code.
+ *
+ * @param pReq Package to free.
+ * @remark The request packet must be in allocated or completed state!
+ */
+VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
+{
+ /*
+ * Ignore NULL (all free functions should do this imho).
+ */
+ if (!pReq)
+ return VINF_SUCCESS;
+
+ /*
+ * Check packet state.
+ */
+ switch (pReq->enmState)
+ {
+ case VMREQSTATE_ALLOCATED:
+ case VMREQSTATE_COMPLETED:
+ break;
+ default:
+ AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
+ return VERR_VM_REQUEST_STATE;
+ }
+
+ /*
+ * Make it a free packet and put it into one of the free packet lists.
+ */
+ pReq->enmState = VMREQSTATE_FREE;
+ pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
+ pReq->enmType = VMREQTYPE_INVALID;
+
+ PUVM pUVM = pReq->pUVM;
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
+
+ if (pUVM->vm.s.cReqFree < 128)
+ {
+ ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
+ PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
+ PVMREQ pNext;
+ do
+ {
+ pNext = ASMAtomicUoReadPtrT(ppHead, PVMREQ);
+ ASMAtomicWritePtr(&pReq->pNext, pNext);
+ ASMCompilerBarrier();
+ } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));
+ }
+ else
+ {
+ STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
+ RTSemEventDestroy(pReq->EventSem);
+ MMR3HeapFree(pReq);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Queue a request.
+ *
+ * The request must be allocated using VMR3ReqAlloc() and contain
+ * all the required data.
+ * If it's desired to poll on the completion of the request, set cMillies
+ * to 0 and use VMR3ReqWait() to check for completion. Otherwise use
+ * RT_INDEFINITE_WAIT.
+ *
+ * @returns VBox status code.
+ * Will not return VERR_INTERRUPTED.
+ * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
+ *
+ * @param pReq The request to queue.
+ * @param cMillies Number of milliseconds to wait for the request to
+ * be completed. Use RT_INDEFINITE_WAIT to only
+ * wait till it's completed.
+ */
+VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, RTMSINTERVAL cMillies)
+{
+ LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
+ /*
+ * Verify the supplied package.
+ */
+ AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
+ AssertMsgReturn( RT_VALID_PTR(pReq->pUVM)
+ && !pReq->pNext
+ && pReq->EventSem != NIL_RTSEMEVENT,
+ ("Invalid request package! Anyone cooking their own packages???\n"),
+ VERR_VM_REQUEST_INVALID_PACKAGE);
+ AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
+ && pReq->enmType < VMREQTYPE_MAX,
+                    ("Invalid package type %d, valid range %d-%d inclusive. This was verified on alloc too...\n",
+ pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
+ VERR_VM_REQUEST_INVALID_TYPE);
+ Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)));
+
+ /*
+ * Are we the EMT or not?
+     * Also, store pUVM (and fFlags) locally since pReq may be invalid after queuing it.
+ */
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM; /* volatile paranoia */
+ PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+
+ if (pReq->idDstCpu == VMCPUID_ALL)
+ {
+ /* One-by-one. */
+ Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
+ for (unsigned i = 0; i < pUVM->cCpus; i++)
+ {
+ /* Reinit some members. */
+ pReq->enmState = VMREQSTATE_ALLOCATED;
+ pReq->idDstCpu = i;
+ rc = VMR3ReqQueue(pReq, cMillies);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ }
+ else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
+ {
+ /* One-by-one. */
+ Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
+ for (int i = pUVM->cCpus-1; i >= 0; i--)
+ {
+ /* Reinit some members. */
+ pReq->enmState = VMREQSTATE_ALLOCATED;
+ pReq->idDstCpu = i;
+ rc = VMR3ReqQueue(pReq, cMillies);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ }
+ else if ( pReq->idDstCpu != VMCPUID_ANY /* for a specific VMCPU? */
+ && pReq->idDstCpu != VMCPUID_ANY_QUEUE
+ && ( !pUVCpu /* and it's not the current thread. */
+ || pUVCpu->idCpu != pReq->idDstCpu))
+ {
+ VMCPUID idTarget = pReq->idDstCpu; Assert(idTarget < pUVM->cCpus);
+ PVMCPU pVCpu = pUVM->pVM->apCpusR3[idTarget];
+ unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
+
+ /* Fetch the right UVMCPU */
+ pUVCpu = &pUVM->aCpus[idTarget];
+
+ /*
+ * Insert it.
+ */
+ volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVCpu->vm.s.pPriorityReqs : &pUVCpu->vm.s.pNormalReqs;
+ pReq->enmState = VMREQSTATE_QUEUED;
+ PVMREQ pNext;
+ do
+ {
+ pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
+ ASMAtomicWritePtr(&pReq->pNext, pNext);
+ ASMCompilerBarrier();
+ } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));
+
+ /*
+ * Notify EMT.
+ */
+ if (pUVM->pVM)
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
+ VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
+
+ /*
+ * Wait and return.
+ */
+ if (!(fFlags & VMREQFLAGS_NO_WAIT))
+ rc = VMR3ReqWait(pReq, cMillies);
+ LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
+ }
+ else if ( ( pReq->idDstCpu == VMCPUID_ANY
+ && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
+ || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
+ {
+ unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
+
+ /* Note: pUVCpu may or may not be NULL in the VMCPUID_ANY_QUEUE case; we don't care. */
+
+ /*
+ * Insert it.
+ */
+ volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVM->vm.s.pPriorityReqs : &pUVM->vm.s.pNormalReqs;
+ pReq->enmState = VMREQSTATE_QUEUED;
+ PVMREQ pNext;
+ do
+ {
+ pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
+ ASMAtomicWritePtr(&pReq->pNext, pNext);
+ ASMCompilerBarrier();
+ } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));
+
+ /*
+ * Notify EMT.
+ */
+ if (pUVM->pVM)
+ VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
+ VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
+
+ /*
+ * Wait and return.
+ */
+ if (!(fFlags & VMREQFLAGS_NO_WAIT))
+ rc = VMR3ReqWait(pReq, cMillies);
+ LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
+ }
+ else
+ {
+ Assert(pUVCpu);
+
+ /*
+ * The requester was an EMT, just execute it.
+ */
+ pReq->enmState = VMREQSTATE_QUEUED;
+ rc = vmR3ReqProcessOne(pReq);
+ LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
+ }
+ return rc;
+}
+
+
+/**
+ * Wait for a request to be completed.
+ *
+ * @returns VBox status code.
+ * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
+ *
+ * @param pReq The request to wait for.
+ * @param cMillies Number of milliseconds to wait.
+ * Use RT_INDEFINITE_WAIT to only wait till it's completed.
+ */
+VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, RTMSINTERVAL cMillies)
+{
+ LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));
+
+ /*
+ * Verify the supplied package.
+ */
+ AssertMsgReturn( pReq->enmState == VMREQSTATE_QUEUED
+ || pReq->enmState == VMREQSTATE_PROCESSING
+ || pReq->enmState == VMREQSTATE_COMPLETED,
+ ("Invalid state %d\n", pReq->enmState),
+ VERR_VM_REQUEST_STATE);
+ AssertMsgReturn( RT_VALID_PTR(pReq->pUVM)
+ && pReq->EventSem != NIL_RTSEMEVENT,
+ ("Invalid request package! Anyone cooking their own packages???\n"),
+ VERR_VM_REQUEST_INVALID_PACKAGE);
+ AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
+ && pReq->enmType < VMREQTYPE_MAX,
+                    ("Invalid package type %d, valid range %d-%d inclusive. This was verified on alloc too...\n",
+ pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
+ VERR_VM_REQUEST_INVALID_TYPE);
+
+ /*
+ * Check for deadlock condition
+ */
+ PUVM pUVM = pReq->pUVM;
+ NOREF(pUVM);
+
+ /*
+ * Wait on the package.
+ */
+ int rc;
+ if (cMillies != RT_INDEFINITE_WAIT)
+ rc = RTSemEventWait(pReq->EventSem, cMillies);
+ else
+ {
+ do
+ {
+ rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
+ Assert(rc != VERR_TIMEOUT);
+ } while ( pReq->enmState != VMREQSTATE_COMPLETED
+ && pReq->enmState != VMREQSTATE_INVALID);
+ }
+ if (RT_SUCCESS(rc))
+ ASMAtomicXchgSize(&pReq->fEventSemClear, true);
+ if (pReq->enmState == VMREQSTATE_COMPLETED)
+ rc = VINF_SUCCESS;
+ LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
+ Assert(rc != VERR_INTERRUPTED);
+ return rc;
+}
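+
+/*
+ * Polling sketch combining VMR3ReqAlloc, VMR3ReqQueue, VMR3ReqWait and
+ * VMR3ReqFree (illustrative only, not part of the original sources; myWorker
+ * is hypothetical):
+ *
+ * @code
+ *     PVMREQ pReq;
+ *     int rc = VMR3ReqAlloc(pUVM, &pReq, VMREQTYPE_INTERNAL, VMCPUID_ANY);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         pReq->fFlags              = VMREQFLAGS_VBOX_STATUS;
+ *         pReq->u.Internal.pfn      = (PFNRT)myWorker;
+ *         pReq->u.Internal.cArgs    = 1;
+ *         pReq->u.Internal.aArgs[0] = (uintptr_t)pUVM;
+ *         rc = VMR3ReqQueue(pReq, 0);          // cMillies=0: don't block here
+ *         while (rc == VERR_TIMEOUT)
+ *         {
+ *             // ... do other work, then poll for completion ...
+ *             rc = VMR3ReqWait(pReq, 10);
+ *         }
+ *         if (RT_SUCCESS(rc))
+ *             rc = pReq->iStatus;              // the worker's status code
+ *         VMR3ReqFree(pReq);
+ *     }
+ * @endcode
+ */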
+
+
+/**
+ * Sets the relevant FF.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu VMCPUID_ANY or the ID of the current CPU.
+ */
+DECLINLINE(void) vmR3ReqSetFF(PUVM pUVM, VMCPUID idDstCpu)
+{
+ if (RT_LIKELY(pUVM->pVM))
+ {
+ if (idDstCpu == VMCPUID_ANY)
+ VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
+ else
+ VMCPU_FF_SET(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
+ }
+}
+
+
+/**
+ * VMR3ReqProcessU helper that handles cases where there are more than one
+ * pending request.
+ *
+ * @returns The oldest request.
+ * @param   pUVM        Pointer to the user mode VM structure.
+ * @param idDstCpu VMCPUID_ANY or virtual CPU ID.
+ * @param pReqList The list of requests.
+ * @param ppReqs Pointer to the list head.
+ */
+static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ pReqList, PVMREQ volatile *ppReqs)
+{
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqMoreThan1);
+
+ /*
+ * Chop off the last one (pReq).
+ */
+ PVMREQ pPrev;
+ PVMREQ pReqRet = pReqList;
+ do
+ {
+ pPrev = pReqRet;
+ pReqRet = pReqRet->pNext;
+ } while (pReqRet->pNext);
+ ASMAtomicWriteNullPtr(&pPrev->pNext);
+
+ /*
+ * Push the others back onto the list (end of it).
+ */
+ Log2(("VMR3ReqProcess: Pushing back %p %p...\n", pReqList, pReqList->pNext));
+ if (RT_UNLIKELY(!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL)))
+ {
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqPushBackRaces);
+ do
+ {
+ ASMNopPause();
+ PVMREQ pReqList2 = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
+ if (pReqList2)
+ {
+ PVMREQ pLast = pReqList2;
+ while (pLast->pNext)
+ pLast = pLast->pNext;
+ ASMAtomicWritePtr(&pLast->pNext, pReqList);
+ pReqList = pReqList2;
+ }
+ } while (!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL));
+ }
+
+ vmR3ReqSetFF(pUVM, idDstCpu);
+ return pReqRet;
+}
+
+
+/**
+ * Process pending request(s).
+ *
+ * This function is called from a forced action handler in the EMT
+ * or from one of the EMT loops.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu Pass VMCPUID_ANY to process the common request queue
+ * and the CPU ID for a CPU specific one. In the latter
+ * case the calling thread must be the EMT of that CPU.
+ * @param fPriorityOnly When set, only process the priority request queue.
+ *
+ * @note SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
+ *
+ * @remarks This was made reentrant for async PDM handling, the debugger and
+ * others.
+ * @internal
+ */
+VMMR3_INT_DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
+{
+ LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));
+
+ /*
+ * Determine which queues to process.
+ */
+ PVMREQ volatile *ppNormalReqs;
+ PVMREQ volatile *ppPriorityReqs;
+ if (idDstCpu == VMCPUID_ANY)
+ {
+ ppPriorityReqs = &pUVM->vm.s.pPriorityReqs;
+ ppNormalReqs = !fPriorityOnly ? &pUVM->vm.s.pNormalReqs : ppPriorityReqs;
+ }
+ else
+ {
+ Assert(idDstCpu < pUVM->cCpus);
+ Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
+ ppPriorityReqs = &pUVM->aCpus[idDstCpu].vm.s.pPriorityReqs;
+ ppNormalReqs = !fPriorityOnly ? &pUVM->aCpus[idDstCpu].vm.s.pNormalReqs : ppPriorityReqs;
+ }
+
+ /*
+ * Process loop.
+ *
+ * We do not repeat the outer loop if we've got an informational status code
+ * since that code needs processing by our caller (usually EM).
+ */
+ int rc = VINF_SUCCESS;
+ for (;;)
+ {
+ /*
+ * Get the pending requests.
+ *
+ * If there are more than one request, unlink the oldest and put the
+ * rest back so that we're reentrant.
+ */
+ if (RT_LIKELY(pUVM->pVM))
+ {
+ if (idDstCpu == VMCPUID_ANY)
+ VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
+ else
+ VMCPU_FF_CLEAR(pUVM->pVM->apCpusR3[idDstCpu], VMCPU_FF_REQUEST);
+ }
+
+ PVMREQ pReq = ASMAtomicXchgPtrT(ppPriorityReqs, NULL, PVMREQ);
+ if (pReq)
+ {
+ if (RT_UNLIKELY(pReq->pNext))
+ pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppPriorityReqs);
+ else if (ASMAtomicReadPtrT(ppNormalReqs, PVMREQ))
+ vmR3ReqSetFF(pUVM, idDstCpu);
+ }
+ else
+ {
+ pReq = ASMAtomicXchgPtrT(ppNormalReqs, NULL, PVMREQ);
+ if (!pReq)
+ break;
+ if (RT_UNLIKELY(pReq->pNext))
+ pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppNormalReqs);
+ }
+
+ /*
+ * Process the request
+ */
+ STAM_COUNTER_INC(&pUVM->vm.s.StatReqProcessed);
+ int rc2 = vmR3ReqProcessOne(pReq);
+ if ( rc2 >= VINF_EM_FIRST
+ && rc2 <= VINF_EM_LAST)
+ {
+ rc = rc2;
+ break;
+ }
+ }
+
+    LogFlow(("VMR3ReqProcessU: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
+ return rc;
+}
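+
+/*
+ * Illustrative sketch (not from the original sources) of how an EMT loop is
+ * expected to drive this: the forced action flags set when requests are
+ * queued are checked and the matching queues drained:
+ *
+ * @code
+ *     if (VM_FF_IS_SET(pVM, VM_FF_REQUEST))
+ *         rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false);
+ *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
+ *         rc = VMR3ReqProcessU(pUVM, pVCpu->idCpu, false);
+ * @endcode
+ */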
+
+
+/**
+ * Process one request.
+ *
+ * @returns VBox status code.
+ *
+ * @param pReq Request packet to process.
+ */
+static int vmR3ReqProcessOne(PVMREQ pReq)
+{
+ LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));
+
+ /*
+ * Process the request.
+ */
+ Assert(pReq->enmState == VMREQSTATE_QUEUED);
+ pReq->enmState = VMREQSTATE_PROCESSING;
+ int rcRet = VINF_SUCCESS; /* the return code of this function. */
+ int rcReq = VERR_NOT_IMPLEMENTED; /* the request status. */
+ switch (pReq->enmType)
+ {
+ /*
+ * A packed down call frame.
+ */
+ case VMREQTYPE_INTERNAL:
+ {
+ uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
+ union
+ {
+ PFNRT pfn;
+ DECLCALLBACKMEMBER(int, pfn00,(void));
+ DECLCALLBACKMEMBER(int, pfn01,(uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn02,(uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn03,(uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn04,(uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn05,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn06,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn07,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn08,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn09,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn10,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn11,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn12,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn13,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn14,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ DECLCALLBACKMEMBER(int, pfn15,(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t));
+ } u;
+ u.pfn = pReq->u.Internal.pfn;
+#ifndef RT_ARCH_X86
+ switch (pReq->u.Internal.cArgs)
+ {
+ case 0: rcRet = u.pfn00(); break;
+ case 1: rcRet = u.pfn01(pauArgs[0]); break;
+ case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
+ case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
+ case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
+ case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
+ case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
+ case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
+ case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
+ case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
+ case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
+ case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
+ case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
+ case 13: rcRet = u.pfn13(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12]); break;
+ case 14: rcRet = u.pfn14(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13]); break;
+ case 15: rcRet = u.pfn15(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13], pauArgs[14]); break;
+ default:
+ AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
+ rcRet = rcReq = VERR_VM_REQUEST_TOO_MANY_ARGS_IPE;
+ break;
+ }
+#else /* x86: */
+ size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
+# ifdef __GNUC__
+ __asm__ __volatile__("movl %%esp, %%edx\n\t"
+ "subl %2, %%esp\n\t"
+ "andl $0xfffffff0, %%esp\n\t"
+ "shrl $2, %2\n\t"
+ "movl %%esp, %%edi\n\t"
+ "rep movsl\n\t"
+ "movl %%edx, %%edi\n\t"
+ "call *%%eax\n\t"
+ "mov %%edi, %%esp\n\t"
+ : "=a" (rcRet),
+ "=S" (pauArgs),
+ "=c" (cbArgs)
+ : "0" (u.pfn),
+ "1" (pauArgs),
+ "2" (cbArgs)
+ : "edi", "edx");
+# else
+ __asm
+ {
+ xor edx, edx /* just mess it up. */
+ mov eax, u.pfn
+ mov ecx, cbArgs
+ shr ecx, 2
+ mov esi, pauArgs
+ mov ebx, esp
+ sub esp, cbArgs
+ and esp, 0xfffffff0
+ mov edi, esp
+ rep movsd
+ call eax
+ mov esp, ebx
+ mov rcRet, eax
+ }
+# endif
+#endif /* x86 */
+ if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
+ rcRet = VINF_SUCCESS;
+ rcReq = rcRet;
+ break;
+ }
+
+ default:
+ AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
+ rcReq = VERR_NOT_IMPLEMENTED;
+ break;
+ }
+
+ /*
+ * Complete the request.
+ */
+ pReq->iStatus = rcReq;
+ pReq->enmState = VMREQSTATE_COMPLETED;
+ if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
+ {
+ /* Free the packet, nobody is waiting. */
+ LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
+ pReq, rcReq, rcRet));
+ VMR3ReqFree(pReq);
+ }
+ else
+ {
+        /* Notify the waiter and let him free up the packet. */
+ LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
+ pReq, rcReq, rcRet));
+ ASMAtomicXchgSize(&pReq->fEventSemClear, false);
+ int rc2 = RTSemEventSignal(pReq->EventSem);
+ if (RT_FAILURE(rc2))
+ {
+ AssertRC(rc2);
+ rcRet = rc2;
+ }
+ }
+
+ return rcRet;
+}
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h
new file mode 100644
index 00000000..53cbd303
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h
@@ -0,0 +1,234 @@
+/* $Id: AMD_Athlon_64_3200.h $ */
+/** @file
+ * CPU database entry "AMD Athlon 64 3200+".
+ * Generated at 2013-07-12T02:09:05Z by VBoxCpuReport v4.3.53r91376 on win.x86.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_AMD_Athlon_64_3200_h
+#define VBOX_CPUDB_AMD_Athlon_64_3200_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Athlon_64_3200[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000f48, 0x00000800, 0x00000000, 0x078bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000018, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000f48, 0x0000010a, 0x00000000, 0xe1d3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6c687441, 0x74286e6f, 0x3620296d, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x72502034, 0x7365636f, 0x20726f73, 0x30303233, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x0000002b, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x42004200, 0x04008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003028, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8fffffff, 0x00000000, 0x00000000, 0x53275449, 0x4d414820, 0x2052454d, 0x454d4954, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
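
Each row above is a CPUMCPUIDLEAF: leaf number, sub-leaf, sub-leaf mask, then the raw EAX/EBX/ECX/EDX values and flags. Leaf 0x00000000 spells the vendor string "AuthenticAMD" across EBX/EDX/ECX, and the leaf 0x00000001 EAX value 0x00000f48 encodes the family/model/stepping used by the database entry further down. A worked decode using the standard x86 rules (standalone illustration, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t const uEax      = 0x00000f48;          /* leaf 1 EAX from the table above */
        uint32_t const uStepping = uEax & 0xf;          /* 8 */
        uint32_t const uBaseFam  = (uEax >> 8) & 0xf;   /* 0xf */
        uint32_t const uExtFam   = (uEax >> 20) & 0xff; /* 0 */
        uint32_t const uBaseMod  = (uEax >> 4) & 0xf;   /* 4 */
        uint32_t const uExtMod   = (uEax >> 16) & 0xf;  /* 0 */
        /* The extended fields only kick in when the base family is 0xf. */
        uint32_t const uFamily = uBaseFam == 0xf ? uBaseFam + uExtFam : uBaseFam;
        uint32_t const uModel  = uBaseFam == 0xf ? (uExtMod << 4) | uBaseMod : uBaseMod;
        printf("family=%u model=%u stepping=%u\n", uFamily, uModel, uStepping);
        return 0; /* prints family=15 model=4 stepping=8, matching g_Entry_AMD_Athlon_64_3200 */
    }
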
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Athlon_64_3200[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x28`4505cb65 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xffffff00000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x39 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8059e000 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x81872950 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x105, 0, 0), /* value=0x105 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffed`bf1be178 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffff7f49`bf1bedec */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0xf8000001 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`fc000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x800, 0xfe, UINT64_C(0xfffffffffffff200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x0 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0x0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x81913800 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0x0 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xffffffff00200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x160601, UINT64_C(0xffffffffffc0f800), 0), /* value=0x160601 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0xc000000, UINT64_C(0xffffffff3ff00000), 0), /* value=0xc000000 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffffff0000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffffff0000000fe7), 0), /* value=0xf8000018 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffffff00000007ff), 0), /* value=0xff`fc000800 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffffff00007fffff)), /* value=0x40000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffffff00007fffff), 0), /* value=0x0 */
+ MVI(0xc001001e, "AMD_K8_MANID", 0x20),
+ MFX(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1, 0, UINT64_C(0xffffff0000000000), 0), /* value=0x11`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MVX(0xc0010021, "AMD_K8_UNK_c001_0021", 0, UINT64_C(0xfffffffe00000000), 0),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, UINT64_C(0xfffffffeffffffff), 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_MAX, 0), /* value=0x0 */
+ MFI(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl), /* value=0x0 */
+ MFX(0xc0010041, "AMD_K8_FIDVID_CTL", AmdK8FidVidControl, AmdK8FidVidControl, UINT64_C(0x4e200000000c), 0x33, UINT64_C(0xfff00000fffee0c0)), /* value=0x4e20`0000000c */
+ MFX(0xc0010042, "AMD_K8_FIDVID_STATUS", AmdK8FidVidStatus, ReadOnly, UINT64_C(0x200000c0c0c), 0, 0), /* value=0x200`000c0c0c */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x521020),
+ RFN(0xc0010044, 0xc0010048, "AMD_K8_MC_CTL_MASK_n", AmdK8McCtlMaskN, AmdK8McCtlMaskN),
+ RSN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN, 0x0, 0, UINT64_C(0x1f00000000000000)),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, 0, UINT64_C(0xffffffffffff1f00)), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98000 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffffff000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffffff00000188c0), 0), /* value=0x1 */
+ MVX(0xc0010114, "AMD_K8_UNK_c001_0114", 0, 0, UINT64_C(0xffffffffffffffe4)),
+ MVX(0xc0010115, "AMD_K8_UNK_c001_0115", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0xc0010116, "AMD_K8_UNK_c001_0116", 0, 0, UINT64_C(0xffff0000ffff0000)),
+ MVX(0xc0010117, "AMD_K8_UNK_c001_0117", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0xc0010118, "AMD_K8_UNK_c001_0118", 0, 0, 0),
+ MVX(0xc0010119, "AMD_K8_UNK_c001_0119", 0, 0, 0),
+ MVX(0xc001011a, "AMD_K8_UNK_c001_011a", 0, 0, UINT64_C(0xffffffff00000fff)),
+ MVX(0xc001011b, "AMD_K8_UNK_c001_011b", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0xc001011c, "AMD_K8_UNK_c001_011c", UINT32_C(0xdb1f5000), 0, UINT64_C(0xffffffff00000fff)),
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x78bfbff */
+ MFX(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xf1f3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x20906 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0x10a, 0, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0xc0011011`00000283 */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, UINT64_C(0xfffffffffc000000), 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x521020), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x1100000008), UINT64_C(0xffffff0000000000), 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1000 */
+ MFX(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, 0x800, ~(uint64_t)UINT32_MAX, 0), /* value=0x800 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x24000008 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x2020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
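
Besides the read/write handler names, the MFX()/MVX() rows carry up to three numeric columns: an initial value, a write-ignore mask, and a write-#GP mask. A hedged sketch of how such a triple is commonly enforced on a guest WRMSR -- the helper name is hypothetical and this is not the actual CPUM dispatch code, only the mask semantics the columns suggest:

    #include <stdint.h>
    #include <VBox/err.h>

    /* Hypothetical illustration: apply the fWrIgnMask/fWrGpMask columns to a write. */
    static int exampleWriteMsr(uint64_t *puMsr, uint64_t uNewValue,
                               uint64_t fWrIgnMask, uint64_t fWrGpMask)
    {
        if (uNewValue & fWrGpMask)
            return VERR_CPUM_RAISE_GP_0;        /* reserved bit set -> #GP(0) in the guest */
        *puMsr = (uNewValue & ~fWrIgnMask)      /* accept the writable bits... */
               | (*puMsr   &  fWrIgnMask);      /* ...and silently keep the ignored ones */
        return VINF_SUCCESS;
    }
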
+
+
+/**
+ * Database entry for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Athlon_64_3200 =
+{
+ /*.pszName = */ "AMD Athlon 64 3200+",
+ /*.pszFullName = */ "AMD Athlon(tm) 64 Processor 3200+",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 8,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K8_130nm,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 40,
+ /*.fMxCsrMask = */ 0xffff, ///< @todo check.
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Athlon_64_3200),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Athlon_64_3200)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Athlon_64_3200)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Athlon_64_3200),
+};
+
+#endif /* !VBOX_CPUDB_AMD_Athlon_64_3200_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h
new file mode 100644
index 00000000..63c9a281
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h
@@ -0,0 +1,242 @@
+/* $Id: AMD_Athlon_64_X2_Dual_Core_4200.h $ */
+/** @file
+ * CPU database entry "AMD Athlon 64 X2 Dual Core 4200+".
+ * Generated at 2014-02-28T15:19:16Z by VBoxCpuReport v4.3.53r92578 on linux.amd64.
+ *
+ * @remarks Possible that we're missing a few special MSRs due to no
+ *          magic register value capabilities in the linux hosted
+ *          MSR probing code.
+ * @todo Regenerate this using windows/whatever where we can get to the secret AMD MSRs.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200_h
+#define VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00040fb2, 0x01020800, 0x00002001, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000018, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00040fb2, 0x000008d1, 0x0000001f, 0xebd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6c687441, 0x74286e6f, 0x3620296d, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x32582034, 0x61754420, 0x6f43206c, 0x50206572, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x65636f72, 0x726f7373, 0x30323420, 0x00002b30, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x42004200, 0x02008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000003f, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003028, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x00000000, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
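
These generated tables are internally consistent in ways that make for useful sanity checks: leaf 0x80000008 above has EAX=0x00003028 and ECX=0x00000001, which should reproduce the entry's 40-bit physical address width and the dual-core branding. A worked decode (standalone illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t const uEax = 0x00003028, uEcx = 0x00000001;  /* leaf 0x80000008 above */
        uint32_t const cPhysAddrBits = uEax & 0xff;           /* 0x28 = 40, cf. cMaxPhysAddrWidth */
        uint32_t const cLinAddrBits  = (uEax >> 8) & 0xff;    /* 0x30 = 48 */
        uint32_t const cCores        = (uEcx & 0xff) + 1;     /* NC=1 -> 2 cores, hence "X2" */
        printf("phys=%u lin=%u cores=%u\n", cPhysAddrBits, cLinAddrBits, cCores);
        return 0;
    }
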
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x7e`171166b8 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffff00000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x0 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8103ca80 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x105, 0, 0), /* value=0x105 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffff`a0425995 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`8103124a */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0xf8000001 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`ff000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`81011d20 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8103ccb0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x1da4880 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`28300000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xffffffff00200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760601, UINT64_C(0xffffffffff80f800), 0), /* value=0x760601 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x2000060, UINT64_C(0xffffffff3ff00020), 0), /* value=0x2000060 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffffff0000000fe7)), /* value=0xa30000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffffff0000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffffff00000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffffff00007fffff)), /* value=0xc0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffffff00007fffff), 0), /* value=0x1`40000000 */
+ MVI(0xc001001e, "AMD_K8_MANID", 0x52),
+ MFX(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1, 0, UINT64_C(0x3fbf000000000000), 0), /* value=0x400001`00100008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFN(0xc0010021, "AMD_K8_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_C(0xfffffffff0e088fc), 0), /* value=0x0 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xfffffffff0e088e0), 0), /* value=0x0 */
+ MFX(0xc0010041, "AMD_K8_FIDVID_CTL", AmdK8FidVidControl, AmdK8FidVidControl, UINT64_C(0x100001202), 0xc31, UINT64_C(0xfff00000fffec0c0)), /* value=0x1`00001202 */
+ MFX(0xc0010042, "AMD_K8_FIDVID_STATUS", AmdK8FidVidStatus, ReadOnly, UINT64_C(0x310c12120c0e0202), 0, 0), /* value=0x310c1212`0c0e0202 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x4e1a24),
+ RFN(0xc0010044, 0xc0010048, "AMD_K8_MC_CTL_MASK_n", AmdK8McCtlMaskN, AmdK8McCtlMaskN),
+ RSN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN, 0x0, 0, UINT64_C(0x1f00000000000000)),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffff1f00)), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xe0000000)), /* value=0x3000000 */
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98200 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffffff000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffffff00000188c0), 0), /* value=0x1 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x0 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFN(0xc0010116, "AMD_K8_SMM_CTL", WriteOnly, AmdK8SmmCtl),
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffffff0000000fff)), /* value=0x0 */
+
+ /* Copy & paste from the AMD_Athlon_64_3200 (130nm) profile: */
+ MVX(0xc0010118, "AMD_K8_UNK_c001_0118", 0, 0, 0),
+ MVX(0xc0010119, "AMD_K8_UNK_c001_0119", 0, 0, 0),
+ MVX(0xc001011a, "AMD_K8_UNK_c001_011a", 0, 0, UINT64_C(0xffffffff00000fff)),
+ MVX(0xc001011b, "AMD_K8_UNK_c001_011b", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0xc001011c, "AMD_K8_UNK_c001_011c", UINT32_C(0xdb1f5000), 0, UINT64_C(0xffffffff00000fff)),
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x78bfbff */
+ MFX(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xf1f3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x20906 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0x10a, 0, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0xc0011011`00000283 */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, UINT64_C(0xfffffffffc000000), 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x521020), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x1100000008), UINT64_C(0xffffff0000000000), 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1000 */
+ MFX(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, 0x800, ~(uint64_t)UINT32_MAX, 0), /* value=0x800 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x24000008 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x2020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Athlon_64_X2_Dual_Core_4200 =
+{
+ /*.pszName = */ "AMD Athlon 64 X2 Dual Core 4200+",
+ /*.pszFullName = */ "AMD Athlon(tm) 64 X2 Dual Core Processor 4200+",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 75,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K8_90nm_AMDV,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 40,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200),
+};
+
+#endif /* !VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h b/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h
new file mode 100644
index 00000000..582a5a6e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h
@@ -0,0 +1,393 @@
+/* $Id: AMD_FX_8150_Eight_Core.h $ */
+/** @file
+ * CPU database entry "AMD FX-8150 Eight-Core".
+ * Generated at 2013-12-09T11:27:04Z by VBoxCpuReport v4.3.51r91084 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_AMD_FX_8150_Eight_Core_h
+#define VBOX_CPUDB_AMD_FX_8150_Eight_Core_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_FX_8150_Eight_Core[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00600f12, 0x02080800, 0x1e98220b, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x000003c0, 0x40000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001e, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00600f12, 0x10000000, 0x01c9bfff, 0x2fd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x74285846, 0x382d296d, 0x20303531, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x68676945, 0x6f432d74, 0x50206572, 0x65636f72, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x726f7373, 0x20202020, 0x20202020, 0x00202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff20ff18, 0xff20ff30, 0x10040140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x64000000, 0x64004200, 0x08008140, 0x0040c140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000003d9, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00004007, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00010000, 0x00000000, 0x000014ff, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf020f018, 0x64000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x000000ff, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001c, 0x00000000, 0x00000000, 0x00000000, 0x80032013, 0x00010200, 0x8000000f, 0 },
+ { 0x8000001d, 0x00000000, UINT32_MAX, 0x00000121, 0x00c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x8000001d, 0x00000001, UINT32_MAX, 0x00004122, 0x0040003f, 0x000001ff, 0x00000000, 0 },
+ { 0x8000001d, 0x00000002, UINT32_MAX, 0x00004143, 0x03c0003f, 0x000007ff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000003, UINT32_MAX, 0x0001c163, 0x0fc0003f, 0x000007ff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001e, 0x00000000, 0x00000000, 0x00000012, 0x00000101, 0x00000000, 0x00000000, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+};
+#endif /* !CPUM_DB_STANDALONE */
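
The processor brand string is just the raw bytes of leaves 0x80000002..0x80000004, registers in EAX/EBX/ECX/EDX order. On a little-endian host (as on x86) the twelve dwords from the rows above reassemble to the name in the file header (standalone illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t const aRegs[12] =
        {
            0x20444d41, 0x74285846, 0x382d296d, 0x20303531,  /* leaf 0x80000002 */
            0x68676945, 0x6f432d74, 0x50206572, 0x65636f72,  /* leaf 0x80000003 */
            0x726f7373, 0x20202020, 0x20202020, 0x00202020   /* leaf 0x80000004 */
        };
        char szBrand[49];
        memcpy(szBrand, aRegs, sizeof(aRegs));   /* 48 raw bytes, terminator added below */
        szBrand[48] = '\0';
        printf("%s\n", szBrand);  /* "AMD FX(tm)-8150 Eight-Core Processor" (space padded) */
        return 0;
    }
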
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_FX_8150_Eight_Core[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x6000626),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf),
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x107, 0, 0), /* value=0x107 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffff88), 0), /* value=0x77 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0xcdf00000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fff00800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0xce000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fe000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x0000041b, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x4d01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`02ed0bc0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`02ed0900 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`02f65000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0000104, "AMD_15H_TSC_RATE", AmdFam15hTscRate, AmdFam15hTscRate, 0, 0, UINT64_C(0xffffff0000000000)), /* value=0x1`00000000 */
+ MFX(0xc0000105, "AMD_15H_LWP_CFG", AmdFam15hLwpCfg, AmdFam15hLwpCfg, 0, UINT64_C(0xffff000000000001), 0x7ffffff0), /* value=0x0 */
+ MFX(0xc0000106, "AMD_15H_LWP_CBADDR", AmdFam15hLwpCbAddr, AmdFam15hLwpCbAddr, 0, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0xc0000408, 0xc0000409, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040a, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ MAL(0xc0010000, "AMD_K8_PERF_CTL_0", 0xc0010200),
+ MAL(0xc0010001, "AMD_K8_PERF_CTL_1", 0xc0010202),
+ MAL(0xc0010002, "AMD_K8_PERF_CTL_2", 0xc0010204),
+ MAL(0xc0010003, "AMD_K8_PERF_CTL_3", 0xc0010206),
+ MAL(0xc0010004, "AMD_K8_PERF_CTR_0", 0xc0010201),
+ MAL(0xc0010005, "AMD_K8_PERF_CTR_1", 0xc0010203),
+ MAL(0xc0010006, "AMD_K8_PERF_CTR_2", 0xc0010205),
+ MAL(0xc0010007, "AMD_K8_PERF_CTR_3", 0xc0010207),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x740000, UINT64_C(0xffffffffff82ffff), 0), /* value=0x740000 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0, UINT64_C(0xffffffff01006020), 0), /* value=0x1001031 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xd0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x4`2f000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x400000`00810008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x0 */
+ MVO(0xc0010028, "AMD_K8_UNK_c001_0028", 0),
+ MVO(0xc0010029, "AMD_K8_UNK_c001_0029", 0),
+ MVO(0xc001002a, "AMD_K8_UNK_c001_002a", 0),
+ MVO(0xc001002b, "AMD_K8_UNK_c001_002b", 0),
+ MVO(0xc001002c, "AMD_K8_UNK_c001_002c", 0),
+ MVO(0xc001002d, "AMD_K8_UNK_c001_002d", 0),
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x664c0005, UINT64_C(0xffffffff90008838), 0), /* value=0x664c0005 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffff9fffffdf), 0), /* value=0x60000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x20),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xfffffffffffffc00), 0), /* value=0x0 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, UINT64_C(0xffffffffff004d01), 0), /* value=0x48080 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xffffffffffff8000), 0), /* value=0x0 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_MAX, 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, ~(uint64_t)UINT32_MAX, 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xffffffffffffe000), 0), /* value=0x0 */
+ MFX(0xc001004a, "AMD_K8_MC_CTL_MASK_6", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x6, UINT64_C(0xffffffffffffffc0), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x20000800 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x2000000 */
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x40, 0, 0), /* value=0x40 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b10000161a), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x800001b1`0000161a */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b100001a17), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x800001b1`00001a17 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000017300003014), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000173`00003014 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000016300003a11), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000163`00003a11 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000014900004c0b), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000149`00004c0b */
+ MFX(0xc0010069, "AMD_10H_P_ST_5", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000013100006205), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000131`00006205 */
+ MFX(0xc001006a, "AMD_10H_P_ST_6", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001200000724c), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000120`0000724c */
+ MFX(0xc001006b, "AMD_10H_P_ST_7", AmdFam10hPStateN, AmdFam10hPStateN, 0, UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40011a17, UINT64_C(0xffffffff00b80000), 0), /* value=0x40011a17 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x18000064006724c), UINT64_MAX, 0), /* value=0x1800006`4006724c */
+ MFX(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr, AmdFam10hCStateIoBaseAddr, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x814 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xcdef8800 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0xcdf00000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fff00003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x4, 0, 0), /* value=0x4 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MFX(0xc0010200, "AMD_K8_PERF_CTL_0", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010201, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010202, "AMD_K8_PERF_CTL_1", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x1, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010203, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010204, "AMD_K8_PERF_CTL_2", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x2, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010205, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010206, "AMD_K8_PERF_CTL_3", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x3, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010207, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010208, "AMD_K8_PERF_CTL_4", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x4, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010209, "AMD_K8_PERF_CTR_4", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x4, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc001020a, "AMD_K8_PERF_CTL_5", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x5, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc001020b, "AMD_K8_PERF_CTR_5", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x5, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010240, "AMD_15H_NB_PERF_CTL_0", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x0, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010241, "AMD_15H_NB_PERF_CTR_0", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010242, "AMD_15H_NB_PERF_CTL_1", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x1, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010243, "AMD_15H_NB_PERF_CTR_1", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010244, "AMD_15H_NB_PERF_CTL_2", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x2, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010245, "AMD_15H_NB_PERF_CTR_2", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010246, "AMD_15H_NB_PERF_CTL_3", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x3, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010247, "AMD_15H_NB_PERF_CTR_3", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0x30000, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x30000 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011003, "AMD_K8_CPUID_CTL_STD06", AmdK8CpuIdCtlStd06hEcx, AmdK8CpuIdCtlStd06hEcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x1e98220b`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x1c9ffff`2fd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x10 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, UINT64_C(0xffffffff00800000), 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0xfffffcf0`093634f0 */
+ MVI(0xc0011012, "AMD_K7_UNK_c001_1012", UINT32_MAX),
+ MVI(0xc0011013, "AMD_K7_UNK_c001_1013", UINT64_MAX),
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, 0, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x20), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x40000000810008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0x3fffedafbffe2a), 0), /* value=0x0 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, UINT64_C(0xffffff0000000000)), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0x1ffffbfffff13e0), 0), /* value=0x0 */
+ MFX(0xc0011023, "AMD_15H_CU_CFG", AmdFam15hCombUnitCfg, AmdFam15hCombUnitCfg, 0x220, UINT64_C(0x3ff03c760042000), 0), /* value=0x80004000`00000220 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xfffffffffffffe04), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MFX(0xc0011028, "AMD_15H_FP_CFG", AmdFam15hFpuCfg, AmdFam15hFpuCfg, 0, UINT64_C(0xffffe000000000ff), 0), /* value=0x40`e91d0000 */
+ MFX(0xc0011029, "AMD_15H_DC_CFG", AmdFam15hDecoderCfg, AmdFam15hDecoderCfg, 0, UINT64_C(0xffffffffc0188001), 0), /* value=0x488400 */
+ MFX(0xc001102a, "AMD_15H_CU_CFG2", AmdFam15hCombUnitCfg2, AmdFam15hCombUnitCfg2, 0, UINT64_C(0xfffbfb8ff2fc623f), 0), /* value=0x40040`00000cc0 */
+ MFX(0xc001102b, "AMD_15H_CU_CFG3", AmdFam15hCombUnitCfg3, AmdFam15hCombUnitCfg3, 0, UINT64_C(0xffe0027afff00000), 0), /* value=0x33400`00002b93 */
+ MFX(0xc001102c, "AMD_15H_EX_CFG", AmdFam15hExecUnitCfg, AmdFam15hExecUnitCfg, 0x7aac0, UINT64_C(0xffb0c003fbe00024), 0), /* value=0x400`0007aac0 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfeffffffff0000), 0), /* value=0x0 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0x0 */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0x0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xf8000000f8010000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x0 */
+ MFX(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData, AmdFam10hIbsOpData, 0, UINT64_C(0xffffffc000000000), 0), /* value=0x0 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFX(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3, AmdFam10hIbsOpData3, 0, UINT64_C(0xffff0000fff00400), 0), /* value=0x0 */
+ MFN(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr), /* value=0x0 */
+ MFX(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr, AmdFam10hIbsDcPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+ MFN(0xc001103b, "AMD_14H_IBS_BR_TARGET", AmdFam14hIbsBrTarget, AmdFam14hIbsBrTarget), /* value=0x0 */
+ MVX(0xc0011040, "AMD_15H_UNK_c001_1040", 0, UINT64_C(0xffe0000000000003), 0),
+ MVX(0xc0011041, "AMD_15H_UNK_c001_1041", UINT64_C(0x99dd57b219), 0xa0c820, 0),
+ MVX(0xc0011042, "AMD_15H_UNK_c001_1042", 0, 0, 0),
+ MVX(0xc0011043, "AMD_15H_UNK_c001_1043", UINT64_C(0x300000438), 0, 0),
+ MVX(0xc0011044, "AMD_15H_UNK_c001_1044", UINT64_C(0x300000438), 0, 0),
+ MVX(0xc0011045, "AMD_15H_UNK_c001_1045", UINT64_C(0x300000420), 0, 0),
+ MVX(0xc0011046, "AMD_15H_UNK_c001_1046", UINT64_C(0x300000420), 0, 0),
+ MVX(0xc0011047, "AMD_15H_UNK_c001_1047", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011048, "AMD_15H_UNK_c001_1048", 0xc000001, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011049, "AMD_15H_UNK_c001_1049", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001104a, "AMD_15H_UNK_c001_104a", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001104b, "AMD_15H_UNK_c001_104b", 0, 0, 0),
+ MVX(0xc001104c, "AMD_15H_UNK_c001_104c", 0, 0, 0),
+ MVX(0xc001104d, "AMD_15H_UNK_c001_104d", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001104e, "AMD_15H_UNK_c001_104e", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc001104f, "AMD_15H_UNK_c001_104f", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011050, "AMD_15H_UNK_c001_1050", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011051, "AMD_15H_UNK_c001_1051", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011052, "AMD_15H_UNK_c001_1052", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011053, "AMD_15H_UNK_c001_1053", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011054, "AMD_15H_UNK_c001_1054", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011055, "AMD_15H_UNK_c001_1055", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011056, "AMD_15H_UNK_c001_1056", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011057, "AMD_15H_UNK_c001_1057", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011058, "AMD_15H_UNK_c001_1058", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011059, "AMD_15H_UNK_c001_1059", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc001105a, "AMD_15H_UNK_c001_105a", UINT64_C(0x3060c183060c183), UINT64_C(0x8000000000000000), 0),
+ MVX(0xc001105b, "AMD_15H_UNK_c001_105b", UINT64_C(0x318c6318c60c183), UINT64_C(0xe000000000000000), 0),
+ MVX(0xc001105c, "AMD_15H_UNK_c001_105c", 0, UINT64_C(0xff00000000000000), 0),
+ MVX(0xc001105d, "AMD_15H_UNK_c001_105d", 0, UINT64_C(0xff00000000000000), 0),
+ MVX(0xc001105e, "AMD_15H_UNK_c001_105e", 0, UINT64_C(0xfffffffffffffc00), 0),
+ MVX(0xc001105f, "AMD_15H_UNK_c001_105f", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011060, "AMD_15H_UNK_c001_1060", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011061, "AMD_15H_UNK_c001_1061", 0, 0, 0),
+ MVX(0xc0011062, "AMD_15H_UNK_c001_1062", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0xc0011063, "AMD_15H_UNK_c001_1063", 0, UINT64_C(0xfffffffffffe4000), 0),
+ MVX(0xc0011064, "AMD_15H_UNK_c001_1064", 0x1, UINT64_C(0xfffffffffffff000), 0),
+ MVX(0xc0011065, "AMD_15H_UNK_c001_1065", 0x1, UINT64_C(0xfffffffff0000000), 0),
+ MVX(0xc0011066, "AMD_15H_UNK_c001_1066", 0, 0, 0),
+ MVX(0xc0011067, "AMD_15H_UNK_c001_1067", 0x1, UINT64_C(0xffffffffffffff80), 0),
+ MVX(0xc0011068, "AMD_15H_UNK_c001_1068", 0, 0, 0),
+ MVX(0xc0011069, "AMD_15H_UNK_c001_1069", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106a, "AMD_15H_UNK_c001_106a", 0x1, 0, 0),
+ MVX(0xc001106b, "AMD_15H_UNK_c001_106b", 0, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc001106c, "AMD_15H_UNK_c001_106c", 0x1, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106d, "AMD_15H_UNK_c001_106d", 0x1, UINT64_C(0xf000000000000080), 0),
+ MVX(0xc001106e, "AMD_15H_UNK_c001_106e", 0x1, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106f, "AMD_15H_UNK_c001_106f", 0x1, UINT64_C(0xfffffffffffff800), 0),
+ MVI(0xc0011070, "AMD_15H_UNK_c001_1070", UINT64_C(0x20000000000)),
+ MVX(0xc0011071, "AMD_15H_UNK_c001_1071", 0x400000, UINT64_C(0xffffffff01ffffff), 0),
+ MVI(0xc0011072, "AMD_15H_UNK_c001_1072", UINT64_C(0x101592c00000021)),
+ MVI(0xc0011073, "AMD_15H_UNK_c001_1073", UINT64_C(0xec541c0050000000)),
+ MVX(0xc0011080, "AMD_15H_UNK_c001_1080", 0, 0, 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMDBENTRY const g_Entry_AMD_FX_8150_Eight_Core =
+{
+ /*.pszName = */ "AMD FX-8150 Eight-Core",
+ /*.pszFullName = */ "AMD FX(tm)-8150 Eight-Core Processor",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 21,
+ /*.uModel = */ 1,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_15h_Bulldozer,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.fMxCsrMask = */ 0x2ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_FX_8150_Eight_Core),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_FX_8150_Eight_Core)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_FX_8150_Eight_Core)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_FX_8150_Eight_Core),
+};
+
+#endif /* !VBOX_CPUDB_AMD_FX_8150_Eight_Core_h */
+
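The bulk of the table above consists of MFX/MVX rows, which pair an MSR number with read/write dispatch plus three literals: an initial value, a write-ignore mask, and a write-#GP mask. A minimal sketch of the write semantics those two masks suggest, using invented struct and function names rather than the real CPUMMSRRANGE layout:

#include <stdbool.h>
#include <stdint.h>

typedef struct MSRROWSKETCH
{
    uint32_t    uMsr;         /* register number, e.g. 0xc0010062 */
    const char *pszName;      /* e.g. "AMD_10H_P_ST_CTL" */
    uint64_t    uValue;       /* current value, seeded from the table */
    uint64_t    fWrIgnMask;   /* assumed: bits silently dropped on WRMSR */
    uint64_t    fWrGpMask;    /* assumed: bits that raise #GP(0) if changed */
} MSRROWSKETCH;

/* Returns true on success, false when the guest should see #GP(0). */
static bool sketchWrMsr(MSRROWSKETCH *pRow, uint64_t uNew)
{
    if ((uNew ^ pRow->uValue) & pRow->fWrGpMask)
        return false;                              /* protected bit would change */
    pRow->uValue = (uNew & ~pRow->fWrIgnMask)
                 | (pRow->uValue & pRow->fWrIgnMask); /* ignored bits keep old value */
    return true;
}

Under this reading, the AMD_10H_P_ST_CTL row's #GP mask of 0xfffffffffffffff8 confines guest writes to the low three P-state-select bits, which matches the eight P-state registers declared just below it.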
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h b/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h
new file mode 100644
index 00000000..be835f7e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h
@@ -0,0 +1,282 @@
+/* $Id: AMD_Phenom_II_X6_1100T.h $ */
+/** @file
+ * CPU database entry "AMD Phenom II X6 1100T".
+ * Generated at 2013-12-17T13:39:08Z by VBoxCpuReport v4.3.53r91360 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_AMD_Phenom_II_X6_1100T_h
+#define VBOX_CPUDB_AMD_Phenom_II_X6_1100T_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Phenom(tm) II X6 1100T Processor.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000006, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00100fa0, 0x01060800, 0x00802009, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001b, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00100fa0, 0x100000a1, 0x000837ff, 0xefd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6e656850, 0x74286d6f, 0x4920296d, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x36582049, 0x30313120, 0x50205430, 0x65636f72, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x726f7373, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff30ff10, 0xff30ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x20800000, 0x42004200, 0x02008140, 0x0030b140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000003f9, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00003005, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x0000040f, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf0300000, 0x60100000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x0000001f, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Phenom(tm) II X6 1100T Processor.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Phenom_II_X6_1100T[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x6db`c482d0b9 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x10000bf),
+ MFX(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf, 0, UINT64_C(0x8644930520000000), 0), /* value=0xa66664d9`32c329b1 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x25`092f34be */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8174c700 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x106, 0, 0), /* value=0x106 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffc0), 0), /* value=0x3f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffefdf`00890004 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffeed0`c7b3ffbc */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0xbdf00000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fff00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xbe000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fe000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`8174b4f0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8174c860 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7f01`3f916740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8804`3fc00000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0xf2c95840 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0xc0000408, 0xc000040a, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040b, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760600, UINT64_C(0xffffffffff80f8ff), 0), /* value=0x760600 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x1000031, UINT64_C(0xffffffff00006020), 0), /* value=0x1000031 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x3`40200000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xc0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x4`40000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x584000`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFN(0xc0010021, "AMD_10H_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x4a4c0005, UINT64_C(0xffffffffb0008838), 0), /* value=0x4a4c0005 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffffc00088c0), 0), /* value=0x10000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x1dc01430),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xffffffffffffff00), 0), /* value=0x80 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xfffffffffffff000), 0), /* value=0x200 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, UINT64_C(0xffffffffc0000000), 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x14000815 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x2000000 */
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x30, 0, 0), /* value=0x30 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x3, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x3 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x3, 0, 0), /* value=0x3 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019e40001015), 0, 0), /* value=0x8000019e`40001015 */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019f40002411), 0, 0), /* value=0x8000019f`40002411 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000017540002809), 0, 0), /* value=0x80000175`40002809 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000015540002c01), 0, 0), /* value=0x80000155`40002c01 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000013340003840), 0, 0), /* value=0x80000133`40003840 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40043840, UINT64_C(0xffffffff01b80000), 0), /* value=0x40043840 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x140043840), UINT64_MAX, 0), /* value=0x1`40043840 */
+ MFO(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr), /* value=0x814 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xbdef8000 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0xbdf00000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fff00003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SMM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x4, 0, 0), /* value=0x4 */
+ MFX(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus, 0xe, 0, 0), /* value=0xe */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x4), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x802009`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x837ff`efd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x10 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0x259a5de0`ffffffff */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x1dc01430), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x58400000000008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0xfffc012000000000), 0), /* value=0x20010`00001000 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xffc0000000000000), 0), /* value=0x9c`49000000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x10200020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MVX(0xc0011028, "AMD_10H_UNK_c001_1028", 0, UINT64_C(0xfffffffffffffff8), 0),
+ MVX(0xc0011029, "AMD_10H_UNK_c001_1029", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2, 0, UINT64_C(0xfff00000c0000000), 0), /* value=0x40050`01000040 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfcffff00000000), 0), /* value=0x140003`00000000 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0xffffffff`a08cf13e */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0x4`24ce313e */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xfffffffffff00000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x4d231923 */
+ MFI(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData), /* value=0x12`7fc7bc0e */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFI(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3), /* value=0x0 */
+ MFX(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr, 0, UINT64_C(0x7fffffffffff), 0), /* value=0x0 */
+ MFI(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x101 */
+};
+#endif /* !CPUM_DB_STANDALONE */
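Rows such as RFN(0x00000400, 0x00000417, ...) and RSN(0xc0010000, 0xc0010003, ...) above cover a whole run of consecutive MSRs with a single record, and the tables are emitted in ascending MSR order. That ordering makes an interval binary search the natural lookup; a sketch under those assumptions, with illustrative names only:

#include <stddef.h>
#include <stdint.h>

typedef struct MSRSPANSKETCH
{
    uint32_t uFirst;   /* e.g. 0x00000400 for the MC bank block */
    uint32_t uLast;    /* e.g. 0x00000417 */
} MSRSPANSKETCH;

/* Binary search over spans sorted by ascending uFirst, as emitted above. */
static const MSRSPANSKETCH *
sketchFindRange(const MSRSPANSKETCH *paRanges, size_t cRanges, uint32_t uMsr)
{
    size_t iLo = 0, iHi = cRanges;
    while (iLo < iHi)
    {
        size_t iMid = iLo + (iHi - iLo) / 2;
        if (uMsr < paRanges[iMid].uFirst)
            iHi = iMid;
        else if (uMsr > paRanges[iMid].uLast)
            iLo = iMid + 1;
        else
            return &paRanges[iMid];    /* uMsr falls inside this span */
    }
    return NULL;                       /* unknown MSR; policy decides #GP */
}

Collapsing per-bank machine-check registers into one span keeps these already long tables from growing by another hundred near-identical rows.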
+
+
+/**
+ * Database entry for AMD Phenom(tm) II X6 1100T Processor.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Phenom_II_X6_1100T =
+{
+ /*.pszName = */ "AMD Phenom II X6 1100T",
+ /*.pszFullName = */ "AMD Phenom(tm) II X6 1100T Processor",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 16,
+ /*.uModel = */ 10,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K10,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.fMxCsrMask = */ 0x2ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Phenom_II_X6_1100T)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Phenom_II_X6_1100T),
+};
+
+#endif /* !VBOX_CPUDB_AMD_Phenom_II_X6_1100T_h */
+
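Each of these headers contributes one static CPUMDBENTRY (g_Entry_AMD_FX_8150_Eight_Core and g_Entry_AMD_Phenom_II_X6_1100T so far), carrying the vendor and family/model/stepping triple plus pointers to its CPUID and MSR tables. A hypothetical selector over such entries, matching on the short .pszName field, might look as follows; the helper and the reduced struct are invented for illustration:

#include <stddef.h>
#include <string.h>

typedef struct DBENTRYSKETCH        /* a few CPUMDBENTRY-like fields */
{
    const char *pszName;            /* e.g. "AMD Phenom II X6 1100T" */
    unsigned    uFamily;            /* 16 for the entry above */
    unsigned    uModel;             /* 10 */
    unsigned    uStepping;          /* 0 */
} DBENTRYSKETCH;

static const DBENTRYSKETCH *
sketchFindByName(const DBENTRYSKETCH * const *papEntries, size_t cEntries,
                 const char *pszName)
{
    for (size_t i = 0; i < cEntries; i++)
        if (!strcmp(papEntries[i]->pszName, pszName))
            return papEntries[i];
    return NULL;                    /* fall back to host CPU detection */
}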
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Ryzen_7_1800X_Eight_Core.h b/src/VBox/VMM/VMMR3/cpus/AMD_Ryzen_7_1800X_Eight_Core.h
new file mode 100644
index 00000000..00f7f2d4
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Ryzen_7_1800X_Eight_Core.h
@@ -0,0 +1,5234 @@
+/* $Id: AMD_Ryzen_7_1800X_Eight_Core.h $ */
+/** @file
+ * CPU database entry "AMD Ryzen 7 1800X Eight-Core".
+ * Generated at 2020-07-15T16:30:16Z by VBoxCpuReport v6.1.97r139174 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_AMD_Ryzen_7_1800X_Eight_Core_h
+#define VBOX_CPUDB_AMD_Ryzen_7_1800X_Eight_Core_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Ryzen 7 1800X Eight-Core Processor.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Ryzen_7_1800X_Eight_Core[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00800f11, 0x02100800, 0x7ed8320b, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x209c01a9, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x0000000f, 0x00000340, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001f, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00800f11, 0x20000000, 0x35c233ff, 0x2fd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x657a7952, 0x2037206e, 0x30303831, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x69452058, 0x2d746867, 0x65726f43, 0x6f725020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x73736563, 0x2020726f, 0x20202020, 0x00202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff40ff40, 0xff40ff40, 0x20080140, 0x40040140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x26006400, 0x66006400, 0x02006140, 0x00808140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x0000001b, 0x00000000, 0x00006599, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00001007, 0x0000400f, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00008000, 0x00000000, 0x0001bcff, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf040f040, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x000003ff, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001d, 0x00000000, UINT32_MAX, 0x00004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x8000001d, 0x00000001, UINT32_MAX, 0x00004122, 0x00c0003f, 0x000000ff, 0x00000000, 0 },
+ { 0x8000001d, 0x00000002, UINT32_MAX, 0x00004143, 0x01c0003f, 0x000003ff, 0x00000002, 0 },
+ { 0x8000001d, 0x00000003, UINT32_MAX, 0x0001c163, 0x03c0003f, 0x00001fff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001e, 0x00000000, 0x00000000, 0x00000002, 0x00000101, 0x00000000, 0x00000000, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x8000001f, 0x00000000, 0x00000000, 0x00000007, 0x0000016f, 0x0000000f, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
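In the CPUID table above, the third column is a sub-leaf mask: UINT32_MAX marks leaves such as 0x00000007 and 0x0000000d whose ECX input selects distinct sub-leaves, while 0 means the input is ignored (as in the older Phenom table earlier in this patch). A sketch of the lookup this encoding permits, with invented names standing in for the CPUM internals:

#include <stddef.h>
#include <stdint.h>

typedef struct CPUIDLEAFSKETCH
{
    uint32_t uLeaf;          /* EAX input */
    uint32_t uSubLeaf;       /* ECX input this row answers */
    uint32_t fSubLeafMask;   /* UINT32_MAX: sub-leaf significant; 0: ignored */
    uint32_t uEax, uEbx, uEcx, uEdx;
    uint32_t fFlags;
} CPUIDLEAFSKETCH;

static const CPUIDLEAFSKETCH *
sketchLookupLeaf(const CPUIDLEAFSKETCH *paLeaves, size_t cLeaves,
                 uint32_t uLeaf, uint32_t uSubLeaf)
{
    for (size_t i = 0; i < cLeaves; i++)
        if (   paLeaves[i].uLeaf == uLeaf
            && (uSubLeaf & paLeaves[i].fSubLeafMask) == paLeaves[i].uSubLeaf)
            return &paLeaves[i];
    return NULL;   /* handled per the entry's enmUnknownCpuId policy */
}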
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Ryzen 7 1800X Eight-Core Processor.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Ryzen_7_1800X_Eight_Core[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0xad`1782e758 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x00000049, "MSR_LASTBRANCH_9", WriteOnly, IgnoreWrite),
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x8001137 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x5`0d91d8f8 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x3`6ea6067c */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x37200 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x82601750 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x117, 0, 0), /* value=0x117 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, 0x10, 0), /* value=0xffffffff`ffffffef */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`e0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RFN(0x00000400, 0x0000047e, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVI(0x0000047f, "TODO_0000_047f", 0),
+ MVX(0x00000da0, "TODO_0000_0da0", 0, 0, UINT64_MAX),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff0200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`82600010 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`826017f0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x47700 */
+ MVO(0xc00000e7, "TODO_c000_00e7", UINT64_C(0x5e11adb38)),
+ MVO(0xc00000e8, "TODO_c000_00e8", UINT64_C(0x3ea4ad0a5)),
+ MVO(0xc00000e9, "TODO_c000_00e9", 0),
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7fc1`75a34740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff9c2d`d6840000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFX(0xc0000104, "AMD_15H_TSC_RATE", AmdFam15hTscRate, AmdFam15hTscRate, 0, 0, UINT64_C(0xffffff0000000000)), /* value=0x1`00000000 */
+ RVI(0xc0000408, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ MVX(0xc0000410, "TODO_c000_0410", 0x1001028, UINT64_C(0xfffffffffe000f01), 0),
+ MVX(0xc0002000, "TODO_c000_2000", 0x1fffff, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0xc0002001, "TODO_c000_2001", 0, 0, UINT64_MAX),
+ MVX(0xc0002002, "TODO_c000_2002", 0, 0, 0),
+ MVX(0xc0002003, "TODO_c000_2003", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002004, "TODO_c000_2004", UINT64_C(0x2700000035), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002005, "TODO_c000_2005", UINT64_C(0xb000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002006, "TODO_c000_2006", 0, UINT64_C(0xffffff8000000000), 0),
+ MVI(0xc0002007, "TODO_c000_2007", 0),
+ MVX(0xc0002008, "TODO_c000_2008", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002009, "TODO_c000_2009", 0, 0, 0),
+ MVI(0xc000200a, "TODO_c000_200a", 0),
+ MVI(0xc000200b, "TODO_c000_200b", 0),
+ MVI(0xc000200c, "TODO_c000_200c", 0),
+ MVI(0xc000200d, "TODO_c000_200d", 0),
+ MVI(0xc000200e, "TODO_c000_200e", 0),
+ MVI(0xc000200f, "TODO_c000_200f", 0),
+ MVX(0xc0002010, "TODO_c000_2010", 0x3fff, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0xc0002011, "TODO_c000_2011", 0, 0, UINT64_MAX),
+ MVX(0xc0002012, "TODO_c000_2012", 0, 0, 0),
+ MVX(0xc0002013, "TODO_c000_2013", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002014, "TODO_c000_2014", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002015, "TODO_c000_2015", UINT64_C(0x100b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002016, "TODO_c000_2016", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002017, "TODO_c000_2017", 0),
+ MVI(0xc0002018, "TODO_c000_2018", 0),
+ MVI(0xc0002019, "TODO_c000_2019", 0),
+ MVI(0xc000201a, "TODO_c000_201a", 0),
+ MVI(0xc000201b, "TODO_c000_201b", 0),
+ MVI(0xc000201c, "TODO_c000_201c", 0),
+ MVI(0xc000201d, "TODO_c000_201d", 0),
+ MVI(0xc000201e, "TODO_c000_201e", 0),
+ MVI(0xc000201f, "TODO_c000_201f", 0),
+ MVX(0xc0002020, "TODO_c000_2020", 0xf, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc0002021, "TODO_c000_2021", 0, 0, UINT64_MAX),
+ MVX(0xc0002022, "TODO_c000_2022", 0, 0, 0),
+ MVX(0xc0002023, "TODO_c000_2023", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002024, "TODO_c000_2024", UINT64_C(0x2500000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002025, "TODO_c000_2025", UINT64_C(0x200b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002026, "TODO_c000_2026", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0002027, "TODO_c000_2027", 0),
+ MVX(0xc0002028, "TODO_c000_2028", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002029, "TODO_c000_2029", 0, 0, 0),
+ MVI(0xc000202a, "TODO_c000_202a", 0),
+ MVI(0xc000202b, "TODO_c000_202b", 0),
+ MVI(0xc000202c, "TODO_c000_202c", 0),
+ MVI(0xc000202d, "TODO_c000_202d", 0),
+ MVI(0xc000202e, "TODO_c000_202e", 0),
+ MVI(0xc000202f, "TODO_c000_202f", 0),
+ MVX(0xc0002030, "TODO_c000_2030", 0x1ff, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0002031, "TODO_c000_2031", 0, 0, UINT64_MAX),
+ MVX(0xc0002032, "TODO_c000_2032", 0, 0, 0),
+ MVX(0xc0002033, "TODO_c000_2033", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002034, "TODO_c000_2034", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002035, "TODO_c000_2035", UINT64_C(0x300b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002036, "TODO_c000_2036", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002037, "TODO_c000_2037", 0),
+ MVI(0xc0002038, "TODO_c000_2038", 0),
+ MVI(0xc0002039, "TODO_c000_2039", 0),
+ MVI(0xc000203a, "TODO_c000_203a", 0),
+ MVI(0xc000203b, "TODO_c000_203b", 0),
+ MVI(0xc000203c, "TODO_c000_203c", 0),
+ MVI(0xc000203d, "TODO_c000_203d", 0),
+ MVI(0xc000203e, "TODO_c000_203e", 0),
+ MVI(0xc000203f, "TODO_c000_203f", 0),
+ MVI(0xc0002040, "TODO_c000_2040", 0),
+ MVX(0xc0002041, "TODO_c000_2041", 0, 0, UINT64_MAX),
+ MVI(0xc0002042, "TODO_c000_2042", 0),
+ MVI(0xc0002043, "TODO_c000_2043", 0),
+ MVI(0xc0002044, "TODO_c000_2044", 0),
+ MVI(0xc0002045, "TODO_c000_2045", 0),
+ MVI(0xc0002046, "TODO_c000_2046", 0),
+ MVI(0xc0002047, "TODO_c000_2047", 0),
+ MVI(0xc0002048, "TODO_c000_2048", 0),
+ MVI(0xc0002049, "TODO_c000_2049", 0),
+ MVI(0xc000204a, "TODO_c000_204a", 0),
+ MVI(0xc000204b, "TODO_c000_204b", 0),
+ MVI(0xc000204c, "TODO_c000_204c", 0),
+ MVI(0xc000204d, "TODO_c000_204d", 0),
+ MVI(0xc000204e, "TODO_c000_204e", 0),
+ MVI(0xc000204f, "TODO_c000_204f", 0),
+ MVX(0xc0002050, "TODO_c000_2050", 0x7ff, UINT64_C(0xfffffffffffff800), 0),
+ MVX(0xc0002051, "TODO_c000_2051", 0, 0, UINT64_MAX),
+ MVX(0xc0002052, "TODO_c000_2052", 0, 0, 0),
+ MVX(0xc0002053, "TODO_c000_2053", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002054, "TODO_c000_2054", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002055, "TODO_c000_2055", UINT64_C(0x500b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002056, "TODO_c000_2056", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002057, "TODO_c000_2057", 0),
+ MVI(0xc0002058, "TODO_c000_2058", 0),
+ MVI(0xc0002059, "TODO_c000_2059", 0),
+ MVI(0xc000205a, "TODO_c000_205a", 0),
+ MVI(0xc000205b, "TODO_c000_205b", 0),
+ MVI(0xc000205c, "TODO_c000_205c", 0),
+ MVI(0xc000205d, "TODO_c000_205d", 0),
+ MVI(0xc000205e, "TODO_c000_205e", 0),
+ MVI(0xc000205f, "TODO_c000_205f", 0),
+ MVX(0xc0002060, "TODO_c000_2060", 0x7f, UINT64_C(0xffffffffffffff80), 0),
+ MVX(0xc0002061, "TODO_c000_2061", 0, 0, UINT64_MAX),
+ MVI(0xc0002062, "TODO_c000_2062", 0),
+ MVX(0xc0002063, "TODO_c000_2063", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002064, "TODO_c000_2064", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002065, "TODO_c000_2065", UINT64_C(0x600b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002066, "TODO_c000_2066", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002067, "TODO_c000_2067", 0),
+ MVI(0xc0002068, "TODO_c000_2068", 0),
+ MVI(0xc0002069, "TODO_c000_2069", 0),
+ MVI(0xc000206a, "TODO_c000_206a", 0),
+ MVI(0xc000206b, "TODO_c000_206b", 0),
+ MVI(0xc000206c, "TODO_c000_206c", 0),
+ MVI(0xc000206d, "TODO_c000_206d", 0),
+ MVI(0xc000206e, "TODO_c000_206e", 0),
+ MVI(0xc000206f, "TODO_c000_206f", 0),
+ MVI(0xc0002070, "TODO_c000_2070", 0),
+ MVI(0xc0002071, "TODO_c000_2071", 0),
+ MVI(0xc0002072, "TODO_c000_2072", 0),
+ MVI(0xc0002073, "TODO_c000_2073", 0),
+ MVI(0xc0002074, "TODO_c000_2074", 0),
+ MVI(0xc0002075, "TODO_c000_2075", 0),
+ MVI(0xc0002076, "TODO_c000_2076", 0),
+ MVI(0xc0002077, "TODO_c000_2077", 0),
+ MVI(0xc0002078, "TODO_c000_2078", 0),
+ MVI(0xc0002079, "TODO_c000_2079", 0),
+ MVI(0xc000207a, "TODO_c000_207a", 0),
+ MVI(0xc000207b, "TODO_c000_207b", 0),
+ MVI(0xc000207c, "TODO_c000_207c", 0),
+ MVI(0xc000207d, "TODO_c000_207d", 0),
+ MVI(0xc000207e, "TODO_c000_207e", 0),
+ MVI(0xc000207f, "TODO_c000_207f", 0),
+ MVI(0xc0002080, "TODO_c000_2080", 0),
+ MVI(0xc0002081, "TODO_c000_2081", 0),
+ MVI(0xc0002082, "TODO_c000_2082", 0),
+ MVI(0xc0002083, "TODO_c000_2083", 0),
+ MVI(0xc0002084, "TODO_c000_2084", 0),
+ MVI(0xc0002085, "TODO_c000_2085", 0),
+ MVI(0xc0002086, "TODO_c000_2086", 0),
+ MVI(0xc0002087, "TODO_c000_2087", 0),
+ MVI(0xc0002088, "TODO_c000_2088", 0),
+ MVI(0xc0002089, "TODO_c000_2089", 0),
+ MVI(0xc000208a, "TODO_c000_208a", 0),
+ MVI(0xc000208b, "TODO_c000_208b", 0),
+ MVI(0xc000208c, "TODO_c000_208c", 0),
+ MVI(0xc000208d, "TODO_c000_208d", 0),
+ MVI(0xc000208e, "TODO_c000_208e", 0),
+ MVI(0xc000208f, "TODO_c000_208f", 0),
+ MVI(0xc0002090, "TODO_c000_2090", 0),
+ MVI(0xc0002091, "TODO_c000_2091", 0),
+ MVI(0xc0002092, "TODO_c000_2092", 0),
+ MVI(0xc0002093, "TODO_c000_2093", 0),
+ MVI(0xc0002094, "TODO_c000_2094", 0),
+ MVI(0xc0002095, "TODO_c000_2095", 0),
+ MVI(0xc0002096, "TODO_c000_2096", 0),
+ MVI(0xc0002097, "TODO_c000_2097", 0),
+ MVI(0xc0002098, "TODO_c000_2098", 0),
+ MVI(0xc0002099, "TODO_c000_2099", 0),
+ MVI(0xc000209a, "TODO_c000_209a", 0),
+ MVI(0xc000209b, "TODO_c000_209b", 0),
+ MVI(0xc000209c, "TODO_c000_209c", 0),
+ MVI(0xc000209d, "TODO_c000_209d", 0),
+ MVI(0xc000209e, "TODO_c000_209e", 0),
+ MVI(0xc000209f, "TODO_c000_209f", 0),
+ MVI(0xc00020a0, "TODO_c000_20a0", 0),
+ MVI(0xc00020a1, "TODO_c000_20a1", 0),
+ MVI(0xc00020a2, "TODO_c000_20a2", 0),
+ MVI(0xc00020a3, "TODO_c000_20a3", 0),
+ MVI(0xc00020a4, "TODO_c000_20a4", 0),
+ MVI(0xc00020a5, "TODO_c000_20a5", 0),
+ MVI(0xc00020a6, "TODO_c000_20a6", 0),
+ MVI(0xc00020a7, "TODO_c000_20a7", 0),
+ MVI(0xc00020a8, "TODO_c000_20a8", 0),
+ MVI(0xc00020a9, "TODO_c000_20a9", 0),
+ MVI(0xc00020aa, "TODO_c000_20aa", 0),
+ MVI(0xc00020ab, "TODO_c000_20ab", 0),
+ MVI(0xc00020ac, "TODO_c000_20ac", 0),
+ MVI(0xc00020ad, "TODO_c000_20ad", 0),
+ MVI(0xc00020ae, "TODO_c000_20ae", 0),
+ MVI(0xc00020af, "TODO_c000_20af", 0),
+ MVI(0xc00020b0, "TODO_c000_20b0", 0),
+ MVI(0xc00020b1, "TODO_c000_20b1", 0),
+ MVI(0xc00020b2, "TODO_c000_20b2", 0),
+ MVI(0xc00020b3, "TODO_c000_20b3", 0),
+ MVI(0xc00020b4, "TODO_c000_20b4", 0),
+ MVI(0xc00020b5, "TODO_c000_20b5", 0),
+ MVI(0xc00020b6, "TODO_c000_20b6", 0),
+ MVI(0xc00020b7, "TODO_c000_20b7", 0),
+ MVI(0xc00020b8, "TODO_c000_20b8", 0),
+ MVI(0xc00020b9, "TODO_c000_20b9", 0),
+ MVI(0xc00020ba, "TODO_c000_20ba", 0),
+ MVI(0xc00020bb, "TODO_c000_20bb", 0),
+ MVI(0xc00020bc, "TODO_c000_20bc", 0),
+ MVI(0xc00020bd, "TODO_c000_20bd", 0),
+ MVI(0xc00020be, "TODO_c000_20be", 0),
+ MVI(0xc00020bf, "TODO_c000_20bf", 0),
+ MVI(0xc00020c0, "TODO_c000_20c0", 0),
+ MVI(0xc00020c1, "TODO_c000_20c1", 0),
+ MVI(0xc00020c2, "TODO_c000_20c2", 0),
+ MVI(0xc00020c3, "TODO_c000_20c3", 0),
+ MVI(0xc00020c4, "TODO_c000_20c4", 0),
+ MVI(0xc00020c5, "TODO_c000_20c5", 0),
+ MVI(0xc00020c6, "TODO_c000_20c6", 0),
+ MVI(0xc00020c7, "TODO_c000_20c7", 0),
+ MVI(0xc00020c8, "TODO_c000_20c8", 0),
+ MVI(0xc00020c9, "TODO_c000_20c9", 0),
+ MVI(0xc00020ca, "TODO_c000_20ca", 0),
+ MVI(0xc00020cb, "TODO_c000_20cb", 0),
+ MVI(0xc00020cc, "TODO_c000_20cc", 0),
+ MVI(0xc00020cd, "TODO_c000_20cd", 0),
+ MVI(0xc00020ce, "TODO_c000_20ce", 0),
+ MVI(0xc00020cf, "TODO_c000_20cf", 0),
+ MVI(0xc00020d0, "TODO_c000_20d0", 0),
+ MVI(0xc00020d1, "TODO_c000_20d1", 0),
+ MVI(0xc00020d2, "TODO_c000_20d2", 0),
+ MVI(0xc00020d3, "TODO_c000_20d3", 0),
+ MVI(0xc00020d4, "TODO_c000_20d4", 0),
+ MVI(0xc00020d5, "TODO_c000_20d5", 0),
+ MVI(0xc00020d6, "TODO_c000_20d6", 0),
+ MVI(0xc00020d7, "TODO_c000_20d7", 0),
+ MVI(0xc00020d8, "TODO_c000_20d8", 0),
+ MVI(0xc00020d9, "TODO_c000_20d9", 0),
+ MVI(0xc00020da, "TODO_c000_20da", 0),
+ MVI(0xc00020db, "TODO_c000_20db", 0),
+ MVI(0xc00020dc, "TODO_c000_20dc", 0),
+ MVI(0xc00020dd, "TODO_c000_20dd", 0),
+ MVI(0xc00020de, "TODO_c000_20de", 0),
+ MVI(0xc00020df, "TODO_c000_20df", 0),
+ MVI(0xc00020e0, "TODO_c000_20e0", 0),
+ MVI(0xc00020e1, "TODO_c000_20e1", 0),
+ MVI(0xc00020e2, "TODO_c000_20e2", 0),
+ MVI(0xc00020e3, "TODO_c000_20e3", 0),
+ MVI(0xc00020e4, "TODO_c000_20e4", 0),
+ MVI(0xc00020e5, "TODO_c000_20e5", 0),
+ MVI(0xc00020e6, "TODO_c000_20e6", 0),
+ MVI(0xc00020e7, "TODO_c000_20e7", 0),
+ MVI(0xc00020e8, "TODO_c000_20e8", 0),
+ MVI(0xc00020e9, "TODO_c000_20e9", 0),
+ MVI(0xc00020ea, "TODO_c000_20ea", 0),
+ MVI(0xc00020eb, "TODO_c000_20eb", 0),
+ MVI(0xc00020ec, "TODO_c000_20ec", 0),
+ MVI(0xc00020ed, "TODO_c000_20ed", 0),
+ MVI(0xc00020ee, "TODO_c000_20ee", 0),
+ MVI(0xc00020ef, "TODO_c000_20ef", 0),
+ MVI(0xc00020f0, "TODO_c000_20f0", 0),
+ MVI(0xc00020f1, "TODO_c000_20f1", 0),
+ MVI(0xc00020f2, "TODO_c000_20f2", 0),
+ MVI(0xc00020f3, "TODO_c000_20f3", 0),
+ MVI(0xc00020f4, "TODO_c000_20f4", 0),
+ MVI(0xc00020f5, "TODO_c000_20f5", 0),
+ MVI(0xc00020f6, "TODO_c000_20f6", 0),
+ MVI(0xc00020f7, "TODO_c000_20f7", 0),
+ MVI(0xc00020f8, "TODO_c000_20f8", 0),
+ MVI(0xc00020f9, "TODO_c000_20f9", 0),
+ MVI(0xc00020fa, "TODO_c000_20fa", 0),
+ MVI(0xc00020fb, "TODO_c000_20fb", 0),
+ MVI(0xc00020fc, "TODO_c000_20fc", 0),
+ MVI(0xc00020fd, "TODO_c000_20fd", 0),
+ MVI(0xc00020fe, "TODO_c000_20fe", 0),
+ MVI(0xc00020ff, "TODO_c000_20ff", 0),
+ MVI(0xc0002100, "TODO_c000_2100", 0),
+ MVI(0xc0002101, "TODO_c000_2101", 0),
+ MVI(0xc0002102, "TODO_c000_2102", 0),
+ MVI(0xc0002103, "TODO_c000_2103", 0),
+ MVI(0xc0002104, "TODO_c000_2104", 0),
+ MVI(0xc0002105, "TODO_c000_2105", 0),
+ MVI(0xc0002106, "TODO_c000_2106", 0),
+ MVI(0xc0002107, "TODO_c000_2107", 0),
+ MVI(0xc0002108, "TODO_c000_2108", 0),
+ MVI(0xc0002109, "TODO_c000_2109", 0),
+ MVI(0xc000210a, "TODO_c000_210a", 0),
+ MVI(0xc000210b, "TODO_c000_210b", 0),
+ MVI(0xc000210c, "TODO_c000_210c", 0),
+ MVI(0xc000210d, "TODO_c000_210d", 0),
+ MVI(0xc000210e, "TODO_c000_210e", 0),
+ MVI(0xc000210f, "TODO_c000_210f", 0),
+ MVI(0xc0002110, "TODO_c000_2110", 0),
+ MVI(0xc0002111, "TODO_c000_2111", 0),
+ MVI(0xc0002112, "TODO_c000_2112", 0),
+ MVI(0xc0002113, "TODO_c000_2113", 0),
+ MVI(0xc0002114, "TODO_c000_2114", 0),
+ MVI(0xc0002115, "TODO_c000_2115", 0),
+ MVI(0xc0002116, "TODO_c000_2116", 0),
+ MVI(0xc0002117, "TODO_c000_2117", 0),
+ MVI(0xc0002118, "TODO_c000_2118", 0),
+ MVI(0xc0002119, "TODO_c000_2119", 0),
+ MVI(0xc000211a, "TODO_c000_211a", 0),
+ MVI(0xc000211b, "TODO_c000_211b", 0),
+ MVI(0xc000211c, "TODO_c000_211c", 0),
+ MVI(0xc000211d, "TODO_c000_211d", 0),
+ MVI(0xc000211e, "TODO_c000_211e", 0),
+ MVI(0xc000211f, "TODO_c000_211f", 0),
+ MVI(0xc0002120, "TODO_c000_2120", 0),
+ MVI(0xc0002121, "TODO_c000_2121", 0),
+ MVI(0xc0002122, "TODO_c000_2122", 0),
+ MVI(0xc0002123, "TODO_c000_2123", 0),
+ MVI(0xc0002124, "TODO_c000_2124", 0),
+ MVI(0xc0002125, "TODO_c000_2125", 0),
+ MVI(0xc0002126, "TODO_c000_2126", 0),
+ MVI(0xc0002127, "TODO_c000_2127", 0),
+ MVI(0xc0002128, "TODO_c000_2128", 0),
+ MVI(0xc0002129, "TODO_c000_2129", 0),
+ MVI(0xc000212a, "TODO_c000_212a", 0),
+ MVI(0xc000212b, "TODO_c000_212b", 0),
+ MVI(0xc000212c, "TODO_c000_212c", 0),
+ MVI(0xc000212d, "TODO_c000_212d", 0),
+ MVI(0xc000212e, "TODO_c000_212e", 0),
+ MVI(0xc000212f, "TODO_c000_212f", 0),
+ MVI(0xc0002130, "TODO_c000_2130", 0),
+ MVI(0xc0002131, "TODO_c000_2131", 0),
+ MVI(0xc0002132, "TODO_c000_2132", 0),
+ MVI(0xc0002133, "TODO_c000_2133", 0),
+ MVI(0xc0002134, "TODO_c000_2134", 0),
+ MVI(0xc0002135, "TODO_c000_2135", 0),
+ MVI(0xc0002136, "TODO_c000_2136", 0),
+ MVI(0xc0002137, "TODO_c000_2137", 0),
+ MVI(0xc0002138, "TODO_c000_2138", 0),
+ MVI(0xc0002139, "TODO_c000_2139", 0),
+ MVI(0xc000213a, "TODO_c000_213a", 0),
+ MVI(0xc000213b, "TODO_c000_213b", 0),
+ MVI(0xc000213c, "TODO_c000_213c", 0),
+ MVI(0xc000213d, "TODO_c000_213d", 0),
+ MVI(0xc000213e, "TODO_c000_213e", 0),
+ MVI(0xc000213f, "TODO_c000_213f", 0),
+ MVI(0xc0002140, "TODO_c000_2140", 0),
+ MVI(0xc0002141, "TODO_c000_2141", 0),
+ MVI(0xc0002142, "TODO_c000_2142", 0),
+ MVI(0xc0002143, "TODO_c000_2143", 0),
+ MVI(0xc0002144, "TODO_c000_2144", 0),
+ MVI(0xc0002145, "TODO_c000_2145", 0),
+ MVI(0xc0002146, "TODO_c000_2146", 0),
+ MVI(0xc0002147, "TODO_c000_2147", 0),
+ MVI(0xc0002148, "TODO_c000_2148", 0),
+ MVI(0xc0002149, "TODO_c000_2149", 0),
+ MVI(0xc000214a, "TODO_c000_214a", 0),
+ MVI(0xc000214b, "TODO_c000_214b", 0),
+ MVI(0xc000214c, "TODO_c000_214c", 0),
+ MVI(0xc000214d, "TODO_c000_214d", 0),
+ MVI(0xc000214e, "TODO_c000_214e", 0),
+ MVI(0xc000214f, "TODO_c000_214f", 0),
+ MVI(0xc0002150, "TODO_c000_2150", 0),
+ MVI(0xc0002151, "TODO_c000_2151", 0),
+ MVI(0xc0002152, "TODO_c000_2152", 0),
+ MVI(0xc0002153, "TODO_c000_2153", 0),
+ MVI(0xc0002154, "TODO_c000_2154", 0),
+ MVI(0xc0002155, "TODO_c000_2155", 0),
+ MVI(0xc0002156, "TODO_c000_2156", 0),
+ MVI(0xc0002157, "TODO_c000_2157", 0),
+ MVI(0xc0002158, "TODO_c000_2158", 0),
+ MVI(0xc0002159, "TODO_c000_2159", 0),
+ MVI(0xc000215a, "TODO_c000_215a", 0),
+ MVI(0xc000215b, "TODO_c000_215b", 0),
+ MVI(0xc000215c, "TODO_c000_215c", 0),
+ MVI(0xc000215d, "TODO_c000_215d", 0),
+ MVI(0xc000215e, "TODO_c000_215e", 0),
+ MVI(0xc000215f, "TODO_c000_215f", 0),
+ MVI(0xc0002160, "TODO_c000_2160", 0),
+ MVI(0xc0002161, "TODO_c000_2161", 0),
+ MVI(0xc0002162, "TODO_c000_2162", 0),
+ MVI(0xc0002163, "TODO_c000_2163", 0),
+ MVI(0xc0002164, "TODO_c000_2164", 0),
+ MVI(0xc0002165, "TODO_c000_2165", 0),
+ MVI(0xc0002166, "TODO_c000_2166", 0),
+ MVI(0xc0002167, "TODO_c000_2167", 0),
+ MVI(0xc0002168, "TODO_c000_2168", 0),
+ MVI(0xc0002169, "TODO_c000_2169", 0),
+ MVI(0xc000216a, "TODO_c000_216a", 0),
+ MVI(0xc000216b, "TODO_c000_216b", 0),
+ MVI(0xc000216c, "TODO_c000_216c", 0),
+ MVI(0xc000216d, "TODO_c000_216d", 0),
+ MVI(0xc000216e, "TODO_c000_216e", 0),
+ MVI(0xc000216f, "TODO_c000_216f", 0),
+ MVI(0xc0002170, "TODO_c000_2170", 0),
+ MVI(0xc0002171, "TODO_c000_2171", 0),
+ MVI(0xc0002172, "TODO_c000_2172", 0),
+ MVI(0xc0002173, "TODO_c000_2173", 0),
+ MVI(0xc0002174, "TODO_c000_2174", 0),
+ MVI(0xc0002175, "TODO_c000_2175", 0),
+ MVI(0xc0002176, "TODO_c000_2176", 0),
+ MVI(0xc0002177, "TODO_c000_2177", 0),
+ MVI(0xc0002178, "TODO_c000_2178", 0),
+ MVI(0xc0002179, "TODO_c000_2179", 0),
+ MVI(0xc000217a, "TODO_c000_217a", 0),
+ MVI(0xc000217b, "TODO_c000_217b", 0),
+ MVI(0xc000217c, "TODO_c000_217c", 0),
+ MVI(0xc000217d, "TODO_c000_217d", 0),
+ MVI(0xc000217e, "TODO_c000_217e", 0),
+ MVI(0xc000217f, "TODO_c000_217f", 0),
+ MVI(0xc0002180, "TODO_c000_2180", 0),
+ MVI(0xc0002181, "TODO_c000_2181", 0),
+ MVI(0xc0002182, "TODO_c000_2182", 0),
+ MVI(0xc0002183, "TODO_c000_2183", 0),
+ MVI(0xc0002184, "TODO_c000_2184", 0),
+ MVI(0xc0002185, "TODO_c000_2185", 0),
+ MVI(0xc0002186, "TODO_c000_2186", 0),
+ MVI(0xc0002187, "TODO_c000_2187", 0),
+ MVI(0xc0002188, "TODO_c000_2188", 0),
+ MVI(0xc0002189, "TODO_c000_2189", 0),
+ MVI(0xc000218a, "TODO_c000_218a", 0),
+ MVI(0xc000218b, "TODO_c000_218b", 0),
+ MVI(0xc000218c, "TODO_c000_218c", 0),
+ MVI(0xc000218d, "TODO_c000_218d", 0),
+ MVI(0xc000218e, "TODO_c000_218e", 0),
+ MVI(0xc000218f, "TODO_c000_218f", 0),
+ MVI(0xc0002190, "TODO_c000_2190", 0),
+ MVI(0xc0002191, "TODO_c000_2191", 0),
+ MVI(0xc0002192, "TODO_c000_2192", 0),
+ MVI(0xc0002193, "TODO_c000_2193", 0),
+ MVI(0xc0002194, "TODO_c000_2194", 0),
+ MVI(0xc0002195, "TODO_c000_2195", 0),
+ MVI(0xc0002196, "TODO_c000_2196", 0),
+ MVI(0xc0002197, "TODO_c000_2197", 0),
+ MVI(0xc0002198, "TODO_c000_2198", 0),
+ MVI(0xc0002199, "TODO_c000_2199", 0),
+ MVI(0xc000219a, "TODO_c000_219a", 0),
+ MVI(0xc000219b, "TODO_c000_219b", 0),
+ MVI(0xc000219c, "TODO_c000_219c", 0),
+ MVI(0xc000219d, "TODO_c000_219d", 0),
+ MVI(0xc000219e, "TODO_c000_219e", 0),
+ MVI(0xc000219f, "TODO_c000_219f", 0),
+ MVI(0xc00021a0, "TODO_c000_21a0", 0),
+ MVI(0xc00021a1, "TODO_c000_21a1", 0),
+ MVI(0xc00021a2, "TODO_c000_21a2", 0),
+ MVI(0xc00021a3, "TODO_c000_21a3", 0),
+ MVI(0xc00021a4, "TODO_c000_21a4", 0),
+ MVI(0xc00021a5, "TODO_c000_21a5", 0),
+ MVI(0xc00021a6, "TODO_c000_21a6", 0),
+ MVI(0xc00021a7, "TODO_c000_21a7", 0),
+ MVI(0xc00021a8, "TODO_c000_21a8", 0),
+ MVI(0xc00021a9, "TODO_c000_21a9", 0),
+ MVI(0xc00021aa, "TODO_c000_21aa", 0),
+ MVI(0xc00021ab, "TODO_c000_21ab", 0),
+ MVI(0xc00021ac, "TODO_c000_21ac", 0),
+ MVI(0xc00021ad, "TODO_c000_21ad", 0),
+ MVI(0xc00021ae, "TODO_c000_21ae", 0),
+ MVI(0xc00021af, "TODO_c000_21af", 0),
+ MVI(0xc00021b0, "TODO_c000_21b0", 0),
+ MVI(0xc00021b1, "TODO_c000_21b1", 0),
+ MVI(0xc00021b2, "TODO_c000_21b2", 0),
+ MVI(0xc00021b3, "TODO_c000_21b3", 0),
+ MVI(0xc00021b4, "TODO_c000_21b4", 0),
+ MVI(0xc00021b5, "TODO_c000_21b5", 0),
+ MVI(0xc00021b6, "TODO_c000_21b6", 0),
+ MVI(0xc00021b7, "TODO_c000_21b7", 0),
+ MVI(0xc00021b8, "TODO_c000_21b8", 0),
+ MVI(0xc00021b9, "TODO_c000_21b9", 0),
+ MVI(0xc00021ba, "TODO_c000_21ba", 0),
+ MVI(0xc00021bb, "TODO_c000_21bb", 0),
+ MVI(0xc00021bc, "TODO_c000_21bc", 0),
+ MVI(0xc00021bd, "TODO_c000_21bd", 0),
+ MVI(0xc00021be, "TODO_c000_21be", 0),
+ MVI(0xc00021bf, "TODO_c000_21bf", 0),
+ MVI(0xc00021c0, "TODO_c000_21c0", 0),
+ MVI(0xc00021c1, "TODO_c000_21c1", 0),
+ MVI(0xc00021c2, "TODO_c000_21c2", 0),
+ MVI(0xc00021c3, "TODO_c000_21c3", 0),
+ MVI(0xc00021c4, "TODO_c000_21c4", 0),
+ MVI(0xc00021c5, "TODO_c000_21c5", 0),
+ MVI(0xc00021c6, "TODO_c000_21c6", 0),
+ MVI(0xc00021c7, "TODO_c000_21c7", 0),
+ MVI(0xc00021c8, "TODO_c000_21c8", 0),
+ MVI(0xc00021c9, "TODO_c000_21c9", 0),
+ MVI(0xc00021ca, "TODO_c000_21ca", 0),
+ MVI(0xc00021cb, "TODO_c000_21cb", 0),
+ MVI(0xc00021cc, "TODO_c000_21cc", 0),
+ MVI(0xc00021cd, "TODO_c000_21cd", 0),
+ MVI(0xc00021ce, "TODO_c000_21ce", 0),
+ MVI(0xc00021cf, "TODO_c000_21cf", 0),
+ MVI(0xc00021d0, "TODO_c000_21d0", 0),
+ MVI(0xc00021d1, "TODO_c000_21d1", 0),
+ MVI(0xc00021d2, "TODO_c000_21d2", 0),
+ MVI(0xc00021d3, "TODO_c000_21d3", 0),
+ MVI(0xc00021d4, "TODO_c000_21d4", 0),
+ MVI(0xc00021d5, "TODO_c000_21d5", 0),
+ MVI(0xc00021d6, "TODO_c000_21d6", 0),
+ MVI(0xc00021d7, "TODO_c000_21d7", 0),
+ MVI(0xc00021d8, "TODO_c000_21d8", 0),
+ MVI(0xc00021d9, "TODO_c000_21d9", 0),
+ MVI(0xc00021da, "TODO_c000_21da", 0),
+ MVI(0xc00021db, "TODO_c000_21db", 0),
+ MVI(0xc00021dc, "TODO_c000_21dc", 0),
+ MVI(0xc00021dd, "TODO_c000_21dd", 0),
+ MVI(0xc00021de, "TODO_c000_21de", 0),
+ MVI(0xc00021df, "TODO_c000_21df", 0),
+ MVI(0xc00021e0, "TODO_c000_21e0", 0),
+ MVI(0xc00021e1, "TODO_c000_21e1", 0),
+ MVI(0xc00021e2, "TODO_c000_21e2", 0),
+ MVI(0xc00021e3, "TODO_c000_21e3", 0),
+ MVI(0xc00021e4, "TODO_c000_21e4", 0),
+ MVI(0xc00021e5, "TODO_c000_21e5", 0),
+ MVI(0xc00021e6, "TODO_c000_21e6", 0),
+ MVI(0xc00021e7, "TODO_c000_21e7", 0),
+ MVI(0xc00021e8, "TODO_c000_21e8", 0),
+ MVI(0xc00021e9, "TODO_c000_21e9", 0),
+ MVI(0xc00021ea, "TODO_c000_21ea", 0),
+ MVI(0xc00021eb, "TODO_c000_21eb", 0),
+ MVI(0xc00021ec, "TODO_c000_21ec", 0),
+ MVI(0xc00021ed, "TODO_c000_21ed", 0),
+ MVI(0xc00021ee, "TODO_c000_21ee", 0),
+ MVI(0xc00021ef, "TODO_c000_21ef", 0),
+ MVI(0xc00021f0, "TODO_c000_21f0", 0),
+ MVI(0xc00021f1, "TODO_c000_21f1", 0),
+ MVI(0xc00021f2, "TODO_c000_21f2", 0),
+ MVI(0xc00021f3, "TODO_c000_21f3", 0),
+ MVI(0xc00021f4, "TODO_c000_21f4", 0),
+ MVI(0xc00021f5, "TODO_c000_21f5", 0),
+ MVI(0xc00021f6, "TODO_c000_21f6", 0),
+ MVI(0xc00021f7, "TODO_c000_21f7", 0),
+ MVI(0xc00021f8, "TODO_c000_21f8", 0),
+ MVI(0xc00021f9, "TODO_c000_21f9", 0),
+ MVI(0xc00021fa, "TODO_c000_21fa", 0),
+ MVI(0xc00021fb, "TODO_c000_21fb", 0),
+ MVI(0xc00021fc, "TODO_c000_21fc", 0),
+ MVI(0xc00021fd, "TODO_c000_21fd", 0),
+ MVI(0xc00021fe, "TODO_c000_21fe", 0),
+ MVI(0xc00021ff, "TODO_c000_21ff", 0),
+ MVI(0xc0002200, "TODO_c000_2200", 0),
+ MVI(0xc0002201, "TODO_c000_2201", 0),
+ MVI(0xc0002202, "TODO_c000_2202", 0),
+ MVI(0xc0002203, "TODO_c000_2203", 0),
+ MVI(0xc0002204, "TODO_c000_2204", 0),
+ MVI(0xc0002205, "TODO_c000_2205", 0),
+ MVI(0xc0002206, "TODO_c000_2206", 0),
+ MVI(0xc0002207, "TODO_c000_2207", 0),
+ MVI(0xc0002208, "TODO_c000_2208", 0),
+ MVI(0xc0002209, "TODO_c000_2209", 0),
+ MVI(0xc000220a, "TODO_c000_220a", 0),
+ MVI(0xc000220b, "TODO_c000_220b", 0),
+ MVI(0xc000220c, "TODO_c000_220c", 0),
+ MVI(0xc000220d, "TODO_c000_220d", 0),
+ MVI(0xc000220e, "TODO_c000_220e", 0),
+ MVI(0xc000220f, "TODO_c000_220f", 0),
+ MVI(0xc0002210, "TODO_c000_2210", 0),
+ MVI(0xc0002211, "TODO_c000_2211", 0),
+ MVI(0xc0002212, "TODO_c000_2212", 0),
+ MVI(0xc0002213, "TODO_c000_2213", 0),
+ MVI(0xc0002214, "TODO_c000_2214", 0),
+ MVI(0xc0002215, "TODO_c000_2215", 0),
+ MVI(0xc0002216, "TODO_c000_2216", 0),
+ MVI(0xc0002217, "TODO_c000_2217", 0),
+ MVI(0xc0002218, "TODO_c000_2218", 0),
+ MVI(0xc0002219, "TODO_c000_2219", 0),
+ MVI(0xc000221a, "TODO_c000_221a", 0),
+ MVI(0xc000221b, "TODO_c000_221b", 0),
+ MVI(0xc000221c, "TODO_c000_221c", 0),
+ MVI(0xc000221d, "TODO_c000_221d", 0),
+ MVI(0xc000221e, "TODO_c000_221e", 0),
+ MVI(0xc000221f, "TODO_c000_221f", 0),
+ MVI(0xc0002220, "TODO_c000_2220", 0),
+ MVI(0xc0002221, "TODO_c000_2221", 0),
+ MVI(0xc0002222, "TODO_c000_2222", 0),
+ MVI(0xc0002223, "TODO_c000_2223", 0),
+ MVI(0xc0002224, "TODO_c000_2224", 0),
+ MVI(0xc0002225, "TODO_c000_2225", 0),
+ MVI(0xc0002226, "TODO_c000_2226", 0),
+ MVI(0xc0002227, "TODO_c000_2227", 0),
+ MVI(0xc0002228, "TODO_c000_2228", 0),
+ MVI(0xc0002229, "TODO_c000_2229", 0),
+ MVI(0xc000222a, "TODO_c000_222a", 0),
+ MVI(0xc000222b, "TODO_c000_222b", 0),
+ MVI(0xc000222c, "TODO_c000_222c", 0),
+ MVI(0xc000222d, "TODO_c000_222d", 0),
+ MVI(0xc000222e, "TODO_c000_222e", 0),
+ MVI(0xc000222f, "TODO_c000_222f", 0),
+ MVI(0xc0002230, "TODO_c000_2230", 0),
+ MVI(0xc0002231, "TODO_c000_2231", 0),
+ MVI(0xc0002232, "TODO_c000_2232", 0),
+ MVI(0xc0002233, "TODO_c000_2233", 0),
+ MVI(0xc0002234, "TODO_c000_2234", 0),
+ MVI(0xc0002235, "TODO_c000_2235", 0),
+ MVI(0xc0002236, "TODO_c000_2236", 0),
+ MVI(0xc0002237, "TODO_c000_2237", 0),
+ MVI(0xc0002238, "TODO_c000_2238", 0),
+ MVI(0xc0002239, "TODO_c000_2239", 0),
+ MVI(0xc000223a, "TODO_c000_223a", 0),
+ MVI(0xc000223b, "TODO_c000_223b", 0),
+ MVI(0xc000223c, "TODO_c000_223c", 0),
+ MVI(0xc000223d, "TODO_c000_223d", 0),
+ MVI(0xc000223e, "TODO_c000_223e", 0),
+ MVI(0xc000223f, "TODO_c000_223f", 0),
+ MVI(0xc0002240, "TODO_c000_2240", 0),
+ MVI(0xc0002241, "TODO_c000_2241", 0),
+ MVI(0xc0002242, "TODO_c000_2242", 0),
+ MVI(0xc0002243, "TODO_c000_2243", 0),
+ MVI(0xc0002244, "TODO_c000_2244", 0),
+ MVI(0xc0002245, "TODO_c000_2245", 0),
+ MVI(0xc0002246, "TODO_c000_2246", 0),
+ MVI(0xc0002247, "TODO_c000_2247", 0),
+ MVI(0xc0002248, "TODO_c000_2248", 0),
+ MVI(0xc0002249, "TODO_c000_2249", 0),
+ MVI(0xc000224a, "TODO_c000_224a", 0),
+ MVI(0xc000224b, "TODO_c000_224b", 0),
+ MVI(0xc000224c, "TODO_c000_224c", 0),
+ MVI(0xc000224d, "TODO_c000_224d", 0),
+ MVI(0xc000224e, "TODO_c000_224e", 0),
+ MVI(0xc000224f, "TODO_c000_224f", 0),
+ MVI(0xc0002250, "TODO_c000_2250", 0),
+ MVI(0xc0002251, "TODO_c000_2251", 0),
+ MVI(0xc0002252, "TODO_c000_2252", 0),
+ MVI(0xc0002253, "TODO_c000_2253", 0),
+ MVI(0xc0002254, "TODO_c000_2254", 0),
+ MVI(0xc0002255, "TODO_c000_2255", 0),
+ MVI(0xc0002256, "TODO_c000_2256", 0),
+ MVI(0xc0002257, "TODO_c000_2257", 0),
+ MVI(0xc0002258, "TODO_c000_2258", 0),
+ MVI(0xc0002259, "TODO_c000_2259", 0),
+ MVI(0xc000225a, "TODO_c000_225a", 0),
+ MVI(0xc000225b, "TODO_c000_225b", 0),
+ MVI(0xc000225c, "TODO_c000_225c", 0),
+ MVI(0xc000225d, "TODO_c000_225d", 0),
+ MVI(0xc000225e, "TODO_c000_225e", 0),
+ MVI(0xc000225f, "TODO_c000_225f", 0),
+ MVI(0xc0002260, "TODO_c000_2260", 0),
+ MVI(0xc0002261, "TODO_c000_2261", 0),
+ MVI(0xc0002262, "TODO_c000_2262", 0),
+ MVI(0xc0002263, "TODO_c000_2263", 0),
+ MVI(0xc0002264, "TODO_c000_2264", 0),
+ MVI(0xc0002265, "TODO_c000_2265", 0),
+ MVI(0xc0002266, "TODO_c000_2266", 0),
+ MVI(0xc0002267, "TODO_c000_2267", 0),
+ MVI(0xc0002268, "TODO_c000_2268", 0),
+ MVI(0xc0002269, "TODO_c000_2269", 0),
+ MVI(0xc000226a, "TODO_c000_226a", 0),
+ MVI(0xc000226b, "TODO_c000_226b", 0),
+ MVI(0xc000226c, "TODO_c000_226c", 0),
+ MVI(0xc000226d, "TODO_c000_226d", 0),
+ MVI(0xc000226e, "TODO_c000_226e", 0),
+ MVI(0xc000226f, "TODO_c000_226f", 0),
+ MVI(0xc0002270, "TODO_c000_2270", 0),
+ MVI(0xc0002271, "TODO_c000_2271", 0),
+ MVI(0xc0002272, "TODO_c000_2272", 0),
+ MVI(0xc0002273, "TODO_c000_2273", 0),
+ MVI(0xc0002274, "TODO_c000_2274", 0),
+ MVI(0xc0002275, "TODO_c000_2275", 0),
+ MVI(0xc0002276, "TODO_c000_2276", 0),
+ MVI(0xc0002277, "TODO_c000_2277", 0),
+ MVI(0xc0002278, "TODO_c000_2278", 0),
+ MVI(0xc0002279, "TODO_c000_2279", 0),
+ MVI(0xc000227a, "TODO_c000_227a", 0),
+ MVI(0xc000227b, "TODO_c000_227b", 0),
+ MVI(0xc000227c, "TODO_c000_227c", 0),
+ MVI(0xc000227d, "TODO_c000_227d", 0),
+ MVI(0xc000227e, "TODO_c000_227e", 0),
+ MVI(0xc000227f, "TODO_c000_227f", 0),
+ MVI(0xc0002280, "TODO_c000_2280", 0),
+ MVI(0xc0002281, "TODO_c000_2281", 0),
+ MVI(0xc0002282, "TODO_c000_2282", 0),
+ MVI(0xc0002283, "TODO_c000_2283", 0),
+ MVI(0xc0002284, "TODO_c000_2284", 0),
+ MVI(0xc0002285, "TODO_c000_2285", 0),
+ MVI(0xc0002286, "TODO_c000_2286", 0),
+ MVI(0xc0002287, "TODO_c000_2287", 0),
+ MVI(0xc0002288, "TODO_c000_2288", 0),
+ MVI(0xc0002289, "TODO_c000_2289", 0),
+ MVI(0xc000228a, "TODO_c000_228a", 0),
+ MVI(0xc000228b, "TODO_c000_228b", 0),
+ MVI(0xc000228c, "TODO_c000_228c", 0),
+ MVI(0xc000228d, "TODO_c000_228d", 0),
+ MVI(0xc000228e, "TODO_c000_228e", 0),
+ MVI(0xc000228f, "TODO_c000_228f", 0),
+ MVI(0xc0002290, "TODO_c000_2290", 0),
+ MVI(0xc0002291, "TODO_c000_2291", 0),
+ MVI(0xc0002292, "TODO_c000_2292", 0),
+ MVI(0xc0002293, "TODO_c000_2293", 0),
+ MVI(0xc0002294, "TODO_c000_2294", 0),
+ MVI(0xc0002295, "TODO_c000_2295", 0),
+ MVI(0xc0002296, "TODO_c000_2296", 0),
+ MVI(0xc0002297, "TODO_c000_2297", 0),
+ MVI(0xc0002298, "TODO_c000_2298", 0),
+ MVI(0xc0002299, "TODO_c000_2299", 0),
+ MVI(0xc000229a, "TODO_c000_229a", 0),
+ MVI(0xc000229b, "TODO_c000_229b", 0),
+ MVI(0xc000229c, "TODO_c000_229c", 0),
+ MVI(0xc000229d, "TODO_c000_229d", 0),
+ MVI(0xc000229e, "TODO_c000_229e", 0),
+ MVI(0xc000229f, "TODO_c000_229f", 0),
+ MVI(0xc00022a0, "TODO_c000_22a0", 0),
+ MVI(0xc00022a1, "TODO_c000_22a1", 0),
+ MVI(0xc00022a2, "TODO_c000_22a2", 0),
+ MVI(0xc00022a3, "TODO_c000_22a3", 0),
+ MVI(0xc00022a4, "TODO_c000_22a4", 0),
+ MVI(0xc00022a5, "TODO_c000_22a5", 0),
+ MVI(0xc00022a6, "TODO_c000_22a6", 0),
+ MVI(0xc00022a7, "TODO_c000_22a7", 0),
+ MVI(0xc00022a8, "TODO_c000_22a8", 0),
+ MVI(0xc00022a9, "TODO_c000_22a9", 0),
+ MVI(0xc00022aa, "TODO_c000_22aa", 0),
+ MVI(0xc00022ab, "TODO_c000_22ab", 0),
+ MVI(0xc00022ac, "TODO_c000_22ac", 0),
+ MVI(0xc00022ad, "TODO_c000_22ad", 0),
+ MVI(0xc00022ae, "TODO_c000_22ae", 0),
+ MVI(0xc00022af, "TODO_c000_22af", 0),
+ MVI(0xc00022b0, "TODO_c000_22b0", 0),
+ MVI(0xc00022b1, "TODO_c000_22b1", 0),
+ MVI(0xc00022b2, "TODO_c000_22b2", 0),
+ MVI(0xc00022b3, "TODO_c000_22b3", 0),
+ MVI(0xc00022b4, "TODO_c000_22b4", 0),
+ MVI(0xc00022b5, "TODO_c000_22b5", 0),
+ MVI(0xc00022b6, "TODO_c000_22b6", 0),
+ MVI(0xc00022b7, "TODO_c000_22b7", 0),
+ MVI(0xc00022b8, "TODO_c000_22b8", 0),
+ MVI(0xc00022b9, "TODO_c000_22b9", 0),
+ MVI(0xc00022ba, "TODO_c000_22ba", 0),
+ MVI(0xc00022bb, "TODO_c000_22bb", 0),
+ MVI(0xc00022bc, "TODO_c000_22bc", 0),
+ MVI(0xc00022bd, "TODO_c000_22bd", 0),
+ MVI(0xc00022be, "TODO_c000_22be", 0),
+ MVI(0xc00022bf, "TODO_c000_22bf", 0),
+ MVI(0xc00022c0, "TODO_c000_22c0", 0),
+ MVI(0xc00022c1, "TODO_c000_22c1", 0),
+ MVI(0xc00022c2, "TODO_c000_22c2", 0),
+ MVI(0xc00022c3, "TODO_c000_22c3", 0),
+ MVI(0xc00022c4, "TODO_c000_22c4", 0),
+ MVI(0xc00022c5, "TODO_c000_22c5", 0),
+ MVI(0xc00022c6, "TODO_c000_22c6", 0),
+ MVI(0xc00022c7, "TODO_c000_22c7", 0),
+ MVI(0xc00022c8, "TODO_c000_22c8", 0),
+ MVI(0xc00022c9, "TODO_c000_22c9", 0),
+ MVI(0xc00022ca, "TODO_c000_22ca", 0),
+ MVI(0xc00022cb, "TODO_c000_22cb", 0),
+ MVI(0xc00022cc, "TODO_c000_22cc", 0),
+ MVI(0xc00022cd, "TODO_c000_22cd", 0),
+ MVI(0xc00022ce, "TODO_c000_22ce", 0),
+ MVI(0xc00022cf, "TODO_c000_22cf", 0),
+ MVI(0xc00022d0, "TODO_c000_22d0", 0),
+ MVI(0xc00022d1, "TODO_c000_22d1", 0),
+ MVI(0xc00022d2, "TODO_c000_22d2", 0),
+ MVI(0xc00022d3, "TODO_c000_22d3", 0),
+ MVI(0xc00022d4, "TODO_c000_22d4", 0),
+ MVI(0xc00022d5, "TODO_c000_22d5", 0),
+ MVI(0xc00022d6, "TODO_c000_22d6", 0),
+ MVI(0xc00022d7, "TODO_c000_22d7", 0),
+ MVI(0xc00022d8, "TODO_c000_22d8", 0),
+ MVI(0xc00022d9, "TODO_c000_22d9", 0),
+ MVI(0xc00022da, "TODO_c000_22da", 0),
+ MVI(0xc00022db, "TODO_c000_22db", 0),
+ MVI(0xc00022dc, "TODO_c000_22dc", 0),
+ MVI(0xc00022dd, "TODO_c000_22dd", 0),
+ MVI(0xc00022de, "TODO_c000_22de", 0),
+ MVI(0xc00022df, "TODO_c000_22df", 0),
+ MVI(0xc00022e0, "TODO_c000_22e0", 0),
+ MVI(0xc00022e1, "TODO_c000_22e1", 0),
+ MVI(0xc00022e2, "TODO_c000_22e2", 0),
+ MVI(0xc00022e3, "TODO_c000_22e3", 0),
+ MVI(0xc00022e4, "TODO_c000_22e4", 0),
+ MVI(0xc00022e5, "TODO_c000_22e5", 0),
+ MVI(0xc00022e6, "TODO_c000_22e6", 0),
+ MVI(0xc00022e7, "TODO_c000_22e7", 0),
+ MVI(0xc00022e8, "TODO_c000_22e8", 0),
+ MVI(0xc00022e9, "TODO_c000_22e9", 0),
+ MVI(0xc00022ea, "TODO_c000_22ea", 0),
+ MVI(0xc00022eb, "TODO_c000_22eb", 0),
+ MVI(0xc00022ec, "TODO_c000_22ec", 0),
+ MVI(0xc00022ed, "TODO_c000_22ed", 0),
+ MVI(0xc00022ee, "TODO_c000_22ee", 0),
+ MVI(0xc00022ef, "TODO_c000_22ef", 0),
+ MVI(0xc00022f0, "TODO_c000_22f0", 0),
+ MVI(0xc00022f1, "TODO_c000_22f1", 0),
+ MVI(0xc00022f2, "TODO_c000_22f2", 0),
+ MVI(0xc00022f3, "TODO_c000_22f3", 0),
+ MVI(0xc00022f4, "TODO_c000_22f4", 0),
+ MVI(0xc00022f5, "TODO_c000_22f5", 0),
+ MVI(0xc00022f6, "TODO_c000_22f6", 0),
+ MVI(0xc00022f7, "TODO_c000_22f7", 0),
+ MVI(0xc00022f8, "TODO_c000_22f8", 0),
+ MVI(0xc00022f9, "TODO_c000_22f9", 0),
+ MVI(0xc00022fa, "TODO_c000_22fa", 0),
+ MVI(0xc00022fb, "TODO_c000_22fb", 0),
+ MVI(0xc00022fc, "TODO_c000_22fc", 0),
+ MVI(0xc00022fd, "TODO_c000_22fd", 0),
+ MVI(0xc00022fe, "TODO_c000_22fe", 0),
+ MVI(0xc00022ff, "TODO_c000_22ff", 0),
+ MVI(0xc0002300, "TODO_c000_2300", 0),
+ MVI(0xc0002301, "TODO_c000_2301", 0),
+ MVI(0xc0002302, "TODO_c000_2302", 0),
+ MVI(0xc0002303, "TODO_c000_2303", 0),
+ MVI(0xc0002304, "TODO_c000_2304", 0),
+ MVI(0xc0002305, "TODO_c000_2305", 0),
+ MVI(0xc0002306, "TODO_c000_2306", 0),
+ MVI(0xc0002307, "TODO_c000_2307", 0),
+ MVI(0xc0002308, "TODO_c000_2308", 0),
+ MVI(0xc0002309, "TODO_c000_2309", 0),
+ MVI(0xc000230a, "TODO_c000_230a", 0),
+ MVI(0xc000230b, "TODO_c000_230b", 0),
+ MVI(0xc000230c, "TODO_c000_230c", 0),
+ MVI(0xc000230d, "TODO_c000_230d", 0),
+ MVI(0xc000230e, "TODO_c000_230e", 0),
+ MVI(0xc000230f, "TODO_c000_230f", 0),
+ MVI(0xc0002310, "TODO_c000_2310", 0),
+ MVI(0xc0002311, "TODO_c000_2311", 0),
+ MVI(0xc0002312, "TODO_c000_2312", 0),
+ MVI(0xc0002313, "TODO_c000_2313", 0),
+ MVI(0xc0002314, "TODO_c000_2314", 0),
+ MVI(0xc0002315, "TODO_c000_2315", 0),
+ MVI(0xc0002316, "TODO_c000_2316", 0),
+ MVI(0xc0002317, "TODO_c000_2317", 0),
+ MVI(0xc0002318, "TODO_c000_2318", 0),
+ MVI(0xc0002319, "TODO_c000_2319", 0),
+ MVI(0xc000231a, "TODO_c000_231a", 0),
+ MVI(0xc000231b, "TODO_c000_231b", 0),
+ MVI(0xc000231c, "TODO_c000_231c", 0),
+ MVI(0xc000231d, "TODO_c000_231d", 0),
+ MVI(0xc000231e, "TODO_c000_231e", 0),
+ MVI(0xc000231f, "TODO_c000_231f", 0),
+ MVI(0xc0002320, "TODO_c000_2320", 0),
+ MVI(0xc0002321, "TODO_c000_2321", 0),
+ MVI(0xc0002322, "TODO_c000_2322", 0),
+ MVI(0xc0002323, "TODO_c000_2323", 0),
+ MVI(0xc0002324, "TODO_c000_2324", 0),
+ MVI(0xc0002325, "TODO_c000_2325", 0),
+ MVI(0xc0002326, "TODO_c000_2326", 0),
+ MVI(0xc0002327, "TODO_c000_2327", 0),
+ MVI(0xc0002328, "TODO_c000_2328", 0),
+ MVI(0xc0002329, "TODO_c000_2329", 0),
+ MVI(0xc000232a, "TODO_c000_232a", 0),
+ MVI(0xc000232b, "TODO_c000_232b", 0),
+ MVI(0xc000232c, "TODO_c000_232c", 0),
+ MVI(0xc000232d, "TODO_c000_232d", 0),
+ MVI(0xc000232e, "TODO_c000_232e", 0),
+ MVI(0xc000232f, "TODO_c000_232f", 0),
+ MVI(0xc0002330, "TODO_c000_2330", 0),
+ MVI(0xc0002331, "TODO_c000_2331", 0),
+ MVI(0xc0002332, "TODO_c000_2332", 0),
+ MVI(0xc0002333, "TODO_c000_2333", 0),
+ MVI(0xc0002334, "TODO_c000_2334", 0),
+ MVI(0xc0002335, "TODO_c000_2335", 0),
+ MVI(0xc0002336, "TODO_c000_2336", 0),
+ MVI(0xc0002337, "TODO_c000_2337", 0),
+ MVI(0xc0002338, "TODO_c000_2338", 0),
+ MVI(0xc0002339, "TODO_c000_2339", 0),
+ MVI(0xc000233a, "TODO_c000_233a", 0),
+ MVI(0xc000233b, "TODO_c000_233b", 0),
+ MVI(0xc000233c, "TODO_c000_233c", 0),
+ MVI(0xc000233d, "TODO_c000_233d", 0),
+ MVI(0xc000233e, "TODO_c000_233e", 0),
+ MVI(0xc000233f, "TODO_c000_233f", 0),
+ MVI(0xc0002340, "TODO_c000_2340", 0),
+ MVI(0xc0002341, "TODO_c000_2341", 0),
+ MVI(0xc0002342, "TODO_c000_2342", 0),
+ MVI(0xc0002343, "TODO_c000_2343", 0),
+ MVI(0xc0002344, "TODO_c000_2344", 0),
+ MVI(0xc0002345, "TODO_c000_2345", 0),
+ MVI(0xc0002346, "TODO_c000_2346", 0),
+ MVI(0xc0002347, "TODO_c000_2347", 0),
+ MVI(0xc0002348, "TODO_c000_2348", 0),
+ MVI(0xc0002349, "TODO_c000_2349", 0),
+ MVI(0xc000234a, "TODO_c000_234a", 0),
+ MVI(0xc000234b, "TODO_c000_234b", 0),
+ MVI(0xc000234c, "TODO_c000_234c", 0),
+ MVI(0xc000234d, "TODO_c000_234d", 0),
+ MVI(0xc000234e, "TODO_c000_234e", 0),
+ MVI(0xc000234f, "TODO_c000_234f", 0),
+ MVI(0xc0002350, "TODO_c000_2350", 0),
+ MVI(0xc0002351, "TODO_c000_2351", 0),
+ MVI(0xc0002352, "TODO_c000_2352", 0),
+ MVI(0xc0002353, "TODO_c000_2353", 0),
+ MVI(0xc0002354, "TODO_c000_2354", 0),
+ MVI(0xc0002355, "TODO_c000_2355", 0),
+ MVI(0xc0002356, "TODO_c000_2356", 0),
+ MVI(0xc0002357, "TODO_c000_2357", 0),
+ MVI(0xc0002358, "TODO_c000_2358", 0),
+ MVI(0xc0002359, "TODO_c000_2359", 0),
+ MVI(0xc000235a, "TODO_c000_235a", 0),
+ MVI(0xc000235b, "TODO_c000_235b", 0),
+ MVI(0xc000235c, "TODO_c000_235c", 0),
+ MVI(0xc000235d, "TODO_c000_235d", 0),
+ MVI(0xc000235e, "TODO_c000_235e", 0),
+ MVI(0xc000235f, "TODO_c000_235f", 0),
+ MVI(0xc0002360, "TODO_c000_2360", 0),
+ MVI(0xc0002361, "TODO_c000_2361", 0),
+ MVI(0xc0002362, "TODO_c000_2362", 0),
+ MVI(0xc0002363, "TODO_c000_2363", 0),
+ MVI(0xc0002364, "TODO_c000_2364", 0),
+ MVI(0xc0002365, "TODO_c000_2365", 0),
+ MVI(0xc0002366, "TODO_c000_2366", 0),
+ MVI(0xc0002367, "TODO_c000_2367", 0),
+ MVI(0xc0002368, "TODO_c000_2368", 0),
+ MVI(0xc0002369, "TODO_c000_2369", 0),
+ MVI(0xc000236a, "TODO_c000_236a", 0),
+ MVI(0xc000236b, "TODO_c000_236b", 0),
+ MVI(0xc000236c, "TODO_c000_236c", 0),
+ MVI(0xc000236d, "TODO_c000_236d", 0),
+ MVI(0xc000236e, "TODO_c000_236e", 0),
+ MVI(0xc000236f, "TODO_c000_236f", 0),
+ MVI(0xc0002370, "TODO_c000_2370", 0),
+ MVI(0xc0002371, "TODO_c000_2371", 0),
+ MVI(0xc0002372, "TODO_c000_2372", 0),
+ MVI(0xc0002373, "TODO_c000_2373", 0),
+ MVI(0xc0002374, "TODO_c000_2374", 0),
+ MVI(0xc0002375, "TODO_c000_2375", 0),
+ MVI(0xc0002376, "TODO_c000_2376", 0),
+ MVI(0xc0002377, "TODO_c000_2377", 0),
+ MVI(0xc0002378, "TODO_c000_2378", 0),
+ MVI(0xc0002379, "TODO_c000_2379", 0),
+ MVI(0xc000237a, "TODO_c000_237a", 0),
+ MVI(0xc000237b, "TODO_c000_237b", 0),
+ MVI(0xc000237c, "TODO_c000_237c", 0),
+ MVI(0xc000237d, "TODO_c000_237d", 0),
+ MVI(0xc000237e, "TODO_c000_237e", 0),
+ MVI(0xc000237f, "TODO_c000_237f", 0),
+ MVI(0xc0002380, "TODO_c000_2380", 0),
+ MVI(0xc0002381, "TODO_c000_2381", 0),
+ MVI(0xc0002382, "TODO_c000_2382", 0),
+ MVI(0xc0002383, "TODO_c000_2383", 0),
+ MVI(0xc0002384, "TODO_c000_2384", 0),
+ MVI(0xc0002385, "TODO_c000_2385", 0),
+ MVI(0xc0002386, "TODO_c000_2386", 0),
+ MVI(0xc0002387, "TODO_c000_2387", 0),
+ MVI(0xc0002388, "TODO_c000_2388", 0),
+ MVI(0xc0002389, "TODO_c000_2389", 0),
+ MVI(0xc000238a, "TODO_c000_238a", 0),
+ MVI(0xc000238b, "TODO_c000_238b", 0),
+ MVI(0xc000238c, "TODO_c000_238c", 0),
+ MVI(0xc000238d, "TODO_c000_238d", 0),
+ MVI(0xc000238e, "TODO_c000_238e", 0),
+ MVI(0xc000238f, "TODO_c000_238f", 0),
+ MVI(0xc0002390, "TODO_c000_2390", 0),
+ MVI(0xc0002391, "TODO_c000_2391", 0),
+ MVI(0xc0002392, "TODO_c000_2392", 0),
+ MVI(0xc0002393, "TODO_c000_2393", 0),
+ MVI(0xc0002394, "TODO_c000_2394", 0),
+ MVI(0xc0002395, "TODO_c000_2395", 0),
+ MVI(0xc0002396, "TODO_c000_2396", 0),
+ MVI(0xc0002397, "TODO_c000_2397", 0),
+ MVI(0xc0002398, "TODO_c000_2398", 0),
+ MVI(0xc0002399, "TODO_c000_2399", 0),
+ MVI(0xc000239a, "TODO_c000_239a", 0),
+ MVI(0xc000239b, "TODO_c000_239b", 0),
+ MVI(0xc000239c, "TODO_c000_239c", 0),
+ MVI(0xc000239d, "TODO_c000_239d", 0),
+ MVI(0xc000239e, "TODO_c000_239e", 0),
+ MVI(0xc000239f, "TODO_c000_239f", 0),
+ MVI(0xc00023a0, "TODO_c000_23a0", 0),
+ MVI(0xc00023a1, "TODO_c000_23a1", 0),
+ MVI(0xc00023a2, "TODO_c000_23a2", 0),
+ MVI(0xc00023a3, "TODO_c000_23a3", 0),
+ MVI(0xc00023a4, "TODO_c000_23a4", 0),
+ MVI(0xc00023a5, "TODO_c000_23a5", 0),
+ MVI(0xc00023a6, "TODO_c000_23a6", 0),
+ MVI(0xc00023a7, "TODO_c000_23a7", 0),
+ MVI(0xc00023a8, "TODO_c000_23a8", 0),
+ MVI(0xc00023a9, "TODO_c000_23a9", 0),
+ MVI(0xc00023aa, "TODO_c000_23aa", 0),
+ MVI(0xc00023ab, "TODO_c000_23ab", 0),
+ MVI(0xc00023ac, "TODO_c000_23ac", 0),
+ MVI(0xc00023ad, "TODO_c000_23ad", 0),
+ MVI(0xc00023ae, "TODO_c000_23ae", 0),
+ MVI(0xc00023af, "TODO_c000_23af", 0),
+ MVI(0xc00023b0, "TODO_c000_23b0", 0),
+ MVI(0xc00023b1, "TODO_c000_23b1", 0),
+ MVI(0xc00023b2, "TODO_c000_23b2", 0),
+ MVI(0xc00023b3, "TODO_c000_23b3", 0),
+ MVI(0xc00023b4, "TODO_c000_23b4", 0),
+ MVI(0xc00023b5, "TODO_c000_23b5", 0),
+ MVI(0xc00023b6, "TODO_c000_23b6", 0),
+ MVI(0xc00023b7, "TODO_c000_23b7", 0),
+ MVI(0xc00023b8, "TODO_c000_23b8", 0),
+ MVI(0xc00023b9, "TODO_c000_23b9", 0),
+ MVI(0xc00023ba, "TODO_c000_23ba", 0),
+ MVI(0xc00023bb, "TODO_c000_23bb", 0),
+ MVI(0xc00023bc, "TODO_c000_23bc", 0),
+ MVI(0xc00023bd, "TODO_c000_23bd", 0),
+ MVI(0xc00023be, "TODO_c000_23be", 0),
+ MVI(0xc00023bf, "TODO_c000_23bf", 0),
+ MVI(0xc00023c0, "TODO_c000_23c0", 0),
+ MVI(0xc00023c1, "TODO_c000_23c1", 0),
+ MVI(0xc00023c2, "TODO_c000_23c2", 0),
+ MVI(0xc00023c3, "TODO_c000_23c3", 0),
+ MVI(0xc00023c4, "TODO_c000_23c4", 0),
+ MVI(0xc00023c5, "TODO_c000_23c5", 0),
+ MVI(0xc00023c6, "TODO_c000_23c6", 0),
+ MVI(0xc00023c7, "TODO_c000_23c7", 0),
+ MVI(0xc00023c8, "TODO_c000_23c8", 0),
+ MVI(0xc00023c9, "TODO_c000_23c9", 0),
+ MVI(0xc00023ca, "TODO_c000_23ca", 0),
+ MVI(0xc00023cb, "TODO_c000_23cb", 0),
+ MVI(0xc00023cc, "TODO_c000_23cc", 0),
+ MVI(0xc00023cd, "TODO_c000_23cd", 0),
+ MVI(0xc00023ce, "TODO_c000_23ce", 0),
+ MVI(0xc00023cf, "TODO_c000_23cf", 0),
+ MVI(0xc00023d0, "TODO_c000_23d0", 0),
+ MVI(0xc00023d1, "TODO_c000_23d1", 0),
+ MVI(0xc00023d2, "TODO_c000_23d2", 0),
+ MVI(0xc00023d3, "TODO_c000_23d3", 0),
+ MVI(0xc00023d4, "TODO_c000_23d4", 0),
+ MVI(0xc00023d5, "TODO_c000_23d5", 0),
+ MVI(0xc00023d6, "TODO_c000_23d6", 0),
+ MVI(0xc00023d7, "TODO_c000_23d7", 0),
+ MVI(0xc00023d8, "TODO_c000_23d8", 0),
+ MVI(0xc00023d9, "TODO_c000_23d9", 0),
+ MVI(0xc00023da, "TODO_c000_23da", 0),
+ MVI(0xc00023db, "TODO_c000_23db", 0),
+ MVI(0xc00023dc, "TODO_c000_23dc", 0),
+ MVI(0xc00023dd, "TODO_c000_23dd", 0),
+ MVI(0xc00023de, "TODO_c000_23de", 0),
+ MVI(0xc00023df, "TODO_c000_23df", 0),
+ MVI(0xc00023e0, "TODO_c000_23e0", 0),
+ MVI(0xc00023e1, "TODO_c000_23e1", 0),
+ MVI(0xc00023e2, "TODO_c000_23e2", 0),
+ MVI(0xc00023e3, "TODO_c000_23e3", 0),
+ MVI(0xc00023e4, "TODO_c000_23e4", 0),
+ MVI(0xc00023e5, "TODO_c000_23e5", 0),
+ MVI(0xc00023e6, "TODO_c000_23e6", 0),
+ MVI(0xc00023e7, "TODO_c000_23e7", 0),
+ MVI(0xc00023e8, "TODO_c000_23e8", 0),
+ MVI(0xc00023e9, "TODO_c000_23e9", 0),
+ MVI(0xc00023ea, "TODO_c000_23ea", 0),
+ MVI(0xc00023eb, "TODO_c000_23eb", 0),
+ MVI(0xc00023ec, "TODO_c000_23ec", 0),
+ MVI(0xc00023ed, "TODO_c000_23ed", 0),
+ MVI(0xc00023ee, "TODO_c000_23ee", 0),
+ MVI(0xc00023ef, "TODO_c000_23ef", 0),
+ MVI(0xc00023f0, "TODO_c000_23f0", 0),
+ MVI(0xc00023f1, "TODO_c000_23f1", 0),
+ MVI(0xc00023f2, "TODO_c000_23f2", 0),
+ MVI(0xc00023f3, "TODO_c000_23f3", 0),
+ MVI(0xc00023f4, "TODO_c000_23f4", 0),
+ MVI(0xc00023f5, "TODO_c000_23f5", 0),
+ MVI(0xc00023f6, "TODO_c000_23f6", 0),
+ MVI(0xc00023f7, "TODO_c000_23f7", 0),
+ MVI(0xc00023f8, "TODO_c000_23f8", 0),
+ MVI(0xc00023f9, "TODO_c000_23f9", 0),
+ MVI(0xc00023fa, "TODO_c000_23fa", 0),
+ MVI(0xc00023fb, "TODO_c000_23fb", 0),
+ MVI(0xc00023fc, "TODO_c000_23fc", 0),
+ MVI(0xc00023fd, "TODO_c000_23fd", 0),
+ MVI(0xc00023fe, "TODO_c000_23fe", 0),
+ MVI(0xc00023ff, "TODO_c000_23ff", 0),
+ MVI(0xc0002400, "TODO_c000_2400", 0),
+ MVI(0xc0002401, "TODO_c000_2401", 0),
+ MVI(0xc0002402, "TODO_c000_2402", 0),
+ MVI(0xc0002403, "TODO_c000_2403", 0),
+ MVI(0xc0002404, "TODO_c000_2404", 0),
+ MVI(0xc0002405, "TODO_c000_2405", 0),
+ MVI(0xc0002406, "TODO_c000_2406", 0),
+ MVI(0xc0002407, "TODO_c000_2407", 0),
+ MVI(0xc0002408, "TODO_c000_2408", 0),
+ MVI(0xc0002409, "TODO_c000_2409", 0),
+ MVI(0xc000240a, "TODO_c000_240a", 0),
+ MVI(0xc000240b, "TODO_c000_240b", 0),
+ MVI(0xc000240c, "TODO_c000_240c", 0),
+ MVI(0xc000240d, "TODO_c000_240d", 0),
+ MVI(0xc000240e, "TODO_c000_240e", 0),
+ MVI(0xc000240f, "TODO_c000_240f", 0),
+ MVI(0xc0002410, "TODO_c000_2410", 0),
+ MVI(0xc0002411, "TODO_c000_2411", 0),
+ MVI(0xc0002412, "TODO_c000_2412", 0),
+ MVI(0xc0002413, "TODO_c000_2413", 0),
+ MVI(0xc0002414, "TODO_c000_2414", 0),
+ MVI(0xc0002415, "TODO_c000_2415", 0),
+ MVI(0xc0002416, "TODO_c000_2416", 0),
+ MVI(0xc0002417, "TODO_c000_2417", 0),
+ MVI(0xc0002418, "TODO_c000_2418", 0),
+ MVI(0xc0002419, "TODO_c000_2419", 0),
+ MVI(0xc000241a, "TODO_c000_241a", 0),
+ MVI(0xc000241b, "TODO_c000_241b", 0),
+ MVI(0xc000241c, "TODO_c000_241c", 0),
+ MVI(0xc000241d, "TODO_c000_241d", 0),
+ MVI(0xc000241e, "TODO_c000_241e", 0),
+ MVI(0xc000241f, "TODO_c000_241f", 0),
+ MVI(0xc0002420, "TODO_c000_2420", 0),
+ MVI(0xc0002421, "TODO_c000_2421", 0),
+ MVI(0xc0002422, "TODO_c000_2422", 0),
+ MVI(0xc0002423, "TODO_c000_2423", 0),
+ MVI(0xc0002424, "TODO_c000_2424", 0),
+ MVI(0xc0002425, "TODO_c000_2425", 0),
+ MVI(0xc0002426, "TODO_c000_2426", 0),
+ MVI(0xc0002427, "TODO_c000_2427", 0),
+ MVI(0xc0002428, "TODO_c000_2428", 0),
+ MVI(0xc0002429, "TODO_c000_2429", 0),
+ MVI(0xc000242a, "TODO_c000_242a", 0),
+ MVI(0xc000242b, "TODO_c000_242b", 0),
+ MVI(0xc000242c, "TODO_c000_242c", 0),
+ MVI(0xc000242d, "TODO_c000_242d", 0),
+ MVI(0xc000242e, "TODO_c000_242e", 0),
+ MVI(0xc000242f, "TODO_c000_242f", 0),
+ MVI(0xc0002430, "TODO_c000_2430", 0),
+ MVI(0xc0002431, "TODO_c000_2431", 0),
+ MVI(0xc0002432, "TODO_c000_2432", 0),
+ MVI(0xc0002433, "TODO_c000_2433", 0),
+ MVI(0xc0002434, "TODO_c000_2434", 0),
+ MVI(0xc0002435, "TODO_c000_2435", 0),
+ MVI(0xc0002436, "TODO_c000_2436", 0),
+ MVI(0xc0002437, "TODO_c000_2437", 0),
+ MVI(0xc0002438, "TODO_c000_2438", 0),
+ MVI(0xc0002439, "TODO_c000_2439", 0),
+ MVI(0xc000243a, "TODO_c000_243a", 0),
+ MVI(0xc000243b, "TODO_c000_243b", 0),
+ MVI(0xc000243c, "TODO_c000_243c", 0),
+ MVI(0xc000243d, "TODO_c000_243d", 0),
+ MVI(0xc000243e, "TODO_c000_243e", 0),
+ MVI(0xc000243f, "TODO_c000_243f", 0),
+ MVI(0xc0002440, "TODO_c000_2440", 0),
+ MVI(0xc0002441, "TODO_c000_2441", 0),
+ MVI(0xc0002442, "TODO_c000_2442", 0),
+ MVI(0xc0002443, "TODO_c000_2443", 0),
+ MVI(0xc0002444, "TODO_c000_2444", 0),
+ MVI(0xc0002445, "TODO_c000_2445", 0),
+ MVI(0xc0002446, "TODO_c000_2446", 0),
+ MVI(0xc0002447, "TODO_c000_2447", 0),
+ MVI(0xc0002448, "TODO_c000_2448", 0),
+ MVI(0xc0002449, "TODO_c000_2449", 0),
+ MVI(0xc000244a, "TODO_c000_244a", 0),
+ MVI(0xc000244b, "TODO_c000_244b", 0),
+ MVI(0xc000244c, "TODO_c000_244c", 0),
+ MVI(0xc000244d, "TODO_c000_244d", 0),
+ MVI(0xc000244e, "TODO_c000_244e", 0),
+ MVI(0xc000244f, "TODO_c000_244f", 0),
+ MVI(0xc0002450, "TODO_c000_2450", 0),
+ MVI(0xc0002451, "TODO_c000_2451", 0),
+ MVI(0xc0002452, "TODO_c000_2452", 0),
+ MVI(0xc0002453, "TODO_c000_2453", 0),
+ MVI(0xc0002454, "TODO_c000_2454", 0),
+ MVI(0xc0002455, "TODO_c000_2455", 0),
+ MVI(0xc0002456, "TODO_c000_2456", 0),
+ MVI(0xc0002457, "TODO_c000_2457", 0),
+ MVI(0xc0002458, "TODO_c000_2458", 0),
+ MVI(0xc0002459, "TODO_c000_2459", 0),
+ MVI(0xc000245a, "TODO_c000_245a", 0),
+ MVI(0xc000245b, "TODO_c000_245b", 0),
+ MVI(0xc000245c, "TODO_c000_245c", 0),
+ MVI(0xc000245d, "TODO_c000_245d", 0),
+ MVI(0xc000245e, "TODO_c000_245e", 0),
+ MVI(0xc000245f, "TODO_c000_245f", 0),
+ MVI(0xc0002460, "TODO_c000_2460", 0),
+ MVI(0xc0002461, "TODO_c000_2461", 0),
+ MVI(0xc0002462, "TODO_c000_2462", 0),
+ MVI(0xc0002463, "TODO_c000_2463", 0),
+ MVI(0xc0002464, "TODO_c000_2464", 0),
+ MVI(0xc0002465, "TODO_c000_2465", 0),
+ MVI(0xc0002466, "TODO_c000_2466", 0),
+ MVI(0xc0002467, "TODO_c000_2467", 0),
+ MVI(0xc0002468, "TODO_c000_2468", 0),
+ MVI(0xc0002469, "TODO_c000_2469", 0),
+ MVI(0xc000246a, "TODO_c000_246a", 0),
+ MVI(0xc000246b, "TODO_c000_246b", 0),
+ MVI(0xc000246c, "TODO_c000_246c", 0),
+ MVI(0xc000246d, "TODO_c000_246d", 0),
+ MVI(0xc000246e, "TODO_c000_246e", 0),
+ MVI(0xc000246f, "TODO_c000_246f", 0),
+ MVI(0xc0002470, "TODO_c000_2470", 0),
+ MVI(0xc0002471, "TODO_c000_2471", 0),
+ MVI(0xc0002472, "TODO_c000_2472", 0),
+ MVI(0xc0002473, "TODO_c000_2473", 0),
+ MVI(0xc0002474, "TODO_c000_2474", 0),
+ MVI(0xc0002475, "TODO_c000_2475", 0),
+ MVI(0xc0002476, "TODO_c000_2476", 0),
+ MVI(0xc0002477, "TODO_c000_2477", 0),
+ MVI(0xc0002478, "TODO_c000_2478", 0),
+ MVI(0xc0002479, "TODO_c000_2479", 0),
+ MVI(0xc000247a, "TODO_c000_247a", 0),
+ MVI(0xc000247b, "TODO_c000_247b", 0),
+ MVI(0xc000247c, "TODO_c000_247c", 0),
+ MVI(0xc000247d, "TODO_c000_247d", 0),
+ MVI(0xc000247e, "TODO_c000_247e", 0),
+ MVI(0xc000247f, "TODO_c000_247f", 0),
+ MVI(0xc0002480, "TODO_c000_2480", 0),
+ MVI(0xc0002481, "TODO_c000_2481", 0),
+ MVI(0xc0002482, "TODO_c000_2482", 0),
+ MVI(0xc0002483, "TODO_c000_2483", 0),
+ MVI(0xc0002484, "TODO_c000_2484", 0),
+ MVI(0xc0002485, "TODO_c000_2485", 0),
+ MVI(0xc0002486, "TODO_c000_2486", 0),
+ MVI(0xc0002487, "TODO_c000_2487", 0),
+ MVI(0xc0002488, "TODO_c000_2488", 0),
+ MVI(0xc0002489, "TODO_c000_2489", 0),
+ MVI(0xc000248a, "TODO_c000_248a", 0),
+ MVI(0xc000248b, "TODO_c000_248b", 0),
+ MVI(0xc000248c, "TODO_c000_248c", 0),
+ MVI(0xc000248d, "TODO_c000_248d", 0),
+ MVI(0xc000248e, "TODO_c000_248e", 0),
+ MVI(0xc000248f, "TODO_c000_248f", 0),
+ MVI(0xc0002490, "TODO_c000_2490", 0),
+ MVI(0xc0002491, "TODO_c000_2491", 0),
+ MVI(0xc0002492, "TODO_c000_2492", 0),
+ MVI(0xc0002493, "TODO_c000_2493", 0),
+ MVI(0xc0002494, "TODO_c000_2494", 0),
+ MVI(0xc0002495, "TODO_c000_2495", 0),
+ MVI(0xc0002496, "TODO_c000_2496", 0),
+ MVI(0xc0002497, "TODO_c000_2497", 0),
+ MVI(0xc0002498, "TODO_c000_2498", 0),
+ MVI(0xc0002499, "TODO_c000_2499", 0),
+ MVI(0xc000249a, "TODO_c000_249a", 0),
+ MVI(0xc000249b, "TODO_c000_249b", 0),
+ MVI(0xc000249c, "TODO_c000_249c", 0),
+ MVI(0xc000249d, "TODO_c000_249d", 0),
+ MVI(0xc000249e, "TODO_c000_249e", 0),
+ MVI(0xc000249f, "TODO_c000_249f", 0),
+ MVI(0xc00024a0, "TODO_c000_24a0", 0),
+ MVI(0xc00024a1, "TODO_c000_24a1", 0),
+ MVI(0xc00024a2, "TODO_c000_24a2", 0),
+ MVI(0xc00024a3, "TODO_c000_24a3", 0),
+ MVI(0xc00024a4, "TODO_c000_24a4", 0),
+ MVI(0xc00024a5, "TODO_c000_24a5", 0),
+ MVI(0xc00024a6, "TODO_c000_24a6", 0),
+ MVI(0xc00024a7, "TODO_c000_24a7", 0),
+ MVI(0xc00024a8, "TODO_c000_24a8", 0),
+ MVI(0xc00024a9, "TODO_c000_24a9", 0),
+ MVI(0xc00024aa, "TODO_c000_24aa", 0),
+ MVI(0xc00024ab, "TODO_c000_24ab", 0),
+ MVI(0xc00024ac, "TODO_c000_24ac", 0),
+ MVI(0xc00024ad, "TODO_c000_24ad", 0),
+ MVI(0xc00024ae, "TODO_c000_24ae", 0),
+ MVI(0xc00024af, "TODO_c000_24af", 0),
+ MVI(0xc00024b0, "TODO_c000_24b0", 0),
+ MVI(0xc00024b1, "TODO_c000_24b1", 0),
+ MVI(0xc00024b2, "TODO_c000_24b2", 0),
+ MVI(0xc00024b3, "TODO_c000_24b3", 0),
+ MVI(0xc00024b4, "TODO_c000_24b4", 0),
+ MVI(0xc00024b5, "TODO_c000_24b5", 0),
+ MVI(0xc00024b6, "TODO_c000_24b6", 0),
+ MVI(0xc00024b7, "TODO_c000_24b7", 0),
+ MVI(0xc00024b8, "TODO_c000_24b8", 0),
+ MVI(0xc00024b9, "TODO_c000_24b9", 0),
+ MVI(0xc00024ba, "TODO_c000_24ba", 0),
+ MVI(0xc00024bb, "TODO_c000_24bb", 0),
+ MVI(0xc00024bc, "TODO_c000_24bc", 0),
+ MVI(0xc00024bd, "TODO_c000_24bd", 0),
+ MVI(0xc00024be, "TODO_c000_24be", 0),
+ MVI(0xc00024bf, "TODO_c000_24bf", 0),
+ MVI(0xc00024c0, "TODO_c000_24c0", 0),
+ MVI(0xc00024c1, "TODO_c000_24c1", 0),
+ MVI(0xc00024c2, "TODO_c000_24c2", 0),
+ MVI(0xc00024c3, "TODO_c000_24c3", 0),
+ MVI(0xc00024c4, "TODO_c000_24c4", 0),
+ MVI(0xc00024c5, "TODO_c000_24c5", 0),
+ MVI(0xc00024c6, "TODO_c000_24c6", 0),
+ MVI(0xc00024c7, "TODO_c000_24c7", 0),
+ MVI(0xc00024c8, "TODO_c000_24c8", 0),
+ MVI(0xc00024c9, "TODO_c000_24c9", 0),
+ MVI(0xc00024ca, "TODO_c000_24ca", 0),
+ MVI(0xc00024cb, "TODO_c000_24cb", 0),
+ MVI(0xc00024cc, "TODO_c000_24cc", 0),
+ MVI(0xc00024cd, "TODO_c000_24cd", 0),
+ MVI(0xc00024ce, "TODO_c000_24ce", 0),
+ MVI(0xc00024cf, "TODO_c000_24cf", 0),
+ MVI(0xc00024d0, "TODO_c000_24d0", 0),
+ MVI(0xc00024d1, "TODO_c000_24d1", 0),
+ MVI(0xc00024d2, "TODO_c000_24d2", 0),
+ MVI(0xc00024d3, "TODO_c000_24d3", 0),
+ MVI(0xc00024d4, "TODO_c000_24d4", 0),
+ MVI(0xc00024d5, "TODO_c000_24d5", 0),
+ MVI(0xc00024d6, "TODO_c000_24d6", 0),
+ MVI(0xc00024d7, "TODO_c000_24d7", 0),
+ MVI(0xc00024d8, "TODO_c000_24d8", 0),
+ MVI(0xc00024d9, "TODO_c000_24d9", 0),
+ MVI(0xc00024da, "TODO_c000_24da", 0),
+ MVI(0xc00024db, "TODO_c000_24db", 0),
+ MVI(0xc00024dc, "TODO_c000_24dc", 0),
+ MVI(0xc00024dd, "TODO_c000_24dd", 0),
+ MVI(0xc00024de, "TODO_c000_24de", 0),
+ MVI(0xc00024df, "TODO_c000_24df", 0),
+ MVI(0xc00024e0, "TODO_c000_24e0", 0),
+ MVI(0xc00024e1, "TODO_c000_24e1", 0),
+ MVI(0xc00024e2, "TODO_c000_24e2", 0),
+ MVI(0xc00024e3, "TODO_c000_24e3", 0),
+ MVI(0xc00024e4, "TODO_c000_24e4", 0),
+ MVI(0xc00024e5, "TODO_c000_24e5", 0),
+ MVI(0xc00024e6, "TODO_c000_24e6", 0),
+ MVI(0xc00024e7, "TODO_c000_24e7", 0),
+ MVI(0xc00024e8, "TODO_c000_24e8", 0),
+ MVI(0xc00024e9, "TODO_c000_24e9", 0),
+ MVI(0xc00024ea, "TODO_c000_24ea", 0),
+ MVI(0xc00024eb, "TODO_c000_24eb", 0),
+ MVI(0xc00024ec, "TODO_c000_24ec", 0),
+ MVI(0xc00024ed, "TODO_c000_24ed", 0),
+ MVI(0xc00024ee, "TODO_c000_24ee", 0),
+ MVI(0xc00024ef, "TODO_c000_24ef", 0),
+ MVI(0xc00024f0, "TODO_c000_24f0", 0),
+ MVI(0xc00024f1, "TODO_c000_24f1", 0),
+ MVI(0xc00024f2, "TODO_c000_24f2", 0),
+ MVI(0xc00024f3, "TODO_c000_24f3", 0),
+ MVI(0xc00024f4, "TODO_c000_24f4", 0),
+ MVI(0xc00024f5, "TODO_c000_24f5", 0),
+ MVI(0xc00024f6, "TODO_c000_24f6", 0),
+ MVI(0xc00024f7, "TODO_c000_24f7", 0),
+ MVI(0xc00024f8, "TODO_c000_24f8", 0),
+ MVI(0xc00024f9, "TODO_c000_24f9", 0),
+ MVI(0xc00024fa, "TODO_c000_24fa", 0),
+ MVI(0xc00024fb, "TODO_c000_24fb", 0),
+ MVI(0xc00024fc, "TODO_c000_24fc", 0),
+ MVI(0xc00024fd, "TODO_c000_24fd", 0),
+ MVI(0xc00024fe, "TODO_c000_24fe", 0),
+ MVI(0xc00024ff, "TODO_c000_24ff", 0),
+ MVI(0xc0002500, "TODO_c000_2500", 0),
+ MVI(0xc0002501, "TODO_c000_2501", 0),
+ MVI(0xc0002502, "TODO_c000_2502", 0),
+ MVI(0xc0002503, "TODO_c000_2503", 0),
+ MVI(0xc0002504, "TODO_c000_2504", 0),
+ MVI(0xc0002505, "TODO_c000_2505", 0),
+ MVI(0xc0002506, "TODO_c000_2506", 0),
+ MVI(0xc0002507, "TODO_c000_2507", 0),
+ MVI(0xc0002508, "TODO_c000_2508", 0),
+ MVI(0xc0002509, "TODO_c000_2509", 0),
+ MVI(0xc000250a, "TODO_c000_250a", 0),
+ MVI(0xc000250b, "TODO_c000_250b", 0),
+ MVI(0xc000250c, "TODO_c000_250c", 0),
+ MVI(0xc000250d, "TODO_c000_250d", 0),
+ MVI(0xc000250e, "TODO_c000_250e", 0),
+ MVI(0xc000250f, "TODO_c000_250f", 0),
+ MVI(0xc0002510, "TODO_c000_2510", 0),
+ MVI(0xc0002511, "TODO_c000_2511", 0),
+ MVI(0xc0002512, "TODO_c000_2512", 0),
+ MVI(0xc0002513, "TODO_c000_2513", 0),
+ MVI(0xc0002514, "TODO_c000_2514", 0),
+ MVI(0xc0002515, "TODO_c000_2515", 0),
+ MVI(0xc0002516, "TODO_c000_2516", 0),
+ MVI(0xc0002517, "TODO_c000_2517", 0),
+ MVI(0xc0002518, "TODO_c000_2518", 0),
+ MVI(0xc0002519, "TODO_c000_2519", 0),
+ MVI(0xc000251a, "TODO_c000_251a", 0),
+ MVI(0xc000251b, "TODO_c000_251b", 0),
+ MVI(0xc000251c, "TODO_c000_251c", 0),
+ MVI(0xc000251d, "TODO_c000_251d", 0),
+ MVI(0xc000251e, "TODO_c000_251e", 0),
+ MVI(0xc000251f, "TODO_c000_251f", 0),
+ MVI(0xc0002520, "TODO_c000_2520", 0),
+ MVI(0xc0002521, "TODO_c000_2521", 0),
+ MVI(0xc0002522, "TODO_c000_2522", 0),
+ MVI(0xc0002523, "TODO_c000_2523", 0),
+ MVI(0xc0002524, "TODO_c000_2524", 0),
+ MVI(0xc0002525, "TODO_c000_2525", 0),
+ MVI(0xc0002526, "TODO_c000_2526", 0),
+ MVI(0xc0002527, "TODO_c000_2527", 0),
+ MVI(0xc0002528, "TODO_c000_2528", 0),
+ MVI(0xc0002529, "TODO_c000_2529", 0),
+ MVI(0xc000252a, "TODO_c000_252a", 0),
+ MVI(0xc000252b, "TODO_c000_252b", 0),
+ MVI(0xc000252c, "TODO_c000_252c", 0),
+ MVI(0xc000252d, "TODO_c000_252d", 0),
+ MVI(0xc000252e, "TODO_c000_252e", 0),
+ MVI(0xc000252f, "TODO_c000_252f", 0),
+ MVI(0xc0002530, "TODO_c000_2530", 0),
+ MVI(0xc0002531, "TODO_c000_2531", 0),
+ MVI(0xc0002532, "TODO_c000_2532", 0),
+ MVI(0xc0002533, "TODO_c000_2533", 0),
+ MVI(0xc0002534, "TODO_c000_2534", 0),
+ MVI(0xc0002535, "TODO_c000_2535", 0),
+ MVI(0xc0002536, "TODO_c000_2536", 0),
+ MVI(0xc0002537, "TODO_c000_2537", 0),
+ MVI(0xc0002538, "TODO_c000_2538", 0),
+ MVI(0xc0002539, "TODO_c000_2539", 0),
+ MVI(0xc000253a, "TODO_c000_253a", 0),
+ MVI(0xc000253b, "TODO_c000_253b", 0),
+ MVI(0xc000253c, "TODO_c000_253c", 0),
+ MVI(0xc000253d, "TODO_c000_253d", 0),
+ MVI(0xc000253e, "TODO_c000_253e", 0),
+ MVI(0xc000253f, "TODO_c000_253f", 0),
+ MVI(0xc0002540, "TODO_c000_2540", 0),
+ MVI(0xc0002541, "TODO_c000_2541", 0),
+ MVI(0xc0002542, "TODO_c000_2542", 0),
+ MVI(0xc0002543, "TODO_c000_2543", 0),
+ MVI(0xc0002544, "TODO_c000_2544", 0),
+ MVI(0xc0002545, "TODO_c000_2545", 0),
+ MVI(0xc0002546, "TODO_c000_2546", 0),
+ MVI(0xc0002547, "TODO_c000_2547", 0),
+ MVI(0xc0002548, "TODO_c000_2548", 0),
+ MVI(0xc0002549, "TODO_c000_2549", 0),
+ MVI(0xc000254a, "TODO_c000_254a", 0),
+ MVI(0xc000254b, "TODO_c000_254b", 0),
+ MVI(0xc000254c, "TODO_c000_254c", 0),
+ MVI(0xc000254d, "TODO_c000_254d", 0),
+ MVI(0xc000254e, "TODO_c000_254e", 0),
+ MVI(0xc000254f, "TODO_c000_254f", 0),
+ MVI(0xc0002550, "TODO_c000_2550", 0),
+ MVI(0xc0002551, "TODO_c000_2551", 0),
+ MVI(0xc0002552, "TODO_c000_2552", 0),
+ MVI(0xc0002553, "TODO_c000_2553", 0),
+ MVI(0xc0002554, "TODO_c000_2554", 0),
+ MVI(0xc0002555, "TODO_c000_2555", 0),
+ MVI(0xc0002556, "TODO_c000_2556", 0),
+ MVI(0xc0002557, "TODO_c000_2557", 0),
+ MVI(0xc0002558, "TODO_c000_2558", 0),
+ MVI(0xc0002559, "TODO_c000_2559", 0),
+ MVI(0xc000255a, "TODO_c000_255a", 0),
+ MVI(0xc000255b, "TODO_c000_255b", 0),
+ MVI(0xc000255c, "TODO_c000_255c", 0),
+ MVI(0xc000255d, "TODO_c000_255d", 0),
+ MVI(0xc000255e, "TODO_c000_255e", 0),
+ MVI(0xc000255f, "TODO_c000_255f", 0),
+ MVI(0xc0002560, "TODO_c000_2560", 0),
+ MVI(0xc0002561, "TODO_c000_2561", 0),
+ MVI(0xc0002562, "TODO_c000_2562", 0),
+ MVI(0xc0002563, "TODO_c000_2563", 0),
+ MVI(0xc0002564, "TODO_c000_2564", 0),
+ MVI(0xc0002565, "TODO_c000_2565", 0),
+ MVI(0xc0002566, "TODO_c000_2566", 0),
+ MVI(0xc0002567, "TODO_c000_2567", 0),
+ MVI(0xc0002568, "TODO_c000_2568", 0),
+ MVI(0xc0002569, "TODO_c000_2569", 0),
+ MVI(0xc000256a, "TODO_c000_256a", 0),
+ MVI(0xc000256b, "TODO_c000_256b", 0),
+ MVI(0xc000256c, "TODO_c000_256c", 0),
+ MVI(0xc000256d, "TODO_c000_256d", 0),
+ MVI(0xc000256e, "TODO_c000_256e", 0),
+ MVI(0xc000256f, "TODO_c000_256f", 0),
+ MVI(0xc0002570, "TODO_c000_2570", 0),
+ MVI(0xc0002571, "TODO_c000_2571", 0),
+ MVI(0xc0002572, "TODO_c000_2572", 0),
+ MVI(0xc0002573, "TODO_c000_2573", 0),
+ MVI(0xc0002574, "TODO_c000_2574", 0),
+ MVI(0xc0002575, "TODO_c000_2575", 0),
+ MVI(0xc0002576, "TODO_c000_2576", 0),
+ MVI(0xc0002577, "TODO_c000_2577", 0),
+ MVI(0xc0002578, "TODO_c000_2578", 0),
+ MVI(0xc0002579, "TODO_c000_2579", 0),
+ MVI(0xc000257a, "TODO_c000_257a", 0),
+ MVI(0xc000257b, "TODO_c000_257b", 0),
+ MVI(0xc000257c, "TODO_c000_257c", 0),
+ MVI(0xc000257d, "TODO_c000_257d", 0),
+ MVI(0xc000257e, "TODO_c000_257e", 0),
+ MVI(0xc000257f, "TODO_c000_257f", 0),
+ MVI(0xc0002580, "TODO_c000_2580", 0),
+ MVI(0xc0002581, "TODO_c000_2581", 0),
+ MVI(0xc0002582, "TODO_c000_2582", 0),
+ MVI(0xc0002583, "TODO_c000_2583", 0),
+ MVI(0xc0002584, "TODO_c000_2584", 0),
+ MVI(0xc0002585, "TODO_c000_2585", 0),
+ MVI(0xc0002586, "TODO_c000_2586", 0),
+ MVI(0xc0002587, "TODO_c000_2587", 0),
+ MVI(0xc0002588, "TODO_c000_2588", 0),
+ MVI(0xc0002589, "TODO_c000_2589", 0),
+ MVI(0xc000258a, "TODO_c000_258a", 0),
+ MVI(0xc000258b, "TODO_c000_258b", 0),
+ MVI(0xc000258c, "TODO_c000_258c", 0),
+ MVI(0xc000258d, "TODO_c000_258d", 0),
+ MVI(0xc000258e, "TODO_c000_258e", 0),
+ MVI(0xc000258f, "TODO_c000_258f", 0),
+ MVI(0xc0002590, "TODO_c000_2590", 0),
+ MVI(0xc0002591, "TODO_c000_2591", 0),
+ MVI(0xc0002592, "TODO_c000_2592", 0),
+ MVI(0xc0002593, "TODO_c000_2593", 0),
+ MVI(0xc0002594, "TODO_c000_2594", 0),
+ MVI(0xc0002595, "TODO_c000_2595", 0),
+ MVI(0xc0002596, "TODO_c000_2596", 0),
+ MVI(0xc0002597, "TODO_c000_2597", 0),
+ MVI(0xc0002598, "TODO_c000_2598", 0),
+ MVI(0xc0002599, "TODO_c000_2599", 0),
+ MVI(0xc000259a, "TODO_c000_259a", 0),
+ MVI(0xc000259b, "TODO_c000_259b", 0),
+ MVI(0xc000259c, "TODO_c000_259c", 0),
+ MVI(0xc000259d, "TODO_c000_259d", 0),
+ MVI(0xc000259e, "TODO_c000_259e", 0),
+ MVI(0xc000259f, "TODO_c000_259f", 0),
+ MVI(0xc00025a0, "TODO_c000_25a0", 0),
+ MVI(0xc00025a1, "TODO_c000_25a1", 0),
+ MVI(0xc00025a2, "TODO_c000_25a2", 0),
+ MVI(0xc00025a3, "TODO_c000_25a3", 0),
+ MVI(0xc00025a4, "TODO_c000_25a4", 0),
+ MVI(0xc00025a5, "TODO_c000_25a5", 0),
+ MVI(0xc00025a6, "TODO_c000_25a6", 0),
+ MVI(0xc00025a7, "TODO_c000_25a7", 0),
+ MVI(0xc00025a8, "TODO_c000_25a8", 0),
+ MVI(0xc00025a9, "TODO_c000_25a9", 0),
+ MVI(0xc00025aa, "TODO_c000_25aa", 0),
+ MVI(0xc00025ab, "TODO_c000_25ab", 0),
+ MVI(0xc00025ac, "TODO_c000_25ac", 0),
+ MVI(0xc00025ad, "TODO_c000_25ad", 0),
+ MVI(0xc00025ae, "TODO_c000_25ae", 0),
+ MVI(0xc00025af, "TODO_c000_25af", 0),
+ MVI(0xc00025b0, "TODO_c000_25b0", 0),
+ MVI(0xc00025b1, "TODO_c000_25b1", 0),
+ MVI(0xc00025b2, "TODO_c000_25b2", 0),
+ MVI(0xc00025b3, "TODO_c000_25b3", 0),
+ MVI(0xc00025b4, "TODO_c000_25b4", 0),
+ MVI(0xc00025b5, "TODO_c000_25b5", 0),
+ MVI(0xc00025b6, "TODO_c000_25b6", 0),
+ MVI(0xc00025b7, "TODO_c000_25b7", 0),
+ MVI(0xc00025b8, "TODO_c000_25b8", 0),
+ MVI(0xc00025b9, "TODO_c000_25b9", 0),
+ MVI(0xc00025ba, "TODO_c000_25ba", 0),
+ MVI(0xc00025bb, "TODO_c000_25bb", 0),
+ MVI(0xc00025bc, "TODO_c000_25bc", 0),
+ MVI(0xc00025bd, "TODO_c000_25bd", 0),
+ MVI(0xc00025be, "TODO_c000_25be", 0),
+ MVI(0xc00025bf, "TODO_c000_25bf", 0),
+ MVI(0xc00025c0, "TODO_c000_25c0", 0),
+ MVI(0xc00025c1, "TODO_c000_25c1", 0),
+ MVI(0xc00025c2, "TODO_c000_25c2", 0),
+ MVI(0xc00025c3, "TODO_c000_25c3", 0),
+ MVI(0xc00025c4, "TODO_c000_25c4", 0),
+ MVI(0xc00025c5, "TODO_c000_25c5", 0),
+ MVI(0xc00025c6, "TODO_c000_25c6", 0),
+ MVI(0xc00025c7, "TODO_c000_25c7", 0),
+ MVI(0xc00025c8, "TODO_c000_25c8", 0),
+ MVI(0xc00025c9, "TODO_c000_25c9", 0),
+ MVI(0xc00025ca, "TODO_c000_25ca", 0),
+ MVI(0xc00025cb, "TODO_c000_25cb", 0),
+ MVI(0xc00025cc, "TODO_c000_25cc", 0),
+ MVI(0xc00025cd, "TODO_c000_25cd", 0),
+ MVI(0xc00025ce, "TODO_c000_25ce", 0),
+ MVI(0xc00025cf, "TODO_c000_25cf", 0),
+ MVI(0xc00025d0, "TODO_c000_25d0", 0),
+ MVI(0xc00025d1, "TODO_c000_25d1", 0),
+ MVI(0xc00025d2, "TODO_c000_25d2", 0),
+ MVI(0xc00025d3, "TODO_c000_25d3", 0),
+ MVI(0xc00025d4, "TODO_c000_25d4", 0),
+ MVI(0xc00025d5, "TODO_c000_25d5", 0),
+ MVI(0xc00025d6, "TODO_c000_25d6", 0),
+ MVI(0xc00025d7, "TODO_c000_25d7", 0),
+ MVI(0xc00025d8, "TODO_c000_25d8", 0),
+ MVI(0xc00025d9, "TODO_c000_25d9", 0),
+ MVI(0xc00025da, "TODO_c000_25da", 0),
+ MVI(0xc00025db, "TODO_c000_25db", 0),
+ MVI(0xc00025dc, "TODO_c000_25dc", 0),
+ MVI(0xc00025dd, "TODO_c000_25dd", 0),
+ MVI(0xc00025de, "TODO_c000_25de", 0),
+ MVI(0xc00025df, "TODO_c000_25df", 0),
+ MVI(0xc00025e0, "TODO_c000_25e0", 0),
+ MVI(0xc00025e1, "TODO_c000_25e1", 0),
+ MVI(0xc00025e2, "TODO_c000_25e2", 0),
+ MVI(0xc00025e3, "TODO_c000_25e3", 0),
+ MVI(0xc00025e4, "TODO_c000_25e4", 0),
+ MVI(0xc00025e5, "TODO_c000_25e5", 0),
+ MVI(0xc00025e6, "TODO_c000_25e6", 0),
+ MVI(0xc00025e7, "TODO_c000_25e7", 0),
+ MVI(0xc00025e8, "TODO_c000_25e8", 0),
+ MVI(0xc00025e9, "TODO_c000_25e9", 0),
+ MVI(0xc00025ea, "TODO_c000_25ea", 0),
+ MVI(0xc00025eb, "TODO_c000_25eb", 0),
+ MVI(0xc00025ec, "TODO_c000_25ec", 0),
+ MVI(0xc00025ed, "TODO_c000_25ed", 0),
+ MVI(0xc00025ee, "TODO_c000_25ee", 0),
+ MVI(0xc00025ef, "TODO_c000_25ef", 0),
+ MVI(0xc00025f0, "TODO_c000_25f0", 0),
+ MVI(0xc00025f1, "TODO_c000_25f1", 0),
+ MVI(0xc00025f2, "TODO_c000_25f2", 0),
+ MVI(0xc00025f3, "TODO_c000_25f3", 0),
+ MVI(0xc00025f4, "TODO_c000_25f4", 0),
+ MVI(0xc00025f5, "TODO_c000_25f5", 0),
+ MVI(0xc00025f6, "TODO_c000_25f6", 0),
+ MVI(0xc00025f7, "TODO_c000_25f7", 0),
+ MVI(0xc00025f8, "TODO_c000_25f8", 0),
+ MVI(0xc00025f9, "TODO_c000_25f9", 0),
+ MVI(0xc00025fa, "TODO_c000_25fa", 0),
+ MVI(0xc00025fb, "TODO_c000_25fb", 0),
+ MVI(0xc00025fc, "TODO_c000_25fc", 0),
+ MVI(0xc00025fd, "TODO_c000_25fd", 0),
+ MVI(0xc00025fe, "TODO_c000_25fe", 0),
+ MVI(0xc00025ff, "TODO_c000_25ff", 0),
+ MVI(0xc0002600, "TODO_c000_2600", 0),
+ MVI(0xc0002601, "TODO_c000_2601", 0),
+ MVI(0xc0002602, "TODO_c000_2602", 0),
+ MVI(0xc0002603, "TODO_c000_2603", 0),
+ MVI(0xc0002604, "TODO_c000_2604", 0),
+ MVI(0xc0002605, "TODO_c000_2605", 0),
+ MVI(0xc0002606, "TODO_c000_2606", 0),
+ MVI(0xc0002607, "TODO_c000_2607", 0),
+ MVI(0xc0002608, "TODO_c000_2608", 0),
+ MVI(0xc0002609, "TODO_c000_2609", 0),
+ MVI(0xc000260a, "TODO_c000_260a", 0),
+ MVI(0xc000260b, "TODO_c000_260b", 0),
+ MVI(0xc000260c, "TODO_c000_260c", 0),
+ MVI(0xc000260d, "TODO_c000_260d", 0),
+ MVI(0xc000260e, "TODO_c000_260e", 0),
+ MVI(0xc000260f, "TODO_c000_260f", 0),
+ MVI(0xc0002610, "TODO_c000_2610", 0),
+ MVI(0xc0002611, "TODO_c000_2611", 0),
+ MVI(0xc0002612, "TODO_c000_2612", 0),
+ MVI(0xc0002613, "TODO_c000_2613", 0),
+ MVI(0xc0002614, "TODO_c000_2614", 0),
+ MVI(0xc0002615, "TODO_c000_2615", 0),
+ MVI(0xc0002616, "TODO_c000_2616", 0),
+ MVI(0xc0002617, "TODO_c000_2617", 0),
+ MVI(0xc0002618, "TODO_c000_2618", 0),
+ MVI(0xc0002619, "TODO_c000_2619", 0),
+ MVI(0xc000261a, "TODO_c000_261a", 0),
+ MVI(0xc000261b, "TODO_c000_261b", 0),
+ MVI(0xc000261c, "TODO_c000_261c", 0),
+ MVI(0xc000261d, "TODO_c000_261d", 0),
+ MVI(0xc000261e, "TODO_c000_261e", 0),
+ MVI(0xc000261f, "TODO_c000_261f", 0),
+ MVI(0xc0002620, "TODO_c000_2620", 0),
+ MVI(0xc0002621, "TODO_c000_2621", 0),
+ MVI(0xc0002622, "TODO_c000_2622", 0),
+ MVI(0xc0002623, "TODO_c000_2623", 0),
+ MVI(0xc0002624, "TODO_c000_2624", 0),
+ MVI(0xc0002625, "TODO_c000_2625", 0),
+ MVI(0xc0002626, "TODO_c000_2626", 0),
+ MVI(0xc0002627, "TODO_c000_2627", 0),
+ MVI(0xc0002628, "TODO_c000_2628", 0),
+ MVI(0xc0002629, "TODO_c000_2629", 0),
+ MVI(0xc000262a, "TODO_c000_262a", 0),
+ MVI(0xc000262b, "TODO_c000_262b", 0),
+ MVI(0xc000262c, "TODO_c000_262c", 0),
+ MVI(0xc000262d, "TODO_c000_262d", 0),
+ MVI(0xc000262e, "TODO_c000_262e", 0),
+ MVI(0xc000262f, "TODO_c000_262f", 0),
+ MVI(0xc0002630, "TODO_c000_2630", 0),
+ MVI(0xc0002631, "TODO_c000_2631", 0),
+ MVI(0xc0002632, "TODO_c000_2632", 0),
+ MVI(0xc0002633, "TODO_c000_2633", 0),
+ MVI(0xc0002634, "TODO_c000_2634", 0),
+ MVI(0xc0002635, "TODO_c000_2635", 0),
+ MVI(0xc0002636, "TODO_c000_2636", 0),
+ MVI(0xc0002637, "TODO_c000_2637", 0),
+ MVI(0xc0002638, "TODO_c000_2638", 0),
+ MVI(0xc0002639, "TODO_c000_2639", 0),
+ MVI(0xc000263a, "TODO_c000_263a", 0),
+ MVI(0xc000263b, "TODO_c000_263b", 0),
+ MVI(0xc000263c, "TODO_c000_263c", 0),
+ MVI(0xc000263d, "TODO_c000_263d", 0),
+ MVI(0xc000263e, "TODO_c000_263e", 0),
+ MVI(0xc000263f, "TODO_c000_263f", 0),
+ MVI(0xc0002640, "TODO_c000_2640", 0),
+ MVI(0xc0002641, "TODO_c000_2641", 0),
+ MVI(0xc0002642, "TODO_c000_2642", 0),
+ MVI(0xc0002643, "TODO_c000_2643", 0),
+ MVI(0xc0002644, "TODO_c000_2644", 0),
+ MVI(0xc0002645, "TODO_c000_2645", 0),
+ MVI(0xc0002646, "TODO_c000_2646", 0),
+ MVI(0xc0002647, "TODO_c000_2647", 0),
+ MVI(0xc0002648, "TODO_c000_2648", 0),
+ MVI(0xc0002649, "TODO_c000_2649", 0),
+ MVI(0xc000264a, "TODO_c000_264a", 0),
+ MVI(0xc000264b, "TODO_c000_264b", 0),
+ MVI(0xc000264c, "TODO_c000_264c", 0),
+ MVI(0xc000264d, "TODO_c000_264d", 0),
+ MVI(0xc000264e, "TODO_c000_264e", 0),
+ MVI(0xc000264f, "TODO_c000_264f", 0),
+ MVI(0xc0002650, "TODO_c000_2650", 0),
+ MVI(0xc0002651, "TODO_c000_2651", 0),
+ MVI(0xc0002652, "TODO_c000_2652", 0),
+ MVI(0xc0002653, "TODO_c000_2653", 0),
+ MVI(0xc0002654, "TODO_c000_2654", 0),
+ MVI(0xc0002655, "TODO_c000_2655", 0),
+ MVI(0xc0002656, "TODO_c000_2656", 0),
+ MVI(0xc0002657, "TODO_c000_2657", 0),
+ MVI(0xc0002658, "TODO_c000_2658", 0),
+ MVI(0xc0002659, "TODO_c000_2659", 0),
+ MVI(0xc000265a, "TODO_c000_265a", 0),
+ MVI(0xc000265b, "TODO_c000_265b", 0),
+ MVI(0xc000265c, "TODO_c000_265c", 0),
+ MVI(0xc000265d, "TODO_c000_265d", 0),
+ MVI(0xc000265e, "TODO_c000_265e", 0),
+ MVI(0xc000265f, "TODO_c000_265f", 0),
+ MVI(0xc0002660, "TODO_c000_2660", 0),
+ MVI(0xc0002661, "TODO_c000_2661", 0),
+ MVI(0xc0002662, "TODO_c000_2662", 0),
+ MVI(0xc0002663, "TODO_c000_2663", 0),
+ MVI(0xc0002664, "TODO_c000_2664", 0),
+ MVI(0xc0002665, "TODO_c000_2665", 0),
+ MVI(0xc0002666, "TODO_c000_2666", 0),
+ MVI(0xc0002667, "TODO_c000_2667", 0),
+ MVI(0xc0002668, "TODO_c000_2668", 0),
+ MVI(0xc0002669, "TODO_c000_2669", 0),
+ MVI(0xc000266a, "TODO_c000_266a", 0),
+ MVI(0xc000266b, "TODO_c000_266b", 0),
+ MVI(0xc000266c, "TODO_c000_266c", 0),
+ MVI(0xc000266d, "TODO_c000_266d", 0),
+ MVI(0xc000266e, "TODO_c000_266e", 0),
+ MVI(0xc000266f, "TODO_c000_266f", 0),
+ MVI(0xc0002670, "TODO_c000_2670", 0),
+ MVI(0xc0002671, "TODO_c000_2671", 0),
+ MVI(0xc0002672, "TODO_c000_2672", 0),
+ MVI(0xc0002673, "TODO_c000_2673", 0),
+ MVI(0xc0002674, "TODO_c000_2674", 0),
+ MVI(0xc0002675, "TODO_c000_2675", 0),
+ MVI(0xc0002676, "TODO_c000_2676", 0),
+ MVI(0xc0002677, "TODO_c000_2677", 0),
+ MVI(0xc0002678, "TODO_c000_2678", 0),
+ MVI(0xc0002679, "TODO_c000_2679", 0),
+ MVI(0xc000267a, "TODO_c000_267a", 0),
+ MVI(0xc000267b, "TODO_c000_267b", 0),
+ MVI(0xc000267c, "TODO_c000_267c", 0),
+ MVI(0xc000267d, "TODO_c000_267d", 0),
+ MVI(0xc000267e, "TODO_c000_267e", 0),
+ MVI(0xc000267f, "TODO_c000_267f", 0),
+ MVI(0xc0002680, "TODO_c000_2680", 0),
+ MVI(0xc0002681, "TODO_c000_2681", 0),
+ MVI(0xc0002682, "TODO_c000_2682", 0),
+ MVI(0xc0002683, "TODO_c000_2683", 0),
+ MVI(0xc0002684, "TODO_c000_2684", 0),
+ MVI(0xc0002685, "TODO_c000_2685", 0),
+ MVI(0xc0002686, "TODO_c000_2686", 0),
+ MVI(0xc0002687, "TODO_c000_2687", 0),
+ MVI(0xc0002688, "TODO_c000_2688", 0),
+ MVI(0xc0002689, "TODO_c000_2689", 0),
+ MVI(0xc000268a, "TODO_c000_268a", 0),
+ MVI(0xc000268b, "TODO_c000_268b", 0),
+ MVI(0xc000268c, "TODO_c000_268c", 0),
+ MVI(0xc000268d, "TODO_c000_268d", 0),
+ MVI(0xc000268e, "TODO_c000_268e", 0),
+ MVI(0xc000268f, "TODO_c000_268f", 0),
+ MVI(0xc0002690, "TODO_c000_2690", 0),
+ MVI(0xc0002691, "TODO_c000_2691", 0),
+ MVI(0xc0002692, "TODO_c000_2692", 0),
+ MVI(0xc0002693, "TODO_c000_2693", 0),
+ MVI(0xc0002694, "TODO_c000_2694", 0),
+ MVI(0xc0002695, "TODO_c000_2695", 0),
+ MVI(0xc0002696, "TODO_c000_2696", 0),
+ MVI(0xc0002697, "TODO_c000_2697", 0),
+ MVI(0xc0002698, "TODO_c000_2698", 0),
+ MVI(0xc0002699, "TODO_c000_2699", 0),
+ MVI(0xc000269a, "TODO_c000_269a", 0),
+ MVI(0xc000269b, "TODO_c000_269b", 0),
+ MVI(0xc000269c, "TODO_c000_269c", 0),
+ MVI(0xc000269d, "TODO_c000_269d", 0),
+ MVI(0xc000269e, "TODO_c000_269e", 0),
+ MVI(0xc000269f, "TODO_c000_269f", 0),
+ MVI(0xc00026a0, "TODO_c000_26a0", 0),
+ MVI(0xc00026a1, "TODO_c000_26a1", 0),
+ MVI(0xc00026a2, "TODO_c000_26a2", 0),
+ MVI(0xc00026a3, "TODO_c000_26a3", 0),
+ MVI(0xc00026a4, "TODO_c000_26a4", 0),
+ MVI(0xc00026a5, "TODO_c000_26a5", 0),
+ MVI(0xc00026a6, "TODO_c000_26a6", 0),
+ MVI(0xc00026a7, "TODO_c000_26a7", 0),
+ MVI(0xc00026a8, "TODO_c000_26a8", 0),
+ MVI(0xc00026a9, "TODO_c000_26a9", 0),
+ MVI(0xc00026aa, "TODO_c000_26aa", 0),
+ MVI(0xc00026ab, "TODO_c000_26ab", 0),
+ MVI(0xc00026ac, "TODO_c000_26ac", 0),
+ MVI(0xc00026ad, "TODO_c000_26ad", 0),
+ MVI(0xc00026ae, "TODO_c000_26ae", 0),
+ MVI(0xc00026af, "TODO_c000_26af", 0),
+ MVI(0xc00026b0, "TODO_c000_26b0", 0),
+ MVI(0xc00026b1, "TODO_c000_26b1", 0),
+ MVI(0xc00026b2, "TODO_c000_26b2", 0),
+ MVI(0xc00026b3, "TODO_c000_26b3", 0),
+ MVI(0xc00026b4, "TODO_c000_26b4", 0),
+ MVI(0xc00026b5, "TODO_c000_26b5", 0),
+ MVI(0xc00026b6, "TODO_c000_26b6", 0),
+ MVI(0xc00026b7, "TODO_c000_26b7", 0),
+ MVI(0xc00026b8, "TODO_c000_26b8", 0),
+ MVI(0xc00026b9, "TODO_c000_26b9", 0),
+ MVI(0xc00026ba, "TODO_c000_26ba", 0),
+ MVI(0xc00026bb, "TODO_c000_26bb", 0),
+ MVI(0xc00026bc, "TODO_c000_26bc", 0),
+ MVI(0xc00026bd, "TODO_c000_26bd", 0),
+ MVI(0xc00026be, "TODO_c000_26be", 0),
+ MVI(0xc00026bf, "TODO_c000_26bf", 0),
+ MVI(0xc00026c0, "TODO_c000_26c0", 0),
+ MVI(0xc00026c1, "TODO_c000_26c1", 0),
+ MVI(0xc00026c2, "TODO_c000_26c2", 0),
+ MVI(0xc00026c3, "TODO_c000_26c3", 0),
+ MVI(0xc00026c4, "TODO_c000_26c4", 0),
+ MVI(0xc00026c5, "TODO_c000_26c5", 0),
+ MVI(0xc00026c6, "TODO_c000_26c6", 0),
+ MVI(0xc00026c7, "TODO_c000_26c7", 0),
+ MVI(0xc00026c8, "TODO_c000_26c8", 0),
+ MVI(0xc00026c9, "TODO_c000_26c9", 0),
+ MVI(0xc00026ca, "TODO_c000_26ca", 0),
+ MVI(0xc00026cb, "TODO_c000_26cb", 0),
+ MVI(0xc00026cc, "TODO_c000_26cc", 0),
+ MVI(0xc00026cd, "TODO_c000_26cd", 0),
+ MVI(0xc00026ce, "TODO_c000_26ce", 0),
+ MVI(0xc00026cf, "TODO_c000_26cf", 0),
+ MVI(0xc00026d0, "TODO_c000_26d0", 0),
+ MVI(0xc00026d1, "TODO_c000_26d1", 0),
+ MVI(0xc00026d2, "TODO_c000_26d2", 0),
+ MVI(0xc00026d3, "TODO_c000_26d3", 0),
+ MVI(0xc00026d4, "TODO_c000_26d4", 0),
+ MVI(0xc00026d5, "TODO_c000_26d5", 0),
+ MVI(0xc00026d6, "TODO_c000_26d6", 0),
+ MVI(0xc00026d7, "TODO_c000_26d7", 0),
+ MVI(0xc00026d8, "TODO_c000_26d8", 0),
+ MVI(0xc00026d9, "TODO_c000_26d9", 0),
+ MVI(0xc00026da, "TODO_c000_26da", 0),
+ MVI(0xc00026db, "TODO_c000_26db", 0),
+ MVI(0xc00026dc, "TODO_c000_26dc", 0),
+ MVI(0xc00026dd, "TODO_c000_26dd", 0),
+ MVI(0xc00026de, "TODO_c000_26de", 0),
+ MVI(0xc00026df, "TODO_c000_26df", 0),
+ MVI(0xc00026e0, "TODO_c000_26e0", 0),
+ MVI(0xc00026e1, "TODO_c000_26e1", 0),
+ MVI(0xc00026e2, "TODO_c000_26e2", 0),
+ MVI(0xc00026e3, "TODO_c000_26e3", 0),
+ MVI(0xc00026e4, "TODO_c000_26e4", 0),
+ MVI(0xc00026e5, "TODO_c000_26e5", 0),
+ MVI(0xc00026e6, "TODO_c000_26e6", 0),
+ MVI(0xc00026e7, "TODO_c000_26e7", 0),
+ MVI(0xc00026e8, "TODO_c000_26e8", 0),
+ MVI(0xc00026e9, "TODO_c000_26e9", 0),
+ MVI(0xc00026ea, "TODO_c000_26ea", 0),
+ MVI(0xc00026eb, "TODO_c000_26eb", 0),
+ MVI(0xc00026ec, "TODO_c000_26ec", 0),
+ MVI(0xc00026ed, "TODO_c000_26ed", 0),
+ MVI(0xc00026ee, "TODO_c000_26ee", 0),
+ MVI(0xc00026ef, "TODO_c000_26ef", 0),
+ MVI(0xc00026f0, "TODO_c000_26f0", 0),
+ MVI(0xc00026f1, "TODO_c000_26f1", 0),
+ MVI(0xc00026f2, "TODO_c000_26f2", 0),
+ MVI(0xc00026f3, "TODO_c000_26f3", 0),
+ MVI(0xc00026f4, "TODO_c000_26f4", 0),
+ MVI(0xc00026f5, "TODO_c000_26f5", 0),
+ MVI(0xc00026f6, "TODO_c000_26f6", 0),
+ MVI(0xc00026f7, "TODO_c000_26f7", 0),
+ MVI(0xc00026f8, "TODO_c000_26f8", 0),
+ MVI(0xc00026f9, "TODO_c000_26f9", 0),
+ MVI(0xc00026fa, "TODO_c000_26fa", 0),
+ MVI(0xc00026fb, "TODO_c000_26fb", 0),
+ MVI(0xc00026fc, "TODO_c000_26fc", 0),
+ MVI(0xc00026fd, "TODO_c000_26fd", 0),
+ MVI(0xc00026fe, "TODO_c000_26fe", 0),
+ MVI(0xc00026ff, "TODO_c000_26ff", 0),
+ MVI(0xc0002700, "TODO_c000_2700", 0),
+ MVI(0xc0002701, "TODO_c000_2701", 0),
+ MVI(0xc0002702, "TODO_c000_2702", 0),
+ MVI(0xc0002703, "TODO_c000_2703", 0),
+ MVI(0xc0002704, "TODO_c000_2704", 0),
+ MVI(0xc0002705, "TODO_c000_2705", 0),
+ MVI(0xc0002706, "TODO_c000_2706", 0),
+ MVI(0xc0002707, "TODO_c000_2707", 0),
+ MVI(0xc0002708, "TODO_c000_2708", 0),
+ MVI(0xc0002709, "TODO_c000_2709", 0),
+ MVI(0xc000270a, "TODO_c000_270a", 0),
+ MVI(0xc000270b, "TODO_c000_270b", 0),
+ MVI(0xc000270c, "TODO_c000_270c", 0),
+ MVI(0xc000270d, "TODO_c000_270d", 0),
+ MVI(0xc000270e, "TODO_c000_270e", 0),
+ MVI(0xc000270f, "TODO_c000_270f", 0),
+ MVI(0xc0002710, "TODO_c000_2710", 0),
+ MVI(0xc0002711, "TODO_c000_2711", 0),
+ MVI(0xc0002712, "TODO_c000_2712", 0),
+ MVI(0xc0002713, "TODO_c000_2713", 0),
+ MVI(0xc0002714, "TODO_c000_2714", 0),
+ MVI(0xc0002715, "TODO_c000_2715", 0),
+ MVI(0xc0002716, "TODO_c000_2716", 0),
+ MVI(0xc0002717, "TODO_c000_2717", 0),
+ MVI(0xc0002718, "TODO_c000_2718", 0),
+ MVI(0xc0002719, "TODO_c000_2719", 0),
+ MVI(0xc000271a, "TODO_c000_271a", 0),
+ MVI(0xc000271b, "TODO_c000_271b", 0),
+ MVI(0xc000271c, "TODO_c000_271c", 0),
+ MVI(0xc000271d, "TODO_c000_271d", 0),
+ MVI(0xc000271e, "TODO_c000_271e", 0),
+ MVI(0xc000271f, "TODO_c000_271f", 0),
+ MVI(0xc0002720, "TODO_c000_2720", 0),
+ MVI(0xc0002721, "TODO_c000_2721", 0),
+ MVI(0xc0002722, "TODO_c000_2722", 0),
+ MVI(0xc0002723, "TODO_c000_2723", 0),
+ MVI(0xc0002724, "TODO_c000_2724", 0),
+ MVI(0xc0002725, "TODO_c000_2725", 0),
+ MVI(0xc0002726, "TODO_c000_2726", 0),
+ MVI(0xc0002727, "TODO_c000_2727", 0),
+ MVI(0xc0002728, "TODO_c000_2728", 0),
+ MVI(0xc0002729, "TODO_c000_2729", 0),
+ MVI(0xc000272a, "TODO_c000_272a", 0),
+ MVI(0xc000272b, "TODO_c000_272b", 0),
+ MVI(0xc000272c, "TODO_c000_272c", 0),
+ MVI(0xc000272d, "TODO_c000_272d", 0),
+ MVI(0xc000272e, "TODO_c000_272e", 0),
+ MVI(0xc000272f, "TODO_c000_272f", 0),
+ MVI(0xc0002730, "TODO_c000_2730", 0),
+ MVI(0xc0002731, "TODO_c000_2731", 0),
+ MVI(0xc0002732, "TODO_c000_2732", 0),
+ MVI(0xc0002733, "TODO_c000_2733", 0),
+ MVI(0xc0002734, "TODO_c000_2734", 0),
+ MVI(0xc0002735, "TODO_c000_2735", 0),
+ MVI(0xc0002736, "TODO_c000_2736", 0),
+ MVI(0xc0002737, "TODO_c000_2737", 0),
+ MVI(0xc0002738, "TODO_c000_2738", 0),
+ MVI(0xc0002739, "TODO_c000_2739", 0),
+ MVI(0xc000273a, "TODO_c000_273a", 0),
+ MVI(0xc000273b, "TODO_c000_273b", 0),
+ MVI(0xc000273c, "TODO_c000_273c", 0),
+ MVI(0xc000273d, "TODO_c000_273d", 0),
+ MVI(0xc000273e, "TODO_c000_273e", 0),
+ MVI(0xc000273f, "TODO_c000_273f", 0),
+ MVI(0xc0002740, "TODO_c000_2740", 0),
+ MVI(0xc0002741, "TODO_c000_2741", 0),
+ MVI(0xc0002742, "TODO_c000_2742", 0),
+ MVI(0xc0002743, "TODO_c000_2743", 0),
+ MVI(0xc0002744, "TODO_c000_2744", 0),
+ MVI(0xc0002745, "TODO_c000_2745", 0),
+ MVI(0xc0002746, "TODO_c000_2746", 0),
+ MVI(0xc0002747, "TODO_c000_2747", 0),
+ MVI(0xc0002748, "TODO_c000_2748", 0),
+ MVI(0xc0002749, "TODO_c000_2749", 0),
+ MVI(0xc000274a, "TODO_c000_274a", 0),
+ MVI(0xc000274b, "TODO_c000_274b", 0),
+ MVI(0xc000274c, "TODO_c000_274c", 0),
+ MVI(0xc000274d, "TODO_c000_274d", 0),
+ MVI(0xc000274e, "TODO_c000_274e", 0),
+ MVI(0xc000274f, "TODO_c000_274f", 0),
+ MVI(0xc0002750, "TODO_c000_2750", 0),
+ MVI(0xc0002751, "TODO_c000_2751", 0),
+ MVI(0xc0002752, "TODO_c000_2752", 0),
+ MVI(0xc0002753, "TODO_c000_2753", 0),
+ MVI(0xc0002754, "TODO_c000_2754", 0),
+ MVI(0xc0002755, "TODO_c000_2755", 0),
+ MVI(0xc0002756, "TODO_c000_2756", 0),
+ MVI(0xc0002757, "TODO_c000_2757", 0),
+ MVI(0xc0002758, "TODO_c000_2758", 0),
+ MVI(0xc0002759, "TODO_c000_2759", 0),
+ MVI(0xc000275a, "TODO_c000_275a", 0),
+ MVI(0xc000275b, "TODO_c000_275b", 0),
+ MVI(0xc000275c, "TODO_c000_275c", 0),
+ MVI(0xc000275d, "TODO_c000_275d", 0),
+ MVI(0xc000275e, "TODO_c000_275e", 0),
+ MVI(0xc000275f, "TODO_c000_275f", 0),
+ MVI(0xc0002760, "TODO_c000_2760", 0),
+ MVI(0xc0002761, "TODO_c000_2761", 0),
+ MVI(0xc0002762, "TODO_c000_2762", 0),
+ MVI(0xc0002763, "TODO_c000_2763", 0),
+ MVI(0xc0002764, "TODO_c000_2764", 0),
+ MVI(0xc0002765, "TODO_c000_2765", 0),
+ MVI(0xc0002766, "TODO_c000_2766", 0),
+ MVI(0xc0002767, "TODO_c000_2767", 0),
+ MVI(0xc0002768, "TODO_c000_2768", 0),
+ MVI(0xc0002769, "TODO_c000_2769", 0),
+ MVI(0xc000276a, "TODO_c000_276a", 0),
+ MVI(0xc000276b, "TODO_c000_276b", 0),
+ MVI(0xc000276c, "TODO_c000_276c", 0),
+ MVI(0xc000276d, "TODO_c000_276d", 0),
+ MVI(0xc000276e, "TODO_c000_276e", 0),
+ MVI(0xc000276f, "TODO_c000_276f", 0),
+ MVI(0xc0002770, "TODO_c000_2770", 0),
+ MVI(0xc0002771, "TODO_c000_2771", 0),
+ MVI(0xc0002772, "TODO_c000_2772", 0),
+ MVI(0xc0002773, "TODO_c000_2773", 0),
+ MVI(0xc0002774, "TODO_c000_2774", 0),
+ MVI(0xc0002775, "TODO_c000_2775", 0),
+ MVI(0xc0002776, "TODO_c000_2776", 0),
+ MVI(0xc0002777, "TODO_c000_2777", 0),
+ MVI(0xc0002778, "TODO_c000_2778", 0),
+ MVI(0xc0002779, "TODO_c000_2779", 0),
+ MVI(0xc000277a, "TODO_c000_277a", 0),
+ MVI(0xc000277b, "TODO_c000_277b", 0),
+ MVI(0xc000277c, "TODO_c000_277c", 0),
+ MVI(0xc000277d, "TODO_c000_277d", 0),
+ MVI(0xc000277e, "TODO_c000_277e", 0),
+ MVI(0xc000277f, "TODO_c000_277f", 0),
+ MVI(0xc0002780, "TODO_c000_2780", 0),
+ MVI(0xc0002781, "TODO_c000_2781", 0),
+ MVI(0xc0002782, "TODO_c000_2782", 0),
+ MVI(0xc0002783, "TODO_c000_2783", 0),
+ MVI(0xc0002784, "TODO_c000_2784", 0),
+ MVI(0xc0002785, "TODO_c000_2785", 0),
+ MVI(0xc0002786, "TODO_c000_2786", 0),
+ MVI(0xc0002787, "TODO_c000_2787", 0),
+ MVI(0xc0002788, "TODO_c000_2788", 0),
+ MVI(0xc0002789, "TODO_c000_2789", 0),
+ MVI(0xc000278a, "TODO_c000_278a", 0),
+ MVI(0xc000278b, "TODO_c000_278b", 0),
+ MVI(0xc000278c, "TODO_c000_278c", 0),
+ MVI(0xc000278d, "TODO_c000_278d", 0),
+ MVI(0xc000278e, "TODO_c000_278e", 0),
+ MVI(0xc000278f, "TODO_c000_278f", 0),
+ MVI(0xc0002790, "TODO_c000_2790", 0),
+ MVI(0xc0002791, "TODO_c000_2791", 0),
+ MVI(0xc0002792, "TODO_c000_2792", 0),
+ MVI(0xc0002793, "TODO_c000_2793", 0),
+ MVI(0xc0002794, "TODO_c000_2794", 0),
+ MVI(0xc0002795, "TODO_c000_2795", 0),
+ MVI(0xc0002796, "TODO_c000_2796", 0),
+ MVI(0xc0002797, "TODO_c000_2797", 0),
+ MVI(0xc0002798, "TODO_c000_2798", 0),
+ MVI(0xc0002799, "TODO_c000_2799", 0),
+ MVI(0xc000279a, "TODO_c000_279a", 0),
+ MVI(0xc000279b, "TODO_c000_279b", 0),
+ MVI(0xc000279c, "TODO_c000_279c", 0),
+ MVI(0xc000279d, "TODO_c000_279d", 0),
+ MVI(0xc000279e, "TODO_c000_279e", 0),
+ MVI(0xc000279f, "TODO_c000_279f", 0),
+ MVI(0xc00027a0, "TODO_c000_27a0", 0),
+ MVI(0xc00027a1, "TODO_c000_27a1", 0),
+ MVI(0xc00027a2, "TODO_c000_27a2", 0),
+ MVI(0xc00027a3, "TODO_c000_27a3", 0),
+ MVI(0xc00027a4, "TODO_c000_27a4", 0),
+ MVI(0xc00027a5, "TODO_c000_27a5", 0),
+ MVI(0xc00027a6, "TODO_c000_27a6", 0),
+ MVI(0xc00027a7, "TODO_c000_27a7", 0),
+ MVI(0xc00027a8, "TODO_c000_27a8", 0),
+ MVI(0xc00027a9, "TODO_c000_27a9", 0),
+ MVI(0xc00027aa, "TODO_c000_27aa", 0),
+ MVI(0xc00027ab, "TODO_c000_27ab", 0),
+ MVI(0xc00027ac, "TODO_c000_27ac", 0),
+ MVI(0xc00027ad, "TODO_c000_27ad", 0),
+ MVI(0xc00027ae, "TODO_c000_27ae", 0),
+ MVI(0xc00027af, "TODO_c000_27af", 0),
+ MVI(0xc00027b0, "TODO_c000_27b0", 0),
+ MVI(0xc00027b1, "TODO_c000_27b1", 0),
+ MVI(0xc00027b2, "TODO_c000_27b2", 0),
+ MVI(0xc00027b3, "TODO_c000_27b3", 0),
+ MVI(0xc00027b4, "TODO_c000_27b4", 0),
+ MVI(0xc00027b5, "TODO_c000_27b5", 0),
+ MVI(0xc00027b6, "TODO_c000_27b6", 0),
+ MVI(0xc00027b7, "TODO_c000_27b7", 0),
+ MVI(0xc00027b8, "TODO_c000_27b8", 0),
+ MVI(0xc00027b9, "TODO_c000_27b9", 0),
+ MVI(0xc00027ba, "TODO_c000_27ba", 0),
+ MVI(0xc00027bb, "TODO_c000_27bb", 0),
+ MVI(0xc00027bc, "TODO_c000_27bc", 0),
+ MVI(0xc00027bd, "TODO_c000_27bd", 0),
+ MVI(0xc00027be, "TODO_c000_27be", 0),
+ MVI(0xc00027bf, "TODO_c000_27bf", 0),
+ MVI(0xc00027c0, "TODO_c000_27c0", 0),
+ MVI(0xc00027c1, "TODO_c000_27c1", 0),
+ MVI(0xc00027c2, "TODO_c000_27c2", 0),
+ MVI(0xc00027c3, "TODO_c000_27c3", 0),
+ MVI(0xc00027c4, "TODO_c000_27c4", 0),
+ MVI(0xc00027c5, "TODO_c000_27c5", 0),
+ MVI(0xc00027c6, "TODO_c000_27c6", 0),
+ MVI(0xc00027c7, "TODO_c000_27c7", 0),
+ MVI(0xc00027c8, "TODO_c000_27c8", 0),
+ MVI(0xc00027c9, "TODO_c000_27c9", 0),
+ MVI(0xc00027ca, "TODO_c000_27ca", 0),
+ MVI(0xc00027cb, "TODO_c000_27cb", 0),
+ MVI(0xc00027cc, "TODO_c000_27cc", 0),
+ MVI(0xc00027cd, "TODO_c000_27cd", 0),
+ MVI(0xc00027ce, "TODO_c000_27ce", 0),
+ MVI(0xc00027cf, "TODO_c000_27cf", 0),
+ MVI(0xc00027d0, "TODO_c000_27d0", 0),
+ MVI(0xc00027d1, "TODO_c000_27d1", 0),
+ MVI(0xc00027d2, "TODO_c000_27d2", 0),
+ MVI(0xc00027d3, "TODO_c000_27d3", 0),
+ MVI(0xc00027d4, "TODO_c000_27d4", 0),
+ MVI(0xc00027d5, "TODO_c000_27d5", 0),
+ MVI(0xc00027d6, "TODO_c000_27d6", 0),
+ MVI(0xc00027d7, "TODO_c000_27d7", 0),
+ MVI(0xc00027d8, "TODO_c000_27d8", 0),
+ MVI(0xc00027d9, "TODO_c000_27d9", 0),
+ MVI(0xc00027da, "TODO_c000_27da", 0),
+ MVI(0xc00027db, "TODO_c000_27db", 0),
+ MVI(0xc00027dc, "TODO_c000_27dc", 0),
+ MVI(0xc00027dd, "TODO_c000_27dd", 0),
+ MVI(0xc00027de, "TODO_c000_27de", 0),
+ MVI(0xc00027df, "TODO_c000_27df", 0),
+ MVI(0xc00027e0, "TODO_c000_27e0", 0),
+ MVI(0xc00027e1, "TODO_c000_27e1", 0),
+ MVI(0xc00027e2, "TODO_c000_27e2", 0),
+ MVI(0xc00027e3, "TODO_c000_27e3", 0),
+ MVI(0xc00027e4, "TODO_c000_27e4", 0),
+ MVI(0xc00027e5, "TODO_c000_27e5", 0),
+ MVI(0xc00027e6, "TODO_c000_27e6", 0),
+ MVI(0xc00027e7, "TODO_c000_27e7", 0),
+ MVI(0xc00027e8, "TODO_c000_27e8", 0),
+ MVI(0xc00027e9, "TODO_c000_27e9", 0),
+ MVI(0xc00027ea, "TODO_c000_27ea", 0),
+ MVI(0xc00027eb, "TODO_c000_27eb", 0),
+ MVI(0xc00027ec, "TODO_c000_27ec", 0),
+ MVI(0xc00027ed, "TODO_c000_27ed", 0),
+ MVI(0xc00027ee, "TODO_c000_27ee", 0),
+ MVI(0xc00027ef, "TODO_c000_27ef", 0),
+ MVI(0xc00027f0, "TODO_c000_27f0", 0),
+ MVI(0xc00027f1, "TODO_c000_27f1", 0),
+ MVI(0xc00027f2, "TODO_c000_27f2", 0),
+ MVI(0xc00027f3, "TODO_c000_27f3", 0),
+ MVI(0xc00027f4, "TODO_c000_27f4", 0),
+ MVI(0xc00027f5, "TODO_c000_27f5", 0),
+ MVI(0xc00027f6, "TODO_c000_27f6", 0),
+ MVI(0xc00027f7, "TODO_c000_27f7", 0),
+ MVI(0xc00027f8, "TODO_c000_27f8", 0),
+ MVI(0xc00027f9, "TODO_c000_27f9", 0),
+ MVI(0xc00027fa, "TODO_c000_27fa", 0),
+ MVI(0xc00027fb, "TODO_c000_27fb", 0),
+ MVI(0xc00027fc, "TODO_c000_27fc", 0),
+ MVI(0xc00027fd, "TODO_c000_27fd", 0),
+ MVI(0xc00027fe, "TODO_c000_27fe", 0),
+ MVI(0xc00027ff, "TODO_c000_27ff", 0),
+ MVI(0xc0002800, "TODO_c000_2800", 0),
+ MVI(0xc0002801, "TODO_c000_2801", 0),
+ MVI(0xc0002802, "TODO_c000_2802", 0),
+ MVI(0xc0002803, "TODO_c000_2803", 0),
+ MVI(0xc0002804, "TODO_c000_2804", 0),
+ MVI(0xc0002805, "TODO_c000_2805", 0),
+ MVI(0xc0002806, "TODO_c000_2806", 0),
+ MVI(0xc0002807, "TODO_c000_2807", 0),
+ MVI(0xc0002808, "TODO_c000_2808", 0),
+ MVI(0xc0002809, "TODO_c000_2809", 0),
+ MVI(0xc000280a, "TODO_c000_280a", 0),
+ MVI(0xc000280b, "TODO_c000_280b", 0),
+ MVI(0xc000280c, "TODO_c000_280c", 0),
+ MVI(0xc000280d, "TODO_c000_280d", 0),
+ MVI(0xc000280e, "TODO_c000_280e", 0),
+ MVI(0xc000280f, "TODO_c000_280f", 0),
+ MVI(0xc0002810, "TODO_c000_2810", 0),
+ MVI(0xc0002811, "TODO_c000_2811", 0),
+ MVI(0xc0002812, "TODO_c000_2812", 0),
+ MVI(0xc0002813, "TODO_c000_2813", 0),
+ MVI(0xc0002814, "TODO_c000_2814", 0),
+ MVI(0xc0002815, "TODO_c000_2815", 0),
+ MVI(0xc0002816, "TODO_c000_2816", 0),
+ MVI(0xc0002817, "TODO_c000_2817", 0),
+ MVI(0xc0002818, "TODO_c000_2818", 0),
+ MVI(0xc0002819, "TODO_c000_2819", 0),
+ MVI(0xc000281a, "TODO_c000_281a", 0),
+ MVI(0xc000281b, "TODO_c000_281b", 0),
+ MVI(0xc000281c, "TODO_c000_281c", 0),
+ MVI(0xc000281d, "TODO_c000_281d", 0),
+ MVI(0xc000281e, "TODO_c000_281e", 0),
+ MVI(0xc000281f, "TODO_c000_281f", 0),
+ MVI(0xc0002820, "TODO_c000_2820", 0),
+ MVI(0xc0002821, "TODO_c000_2821", 0),
+ MVI(0xc0002822, "TODO_c000_2822", 0),
+ MVI(0xc0002823, "TODO_c000_2823", 0),
+ MVI(0xc0002824, "TODO_c000_2824", 0),
+ MVI(0xc0002825, "TODO_c000_2825", 0),
+ MVI(0xc0002826, "TODO_c000_2826", 0),
+ MVI(0xc0002827, "TODO_c000_2827", 0),
+ MVI(0xc0002828, "TODO_c000_2828", 0),
+ MVI(0xc0002829, "TODO_c000_2829", 0),
+ MVI(0xc000282a, "TODO_c000_282a", 0),
+ MVI(0xc000282b, "TODO_c000_282b", 0),
+ MVI(0xc000282c, "TODO_c000_282c", 0),
+ MVI(0xc000282d, "TODO_c000_282d", 0),
+ MVI(0xc000282e, "TODO_c000_282e", 0),
+ MVI(0xc000282f, "TODO_c000_282f", 0),
+ MVI(0xc0002830, "TODO_c000_2830", 0),
+ MVI(0xc0002831, "TODO_c000_2831", 0),
+ MVI(0xc0002832, "TODO_c000_2832", 0),
+ MVI(0xc0002833, "TODO_c000_2833", 0),
+ MVI(0xc0002834, "TODO_c000_2834", 0),
+ MVI(0xc0002835, "TODO_c000_2835", 0),
+ MVI(0xc0002836, "TODO_c000_2836", 0),
+ MVI(0xc0002837, "TODO_c000_2837", 0),
+ MVI(0xc0002838, "TODO_c000_2838", 0),
+ MVI(0xc0002839, "TODO_c000_2839", 0),
+ MVI(0xc000283a, "TODO_c000_283a", 0),
+ MVI(0xc000283b, "TODO_c000_283b", 0),
+ MVI(0xc000283c, "TODO_c000_283c", 0),
+ MVI(0xc000283d, "TODO_c000_283d", 0),
+ MVI(0xc000283e, "TODO_c000_283e", 0),
+ MVI(0xc000283f, "TODO_c000_283f", 0),
+ MVI(0xc0002840, "TODO_c000_2840", 0),
+ MVI(0xc0002841, "TODO_c000_2841", 0),
+ MVI(0xc0002842, "TODO_c000_2842", 0),
+ MVI(0xc0002843, "TODO_c000_2843", 0),
+ MVI(0xc0002844, "TODO_c000_2844", 0),
+ MVI(0xc0002845, "TODO_c000_2845", 0),
+ MVI(0xc0002846, "TODO_c000_2846", 0),
+ MVI(0xc0002847, "TODO_c000_2847", 0),
+ MVI(0xc0002848, "TODO_c000_2848", 0),
+ MVI(0xc0002849, "TODO_c000_2849", 0),
+ MVI(0xc000284a, "TODO_c000_284a", 0),
+ MVI(0xc000284b, "TODO_c000_284b", 0),
+ MVI(0xc000284c, "TODO_c000_284c", 0),
+ MVI(0xc000284d, "TODO_c000_284d", 0),
+ MVI(0xc000284e, "TODO_c000_284e", 0),
+ MVI(0xc000284f, "TODO_c000_284f", 0),
+ MVI(0xc0002850, "TODO_c000_2850", 0),
+ MVI(0xc0002851, "TODO_c000_2851", 0),
+ MVI(0xc0002852, "TODO_c000_2852", 0),
+ MVI(0xc0002853, "TODO_c000_2853", 0),
+ MVI(0xc0002854, "TODO_c000_2854", 0),
+ MVI(0xc0002855, "TODO_c000_2855", 0),
+ MVI(0xc0002856, "TODO_c000_2856", 0),
+ MVI(0xc0002857, "TODO_c000_2857", 0),
+ MVI(0xc0002858, "TODO_c000_2858", 0),
+ MVI(0xc0002859, "TODO_c000_2859", 0),
+ MVI(0xc000285a, "TODO_c000_285a", 0),
+ MVI(0xc000285b, "TODO_c000_285b", 0),
+ MVI(0xc000285c, "TODO_c000_285c", 0),
+ MVI(0xc000285d, "TODO_c000_285d", 0),
+ MVI(0xc000285e, "TODO_c000_285e", 0),
+ MVI(0xc000285f, "TODO_c000_285f", 0),
+ MVI(0xc0002860, "TODO_c000_2860", 0),
+ MVI(0xc0002861, "TODO_c000_2861", 0),
+ MVI(0xc0002862, "TODO_c000_2862", 0),
+ MVI(0xc0002863, "TODO_c000_2863", 0),
+ MVI(0xc0002864, "TODO_c000_2864", 0),
+ MVI(0xc0002865, "TODO_c000_2865", 0),
+ MVI(0xc0002866, "TODO_c000_2866", 0),
+ MVI(0xc0002867, "TODO_c000_2867", 0),
+ MVI(0xc0002868, "TODO_c000_2868", 0),
+ MVI(0xc0002869, "TODO_c000_2869", 0),
+ MVI(0xc000286a, "TODO_c000_286a", 0),
+ MVI(0xc000286b, "TODO_c000_286b", 0),
+ MVI(0xc000286c, "TODO_c000_286c", 0),
+ MVI(0xc000286d, "TODO_c000_286d", 0),
+ MVI(0xc000286e, "TODO_c000_286e", 0),
+ MVI(0xc000286f, "TODO_c000_286f", 0),
+ MVI(0xc0002870, "TODO_c000_2870", 0),
+ MVI(0xc0002871, "TODO_c000_2871", 0),
+ MVI(0xc0002872, "TODO_c000_2872", 0),
+ MVI(0xc0002873, "TODO_c000_2873", 0),
+ MVI(0xc0002874, "TODO_c000_2874", 0),
+ MVI(0xc0002875, "TODO_c000_2875", 0),
+ MVI(0xc0002876, "TODO_c000_2876", 0),
+ MVI(0xc0002877, "TODO_c000_2877", 0),
+ MVI(0xc0002878, "TODO_c000_2878", 0),
+ MVI(0xc0002879, "TODO_c000_2879", 0),
+ MVI(0xc000287a, "TODO_c000_287a", 0),
+ MVI(0xc000287b, "TODO_c000_287b", 0),
+ MVI(0xc000287c, "TODO_c000_287c", 0),
+ MVI(0xc000287d, "TODO_c000_287d", 0),
+ MVI(0xc000287e, "TODO_c000_287e", 0),
+ MVI(0xc000287f, "TODO_c000_287f", 0),
+ MVI(0xc0002880, "TODO_c000_2880", 0),
+ MVI(0xc0002881, "TODO_c000_2881", 0),
+ MVI(0xc0002882, "TODO_c000_2882", 0),
+ MVI(0xc0002883, "TODO_c000_2883", 0),
+ MVI(0xc0002884, "TODO_c000_2884", 0),
+ MVI(0xc0002885, "TODO_c000_2885", 0),
+ MVI(0xc0002886, "TODO_c000_2886", 0),
+ MVI(0xc0002887, "TODO_c000_2887", 0),
+ MVI(0xc0002888, "TODO_c000_2888", 0),
+ MVI(0xc0002889, "TODO_c000_2889", 0),
+ MVI(0xc000288a, "TODO_c000_288a", 0),
+ MVI(0xc000288b, "TODO_c000_288b", 0),
+ MVI(0xc000288c, "TODO_c000_288c", 0),
+ MVI(0xc000288d, "TODO_c000_288d", 0),
+ MVI(0xc000288e, "TODO_c000_288e", 0),
+ MVI(0xc000288f, "TODO_c000_288f", 0),
+ MVI(0xc0002890, "TODO_c000_2890", 0),
+ MVI(0xc0002891, "TODO_c000_2891", 0),
+ MVI(0xc0002892, "TODO_c000_2892", 0),
+ MVI(0xc0002893, "TODO_c000_2893", 0),
+ MVI(0xc0002894, "TODO_c000_2894", 0),
+ MVI(0xc0002895, "TODO_c000_2895", 0),
+ MVI(0xc0002896, "TODO_c000_2896", 0),
+ MVI(0xc0002897, "TODO_c000_2897", 0),
+ MVI(0xc0002898, "TODO_c000_2898", 0),
+ MVI(0xc0002899, "TODO_c000_2899", 0),
+ MVI(0xc000289a, "TODO_c000_289a", 0),
+ MVI(0xc000289b, "TODO_c000_289b", 0),
+ MVI(0xc000289c, "TODO_c000_289c", 0),
+ MVI(0xc000289d, "TODO_c000_289d", 0),
+ MVI(0xc000289e, "TODO_c000_289e", 0),
+ MVI(0xc000289f, "TODO_c000_289f", 0),
+ MVI(0xc00028a0, "TODO_c000_28a0", 0),
+ MVI(0xc00028a1, "TODO_c000_28a1", 0),
+ MVI(0xc00028a2, "TODO_c000_28a2", 0),
+ MVI(0xc00028a3, "TODO_c000_28a3", 0),
+ MVI(0xc00028a4, "TODO_c000_28a4", 0),
+ MVI(0xc00028a5, "TODO_c000_28a5", 0),
+ MVI(0xc00028a6, "TODO_c000_28a6", 0),
+ MVI(0xc00028a7, "TODO_c000_28a7", 0),
+ MVI(0xc00028a8, "TODO_c000_28a8", 0),
+ MVI(0xc00028a9, "TODO_c000_28a9", 0),
+ MVI(0xc00028aa, "TODO_c000_28aa", 0),
+ MVI(0xc00028ab, "TODO_c000_28ab", 0),
+ MVI(0xc00028ac, "TODO_c000_28ac", 0),
+ MVI(0xc00028ad, "TODO_c000_28ad", 0),
+ MVI(0xc00028ae, "TODO_c000_28ae", 0),
+ MVI(0xc00028af, "TODO_c000_28af", 0),
+ MVI(0xc00028b0, "TODO_c000_28b0", 0),
+ MVI(0xc00028b1, "TODO_c000_28b1", 0),
+ MVI(0xc00028b2, "TODO_c000_28b2", 0),
+ MVI(0xc00028b3, "TODO_c000_28b3", 0),
+ MVI(0xc00028b4, "TODO_c000_28b4", 0),
+ MVI(0xc00028b5, "TODO_c000_28b5", 0),
+ MVI(0xc00028b6, "TODO_c000_28b6", 0),
+ MVI(0xc00028b7, "TODO_c000_28b7", 0),
+ MVI(0xc00028b8, "TODO_c000_28b8", 0),
+ MVI(0xc00028b9, "TODO_c000_28b9", 0),
+ MVI(0xc00028ba, "TODO_c000_28ba", 0),
+ MVI(0xc00028bb, "TODO_c000_28bb", 0),
+ MVI(0xc00028bc, "TODO_c000_28bc", 0),
+ MVI(0xc00028bd, "TODO_c000_28bd", 0),
+ MVI(0xc00028be, "TODO_c000_28be", 0),
+ MVI(0xc00028bf, "TODO_c000_28bf", 0),
+ MVI(0xc00028c0, "TODO_c000_28c0", 0),
+ MVI(0xc00028c1, "TODO_c000_28c1", 0),
+ MVI(0xc00028c2, "TODO_c000_28c2", 0),
+ MVI(0xc00028c3, "TODO_c000_28c3", 0),
+ MVI(0xc00028c4, "TODO_c000_28c4", 0),
+ MVI(0xc00028c5, "TODO_c000_28c5", 0),
+ MVI(0xc00028c6, "TODO_c000_28c6", 0),
+ MVI(0xc00028c7, "TODO_c000_28c7", 0),
+ MVI(0xc00028c8, "TODO_c000_28c8", 0),
+ MVI(0xc00028c9, "TODO_c000_28c9", 0),
+ MVI(0xc00028ca, "TODO_c000_28ca", 0),
+ MVI(0xc00028cb, "TODO_c000_28cb", 0),
+ MVI(0xc00028cc, "TODO_c000_28cc", 0),
+ MVI(0xc00028cd, "TODO_c000_28cd", 0),
+ MVI(0xc00028ce, "TODO_c000_28ce", 0),
+ MVI(0xc00028cf, "TODO_c000_28cf", 0),
+ MVI(0xc00028d0, "TODO_c000_28d0", 0),
+ MVI(0xc00028d1, "TODO_c000_28d1", 0),
+ MVI(0xc00028d2, "TODO_c000_28d2", 0),
+ MVI(0xc00028d3, "TODO_c000_28d3", 0),
+ MVI(0xc00028d4, "TODO_c000_28d4", 0),
+ MVI(0xc00028d5, "TODO_c000_28d5", 0),
+ MVI(0xc00028d6, "TODO_c000_28d6", 0),
+ MVI(0xc00028d7, "TODO_c000_28d7", 0),
+ MVI(0xc00028d8, "TODO_c000_28d8", 0),
+ MVI(0xc00028d9, "TODO_c000_28d9", 0),
+ MVI(0xc00028da, "TODO_c000_28da", 0),
+ MVI(0xc00028db, "TODO_c000_28db", 0),
+ MVI(0xc00028dc, "TODO_c000_28dc", 0),
+ MVI(0xc00028dd, "TODO_c000_28dd", 0),
+ MVI(0xc00028de, "TODO_c000_28de", 0),
+ MVI(0xc00028df, "TODO_c000_28df", 0),
+ MVI(0xc00028e0, "TODO_c000_28e0", 0),
+ MVI(0xc00028e1, "TODO_c000_28e1", 0),
+ MVI(0xc00028e2, "TODO_c000_28e2", 0),
+ MVI(0xc00028e3, "TODO_c000_28e3", 0),
+ MVI(0xc00028e4, "TODO_c000_28e4", 0),
+ MVI(0xc00028e5, "TODO_c000_28e5", 0),
+ MVI(0xc00028e6, "TODO_c000_28e6", 0),
+ MVI(0xc00028e7, "TODO_c000_28e7", 0),
+ MVI(0xc00028e8, "TODO_c000_28e8", 0),
+ MVI(0xc00028e9, "TODO_c000_28e9", 0),
+ MVI(0xc00028ea, "TODO_c000_28ea", 0),
+ MVI(0xc00028eb, "TODO_c000_28eb", 0),
+ MVI(0xc00028ec, "TODO_c000_28ec", 0),
+ MVI(0xc00028ed, "TODO_c000_28ed", 0),
+ MVI(0xc00028ee, "TODO_c000_28ee", 0),
+ MVI(0xc00028ef, "TODO_c000_28ef", 0),
+ MVI(0xc00028f0, "TODO_c000_28f0", 0),
+ MVI(0xc00028f1, "TODO_c000_28f1", 0),
+ MVI(0xc00028f2, "TODO_c000_28f2", 0),
+ MVI(0xc00028f3, "TODO_c000_28f3", 0),
+ MVI(0xc00028f4, "TODO_c000_28f4", 0),
+ MVI(0xc00028f5, "TODO_c000_28f5", 0),
+ MVI(0xc00028f6, "TODO_c000_28f6", 0),
+ MVI(0xc00028f7, "TODO_c000_28f7", 0),
+ MVI(0xc00028f8, "TODO_c000_28f8", 0),
+ MVI(0xc00028f9, "TODO_c000_28f9", 0),
+ MVI(0xc00028fa, "TODO_c000_28fa", 0),
+ MVI(0xc00028fb, "TODO_c000_28fb", 0),
+ MVI(0xc00028fc, "TODO_c000_28fc", 0),
+ MVI(0xc00028fd, "TODO_c000_28fd", 0),
+ MVI(0xc00028fe, "TODO_c000_28fe", 0),
+ MVI(0xc00028ff, "TODO_c000_28ff", 0),
+ MVI(0xc0002900, "TODO_c000_2900", 0),
+ MVI(0xc0002901, "TODO_c000_2901", 0),
+ MVI(0xc0002902, "TODO_c000_2902", 0),
+ MVI(0xc0002903, "TODO_c000_2903", 0),
+ MVI(0xc0002904, "TODO_c000_2904", 0),
+ MVI(0xc0002905, "TODO_c000_2905", 0),
+ MVI(0xc0002906, "TODO_c000_2906", 0),
+ MVI(0xc0002907, "TODO_c000_2907", 0),
+ MVI(0xc0002908, "TODO_c000_2908", 0),
+ MVI(0xc0002909, "TODO_c000_2909", 0),
+ MVI(0xc000290a, "TODO_c000_290a", 0),
+ MVI(0xc000290b, "TODO_c000_290b", 0),
+ MVI(0xc000290c, "TODO_c000_290c", 0),
+ MVI(0xc000290d, "TODO_c000_290d", 0),
+ MVI(0xc000290e, "TODO_c000_290e", 0),
+ MVI(0xc000290f, "TODO_c000_290f", 0),
+ MVI(0xc0002910, "TODO_c000_2910", 0),
+ MVI(0xc0002911, "TODO_c000_2911", 0),
+ MVI(0xc0002912, "TODO_c000_2912", 0),
+ MVI(0xc0002913, "TODO_c000_2913", 0),
+ MVI(0xc0002914, "TODO_c000_2914", 0),
+ MVI(0xc0002915, "TODO_c000_2915", 0),
+ MVI(0xc0002916, "TODO_c000_2916", 0),
+ MVI(0xc0002917, "TODO_c000_2917", 0),
+ MVI(0xc0002918, "TODO_c000_2918", 0),
+ MVI(0xc0002919, "TODO_c000_2919", 0),
+ MVI(0xc000291a, "TODO_c000_291a", 0),
+ MVI(0xc000291b, "TODO_c000_291b", 0),
+ MVI(0xc000291c, "TODO_c000_291c", 0),
+ MVI(0xc000291d, "TODO_c000_291d", 0),
+ MVI(0xc000291e, "TODO_c000_291e", 0),
+ MVI(0xc000291f, "TODO_c000_291f", 0),
+ MVI(0xc0002920, "TODO_c000_2920", 0),
+ MVI(0xc0002921, "TODO_c000_2921", 0),
+ MVI(0xc0002922, "TODO_c000_2922", 0),
+ MVI(0xc0002923, "TODO_c000_2923", 0),
+ MVI(0xc0002924, "TODO_c000_2924", 0),
+ MVI(0xc0002925, "TODO_c000_2925", 0),
+ MVI(0xc0002926, "TODO_c000_2926", 0),
+ MVI(0xc0002927, "TODO_c000_2927", 0),
+ MVI(0xc0002928, "TODO_c000_2928", 0),
+ MVI(0xc0002929, "TODO_c000_2929", 0),
+ MVI(0xc000292a, "TODO_c000_292a", 0),
+ MVI(0xc000292b, "TODO_c000_292b", 0),
+ MVI(0xc000292c, "TODO_c000_292c", 0),
+ MVI(0xc000292d, "TODO_c000_292d", 0),
+ MVI(0xc000292e, "TODO_c000_292e", 0),
+ MVI(0xc000292f, "TODO_c000_292f", 0),
+ MVI(0xc0002930, "TODO_c000_2930", 0),
+ MVI(0xc0002931, "TODO_c000_2931", 0),
+ MVI(0xc0002932, "TODO_c000_2932", 0),
+ MVI(0xc0002933, "TODO_c000_2933", 0),
+ MVI(0xc0002934, "TODO_c000_2934", 0),
+ MVI(0xc0002935, "TODO_c000_2935", 0),
+ MVI(0xc0002936, "TODO_c000_2936", 0),
+ MVI(0xc0002937, "TODO_c000_2937", 0),
+ MVI(0xc0002938, "TODO_c000_2938", 0),
+ MVI(0xc0002939, "TODO_c000_2939", 0),
+ MVI(0xc000293a, "TODO_c000_293a", 0),
+ MVI(0xc000293b, "TODO_c000_293b", 0),
+ MVI(0xc000293c, "TODO_c000_293c", 0),
+ MVI(0xc000293d, "TODO_c000_293d", 0),
+ MVI(0xc000293e, "TODO_c000_293e", 0),
+ MVI(0xc000293f, "TODO_c000_293f", 0),
+ MVI(0xc0002940, "TODO_c000_2940", 0),
+ MVI(0xc0002941, "TODO_c000_2941", 0),
+ MVI(0xc0002942, "TODO_c000_2942", 0),
+ MVI(0xc0002943, "TODO_c000_2943", 0),
+ MVI(0xc0002944, "TODO_c000_2944", 0),
+ MVI(0xc0002945, "TODO_c000_2945", 0),
+ MVI(0xc0002946, "TODO_c000_2946", 0),
+ MVI(0xc0002947, "TODO_c000_2947", 0),
+ MVI(0xc0002948, "TODO_c000_2948", 0),
+ MVI(0xc0002949, "TODO_c000_2949", 0),
+ MVI(0xc000294a, "TODO_c000_294a", 0),
+ MVI(0xc000294b, "TODO_c000_294b", 0),
+ MVI(0xc000294c, "TODO_c000_294c", 0),
+ MVI(0xc000294d, "TODO_c000_294d", 0),
+ MVI(0xc000294e, "TODO_c000_294e", 0),
+ MVI(0xc000294f, "TODO_c000_294f", 0),
+ MVI(0xc0002950, "TODO_c000_2950", 0),
+ MVI(0xc0002951, "TODO_c000_2951", 0),
+ MVI(0xc0002952, "TODO_c000_2952", 0),
+ MVI(0xc0002953, "TODO_c000_2953", 0),
+ MVI(0xc0002954, "TODO_c000_2954", 0),
+ MVI(0xc0002955, "TODO_c000_2955", 0),
+ MVI(0xc0002956, "TODO_c000_2956", 0),
+ MVI(0xc0002957, "TODO_c000_2957", 0),
+ MVI(0xc0002958, "TODO_c000_2958", 0),
+ MVI(0xc0002959, "TODO_c000_2959", 0),
+ MVI(0xc000295a, "TODO_c000_295a", 0),
+ MVI(0xc000295b, "TODO_c000_295b", 0),
+ MVI(0xc000295c, "TODO_c000_295c", 0),
+ MVI(0xc000295d, "TODO_c000_295d", 0),
+ MVI(0xc000295e, "TODO_c000_295e", 0),
+ MVI(0xc000295f, "TODO_c000_295f", 0),
+ MVI(0xc0002960, "TODO_c000_2960", 0),
+ MVI(0xc0002961, "TODO_c000_2961", 0),
+ MVI(0xc0002962, "TODO_c000_2962", 0),
+ MVI(0xc0002963, "TODO_c000_2963", 0),
+ MVI(0xc0002964, "TODO_c000_2964", 0),
+ MVI(0xc0002965, "TODO_c000_2965", 0),
+ MVI(0xc0002966, "TODO_c000_2966", 0),
+ MVI(0xc0002967, "TODO_c000_2967", 0),
+ MVI(0xc0002968, "TODO_c000_2968", 0),
+ MVI(0xc0002969, "TODO_c000_2969", 0),
+ MVI(0xc000296a, "TODO_c000_296a", 0),
+ MVI(0xc000296b, "TODO_c000_296b", 0),
+ MVI(0xc000296c, "TODO_c000_296c", 0),
+ MVI(0xc000296d, "TODO_c000_296d", 0),
+ MVI(0xc000296e, "TODO_c000_296e", 0),
+ MVI(0xc000296f, "TODO_c000_296f", 0),
+ MVI(0xc0002970, "TODO_c000_2970", 0),
+ MVI(0xc0002971, "TODO_c000_2971", 0),
+ MVI(0xc0002972, "TODO_c000_2972", 0),
+ MVI(0xc0002973, "TODO_c000_2973", 0),
+ MVI(0xc0002974, "TODO_c000_2974", 0),
+ MVI(0xc0002975, "TODO_c000_2975", 0),
+ MVI(0xc0002976, "TODO_c000_2976", 0),
+ MVI(0xc0002977, "TODO_c000_2977", 0),
+ MVI(0xc0002978, "TODO_c000_2978", 0),
+ MVI(0xc0002979, "TODO_c000_2979", 0),
+ MVI(0xc000297a, "TODO_c000_297a", 0),
+ MVI(0xc000297b, "TODO_c000_297b", 0),
+ MVI(0xc000297c, "TODO_c000_297c", 0),
+ MVI(0xc000297d, "TODO_c000_297d", 0),
+ MVI(0xc000297e, "TODO_c000_297e", 0),
+ MVI(0xc000297f, "TODO_c000_297f", 0),
+ MVI(0xc0002980, "TODO_c000_2980", 0),
+ MVI(0xc0002981, "TODO_c000_2981", 0),
+ MVI(0xc0002982, "TODO_c000_2982", 0),
+ MVI(0xc0002983, "TODO_c000_2983", 0),
+ MVI(0xc0002984, "TODO_c000_2984", 0),
+ MVI(0xc0002985, "TODO_c000_2985", 0),
+ MVI(0xc0002986, "TODO_c000_2986", 0),
+ MVI(0xc0002987, "TODO_c000_2987", 0),
+ MVI(0xc0002988, "TODO_c000_2988", 0),
+ MVI(0xc0002989, "TODO_c000_2989", 0),
+ MVI(0xc000298a, "TODO_c000_298a", 0),
+ MVI(0xc000298b, "TODO_c000_298b", 0),
+ MVI(0xc000298c, "TODO_c000_298c", 0),
+ MVI(0xc000298d, "TODO_c000_298d", 0),
+ MVI(0xc000298e, "TODO_c000_298e", 0),
+ MVI(0xc000298f, "TODO_c000_298f", 0),
+ MVI(0xc0002990, "TODO_c000_2990", 0),
+ MVI(0xc0002991, "TODO_c000_2991", 0),
+ MVI(0xc0002992, "TODO_c000_2992", 0),
+ MVI(0xc0002993, "TODO_c000_2993", 0),
+ MVI(0xc0002994, "TODO_c000_2994", 0),
+ MVI(0xc0002995, "TODO_c000_2995", 0),
+ MVI(0xc0002996, "TODO_c000_2996", 0),
+ MVI(0xc0002997, "TODO_c000_2997", 0),
+ MVI(0xc0002998, "TODO_c000_2998", 0),
+ MVI(0xc0002999, "TODO_c000_2999", 0),
+ MVI(0xc000299a, "TODO_c000_299a", 0),
+ MVI(0xc000299b, "TODO_c000_299b", 0),
+ MVI(0xc000299c, "TODO_c000_299c", 0),
+ MVI(0xc000299d, "TODO_c000_299d", 0),
+ MVI(0xc000299e, "TODO_c000_299e", 0),
+ MVI(0xc000299f, "TODO_c000_299f", 0),
+ MVI(0xc00029a0, "TODO_c000_29a0", 0),
+ MVI(0xc00029a1, "TODO_c000_29a1", 0),
+ MVI(0xc00029a2, "TODO_c000_29a2", 0),
+ MVI(0xc00029a3, "TODO_c000_29a3", 0),
+ MVI(0xc00029a4, "TODO_c000_29a4", 0),
+ MVI(0xc00029a5, "TODO_c000_29a5", 0),
+ MVI(0xc00029a6, "TODO_c000_29a6", 0),
+ MVI(0xc00029a7, "TODO_c000_29a7", 0),
+ MVI(0xc00029a8, "TODO_c000_29a8", 0),
+ MVI(0xc00029a9, "TODO_c000_29a9", 0),
+ MVI(0xc00029aa, "TODO_c000_29aa", 0),
+ MVI(0xc00029ab, "TODO_c000_29ab", 0),
+ MVI(0xc00029ac, "TODO_c000_29ac", 0),
+ MVI(0xc00029ad, "TODO_c000_29ad", 0),
+ MVI(0xc00029ae, "TODO_c000_29ae", 0),
+ MVI(0xc00029af, "TODO_c000_29af", 0),
+ MVI(0xc00029b0, "TODO_c000_29b0", 0),
+ MVI(0xc00029b1, "TODO_c000_29b1", 0),
+ MVI(0xc00029b2, "TODO_c000_29b2", 0),
+ MVI(0xc00029b3, "TODO_c000_29b3", 0),
+ MVI(0xc00029b4, "TODO_c000_29b4", 0),
+ MVI(0xc00029b5, "TODO_c000_29b5", 0),
+ MVI(0xc00029b6, "TODO_c000_29b6", 0),
+ MVI(0xc00029b7, "TODO_c000_29b7", 0),
+ MVI(0xc00029b8, "TODO_c000_29b8", 0),
+ MVI(0xc00029b9, "TODO_c000_29b9", 0),
+ MVI(0xc00029ba, "TODO_c000_29ba", 0),
+ MVI(0xc00029bb, "TODO_c000_29bb", 0),
+ MVI(0xc00029bc, "TODO_c000_29bc", 0),
+ MVI(0xc00029bd, "TODO_c000_29bd", 0),
+ MVI(0xc00029be, "TODO_c000_29be", 0),
+ MVI(0xc00029bf, "TODO_c000_29bf", 0),
+ MVI(0xc00029c0, "TODO_c000_29c0", 0),
+ MVI(0xc00029c1, "TODO_c000_29c1", 0),
+ MVI(0xc00029c2, "TODO_c000_29c2", 0),
+ MVI(0xc00029c3, "TODO_c000_29c3", 0),
+ MVI(0xc00029c4, "TODO_c000_29c4", 0),
+ MVI(0xc00029c5, "TODO_c000_29c5", 0),
+ MVI(0xc00029c6, "TODO_c000_29c6", 0),
+ MVI(0xc00029c7, "TODO_c000_29c7", 0),
+ MVI(0xc00029c8, "TODO_c000_29c8", 0),
+ MVI(0xc00029c9, "TODO_c000_29c9", 0),
+ MVI(0xc00029ca, "TODO_c000_29ca", 0),
+ MVI(0xc00029cb, "TODO_c000_29cb", 0),
+ MVI(0xc00029cc, "TODO_c000_29cc", 0),
+ MVI(0xc00029cd, "TODO_c000_29cd", 0),
+ MVI(0xc00029ce, "TODO_c000_29ce", 0),
+ MVI(0xc00029cf, "TODO_c000_29cf", 0),
+ MVI(0xc00029d0, "TODO_c000_29d0", 0),
+ MVI(0xc00029d1, "TODO_c000_29d1", 0),
+ MVI(0xc00029d2, "TODO_c000_29d2", 0),
+ MVI(0xc00029d3, "TODO_c000_29d3", 0),
+ MVI(0xc00029d4, "TODO_c000_29d4", 0),
+ MVI(0xc00029d5, "TODO_c000_29d5", 0),
+ MVI(0xc00029d6, "TODO_c000_29d6", 0),
+ MVI(0xc00029d7, "TODO_c000_29d7", 0),
+ MVI(0xc00029d8, "TODO_c000_29d8", 0),
+ MVI(0xc00029d9, "TODO_c000_29d9", 0),
+ MVI(0xc00029da, "TODO_c000_29da", 0),
+ MVI(0xc00029db, "TODO_c000_29db", 0),
+ MVI(0xc00029dc, "TODO_c000_29dc", 0),
+ MVI(0xc00029dd, "TODO_c000_29dd", 0),
+ MVI(0xc00029de, "TODO_c000_29de", 0),
+ MVI(0xc00029df, "TODO_c000_29df", 0),
+ MVI(0xc00029e0, "TODO_c000_29e0", 0),
+ MVI(0xc00029e1, "TODO_c000_29e1", 0),
+ MVI(0xc00029e2, "TODO_c000_29e2", 0),
+ MVI(0xc00029e3, "TODO_c000_29e3", 0),
+ MVI(0xc00029e4, "TODO_c000_29e4", 0),
+ MVI(0xc00029e5, "TODO_c000_29e5", 0),
+ MVI(0xc00029e6, "TODO_c000_29e6", 0),
+ MVI(0xc00029e7, "TODO_c000_29e7", 0),
+ MVI(0xc00029e8, "TODO_c000_29e8", 0),
+ MVI(0xc00029e9, "TODO_c000_29e9", 0),
+ MVI(0xc00029ea, "TODO_c000_29ea", 0),
+ MVI(0xc00029eb, "TODO_c000_29eb", 0),
+ MVI(0xc00029ec, "TODO_c000_29ec", 0),
+ MVI(0xc00029ed, "TODO_c000_29ed", 0),
+ MVI(0xc00029ee, "TODO_c000_29ee", 0),
+ MVI(0xc00029ef, "TODO_c000_29ef", 0),
+ MVI(0xc00029f0, "TODO_c000_29f0", 0),
+ MVI(0xc00029f1, "TODO_c000_29f1", 0),
+ MVI(0xc00029f2, "TODO_c000_29f2", 0),
+ MVI(0xc00029f3, "TODO_c000_29f3", 0),
+ MVI(0xc00029f4, "TODO_c000_29f4", 0),
+ MVI(0xc00029f5, "TODO_c000_29f5", 0),
+ MVI(0xc00029f6, "TODO_c000_29f6", 0),
+ MVI(0xc00029f7, "TODO_c000_29f7", 0),
+ MVI(0xc00029f8, "TODO_c000_29f8", 0),
+ MVI(0xc00029f9, "TODO_c000_29f9", 0),
+ MVI(0xc00029fa, "TODO_c000_29fa", 0),
+ MVI(0xc00029fb, "TODO_c000_29fb", 0),
+ MVI(0xc00029fc, "TODO_c000_29fc", 0),
+ MVI(0xc00029fd, "TODO_c000_29fd", 0),
+ MVI(0xc00029fe, "TODO_c000_29fe", 0),
+ MVI(0xc00029ff, "TODO_c000_29ff", 0),
+ MVI(0xc0002a00, "TODO_c000_2a00", 0),
+ MVI(0xc0002a01, "TODO_c000_2a01", 0),
+ MVI(0xc0002a02, "TODO_c000_2a02", 0),
+ MVI(0xc0002a03, "TODO_c000_2a03", 0),
+ MVI(0xc0002a04, "TODO_c000_2a04", 0),
+ MVI(0xc0002a05, "TODO_c000_2a05", 0),
+ MVI(0xc0002a06, "TODO_c000_2a06", 0),
+ MVI(0xc0002a07, "TODO_c000_2a07", 0),
+ MVI(0xc0002a08, "TODO_c000_2a08", 0),
+ MVI(0xc0002a09, "TODO_c000_2a09", 0),
+ MVI(0xc0002a0a, "TODO_c000_2a0a", 0),
+ MVI(0xc0002a0b, "TODO_c000_2a0b", 0),
+ MVI(0xc0002a0c, "TODO_c000_2a0c", 0),
+ MVI(0xc0002a0d, "TODO_c000_2a0d", 0),
+ MVI(0xc0002a0e, "TODO_c000_2a0e", 0),
+ MVI(0xc0002a0f, "TODO_c000_2a0f", 0),
+ MVI(0xc0002a10, "TODO_c000_2a10", 0),
+ MVI(0xc0002a11, "TODO_c000_2a11", 0),
+ MVI(0xc0002a12, "TODO_c000_2a12", 0),
+ MVI(0xc0002a13, "TODO_c000_2a13", 0),
+ MVI(0xc0002a14, "TODO_c000_2a14", 0),
+ MVI(0xc0002a15, "TODO_c000_2a15", 0),
+ MVI(0xc0002a16, "TODO_c000_2a16", 0),
+ MVI(0xc0002a17, "TODO_c000_2a17", 0),
+ MVI(0xc0002a18, "TODO_c000_2a18", 0),
+ MVI(0xc0002a19, "TODO_c000_2a19", 0),
+ MVI(0xc0002a1a, "TODO_c000_2a1a", 0),
+ MVI(0xc0002a1b, "TODO_c000_2a1b", 0),
+ MVI(0xc0002a1c, "TODO_c000_2a1c", 0),
+ MVI(0xc0002a1d, "TODO_c000_2a1d", 0),
+ MVI(0xc0002a1e, "TODO_c000_2a1e", 0),
+ MVI(0xc0002a1f, "TODO_c000_2a1f", 0),
+ MVI(0xc0002a20, "TODO_c000_2a20", 0),
+ MVI(0xc0002a21, "TODO_c000_2a21", 0),
+ MVI(0xc0002a22, "TODO_c000_2a22", 0),
+ MVI(0xc0002a23, "TODO_c000_2a23", 0),
+ MVI(0xc0002a24, "TODO_c000_2a24", 0),
+ MVI(0xc0002a25, "TODO_c000_2a25", 0),
+ MVI(0xc0002a26, "TODO_c000_2a26", 0),
+ MVI(0xc0002a27, "TODO_c000_2a27", 0),
+ MVI(0xc0002a28, "TODO_c000_2a28", 0),
+ MVI(0xc0002a29, "TODO_c000_2a29", 0),
+ MVI(0xc0002a2a, "TODO_c000_2a2a", 0),
+ MVI(0xc0002a2b, "TODO_c000_2a2b", 0),
+ MVI(0xc0002a2c, "TODO_c000_2a2c", 0),
+ MVI(0xc0002a2d, "TODO_c000_2a2d", 0),
+ MVI(0xc0002a2e, "TODO_c000_2a2e", 0),
+ MVI(0xc0002a2f, "TODO_c000_2a2f", 0),
+ MVI(0xc0002a30, "TODO_c000_2a30", 0),
+ MVI(0xc0002a31, "TODO_c000_2a31", 0),
+ MVI(0xc0002a32, "TODO_c000_2a32", 0),
+ MVI(0xc0002a33, "TODO_c000_2a33", 0),
+ MVI(0xc0002a34, "TODO_c000_2a34", 0),
+ MVI(0xc0002a35, "TODO_c000_2a35", 0),
+ MVI(0xc0002a36, "TODO_c000_2a36", 0),
+ MVI(0xc0002a37, "TODO_c000_2a37", 0),
+ MVI(0xc0002a38, "TODO_c000_2a38", 0),
+ MVI(0xc0002a39, "TODO_c000_2a39", 0),
+ MVI(0xc0002a3a, "TODO_c000_2a3a", 0),
+ MVI(0xc0002a3b, "TODO_c000_2a3b", 0),
+ MVI(0xc0002a3c, "TODO_c000_2a3c", 0),
+ MVI(0xc0002a3d, "TODO_c000_2a3d", 0),
+ MVI(0xc0002a3e, "TODO_c000_2a3e", 0),
+ MVI(0xc0002a3f, "TODO_c000_2a3f", 0),
+ MVI(0xc0002a40, "TODO_c000_2a40", 0),
+ MVI(0xc0002a41, "TODO_c000_2a41", 0),
+ MVI(0xc0002a42, "TODO_c000_2a42", 0),
+ MVI(0xc0002a43, "TODO_c000_2a43", 0),
+ MVI(0xc0002a44, "TODO_c000_2a44", 0),
+ MVI(0xc0002a45, "TODO_c000_2a45", 0),
+ MVI(0xc0002a46, "TODO_c000_2a46", 0),
+ MVI(0xc0002a47, "TODO_c000_2a47", 0),
+ MVI(0xc0002a48, "TODO_c000_2a48", 0),
+ MVI(0xc0002a49, "TODO_c000_2a49", 0),
+ MVI(0xc0002a4a, "TODO_c000_2a4a", 0),
+ MVI(0xc0002a4b, "TODO_c000_2a4b", 0),
+ MVI(0xc0002a4c, "TODO_c000_2a4c", 0),
+ MVI(0xc0002a4d, "TODO_c000_2a4d", 0),
+ MVI(0xc0002a4e, "TODO_c000_2a4e", 0),
+ MVI(0xc0002a4f, "TODO_c000_2a4f", 0),
+ MVI(0xc0002a50, "TODO_c000_2a50", 0),
+ MVI(0xc0002a51, "TODO_c000_2a51", 0),
+ MVI(0xc0002a52, "TODO_c000_2a52", 0),
+ MVI(0xc0002a53, "TODO_c000_2a53", 0),
+ MVI(0xc0002a54, "TODO_c000_2a54", 0),
+ MVI(0xc0002a55, "TODO_c000_2a55", 0),
+ MVI(0xc0002a56, "TODO_c000_2a56", 0),
+ MVI(0xc0002a57, "TODO_c000_2a57", 0),
+ MVI(0xc0002a58, "TODO_c000_2a58", 0),
+ MVI(0xc0002a59, "TODO_c000_2a59", 0),
+ MVI(0xc0002a5a, "TODO_c000_2a5a", 0),
+ MVI(0xc0002a5b, "TODO_c000_2a5b", 0),
+ MVI(0xc0002a5c, "TODO_c000_2a5c", 0),
+ MVI(0xc0002a5d, "TODO_c000_2a5d", 0),
+ MVI(0xc0002a5e, "TODO_c000_2a5e", 0),
+ MVI(0xc0002a5f, "TODO_c000_2a5f", 0),
+ MVI(0xc0002a60, "TODO_c000_2a60", 0),
+ MVI(0xc0002a61, "TODO_c000_2a61", 0),
+ MVI(0xc0002a62, "TODO_c000_2a62", 0),
+ MVI(0xc0002a63, "TODO_c000_2a63", 0),
+ MVI(0xc0002a64, "TODO_c000_2a64", 0),
+ MVI(0xc0002a65, "TODO_c000_2a65", 0),
+ MVI(0xc0002a66, "TODO_c000_2a66", 0),
+ MVI(0xc0002a67, "TODO_c000_2a67", 0),
+ MVI(0xc0002a68, "TODO_c000_2a68", 0),
+ MVI(0xc0002a69, "TODO_c000_2a69", 0),
+ MVI(0xc0002a6a, "TODO_c000_2a6a", 0),
+ MVI(0xc0002a6b, "TODO_c000_2a6b", 0),
+ MVI(0xc0002a6c, "TODO_c000_2a6c", 0),
+ MVI(0xc0002a6d, "TODO_c000_2a6d", 0),
+ MVI(0xc0002a6e, "TODO_c000_2a6e", 0),
+ MVI(0xc0002a6f, "TODO_c000_2a6f", 0),
+ MVI(0xc0002a70, "TODO_c000_2a70", 0),
+ MVI(0xc0002a71, "TODO_c000_2a71", 0),
+ MVI(0xc0002a72, "TODO_c000_2a72", 0),
+ MVI(0xc0002a73, "TODO_c000_2a73", 0),
+ MVI(0xc0002a74, "TODO_c000_2a74", 0),
+ MVI(0xc0002a75, "TODO_c000_2a75", 0),
+ MVI(0xc0002a76, "TODO_c000_2a76", 0),
+ MVI(0xc0002a77, "TODO_c000_2a77", 0),
+ MVI(0xc0002a78, "TODO_c000_2a78", 0),
+ MVI(0xc0002a79, "TODO_c000_2a79", 0),
+ MVI(0xc0002a7a, "TODO_c000_2a7a", 0),
+ MVI(0xc0002a7b, "TODO_c000_2a7b", 0),
+ MVI(0xc0002a7c, "TODO_c000_2a7c", 0),
+ MVI(0xc0002a7d, "TODO_c000_2a7d", 0),
+ MVI(0xc0002a7e, "TODO_c000_2a7e", 0),
+ MVI(0xc0002a7f, "TODO_c000_2a7f", 0),
+ MVI(0xc0002a80, "TODO_c000_2a80", 0),
+ MVI(0xc0002a81, "TODO_c000_2a81", 0),
+ MVI(0xc0002a82, "TODO_c000_2a82", 0),
+ MVI(0xc0002a83, "TODO_c000_2a83", 0),
+ MVI(0xc0002a84, "TODO_c000_2a84", 0),
+ MVI(0xc0002a85, "TODO_c000_2a85", 0),
+ MVI(0xc0002a86, "TODO_c000_2a86", 0),
+ MVI(0xc0002a87, "TODO_c000_2a87", 0),
+ MVI(0xc0002a88, "TODO_c000_2a88", 0),
+ MVI(0xc0002a89, "TODO_c000_2a89", 0),
+ MVI(0xc0002a8a, "TODO_c000_2a8a", 0),
+ MVI(0xc0002a8b, "TODO_c000_2a8b", 0),
+ MVI(0xc0002a8c, "TODO_c000_2a8c", 0),
+ MVI(0xc0002a8d, "TODO_c000_2a8d", 0),
+ MVI(0xc0002a8e, "TODO_c000_2a8e", 0),
+ MVI(0xc0002a8f, "TODO_c000_2a8f", 0),
+ MVI(0xc0002a90, "TODO_c000_2a90", 0),
+ MVI(0xc0002a91, "TODO_c000_2a91", 0),
+ MVI(0xc0002a92, "TODO_c000_2a92", 0),
+ MVI(0xc0002a93, "TODO_c000_2a93", 0),
+ MVI(0xc0002a94, "TODO_c000_2a94", 0),
+ MVI(0xc0002a95, "TODO_c000_2a95", 0),
+ MVI(0xc0002a96, "TODO_c000_2a96", 0),
+ MVI(0xc0002a97, "TODO_c000_2a97", 0),
+ MVI(0xc0002a98, "TODO_c000_2a98", 0),
+ MVI(0xc0002a99, "TODO_c000_2a99", 0),
+ MVI(0xc0002a9a, "TODO_c000_2a9a", 0),
+ MVI(0xc0002a9b, "TODO_c000_2a9b", 0),
+ MVI(0xc0002a9c, "TODO_c000_2a9c", 0),
+ MVI(0xc0002a9d, "TODO_c000_2a9d", 0),
+ MVI(0xc0002a9e, "TODO_c000_2a9e", 0),
+ MVI(0xc0002a9f, "TODO_c000_2a9f", 0),
+ MVI(0xc0002aa0, "TODO_c000_2aa0", 0),
+ MVI(0xc0002aa1, "TODO_c000_2aa1", 0),
+ MVI(0xc0002aa2, "TODO_c000_2aa2", 0),
+ MVI(0xc0002aa3, "TODO_c000_2aa3", 0),
+ MVI(0xc0002aa4, "TODO_c000_2aa4", 0),
+ MVI(0xc0002aa5, "TODO_c000_2aa5", 0),
+ MVI(0xc0002aa6, "TODO_c000_2aa6", 0),
+ MVI(0xc0002aa7, "TODO_c000_2aa7", 0),
+ MVI(0xc0002aa8, "TODO_c000_2aa8", 0),
+ MVI(0xc0002aa9, "TODO_c000_2aa9", 0),
+ MVI(0xc0002aaa, "TODO_c000_2aaa", 0),
+ MVI(0xc0002aab, "TODO_c000_2aab", 0),
+ MVI(0xc0002aac, "TODO_c000_2aac", 0),
+ MVI(0xc0002aad, "TODO_c000_2aad", 0),
+ MVI(0xc0002aae, "TODO_c000_2aae", 0),
+ MVI(0xc0002aaf, "TODO_c000_2aaf", 0),
+ MVI(0xc0002ab0, "TODO_c000_2ab0", 0),
+ MVI(0xc0002ab1, "TODO_c000_2ab1", 0),
+ MVI(0xc0002ab2, "TODO_c000_2ab2", 0),
+ MVI(0xc0002ab3, "TODO_c000_2ab3", 0),
+ MVI(0xc0002ab4, "TODO_c000_2ab4", 0),
+ MVI(0xc0002ab5, "TODO_c000_2ab5", 0),
+ MVI(0xc0002ab6, "TODO_c000_2ab6", 0),
+ MVI(0xc0002ab7, "TODO_c000_2ab7", 0),
+ MVI(0xc0002ab8, "TODO_c000_2ab8", 0),
+ MVI(0xc0002ab9, "TODO_c000_2ab9", 0),
+ MVI(0xc0002aba, "TODO_c000_2aba", 0),
+ MVI(0xc0002abb, "TODO_c000_2abb", 0),
+ MVI(0xc0002abc, "TODO_c000_2abc", 0),
+ MVI(0xc0002abd, "TODO_c000_2abd", 0),
+ MVI(0xc0002abe, "TODO_c000_2abe", 0),
+ MVI(0xc0002abf, "TODO_c000_2abf", 0),
+ MVI(0xc0002ac0, "TODO_c000_2ac0", 0),
+ MVI(0xc0002ac1, "TODO_c000_2ac1", 0),
+ MVI(0xc0002ac2, "TODO_c000_2ac2", 0),
+ MVI(0xc0002ac3, "TODO_c000_2ac3", 0),
+ MVI(0xc0002ac4, "TODO_c000_2ac4", 0),
+ MVI(0xc0002ac5, "TODO_c000_2ac5", 0),
+ MVI(0xc0002ac6, "TODO_c000_2ac6", 0),
+ MVI(0xc0002ac7, "TODO_c000_2ac7", 0),
+ MVI(0xc0002ac8, "TODO_c000_2ac8", 0),
+ MVI(0xc0002ac9, "TODO_c000_2ac9", 0),
+ MVI(0xc0002aca, "TODO_c000_2aca", 0),
+ MVI(0xc0002acb, "TODO_c000_2acb", 0),
+ MVI(0xc0002acc, "TODO_c000_2acc", 0),
+ MVI(0xc0002acd, "TODO_c000_2acd", 0),
+ MVI(0xc0002ace, "TODO_c000_2ace", 0),
+ MVI(0xc0002acf, "TODO_c000_2acf", 0),
+ MVI(0xc0002ad0, "TODO_c000_2ad0", 0),
+ MVI(0xc0002ad1, "TODO_c000_2ad1", 0),
+ MVI(0xc0002ad2, "TODO_c000_2ad2", 0),
+ MVI(0xc0002ad3, "TODO_c000_2ad3", 0),
+ MVI(0xc0002ad4, "TODO_c000_2ad4", 0),
+ MVI(0xc0002ad5, "TODO_c000_2ad5", 0),
+ MVI(0xc0002ad6, "TODO_c000_2ad6", 0),
+ MVI(0xc0002ad7, "TODO_c000_2ad7", 0),
+ MVI(0xc0002ad8, "TODO_c000_2ad8", 0),
+ MVI(0xc0002ad9, "TODO_c000_2ad9", 0),
+ MVI(0xc0002ada, "TODO_c000_2ada", 0),
+ MVI(0xc0002adb, "TODO_c000_2adb", 0),
+ MVI(0xc0002adc, "TODO_c000_2adc", 0),
+ MVI(0xc0002add, "TODO_c000_2add", 0),
+ MVI(0xc0002ade, "TODO_c000_2ade", 0),
+ MVI(0xc0002adf, "TODO_c000_2adf", 0),
+ MVI(0xc0002ae0, "TODO_c000_2ae0", 0),
+ MVI(0xc0002ae1, "TODO_c000_2ae1", 0),
+ MVI(0xc0002ae2, "TODO_c000_2ae2", 0),
+ MVI(0xc0002ae3, "TODO_c000_2ae3", 0),
+ MVI(0xc0002ae4, "TODO_c000_2ae4", 0),
+ MVI(0xc0002ae5, "TODO_c000_2ae5", 0),
+ MVI(0xc0002ae6, "TODO_c000_2ae6", 0),
+ MVI(0xc0002ae7, "TODO_c000_2ae7", 0),
+ MVI(0xc0002ae8, "TODO_c000_2ae8", 0),
+ MVI(0xc0002ae9, "TODO_c000_2ae9", 0),
+ MVI(0xc0002aea, "TODO_c000_2aea", 0),
+ MVI(0xc0002aeb, "TODO_c000_2aeb", 0),
+ MVI(0xc0002aec, "TODO_c000_2aec", 0),
+ MVI(0xc0002aed, "TODO_c000_2aed", 0),
+ MVI(0xc0002aee, "TODO_c000_2aee", 0),
+ MVI(0xc0002aef, "TODO_c000_2aef", 0),
+ MVI(0xc0002af0, "TODO_c000_2af0", 0),
+ MVI(0xc0002af1, "TODO_c000_2af1", 0),
+ MVI(0xc0002af2, "TODO_c000_2af2", 0),
+ MVI(0xc0002af3, "TODO_c000_2af3", 0),
+ MVI(0xc0002af4, "TODO_c000_2af4", 0),
+ MVI(0xc0002af5, "TODO_c000_2af5", 0),
+ MVI(0xc0002af6, "TODO_c000_2af6", 0),
+ MVI(0xc0002af7, "TODO_c000_2af7", 0),
+ MVI(0xc0002af8, "TODO_c000_2af8", 0),
+ MVI(0xc0002af9, "TODO_c000_2af9", 0),
+ MVI(0xc0002afa, "TODO_c000_2afa", 0),
+ MVI(0xc0002afb, "TODO_c000_2afb", 0),
+ MVI(0xc0002afc, "TODO_c000_2afc", 0),
+ MVI(0xc0002afd, "TODO_c000_2afd", 0),
+ MVI(0xc0002afe, "TODO_c000_2afe", 0),
+ MVI(0xc0002aff, "TODO_c000_2aff", 0),
+ MVI(0xc0002b00, "TODO_c000_2b00", 0),
+ MVI(0xc0002b01, "TODO_c000_2b01", 0),
+ MVI(0xc0002b02, "TODO_c000_2b02", 0),
+ MVI(0xc0002b03, "TODO_c000_2b03", 0),
+ MVI(0xc0002b04, "TODO_c000_2b04", 0),
+ MVI(0xc0002b05, "TODO_c000_2b05", 0),
+ MVI(0xc0002b06, "TODO_c000_2b06", 0),
+ MVI(0xc0002b07, "TODO_c000_2b07", 0),
+ MVI(0xc0002b08, "TODO_c000_2b08", 0),
+ MVI(0xc0002b09, "TODO_c000_2b09", 0),
+ MVI(0xc0002b0a, "TODO_c000_2b0a", 0),
+ MVI(0xc0002b0b, "TODO_c000_2b0b", 0),
+ MVI(0xc0002b0c, "TODO_c000_2b0c", 0),
+ MVI(0xc0002b0d, "TODO_c000_2b0d", 0),
+ MVI(0xc0002b0e, "TODO_c000_2b0e", 0),
+ MVI(0xc0002b0f, "TODO_c000_2b0f", 0),
+ MVI(0xc0002b10, "TODO_c000_2b10", 0),
+ MVI(0xc0002b11, "TODO_c000_2b11", 0),
+ MVI(0xc0002b12, "TODO_c000_2b12", 0),
+ MVI(0xc0002b13, "TODO_c000_2b13", 0),
+ MVI(0xc0002b14, "TODO_c000_2b14", 0),
+ MVI(0xc0002b15, "TODO_c000_2b15", 0),
+ MVI(0xc0002b16, "TODO_c000_2b16", 0),
+ MVI(0xc0002b17, "TODO_c000_2b17", 0),
+ MVI(0xc0002b18, "TODO_c000_2b18", 0),
+ MVI(0xc0002b19, "TODO_c000_2b19", 0),
+ MVI(0xc0002b1a, "TODO_c000_2b1a", 0),
+ MVI(0xc0002b1b, "TODO_c000_2b1b", 0),
+ MVI(0xc0002b1c, "TODO_c000_2b1c", 0),
+ MVI(0xc0002b1d, "TODO_c000_2b1d", 0),
+ MVI(0xc0002b1e, "TODO_c000_2b1e", 0),
+ MVI(0xc0002b1f, "TODO_c000_2b1f", 0),
+ MVI(0xc0002b20, "TODO_c000_2b20", 0),
+ MVI(0xc0002b21, "TODO_c000_2b21", 0),
+ MVI(0xc0002b22, "TODO_c000_2b22", 0),
+ MVI(0xc0002b23, "TODO_c000_2b23", 0),
+ MVI(0xc0002b24, "TODO_c000_2b24", 0),
+ MVI(0xc0002b25, "TODO_c000_2b25", 0),
+ MVI(0xc0002b26, "TODO_c000_2b26", 0),
+ MVI(0xc0002b27, "TODO_c000_2b27", 0),
+ MVI(0xc0002b28, "TODO_c000_2b28", 0),
+ MVI(0xc0002b29, "TODO_c000_2b29", 0),
+ MVI(0xc0002b2a, "TODO_c000_2b2a", 0),
+ MVI(0xc0002b2b, "TODO_c000_2b2b", 0),
+ MVI(0xc0002b2c, "TODO_c000_2b2c", 0),
+ MVI(0xc0002b2d, "TODO_c000_2b2d", 0),
+ MVI(0xc0002b2e, "TODO_c000_2b2e", 0),
+ MVI(0xc0002b2f, "TODO_c000_2b2f", 0),
+ MVI(0xc0002b30, "TODO_c000_2b30", 0),
+ MVI(0xc0002b31, "TODO_c000_2b31", 0),
+ MVI(0xc0002b32, "TODO_c000_2b32", 0),
+ MVI(0xc0002b33, "TODO_c000_2b33", 0),
+ MVI(0xc0002b34, "TODO_c000_2b34", 0),
+ MVI(0xc0002b35, "TODO_c000_2b35", 0),
+ MVI(0xc0002b36, "TODO_c000_2b36", 0),
+ MVI(0xc0002b37, "TODO_c000_2b37", 0),
+ MVI(0xc0002b38, "TODO_c000_2b38", 0),
+ MVI(0xc0002b39, "TODO_c000_2b39", 0),
+ MVI(0xc0002b3a, "TODO_c000_2b3a", 0),
+ MVI(0xc0002b3b, "TODO_c000_2b3b", 0),
+ MVI(0xc0002b3c, "TODO_c000_2b3c", 0),
+ MVI(0xc0002b3d, "TODO_c000_2b3d", 0),
+ MVI(0xc0002b3e, "TODO_c000_2b3e", 0),
+ MVI(0xc0002b3f, "TODO_c000_2b3f", 0),
+ MVI(0xc0002b40, "TODO_c000_2b40", 0),
+ MVI(0xc0002b41, "TODO_c000_2b41", 0),
+ MVI(0xc0002b42, "TODO_c000_2b42", 0),
+ MVI(0xc0002b43, "TODO_c000_2b43", 0),
+ MVI(0xc0002b44, "TODO_c000_2b44", 0),
+ MVI(0xc0002b45, "TODO_c000_2b45", 0),
+ MVI(0xc0002b46, "TODO_c000_2b46", 0),
+ MVI(0xc0002b47, "TODO_c000_2b47", 0),
+ MVI(0xc0002b48, "TODO_c000_2b48", 0),
+ MVI(0xc0002b49, "TODO_c000_2b49", 0),
+ MVI(0xc0002b4a, "TODO_c000_2b4a", 0),
+ MVI(0xc0002b4b, "TODO_c000_2b4b", 0),
+ MVI(0xc0002b4c, "TODO_c000_2b4c", 0),
+ MVI(0xc0002b4d, "TODO_c000_2b4d", 0),
+ MVI(0xc0002b4e, "TODO_c000_2b4e", 0),
+ MVI(0xc0002b4f, "TODO_c000_2b4f", 0),
+ MVI(0xc0002b50, "TODO_c000_2b50", 0),
+ MVI(0xc0002b51, "TODO_c000_2b51", 0),
+ MVI(0xc0002b52, "TODO_c000_2b52", 0),
+ MVI(0xc0002b53, "TODO_c000_2b53", 0),
+ MVI(0xc0002b54, "TODO_c000_2b54", 0),
+ MVI(0xc0002b55, "TODO_c000_2b55", 0),
+ MVI(0xc0002b56, "TODO_c000_2b56", 0),
+ MVI(0xc0002b57, "TODO_c000_2b57", 0),
+ MVI(0xc0002b58, "TODO_c000_2b58", 0),
+ MVI(0xc0002b59, "TODO_c000_2b59", 0),
+ MVI(0xc0002b5a, "TODO_c000_2b5a", 0),
+ MVI(0xc0002b5b, "TODO_c000_2b5b", 0),
+ MVI(0xc0002b5c, "TODO_c000_2b5c", 0),
+ MVI(0xc0002b5d, "TODO_c000_2b5d", 0),
+ MVI(0xc0002b5e, "TODO_c000_2b5e", 0),
+ MVI(0xc0002b5f, "TODO_c000_2b5f", 0),
+ MVI(0xc0002b60, "TODO_c000_2b60", 0),
+ MVI(0xc0002b61, "TODO_c000_2b61", 0),
+ MVI(0xc0002b62, "TODO_c000_2b62", 0),
+ MVI(0xc0002b63, "TODO_c000_2b63", 0),
+ MVI(0xc0002b64, "TODO_c000_2b64", 0),
+ MVI(0xc0002b65, "TODO_c000_2b65", 0),
+ MVI(0xc0002b66, "TODO_c000_2b66", 0),
+ MVI(0xc0002b67, "TODO_c000_2b67", 0),
+ MVI(0xc0002b68, "TODO_c000_2b68", 0),
+ MVI(0xc0002b69, "TODO_c000_2b69", 0),
+ MVI(0xc0002b6a, "TODO_c000_2b6a", 0),
+ MVI(0xc0002b6b, "TODO_c000_2b6b", 0),
+ MVI(0xc0002b6c, "TODO_c000_2b6c", 0),
+ MVI(0xc0002b6d, "TODO_c000_2b6d", 0),
+ MVI(0xc0002b6e, "TODO_c000_2b6e", 0),
+ MVI(0xc0002b6f, "TODO_c000_2b6f", 0),
+ MVI(0xc0002b70, "TODO_c000_2b70", 0),
+ MVI(0xc0002b71, "TODO_c000_2b71", 0),
+ MVI(0xc0002b72, "TODO_c000_2b72", 0),
+ MVI(0xc0002b73, "TODO_c000_2b73", 0),
+ MVI(0xc0002b74, "TODO_c000_2b74", 0),
+ MVI(0xc0002b75, "TODO_c000_2b75", 0),
+ MVI(0xc0002b76, "TODO_c000_2b76", 0),
+ MVI(0xc0002b77, "TODO_c000_2b77", 0),
+ MVI(0xc0002b78, "TODO_c000_2b78", 0),
+ MVI(0xc0002b79, "TODO_c000_2b79", 0),
+ MVI(0xc0002b7a, "TODO_c000_2b7a", 0),
+ MVI(0xc0002b7b, "TODO_c000_2b7b", 0),
+ MVI(0xc0002b7c, "TODO_c000_2b7c", 0),
+ MVI(0xc0002b7d, "TODO_c000_2b7d", 0),
+ MVI(0xc0002b7e, "TODO_c000_2b7e", 0),
+ MVI(0xc0002b7f, "TODO_c000_2b7f", 0),
+ MVI(0xc0002b80, "TODO_c000_2b80", 0),
+ MVI(0xc0002b81, "TODO_c000_2b81", 0),
+ MVI(0xc0002b82, "TODO_c000_2b82", 0),
+ MVI(0xc0002b83, "TODO_c000_2b83", 0),
+ MVI(0xc0002b84, "TODO_c000_2b84", 0),
+ MVI(0xc0002b85, "TODO_c000_2b85", 0),
+ MVI(0xc0002b86, "TODO_c000_2b86", 0),
+ MVI(0xc0002b87, "TODO_c000_2b87", 0),
+ MVI(0xc0002b88, "TODO_c000_2b88", 0),
+ MVI(0xc0002b89, "TODO_c000_2b89", 0),
+ MVI(0xc0002b8a, "TODO_c000_2b8a", 0),
+ MVI(0xc0002b8b, "TODO_c000_2b8b", 0),
+ MVI(0xc0002b8c, "TODO_c000_2b8c", 0),
+ MVI(0xc0002b8d, "TODO_c000_2b8d", 0),
+ MVI(0xc0002b8e, "TODO_c000_2b8e", 0),
+ MVI(0xc0002b8f, "TODO_c000_2b8f", 0),
+ MVI(0xc0002b90, "TODO_c000_2b90", 0),
+ MVI(0xc0002b91, "TODO_c000_2b91", 0),
+ MVI(0xc0002b92, "TODO_c000_2b92", 0),
+ MVI(0xc0002b93, "TODO_c000_2b93", 0),
+ MVI(0xc0002b94, "TODO_c000_2b94", 0),
+ MVI(0xc0002b95, "TODO_c000_2b95", 0),
+ MVI(0xc0002b96, "TODO_c000_2b96", 0),
+ MVI(0xc0002b97, "TODO_c000_2b97", 0),
+ MVI(0xc0002b98, "TODO_c000_2b98", 0),
+ MVI(0xc0002b99, "TODO_c000_2b99", 0),
+ MVI(0xc0002b9a, "TODO_c000_2b9a", 0),
+ MVI(0xc0002b9b, "TODO_c000_2b9b", 0),
+ MVI(0xc0002b9c, "TODO_c000_2b9c", 0),
+ MVI(0xc0002b9d, "TODO_c000_2b9d", 0),
+ MVI(0xc0002b9e, "TODO_c000_2b9e", 0),
+ MVI(0xc0002b9f, "TODO_c000_2b9f", 0),
+ MVI(0xc0002ba0, "TODO_c000_2ba0", 0),
+ MVI(0xc0002ba1, "TODO_c000_2ba1", 0),
+ MVI(0xc0002ba2, "TODO_c000_2ba2", 0),
+ MVI(0xc0002ba3, "TODO_c000_2ba3", 0),
+ MVI(0xc0002ba4, "TODO_c000_2ba4", 0),
+ MVI(0xc0002ba5, "TODO_c000_2ba5", 0),
+ MVI(0xc0002ba6, "TODO_c000_2ba6", 0),
+ MVI(0xc0002ba7, "TODO_c000_2ba7", 0),
+ MVI(0xc0002ba8, "TODO_c000_2ba8", 0),
+ MVI(0xc0002ba9, "TODO_c000_2ba9", 0),
+ MVI(0xc0002baa, "TODO_c000_2baa", 0),
+ MVI(0xc0002bab, "TODO_c000_2bab", 0),
+ MVI(0xc0002bac, "TODO_c000_2bac", 0),
+ MVI(0xc0002bad, "TODO_c000_2bad", 0),
+ MVI(0xc0002bae, "TODO_c000_2bae", 0),
+ MVI(0xc0002baf, "TODO_c000_2baf", 0),
+ MVI(0xc0002bb0, "TODO_c000_2bb0", 0),
+ MVI(0xc0002bb1, "TODO_c000_2bb1", 0),
+ MVI(0xc0002bb2, "TODO_c000_2bb2", 0),
+ MVI(0xc0002bb3, "TODO_c000_2bb3", 0),
+ MVI(0xc0002bb4, "TODO_c000_2bb4", 0),
+ MVI(0xc0002bb5, "TODO_c000_2bb5", 0),
+ MVI(0xc0002bb6, "TODO_c000_2bb6", 0),
+ MVI(0xc0002bb7, "TODO_c000_2bb7", 0),
+ MVI(0xc0002bb8, "TODO_c000_2bb8", 0),
+ MVI(0xc0002bb9, "TODO_c000_2bb9", 0),
+ MVI(0xc0002bba, "TODO_c000_2bba", 0),
+ MVI(0xc0002bbb, "TODO_c000_2bbb", 0),
+ MVI(0xc0002bbc, "TODO_c000_2bbc", 0),
+ MVI(0xc0002bbd, "TODO_c000_2bbd", 0),
+ MVI(0xc0002bbe, "TODO_c000_2bbe", 0),
+ MVI(0xc0002bbf, "TODO_c000_2bbf", 0),
+ MVI(0xc0002bc0, "TODO_c000_2bc0", 0),
+ MVI(0xc0002bc1, "TODO_c000_2bc1", 0),
+ MVI(0xc0002bc2, "TODO_c000_2bc2", 0),
+ MVI(0xc0002bc3, "TODO_c000_2bc3", 0),
+ MVI(0xc0002bc4, "TODO_c000_2bc4", 0),
+ MVI(0xc0002bc5, "TODO_c000_2bc5", 0),
+ MVI(0xc0002bc6, "TODO_c000_2bc6", 0),
+ MVI(0xc0002bc7, "TODO_c000_2bc7", 0),
+ MVI(0xc0002bc8, "TODO_c000_2bc8", 0),
+ MVI(0xc0002bc9, "TODO_c000_2bc9", 0),
+ MVI(0xc0002bca, "TODO_c000_2bca", 0),
+ MVI(0xc0002bcb, "TODO_c000_2bcb", 0),
+ MVI(0xc0002bcc, "TODO_c000_2bcc", 0),
+ MVI(0xc0002bcd, "TODO_c000_2bcd", 0),
+ MVI(0xc0002bce, "TODO_c000_2bce", 0),
+ MVI(0xc0002bcf, "TODO_c000_2bcf", 0),
+ MVI(0xc0002bd0, "TODO_c000_2bd0", 0),
+ MVI(0xc0002bd1, "TODO_c000_2bd1", 0),
+ MVI(0xc0002bd2, "TODO_c000_2bd2", 0),
+ MVI(0xc0002bd3, "TODO_c000_2bd3", 0),
+ MVI(0xc0002bd4, "TODO_c000_2bd4", 0),
+ MVI(0xc0002bd5, "TODO_c000_2bd5", 0),
+ MVI(0xc0002bd6, "TODO_c000_2bd6", 0),
+ MVI(0xc0002bd7, "TODO_c000_2bd7", 0),
+ MVI(0xc0002bd8, "TODO_c000_2bd8", 0),
+ MVI(0xc0002bd9, "TODO_c000_2bd9", 0),
+ MVI(0xc0002bda, "TODO_c000_2bda", 0),
+ MVI(0xc0002bdb, "TODO_c000_2bdb", 0),
+ MVI(0xc0002bdc, "TODO_c000_2bdc", 0),
+ MVI(0xc0002bdd, "TODO_c000_2bdd", 0),
+ MVI(0xc0002bde, "TODO_c000_2bde", 0),
+ MVI(0xc0002bdf, "TODO_c000_2bdf", 0),
+ MVI(0xc0002be0, "TODO_c000_2be0", 0),
+ MVI(0xc0002be1, "TODO_c000_2be1", 0),
+ MVI(0xc0002be2, "TODO_c000_2be2", 0),
+ MVI(0xc0002be3, "TODO_c000_2be3", 0),
+ MVI(0xc0002be4, "TODO_c000_2be4", 0),
+ MVI(0xc0002be5, "TODO_c000_2be5", 0),
+ MVI(0xc0002be6, "TODO_c000_2be6", 0),
+ MVI(0xc0002be7, "TODO_c000_2be7", 0),
+ MVI(0xc0002be8, "TODO_c000_2be8", 0),
+ MVI(0xc0002be9, "TODO_c000_2be9", 0),
+ MVI(0xc0002bea, "TODO_c000_2bea", 0),
+ MVI(0xc0002beb, "TODO_c000_2beb", 0),
+ MVI(0xc0002bec, "TODO_c000_2bec", 0),
+ MVI(0xc0002bed, "TODO_c000_2bed", 0),
+ MVI(0xc0002bee, "TODO_c000_2bee", 0),
+ MVI(0xc0002bef, "TODO_c000_2bef", 0),
+ MVI(0xc0002bf0, "TODO_c000_2bf0", 0),
+ MVI(0xc0002bf1, "TODO_c000_2bf1", 0),
+ MVI(0xc0002bf2, "TODO_c000_2bf2", 0),
+ MVI(0xc0002bf3, "TODO_c000_2bf3", 0),
+ MVI(0xc0002bf4, "TODO_c000_2bf4", 0),
+ MVI(0xc0002bf5, "TODO_c000_2bf5", 0),
+ MVI(0xc0002bf6, "TODO_c000_2bf6", 0),
+ MVI(0xc0002bf7, "TODO_c000_2bf7", 0),
+ MVI(0xc0002bf8, "TODO_c000_2bf8", 0),
+ MVI(0xc0002bf9, "TODO_c000_2bf9", 0),
+ MVI(0xc0002bfa, "TODO_c000_2bfa", 0),
+ MVI(0xc0002bfb, "TODO_c000_2bfb", 0),
+ MVI(0xc0002bfc, "TODO_c000_2bfc", 0),
+ MVI(0xc0002bfd, "TODO_c000_2bfd", 0),
+ MVI(0xc0002bfe, "TODO_c000_2bfe", 0),
+ MVI(0xc0002bff, "TODO_c000_2bff", 0),
+ MVI(0xc0002c00, "TODO_c000_2c00", 0),
+ MVI(0xc0002c01, "TODO_c000_2c01", 0),
+ MVI(0xc0002c02, "TODO_c000_2c02", 0),
+ MVI(0xc0002c03, "TODO_c000_2c03", 0),
+ MVI(0xc0002c04, "TODO_c000_2c04", 0),
+ MVI(0xc0002c05, "TODO_c000_2c05", 0),
+ MVI(0xc0002c06, "TODO_c000_2c06", 0),
+ MVI(0xc0002c07, "TODO_c000_2c07", 0),
+ MVI(0xc0002c08, "TODO_c000_2c08", 0),
+ MVI(0xc0002c09, "TODO_c000_2c09", 0),
+ MVI(0xc0002c0a, "TODO_c000_2c0a", 0),
+ MVI(0xc0002c0b, "TODO_c000_2c0b", 0),
+ MVI(0xc0002c0c, "TODO_c000_2c0c", 0),
+ MVI(0xc0002c0d, "TODO_c000_2c0d", 0),
+ MVI(0xc0002c0e, "TODO_c000_2c0e", 0),
+ MVI(0xc0002c0f, "TODO_c000_2c0f", 0),
+ MVI(0xc0002c10, "TODO_c000_2c10", 0),
+ MVI(0xc0002c11, "TODO_c000_2c11", 0),
+ MVI(0xc0002c12, "TODO_c000_2c12", 0),
+ MVI(0xc0002c13, "TODO_c000_2c13", 0),
+ MVI(0xc0002c14, "TODO_c000_2c14", 0),
+ MVI(0xc0002c15, "TODO_c000_2c15", 0),
+ MVI(0xc0002c16, "TODO_c000_2c16", 0),
+ MVI(0xc0002c17, "TODO_c000_2c17", 0),
+ MVI(0xc0002c18, "TODO_c000_2c18", 0),
+ MVI(0xc0002c19, "TODO_c000_2c19", 0),
+ MVI(0xc0002c1a, "TODO_c000_2c1a", 0),
+ MVI(0xc0002c1b, "TODO_c000_2c1b", 0),
+ MVI(0xc0002c1c, "TODO_c000_2c1c", 0),
+ MVI(0xc0002c1d, "TODO_c000_2c1d", 0),
+ MVI(0xc0002c1e, "TODO_c000_2c1e", 0),
+ MVI(0xc0002c1f, "TODO_c000_2c1f", 0),
+ MVI(0xc0002c20, "TODO_c000_2c20", 0),
+ MVI(0xc0002c21, "TODO_c000_2c21", 0),
+ MVI(0xc0002c22, "TODO_c000_2c22", 0),
+ MVI(0xc0002c23, "TODO_c000_2c23", 0),
+ MVI(0xc0002c24, "TODO_c000_2c24", 0),
+ MVI(0xc0002c25, "TODO_c000_2c25", 0),
+ MVI(0xc0002c26, "TODO_c000_2c26", 0),
+ MVI(0xc0002c27, "TODO_c000_2c27", 0),
+ MVI(0xc0002c28, "TODO_c000_2c28", 0),
+ MVI(0xc0002c29, "TODO_c000_2c29", 0),
+ MVI(0xc0002c2a, "TODO_c000_2c2a", 0),
+ MVI(0xc0002c2b, "TODO_c000_2c2b", 0),
+ MVI(0xc0002c2c, "TODO_c000_2c2c", 0),
+ MVI(0xc0002c2d, "TODO_c000_2c2d", 0),
+ MVI(0xc0002c2e, "TODO_c000_2c2e", 0),
+ MVI(0xc0002c2f, "TODO_c000_2c2f", 0),
+ MVI(0xc0002c30, "TODO_c000_2c30", 0),
+ MVI(0xc0002c31, "TODO_c000_2c31", 0),
+ MVI(0xc0002c32, "TODO_c000_2c32", 0),
+ MVI(0xc0002c33, "TODO_c000_2c33", 0),
+ MVI(0xc0002c34, "TODO_c000_2c34", 0),
+ MVI(0xc0002c35, "TODO_c000_2c35", 0),
+ MVI(0xc0002c36, "TODO_c000_2c36", 0),
+ MVI(0xc0002c37, "TODO_c000_2c37", 0),
+ MVI(0xc0002c38, "TODO_c000_2c38", 0),
+ MVI(0xc0002c39, "TODO_c000_2c39", 0),
+ MVI(0xc0002c3a, "TODO_c000_2c3a", 0),
+ MVI(0xc0002c3b, "TODO_c000_2c3b", 0),
+ MVI(0xc0002c3c, "TODO_c000_2c3c", 0),
+ MVI(0xc0002c3d, "TODO_c000_2c3d", 0),
+ MVI(0xc0002c3e, "TODO_c000_2c3e", 0),
+ MVI(0xc0002c3f, "TODO_c000_2c3f", 0),
+ MVI(0xc0002c40, "TODO_c000_2c40", 0),
+ MVI(0xc0002c41, "TODO_c000_2c41", 0),
+ MVI(0xc0002c42, "TODO_c000_2c42", 0),
+ MVI(0xc0002c43, "TODO_c000_2c43", 0),
+ MVI(0xc0002c44, "TODO_c000_2c44", 0),
+ MVI(0xc0002c45, "TODO_c000_2c45", 0),
+ MVI(0xc0002c46, "TODO_c000_2c46", 0),
+ MVI(0xc0002c47, "TODO_c000_2c47", 0),
+ MVI(0xc0002c48, "TODO_c000_2c48", 0),
+ MVI(0xc0002c49, "TODO_c000_2c49", 0),
+ MVI(0xc0002c4a, "TODO_c000_2c4a", 0),
+ MVI(0xc0002c4b, "TODO_c000_2c4b", 0),
+ MVI(0xc0002c4c, "TODO_c000_2c4c", 0),
+ MVI(0xc0002c4d, "TODO_c000_2c4d", 0),
+ MVI(0xc0002c4e, "TODO_c000_2c4e", 0),
+ MVI(0xc0002c4f, "TODO_c000_2c4f", 0),
+ MVI(0xc0002c50, "TODO_c000_2c50", 0),
+ MVI(0xc0002c51, "TODO_c000_2c51", 0),
+ MVI(0xc0002c52, "TODO_c000_2c52", 0),
+ MVI(0xc0002c53, "TODO_c000_2c53", 0),
+ MVI(0xc0002c54, "TODO_c000_2c54", 0),
+ MVI(0xc0002c55, "TODO_c000_2c55", 0),
+ MVI(0xc0002c56, "TODO_c000_2c56", 0),
+ MVI(0xc0002c57, "TODO_c000_2c57", 0),
+ MVI(0xc0002c58, "TODO_c000_2c58", 0),
+ MVI(0xc0002c59, "TODO_c000_2c59", 0),
+ MVI(0xc0002c5a, "TODO_c000_2c5a", 0),
+ MVI(0xc0002c5b, "TODO_c000_2c5b", 0),
+ MVI(0xc0002c5c, "TODO_c000_2c5c", 0),
+ MVI(0xc0002c5d, "TODO_c000_2c5d", 0),
+ MVI(0xc0002c5e, "TODO_c000_2c5e", 0),
+ MVI(0xc0002c5f, "TODO_c000_2c5f", 0),
+ MVI(0xc0002c60, "TODO_c000_2c60", 0),
+ MVI(0xc0002c61, "TODO_c000_2c61", 0),
+ MVI(0xc0002c62, "TODO_c000_2c62", 0),
+ MVI(0xc0002c63, "TODO_c000_2c63", 0),
+ MVI(0xc0002c64, "TODO_c000_2c64", 0),
+ MVI(0xc0002c65, "TODO_c000_2c65", 0),
+ MVI(0xc0002c66, "TODO_c000_2c66", 0),
+ MVI(0xc0002c67, "TODO_c000_2c67", 0),
+ MVI(0xc0002c68, "TODO_c000_2c68", 0),
+ MVI(0xc0002c69, "TODO_c000_2c69", 0),
+ MVI(0xc0002c6a, "TODO_c000_2c6a", 0),
+ MVI(0xc0002c6b, "TODO_c000_2c6b", 0),
+ MVI(0xc0002c6c, "TODO_c000_2c6c", 0),
+ MVI(0xc0002c6d, "TODO_c000_2c6d", 0),
+ MVI(0xc0002c6e, "TODO_c000_2c6e", 0),
+ MVI(0xc0002c6f, "TODO_c000_2c6f", 0),
+ MVI(0xc0002c70, "TODO_c000_2c70", 0),
+ MVI(0xc0002c71, "TODO_c000_2c71", 0),
+ MVI(0xc0002c72, "TODO_c000_2c72", 0),
+ MVI(0xc0002c73, "TODO_c000_2c73", 0),
+ MVI(0xc0002c74, "TODO_c000_2c74", 0),
+ MVI(0xc0002c75, "TODO_c000_2c75", 0),
+ MVI(0xc0002c76, "TODO_c000_2c76", 0),
+ MVI(0xc0002c77, "TODO_c000_2c77", 0),
+ MVI(0xc0002c78, "TODO_c000_2c78", 0),
+ MVI(0xc0002c79, "TODO_c000_2c79", 0),
+ MVI(0xc0002c7a, "TODO_c000_2c7a", 0),
+ MVI(0xc0002c7b, "TODO_c000_2c7b", 0),
+ MVI(0xc0002c7c, "TODO_c000_2c7c", 0),
+ MVI(0xc0002c7d, "TODO_c000_2c7d", 0),
+ MVI(0xc0002c7e, "TODO_c000_2c7e", 0),
+ MVI(0xc0002c7f, "TODO_c000_2c7f", 0),
+ MVI(0xc0002c80, "TODO_c000_2c80", 0),
+ MVI(0xc0002c81, "TODO_c000_2c81", 0),
+ MVI(0xc0002c82, "TODO_c000_2c82", 0),
+ MVI(0xc0002c83, "TODO_c000_2c83", 0),
+ MVI(0xc0002c84, "TODO_c000_2c84", 0),
+ MVI(0xc0002c85, "TODO_c000_2c85", 0),
+ MVI(0xc0002c86, "TODO_c000_2c86", 0),
+ MVI(0xc0002c87, "TODO_c000_2c87", 0),
+ MVI(0xc0002c88, "TODO_c000_2c88", 0),
+ MVI(0xc0002c89, "TODO_c000_2c89", 0),
+ MVI(0xc0002c8a, "TODO_c000_2c8a", 0),
+ MVI(0xc0002c8b, "TODO_c000_2c8b", 0),
+ MVI(0xc0002c8c, "TODO_c000_2c8c", 0),
+ MVI(0xc0002c8d, "TODO_c000_2c8d", 0),
+ MVI(0xc0002c8e, "TODO_c000_2c8e", 0),
+ MVI(0xc0002c8f, "TODO_c000_2c8f", 0),
+ MVI(0xc0002c90, "TODO_c000_2c90", 0),
+ MVI(0xc0002c91, "TODO_c000_2c91", 0),
+ MVI(0xc0002c92, "TODO_c000_2c92", 0),
+ MVI(0xc0002c93, "TODO_c000_2c93", 0),
+ MVI(0xc0002c94, "TODO_c000_2c94", 0),
+ MVI(0xc0002c95, "TODO_c000_2c95", 0),
+ MVI(0xc0002c96, "TODO_c000_2c96", 0),
+ MVI(0xc0002c97, "TODO_c000_2c97", 0),
+ MVI(0xc0002c98, "TODO_c000_2c98", 0),
+ MVI(0xc0002c99, "TODO_c000_2c99", 0),
+ MVI(0xc0002c9a, "TODO_c000_2c9a", 0),
+ MVI(0xc0002c9b, "TODO_c000_2c9b", 0),
+ MVI(0xc0002c9c, "TODO_c000_2c9c", 0),
+ MVI(0xc0002c9d, "TODO_c000_2c9d", 0),
+ MVI(0xc0002c9e, "TODO_c000_2c9e", 0),
+ MVI(0xc0002c9f, "TODO_c000_2c9f", 0),
+ MVI(0xc0002ca0, "TODO_c000_2ca0", 0),
+ MVI(0xc0002ca1, "TODO_c000_2ca1", 0),
+ MVI(0xc0002ca2, "TODO_c000_2ca2", 0),
+ MVI(0xc0002ca3, "TODO_c000_2ca3", 0),
+ MVI(0xc0002ca4, "TODO_c000_2ca4", 0),
+ MVI(0xc0002ca5, "TODO_c000_2ca5", 0),
+ MVI(0xc0002ca6, "TODO_c000_2ca6", 0),
+ MVI(0xc0002ca7, "TODO_c000_2ca7", 0),
+ MVI(0xc0002ca8, "TODO_c000_2ca8", 0),
+ MVI(0xc0002ca9, "TODO_c000_2ca9", 0),
+ MVI(0xc0002caa, "TODO_c000_2caa", 0),
+ MVI(0xc0002cab, "TODO_c000_2cab", 0),
+ MVI(0xc0002cac, "TODO_c000_2cac", 0),
+ MVI(0xc0002cad, "TODO_c000_2cad", 0),
+ MVI(0xc0002cae, "TODO_c000_2cae", 0),
+ MVI(0xc0002caf, "TODO_c000_2caf", 0),
+ MVI(0xc0002cb0, "TODO_c000_2cb0", 0),
+ MVI(0xc0002cb1, "TODO_c000_2cb1", 0),
+ MVI(0xc0002cb2, "TODO_c000_2cb2", 0),
+ MVI(0xc0002cb3, "TODO_c000_2cb3", 0),
+ MVI(0xc0002cb4, "TODO_c000_2cb4", 0),
+ MVI(0xc0002cb5, "TODO_c000_2cb5", 0),
+ MVI(0xc0002cb6, "TODO_c000_2cb6", 0),
+ MVI(0xc0002cb7, "TODO_c000_2cb7", 0),
+ MVI(0xc0002cb8, "TODO_c000_2cb8", 0),
+ MVI(0xc0002cb9, "TODO_c000_2cb9", 0),
+ MVI(0xc0002cba, "TODO_c000_2cba", 0),
+ MVI(0xc0002cbb, "TODO_c000_2cbb", 0),
+ MVI(0xc0002cbc, "TODO_c000_2cbc", 0),
+ MVI(0xc0002cbd, "TODO_c000_2cbd", 0),
+ MVI(0xc0002cbe, "TODO_c000_2cbe", 0),
+ MVI(0xc0002cbf, "TODO_c000_2cbf", 0),
+ MVI(0xc0002cc0, "TODO_c000_2cc0", 0),
+ MVI(0xc0002cc1, "TODO_c000_2cc1", 0),
+ MVI(0xc0002cc2, "TODO_c000_2cc2", 0),
+ MVI(0xc0002cc3, "TODO_c000_2cc3", 0),
+ MVI(0xc0002cc4, "TODO_c000_2cc4", 0),
+ MVI(0xc0002cc5, "TODO_c000_2cc5", 0),
+ MVI(0xc0002cc6, "TODO_c000_2cc6", 0),
+ MVI(0xc0002cc7, "TODO_c000_2cc7", 0),
+ MVI(0xc0002cc8, "TODO_c000_2cc8", 0),
+ MVI(0xc0002cc9, "TODO_c000_2cc9", 0),
+ MVI(0xc0002cca, "TODO_c000_2cca", 0),
+ MVI(0xc0002ccb, "TODO_c000_2ccb", 0),
+ MVI(0xc0002ccc, "TODO_c000_2ccc", 0),
+ MVI(0xc0002ccd, "TODO_c000_2ccd", 0),
+ MVI(0xc0002cce, "TODO_c000_2cce", 0),
+ MVI(0xc0002ccf, "TODO_c000_2ccf", 0),
+ MVI(0xc0002cd0, "TODO_c000_2cd0", 0),
+ MVI(0xc0002cd1, "TODO_c000_2cd1", 0),
+ MVI(0xc0002cd2, "TODO_c000_2cd2", 0),
+ MVI(0xc0002cd3, "TODO_c000_2cd3", 0),
+ MVI(0xc0002cd4, "TODO_c000_2cd4", 0),
+ MVI(0xc0002cd5, "TODO_c000_2cd5", 0),
+ MVI(0xc0002cd6, "TODO_c000_2cd6", 0),
+ MVI(0xc0002cd7, "TODO_c000_2cd7", 0),
+ MVI(0xc0002cd8, "TODO_c000_2cd8", 0),
+ MVI(0xc0002cd9, "TODO_c000_2cd9", 0),
+ MVI(0xc0002cda, "TODO_c000_2cda", 0),
+ MVI(0xc0002cdb, "TODO_c000_2cdb", 0),
+ MVI(0xc0002cdc, "TODO_c000_2cdc", 0),
+ MVI(0xc0002cdd, "TODO_c000_2cdd", 0),
+ MVI(0xc0002cde, "TODO_c000_2cde", 0),
+ MVI(0xc0002cdf, "TODO_c000_2cdf", 0),
+ MVI(0xc0002ce0, "TODO_c000_2ce0", 0),
+ MVI(0xc0002ce1, "TODO_c000_2ce1", 0),
+ MVI(0xc0002ce2, "TODO_c000_2ce2", 0),
+ MVI(0xc0002ce3, "TODO_c000_2ce3", 0),
+ MVI(0xc0002ce4, "TODO_c000_2ce4", 0),
+ MVI(0xc0002ce5, "TODO_c000_2ce5", 0),
+ MVI(0xc0002ce6, "TODO_c000_2ce6", 0),
+ MVI(0xc0002ce7, "TODO_c000_2ce7", 0),
+ MVI(0xc0002ce8, "TODO_c000_2ce8", 0),
+ MVI(0xc0002ce9, "TODO_c000_2ce9", 0),
+ MVI(0xc0002cea, "TODO_c000_2cea", 0),
+ MVI(0xc0002ceb, "TODO_c000_2ceb", 0),
+ MVI(0xc0002cec, "TODO_c000_2cec", 0),
+ MVI(0xc0002ced, "TODO_c000_2ced", 0),
+ MVI(0xc0002cee, "TODO_c000_2cee", 0),
+ MVI(0xc0002cef, "TODO_c000_2cef", 0),
+ MVI(0xc0002cf0, "TODO_c000_2cf0", 0),
+ MVI(0xc0002cf1, "TODO_c000_2cf1", 0),
+ MVI(0xc0002cf2, "TODO_c000_2cf2", 0),
+ MVI(0xc0002cf3, "TODO_c000_2cf3", 0),
+ MVI(0xc0002cf4, "TODO_c000_2cf4", 0),
+ MVI(0xc0002cf5, "TODO_c000_2cf5", 0),
+ MVI(0xc0002cf6, "TODO_c000_2cf6", 0),
+ MVI(0xc0002cf7, "TODO_c000_2cf7", 0),
+ MVI(0xc0002cf8, "TODO_c000_2cf8", 0),
+ MVI(0xc0002cf9, "TODO_c000_2cf9", 0),
+ MVI(0xc0002cfa, "TODO_c000_2cfa", 0),
+ MVI(0xc0002cfb, "TODO_c000_2cfb", 0),
+ MVI(0xc0002cfc, "TODO_c000_2cfc", 0),
+ MVI(0xc0002cfd, "TODO_c000_2cfd", 0),
+ MVI(0xc0002cfe, "TODO_c000_2cfe", 0),
+ MVI(0xc0002cff, "TODO_c000_2cff", 0),
+ MVI(0xc0002d00, "TODO_c000_2d00", 0),
+ MVI(0xc0002d01, "TODO_c000_2d01", 0),
+ MVI(0xc0002d02, "TODO_c000_2d02", 0),
+ MVI(0xc0002d03, "TODO_c000_2d03", 0),
+ MVI(0xc0002d04, "TODO_c000_2d04", 0),
+ MVI(0xc0002d05, "TODO_c000_2d05", 0),
+ MVI(0xc0002d06, "TODO_c000_2d06", 0),
+ MVI(0xc0002d07, "TODO_c000_2d07", 0),
+ MVI(0xc0002d08, "TODO_c000_2d08", 0),
+ MVI(0xc0002d09, "TODO_c000_2d09", 0),
+ MVI(0xc0002d0a, "TODO_c000_2d0a", 0),
+ MVI(0xc0002d0b, "TODO_c000_2d0b", 0),
+ MVI(0xc0002d0c, "TODO_c000_2d0c", 0),
+ MVI(0xc0002d0d, "TODO_c000_2d0d", 0),
+ MVI(0xc0002d0e, "TODO_c000_2d0e", 0),
+ MVI(0xc0002d0f, "TODO_c000_2d0f", 0),
+ MVI(0xc0002d10, "TODO_c000_2d10", 0),
+ MVI(0xc0002d11, "TODO_c000_2d11", 0),
+ MVI(0xc0002d12, "TODO_c000_2d12", 0),
+ MVI(0xc0002d13, "TODO_c000_2d13", 0),
+ MVI(0xc0002d14, "TODO_c000_2d14", 0),
+ MVI(0xc0002d15, "TODO_c000_2d15", 0),
+ MVI(0xc0002d16, "TODO_c000_2d16", 0),
+ MVI(0xc0002d17, "TODO_c000_2d17", 0),
+ MVI(0xc0002d18, "TODO_c000_2d18", 0),
+ MVI(0xc0002d19, "TODO_c000_2d19", 0),
+ MVI(0xc0002d1a, "TODO_c000_2d1a", 0),
+ MVI(0xc0002d1b, "TODO_c000_2d1b", 0),
+ MVI(0xc0002d1c, "TODO_c000_2d1c", 0),
+ MVI(0xc0002d1d, "TODO_c000_2d1d", 0),
+ MVI(0xc0002d1e, "TODO_c000_2d1e", 0),
+ MVI(0xc0002d1f, "TODO_c000_2d1f", 0),
+ MVI(0xc0002d20, "TODO_c000_2d20", 0),
+ MVI(0xc0002d21, "TODO_c000_2d21", 0),
+ MVI(0xc0002d22, "TODO_c000_2d22", 0),
+ MVI(0xc0002d23, "TODO_c000_2d23", 0),
+ MVI(0xc0002d24, "TODO_c000_2d24", 0),
+ MVI(0xc0002d25, "TODO_c000_2d25", 0),
+ MVI(0xc0002d26, "TODO_c000_2d26", 0),
+ MVI(0xc0002d27, "TODO_c000_2d27", 0),
+ MVI(0xc0002d28, "TODO_c000_2d28", 0),
+ MVI(0xc0002d29, "TODO_c000_2d29", 0),
+ MVI(0xc0002d2a, "TODO_c000_2d2a", 0),
+ MVI(0xc0002d2b, "TODO_c000_2d2b", 0),
+ MVI(0xc0002d2c, "TODO_c000_2d2c", 0),
+ MVI(0xc0002d2d, "TODO_c000_2d2d", 0),
+ MVI(0xc0002d2e, "TODO_c000_2d2e", 0),
+ MVI(0xc0002d2f, "TODO_c000_2d2f", 0),
+ MVI(0xc0002d30, "TODO_c000_2d30", 0),
+ MVI(0xc0002d31, "TODO_c000_2d31", 0),
+ MVI(0xc0002d32, "TODO_c000_2d32", 0),
+ MVI(0xc0002d33, "TODO_c000_2d33", 0),
+ MVI(0xc0002d34, "TODO_c000_2d34", 0),
+ MVI(0xc0002d35, "TODO_c000_2d35", 0),
+ MVI(0xc0002d36, "TODO_c000_2d36", 0),
+ MVI(0xc0002d37, "TODO_c000_2d37", 0),
+ MVI(0xc0002d38, "TODO_c000_2d38", 0),
+ MVI(0xc0002d39, "TODO_c000_2d39", 0),
+ MVI(0xc0002d3a, "TODO_c000_2d3a", 0),
+ MVI(0xc0002d3b, "TODO_c000_2d3b", 0),
+ MVI(0xc0002d3c, "TODO_c000_2d3c", 0),
+ MVI(0xc0002d3d, "TODO_c000_2d3d", 0),
+ MVI(0xc0002d3e, "TODO_c000_2d3e", 0),
+ MVI(0xc0002d3f, "TODO_c000_2d3f", 0),
+ MVI(0xc0002d40, "TODO_c000_2d40", 0),
+ MVI(0xc0002d41, "TODO_c000_2d41", 0),
+ MVI(0xc0002d42, "TODO_c000_2d42", 0),
+ MVI(0xc0002d43, "TODO_c000_2d43", 0),
+ MVI(0xc0002d44, "TODO_c000_2d44", 0),
+ MVI(0xc0002d45, "TODO_c000_2d45", 0),
+ MVI(0xc0002d46, "TODO_c000_2d46", 0),
+ MVI(0xc0002d47, "TODO_c000_2d47", 0),
+ MVI(0xc0002d48, "TODO_c000_2d48", 0),
+ MVI(0xc0002d49, "TODO_c000_2d49", 0),
+ MVI(0xc0002d4a, "TODO_c000_2d4a", 0),
+ MVI(0xc0002d4b, "TODO_c000_2d4b", 0),
+ MVI(0xc0002d4c, "TODO_c000_2d4c", 0),
+ MVI(0xc0002d4d, "TODO_c000_2d4d", 0),
+ MVI(0xc0002d4e, "TODO_c000_2d4e", 0),
+ MVI(0xc0002d4f, "TODO_c000_2d4f", 0),
+ MVI(0xc0002d50, "TODO_c000_2d50", 0),
+ MVI(0xc0002d51, "TODO_c000_2d51", 0),
+ MVI(0xc0002d52, "TODO_c000_2d52", 0),
+ MVI(0xc0002d53, "TODO_c000_2d53", 0),
+ MVI(0xc0002d54, "TODO_c000_2d54", 0),
+ MVI(0xc0002d55, "TODO_c000_2d55", 0),
+ MVI(0xc0002d56, "TODO_c000_2d56", 0),
+ MVI(0xc0002d57, "TODO_c000_2d57", 0),
+ MVI(0xc0002d58, "TODO_c000_2d58", 0),
+ MVI(0xc0002d59, "TODO_c000_2d59", 0),
+ MVI(0xc0002d5a, "TODO_c000_2d5a", 0),
+ MVI(0xc0002d5b, "TODO_c000_2d5b", 0),
+ MVI(0xc0002d5c, "TODO_c000_2d5c", 0),
+ MVI(0xc0002d5d, "TODO_c000_2d5d", 0),
+ MVI(0xc0002d5e, "TODO_c000_2d5e", 0),
+ MVI(0xc0002d5f, "TODO_c000_2d5f", 0),
+ MVI(0xc0002d60, "TODO_c000_2d60", 0),
+ MVI(0xc0002d61, "TODO_c000_2d61", 0),
+ MVI(0xc0002d62, "TODO_c000_2d62", 0),
+ MVI(0xc0002d63, "TODO_c000_2d63", 0),
+ MVI(0xc0002d64, "TODO_c000_2d64", 0),
+ MVI(0xc0002d65, "TODO_c000_2d65", 0),
+ MVI(0xc0002d66, "TODO_c000_2d66", 0),
+ MVI(0xc0002d67, "TODO_c000_2d67", 0),
+ MVI(0xc0002d68, "TODO_c000_2d68", 0),
+ MVI(0xc0002d69, "TODO_c000_2d69", 0),
+ MVI(0xc0002d6a, "TODO_c000_2d6a", 0),
+ MVI(0xc0002d6b, "TODO_c000_2d6b", 0),
+ MVI(0xc0002d6c, "TODO_c000_2d6c", 0),
+ MVI(0xc0002d6d, "TODO_c000_2d6d", 0),
+ MVI(0xc0002d6e, "TODO_c000_2d6e", 0),
+ MVI(0xc0002d6f, "TODO_c000_2d6f", 0),
+ MVI(0xc0002d70, "TODO_c000_2d70", 0),
+ MVI(0xc0002d71, "TODO_c000_2d71", 0),
+ MVI(0xc0002d72, "TODO_c000_2d72", 0),
+ MVI(0xc0002d73, "TODO_c000_2d73", 0),
+ MVI(0xc0002d74, "TODO_c000_2d74", 0),
+ MVI(0xc0002d75, "TODO_c000_2d75", 0),
+ MVI(0xc0002d76, "TODO_c000_2d76", 0),
+ MVI(0xc0002d77, "TODO_c000_2d77", 0),
+ MVI(0xc0002d78, "TODO_c000_2d78", 0),
+ MVI(0xc0002d79, "TODO_c000_2d79", 0),
+ MVI(0xc0002d7a, "TODO_c000_2d7a", 0),
+ MVI(0xc0002d7b, "TODO_c000_2d7b", 0),
+ MVI(0xc0002d7c, "TODO_c000_2d7c", 0),
+ MVI(0xc0002d7d, "TODO_c000_2d7d", 0),
+ MVI(0xc0002d7e, "TODO_c000_2d7e", 0),
+ MVI(0xc0002d7f, "TODO_c000_2d7f", 0),
+ MVI(0xc0002d80, "TODO_c000_2d80", 0),
+ MVI(0xc0002d81, "TODO_c000_2d81", 0),
+ MVI(0xc0002d82, "TODO_c000_2d82", 0),
+ MVI(0xc0002d83, "TODO_c000_2d83", 0),
+ MVI(0xc0002d84, "TODO_c000_2d84", 0),
+ MVI(0xc0002d85, "TODO_c000_2d85", 0),
+ MVI(0xc0002d86, "TODO_c000_2d86", 0),
+ MVI(0xc0002d87, "TODO_c000_2d87", 0),
+ MVI(0xc0002d88, "TODO_c000_2d88", 0),
+ MVI(0xc0002d89, "TODO_c000_2d89", 0),
+ MVI(0xc0002d8a, "TODO_c000_2d8a", 0),
+ MVI(0xc0002d8b, "TODO_c000_2d8b", 0),
+ MVI(0xc0002d8c, "TODO_c000_2d8c", 0),
+ MVI(0xc0002d8d, "TODO_c000_2d8d", 0),
+ MVI(0xc0002d8e, "TODO_c000_2d8e", 0),
+ MVI(0xc0002d8f, "TODO_c000_2d8f", 0),
+ MVI(0xc0002d90, "TODO_c000_2d90", 0),
+ MVI(0xc0002d91, "TODO_c000_2d91", 0),
+ MVI(0xc0002d92, "TODO_c000_2d92", 0),
+ MVI(0xc0002d93, "TODO_c000_2d93", 0),
+ MVI(0xc0002d94, "TODO_c000_2d94", 0),
+ MVI(0xc0002d95, "TODO_c000_2d95", 0),
+ MVI(0xc0002d96, "TODO_c000_2d96", 0),
+ MVI(0xc0002d97, "TODO_c000_2d97", 0),
+ MVI(0xc0002d98, "TODO_c000_2d98", 0),
+ MVI(0xc0002d99, "TODO_c000_2d99", 0),
+ MVI(0xc0002d9a, "TODO_c000_2d9a", 0),
+ MVI(0xc0002d9b, "TODO_c000_2d9b", 0),
+ MVI(0xc0002d9c, "TODO_c000_2d9c", 0),
+ MVI(0xc0002d9d, "TODO_c000_2d9d", 0),
+ MVI(0xc0002d9e, "TODO_c000_2d9e", 0),
+ MVI(0xc0002d9f, "TODO_c000_2d9f", 0),
+ MVI(0xc0002da0, "TODO_c000_2da0", 0),
+ MVI(0xc0002da1, "TODO_c000_2da1", 0),
+ MVI(0xc0002da2, "TODO_c000_2da2", 0),
+ MVI(0xc0002da3, "TODO_c000_2da3", 0),
+ MVI(0xc0002da4, "TODO_c000_2da4", 0),
+ MVI(0xc0002da5, "TODO_c000_2da5", 0),
+ MVI(0xc0002da6, "TODO_c000_2da6", 0),
+ MVI(0xc0002da7, "TODO_c000_2da7", 0),
+ MVI(0xc0002da8, "TODO_c000_2da8", 0),
+ MVI(0xc0002da9, "TODO_c000_2da9", 0),
+ MVI(0xc0002daa, "TODO_c000_2daa", 0),
+ MVI(0xc0002dab, "TODO_c000_2dab", 0),
+ MVI(0xc0002dac, "TODO_c000_2dac", 0),
+ MVI(0xc0002dad, "TODO_c000_2dad", 0),
+ MVI(0xc0002dae, "TODO_c000_2dae", 0),
+ MVI(0xc0002daf, "TODO_c000_2daf", 0),
+ MVI(0xc0002db0, "TODO_c000_2db0", 0),
+ MVI(0xc0002db1, "TODO_c000_2db1", 0),
+ MVI(0xc0002db2, "TODO_c000_2db2", 0),
+ MVI(0xc0002db3, "TODO_c000_2db3", 0),
+ MVI(0xc0002db4, "TODO_c000_2db4", 0),
+ MVI(0xc0002db5, "TODO_c000_2db5", 0),
+ MVI(0xc0002db6, "TODO_c000_2db6", 0),
+ MVI(0xc0002db7, "TODO_c000_2db7", 0),
+ MVI(0xc0002db8, "TODO_c000_2db8", 0),
+ MVI(0xc0002db9, "TODO_c000_2db9", 0),
+ MVI(0xc0002dba, "TODO_c000_2dba", 0),
+ MVI(0xc0002dbb, "TODO_c000_2dbb", 0),
+ MVI(0xc0002dbc, "TODO_c000_2dbc", 0),
+ MVI(0xc0002dbd, "TODO_c000_2dbd", 0),
+ MVI(0xc0002dbe, "TODO_c000_2dbe", 0),
+ MVI(0xc0002dbf, "TODO_c000_2dbf", 0),
+ MVI(0xc0002dc0, "TODO_c000_2dc0", 0),
+ MVI(0xc0002dc1, "TODO_c000_2dc1", 0),
+ MVI(0xc0002dc2, "TODO_c000_2dc2", 0),
+ MVI(0xc0002dc3, "TODO_c000_2dc3", 0),
+ MVI(0xc0002dc4, "TODO_c000_2dc4", 0),
+ MVI(0xc0002dc5, "TODO_c000_2dc5", 0),
+ MVI(0xc0002dc6, "TODO_c000_2dc6", 0),
+ MVI(0xc0002dc7, "TODO_c000_2dc7", 0),
+ MVI(0xc0002dc8, "TODO_c000_2dc8", 0),
+ MVI(0xc0002dc9, "TODO_c000_2dc9", 0),
+ MVI(0xc0002dca, "TODO_c000_2dca", 0),
+ MVI(0xc0002dcb, "TODO_c000_2dcb", 0),
+ MVI(0xc0002dcc, "TODO_c000_2dcc", 0),
+ MVI(0xc0002dcd, "TODO_c000_2dcd", 0),
+ MVI(0xc0002dce, "TODO_c000_2dce", 0),
+ MVI(0xc0002dcf, "TODO_c000_2dcf", 0),
+ MVI(0xc0002dd0, "TODO_c000_2dd0", 0),
+ MVI(0xc0002dd1, "TODO_c000_2dd1", 0),
+ MVI(0xc0002dd2, "TODO_c000_2dd2", 0),
+ MVI(0xc0002dd3, "TODO_c000_2dd3", 0),
+ MVI(0xc0002dd4, "TODO_c000_2dd4", 0),
+ MVI(0xc0002dd5, "TODO_c000_2dd5", 0),
+ MVI(0xc0002dd6, "TODO_c000_2dd6", 0),
+ MVI(0xc0002dd7, "TODO_c000_2dd7", 0),
+ MVI(0xc0002dd8, "TODO_c000_2dd8", 0),
+ MVI(0xc0002dd9, "TODO_c000_2dd9", 0),
+ MVI(0xc0002dda, "TODO_c000_2dda", 0),
+ MVI(0xc0002ddb, "TODO_c000_2ddb", 0),
+ MVI(0xc0002ddc, "TODO_c000_2ddc", 0),
+ MVI(0xc0002ddd, "TODO_c000_2ddd", 0),
+ MVI(0xc0002dde, "TODO_c000_2dde", 0),
+ MVI(0xc0002ddf, "TODO_c000_2ddf", 0),
+ MVI(0xc0002de0, "TODO_c000_2de0", 0),
+ MVI(0xc0002de1, "TODO_c000_2de1", 0),
+ MVI(0xc0002de2, "TODO_c000_2de2", 0),
+ MVI(0xc0002de3, "TODO_c000_2de3", 0),
+ MVI(0xc0002de4, "TODO_c000_2de4", 0),
+ MVI(0xc0002de5, "TODO_c000_2de5", 0),
+ MVI(0xc0002de6, "TODO_c000_2de6", 0),
+ MVI(0xc0002de7, "TODO_c000_2de7", 0),
+ MVI(0xc0002de8, "TODO_c000_2de8", 0),
+ MVI(0xc0002de9, "TODO_c000_2de9", 0),
+ MVI(0xc0002dea, "TODO_c000_2dea", 0),
+ MVI(0xc0002deb, "TODO_c000_2deb", 0),
+ MVI(0xc0002dec, "TODO_c000_2dec", 0),
+ MVI(0xc0002ded, "TODO_c000_2ded", 0),
+ MVI(0xc0002dee, "TODO_c000_2dee", 0),
+ MVI(0xc0002def, "TODO_c000_2def", 0),
+ MVI(0xc0002df0, "TODO_c000_2df0", 0),
+ MVI(0xc0002df1, "TODO_c000_2df1", 0),
+ MVI(0xc0002df2, "TODO_c000_2df2", 0),
+ MVI(0xc0002df3, "TODO_c000_2df3", 0),
+ MVI(0xc0002df4, "TODO_c000_2df4", 0),
+ MVI(0xc0002df5, "TODO_c000_2df5", 0),
+ MVI(0xc0002df6, "TODO_c000_2df6", 0),
+ MVI(0xc0002df7, "TODO_c000_2df7", 0),
+ MVI(0xc0002df8, "TODO_c000_2df8", 0),
+ MVI(0xc0002df9, "TODO_c000_2df9", 0),
+ MVI(0xc0002dfa, "TODO_c000_2dfa", 0),
+ MVI(0xc0002dfb, "TODO_c000_2dfb", 0),
+ MVI(0xc0002dfc, "TODO_c000_2dfc", 0),
+ MVI(0xc0002dfd, "TODO_c000_2dfd", 0),
+ MVI(0xc0002dfe, "TODO_c000_2dfe", 0),
+ MVI(0xc0002dff, "TODO_c000_2dff", 0),
+ MVI(0xc0002e00, "TODO_c000_2e00", 0),
+ MVI(0xc0002e01, "TODO_c000_2e01", 0),
+ MVI(0xc0002e02, "TODO_c000_2e02", 0),
+ MVI(0xc0002e03, "TODO_c000_2e03", 0),
+ MVI(0xc0002e04, "TODO_c000_2e04", 0),
+ MVI(0xc0002e05, "TODO_c000_2e05", 0),
+ MVI(0xc0002e06, "TODO_c000_2e06", 0),
+ MVI(0xc0002e07, "TODO_c000_2e07", 0),
+ MVI(0xc0002e08, "TODO_c000_2e08", 0),
+ MVI(0xc0002e09, "TODO_c000_2e09", 0),
+ MVI(0xc0002e0a, "TODO_c000_2e0a", 0),
+ MVI(0xc0002e0b, "TODO_c000_2e0b", 0),
+ MVI(0xc0002e0c, "TODO_c000_2e0c", 0),
+ MVI(0xc0002e0d, "TODO_c000_2e0d", 0),
+ MVI(0xc0002e0e, "TODO_c000_2e0e", 0),
+ MVI(0xc0002e0f, "TODO_c000_2e0f", 0),
+ MVI(0xc0002e10, "TODO_c000_2e10", 0),
+ MVI(0xc0002e11, "TODO_c000_2e11", 0),
+ MVI(0xc0002e12, "TODO_c000_2e12", 0),
+ MVI(0xc0002e13, "TODO_c000_2e13", 0),
+ MVI(0xc0002e14, "TODO_c000_2e14", 0),
+ MVI(0xc0002e15, "TODO_c000_2e15", 0),
+ MVI(0xc0002e16, "TODO_c000_2e16", 0),
+ MVI(0xc0002e17, "TODO_c000_2e17", 0),
+ MVI(0xc0002e18, "TODO_c000_2e18", 0),
+ MVI(0xc0002e19, "TODO_c000_2e19", 0),
+ MVI(0xc0002e1a, "TODO_c000_2e1a", 0),
+ MVI(0xc0002e1b, "TODO_c000_2e1b", 0),
+ MVI(0xc0002e1c, "TODO_c000_2e1c", 0),
+ MVI(0xc0002e1d, "TODO_c000_2e1d", 0),
+ MVI(0xc0002e1e, "TODO_c000_2e1e", 0),
+ MVI(0xc0002e1f, "TODO_c000_2e1f", 0),
+ MVI(0xc0002e20, "TODO_c000_2e20", 0),
+ MVI(0xc0002e21, "TODO_c000_2e21", 0),
+ MVI(0xc0002e22, "TODO_c000_2e22", 0),
+ MVI(0xc0002e23, "TODO_c000_2e23", 0),
+ MVI(0xc0002e24, "TODO_c000_2e24", 0),
+ MVI(0xc0002e25, "TODO_c000_2e25", 0),
+ MVI(0xc0002e26, "TODO_c000_2e26", 0),
+ MVI(0xc0002e27, "TODO_c000_2e27", 0),
+ MVI(0xc0002e28, "TODO_c000_2e28", 0),
+ MVI(0xc0002e29, "TODO_c000_2e29", 0),
+ MVI(0xc0002e2a, "TODO_c000_2e2a", 0),
+ MVI(0xc0002e2b, "TODO_c000_2e2b", 0),
+ MVI(0xc0002e2c, "TODO_c000_2e2c", 0),
+ MVI(0xc0002e2d, "TODO_c000_2e2d", 0),
+ MVI(0xc0002e2e, "TODO_c000_2e2e", 0),
+ MVI(0xc0002e2f, "TODO_c000_2e2f", 0),
+ MVI(0xc0002e30, "TODO_c000_2e30", 0),
+ MVI(0xc0002e31, "TODO_c000_2e31", 0),
+ MVI(0xc0002e32, "TODO_c000_2e32", 0),
+ MVI(0xc0002e33, "TODO_c000_2e33", 0),
+ MVI(0xc0002e34, "TODO_c000_2e34", 0),
+ MVI(0xc0002e35, "TODO_c000_2e35", 0),
+ MVI(0xc0002e36, "TODO_c000_2e36", 0),
+ MVI(0xc0002e37, "TODO_c000_2e37", 0),
+ MVI(0xc0002e38, "TODO_c000_2e38", 0),
+ MVI(0xc0002e39, "TODO_c000_2e39", 0),
+ MVI(0xc0002e3a, "TODO_c000_2e3a", 0),
+ MVI(0xc0002e3b, "TODO_c000_2e3b", 0),
+ MVI(0xc0002e3c, "TODO_c000_2e3c", 0),
+ MVI(0xc0002e3d, "TODO_c000_2e3d", 0),
+ MVI(0xc0002e3e, "TODO_c000_2e3e", 0),
+ MVI(0xc0002e3f, "TODO_c000_2e3f", 0),
+ MVI(0xc0002e40, "TODO_c000_2e40", 0),
+ MVI(0xc0002e41, "TODO_c000_2e41", 0),
+ MVI(0xc0002e42, "TODO_c000_2e42", 0),
+ MVI(0xc0002e43, "TODO_c000_2e43", 0),
+ MVI(0xc0002e44, "TODO_c000_2e44", 0),
+ MVI(0xc0002e45, "TODO_c000_2e45", 0),
+ MVI(0xc0002e46, "TODO_c000_2e46", 0),
+ MVI(0xc0002e47, "TODO_c000_2e47", 0),
+ MVI(0xc0002e48, "TODO_c000_2e48", 0),
+ MVI(0xc0002e49, "TODO_c000_2e49", 0),
+ MVI(0xc0002e4a, "TODO_c000_2e4a", 0),
+ MVI(0xc0002e4b, "TODO_c000_2e4b", 0),
+ MVI(0xc0002e4c, "TODO_c000_2e4c", 0),
+ MVI(0xc0002e4d, "TODO_c000_2e4d", 0),
+ MVI(0xc0002e4e, "TODO_c000_2e4e", 0),
+ MVI(0xc0002e4f, "TODO_c000_2e4f", 0),
+ MVI(0xc0002e50, "TODO_c000_2e50", 0),
+ MVI(0xc0002e51, "TODO_c000_2e51", 0),
+ MVI(0xc0002e52, "TODO_c000_2e52", 0),
+ MVI(0xc0002e53, "TODO_c000_2e53", 0),
+ MVI(0xc0002e54, "TODO_c000_2e54", 0),
+ MVI(0xc0002e55, "TODO_c000_2e55", 0),
+ MVI(0xc0002e56, "TODO_c000_2e56", 0),
+ MVI(0xc0002e57, "TODO_c000_2e57", 0),
+ MVI(0xc0002e58, "TODO_c000_2e58", 0),
+ MVI(0xc0002e59, "TODO_c000_2e59", 0),
+ MVI(0xc0002e5a, "TODO_c000_2e5a", 0),
+ MVI(0xc0002e5b, "TODO_c000_2e5b", 0),
+ MVI(0xc0002e5c, "TODO_c000_2e5c", 0),
+ MVI(0xc0002e5d, "TODO_c000_2e5d", 0),
+ MVI(0xc0002e5e, "TODO_c000_2e5e", 0),
+ MVI(0xc0002e5f, "TODO_c000_2e5f", 0),
+ MVI(0xc0002e60, "TODO_c000_2e60", 0),
+ MVI(0xc0002e61, "TODO_c000_2e61", 0),
+ MVI(0xc0002e62, "TODO_c000_2e62", 0),
+ MVI(0xc0002e63, "TODO_c000_2e63", 0),
+ MVI(0xc0002e64, "TODO_c000_2e64", 0),
+ MVI(0xc0002e65, "TODO_c000_2e65", 0),
+ MVI(0xc0002e66, "TODO_c000_2e66", 0),
+ MVI(0xc0002e67, "TODO_c000_2e67", 0),
+ MVI(0xc0002e68, "TODO_c000_2e68", 0),
+ MVI(0xc0002e69, "TODO_c000_2e69", 0),
+ MVI(0xc0002e6a, "TODO_c000_2e6a", 0),
+ MVI(0xc0002e6b, "TODO_c000_2e6b", 0),
+ MVI(0xc0002e6c, "TODO_c000_2e6c", 0),
+ MVI(0xc0002e6d, "TODO_c000_2e6d", 0),
+ MVI(0xc0002e6e, "TODO_c000_2e6e", 0),
+ MVI(0xc0002e6f, "TODO_c000_2e6f", 0),
+ MVI(0xc0002e70, "TODO_c000_2e70", 0),
+ MVI(0xc0002e71, "TODO_c000_2e71", 0),
+ MVI(0xc0002e72, "TODO_c000_2e72", 0),
+ MVI(0xc0002e73, "TODO_c000_2e73", 0),
+ MVI(0xc0002e74, "TODO_c000_2e74", 0),
+ MVI(0xc0002e75, "TODO_c000_2e75", 0),
+ MVI(0xc0002e76, "TODO_c000_2e76", 0),
+ MVI(0xc0002e77, "TODO_c000_2e77", 0),
+ MVI(0xc0002e78, "TODO_c000_2e78", 0),
+ MVI(0xc0002e79, "TODO_c000_2e79", 0),
+ MVI(0xc0002e7a, "TODO_c000_2e7a", 0),
+ MVI(0xc0002e7b, "TODO_c000_2e7b", 0),
+ MVI(0xc0002e7c, "TODO_c000_2e7c", 0),
+ MVI(0xc0002e7d, "TODO_c000_2e7d", 0),
+ MVI(0xc0002e7e, "TODO_c000_2e7e", 0),
+ MVI(0xc0002e7f, "TODO_c000_2e7f", 0),
+ MVI(0xc0002e80, "TODO_c000_2e80", 0),
+ MVI(0xc0002e81, "TODO_c000_2e81", 0),
+ MVI(0xc0002e82, "TODO_c000_2e82", 0),
+ MVI(0xc0002e83, "TODO_c000_2e83", 0),
+ MVI(0xc0002e84, "TODO_c000_2e84", 0),
+ MVI(0xc0002e85, "TODO_c000_2e85", 0),
+ MVI(0xc0002e86, "TODO_c000_2e86", 0),
+ MVI(0xc0002e87, "TODO_c000_2e87", 0),
+ MVI(0xc0002e88, "TODO_c000_2e88", 0),
+ MVI(0xc0002e89, "TODO_c000_2e89", 0),
+ MVI(0xc0002e8a, "TODO_c000_2e8a", 0),
+ MVI(0xc0002e8b, "TODO_c000_2e8b", 0),
+ MVI(0xc0002e8c, "TODO_c000_2e8c", 0),
+ MVI(0xc0002e8d, "TODO_c000_2e8d", 0),
+ MVI(0xc0002e8e, "TODO_c000_2e8e", 0),
+ MVI(0xc0002e8f, "TODO_c000_2e8f", 0),
+ MVI(0xc0002e90, "TODO_c000_2e90", 0),
+ MVI(0xc0002e91, "TODO_c000_2e91", 0),
+ MVI(0xc0002e92, "TODO_c000_2e92", 0),
+ MVI(0xc0002e93, "TODO_c000_2e93", 0),
+ MVI(0xc0002e94, "TODO_c000_2e94", 0),
+ MVI(0xc0002e95, "TODO_c000_2e95", 0),
+ MVI(0xc0002e96, "TODO_c000_2e96", 0),
+ MVI(0xc0002e97, "TODO_c000_2e97", 0),
+ MVI(0xc0002e98, "TODO_c000_2e98", 0),
+ MVI(0xc0002e99, "TODO_c000_2e99", 0),
+ MVI(0xc0002e9a, "TODO_c000_2e9a", 0),
+ MVI(0xc0002e9b, "TODO_c000_2e9b", 0),
+ MVI(0xc0002e9c, "TODO_c000_2e9c", 0),
+ MVI(0xc0002e9d, "TODO_c000_2e9d", 0),
+ MVI(0xc0002e9e, "TODO_c000_2e9e", 0),
+ MVI(0xc0002e9f, "TODO_c000_2e9f", 0),
+ MVI(0xc0002ea0, "TODO_c000_2ea0", 0),
+ MVI(0xc0002ea1, "TODO_c000_2ea1", 0),
+ MVI(0xc0002ea2, "TODO_c000_2ea2", 0),
+ MVI(0xc0002ea3, "TODO_c000_2ea3", 0),
+ MVI(0xc0002ea4, "TODO_c000_2ea4", 0),
+ MVI(0xc0002ea5, "TODO_c000_2ea5", 0),
+ MVI(0xc0002ea6, "TODO_c000_2ea6", 0),
+ MVI(0xc0002ea7, "TODO_c000_2ea7", 0),
+ MVI(0xc0002ea8, "TODO_c000_2ea8", 0),
+ MVI(0xc0002ea9, "TODO_c000_2ea9", 0),
+ MVI(0xc0002eaa, "TODO_c000_2eaa", 0),
+ MVI(0xc0002eab, "TODO_c000_2eab", 0),
+ MVI(0xc0002eac, "TODO_c000_2eac", 0),
+ MVI(0xc0002ead, "TODO_c000_2ead", 0),
+ MVI(0xc0002eae, "TODO_c000_2eae", 0),
+ MVI(0xc0002eaf, "TODO_c000_2eaf", 0),
+ MVI(0xc0002eb0, "TODO_c000_2eb0", 0),
+ MVI(0xc0002eb1, "TODO_c000_2eb1", 0),
+ MVI(0xc0002eb2, "TODO_c000_2eb2", 0),
+ MVI(0xc0002eb3, "TODO_c000_2eb3", 0),
+ MVI(0xc0002eb4, "TODO_c000_2eb4", 0),
+ MVI(0xc0002eb5, "TODO_c000_2eb5", 0),
+ MVI(0xc0002eb6, "TODO_c000_2eb6", 0),
+ MVI(0xc0002eb7, "TODO_c000_2eb7", 0),
+ MVI(0xc0002eb8, "TODO_c000_2eb8", 0),
+ MVI(0xc0002eb9, "TODO_c000_2eb9", 0),
+ MVI(0xc0002eba, "TODO_c000_2eba", 0),
+ MVI(0xc0002ebb, "TODO_c000_2ebb", 0),
+ MVI(0xc0002ebc, "TODO_c000_2ebc", 0),
+ MVI(0xc0002ebd, "TODO_c000_2ebd", 0),
+ MVI(0xc0002ebe, "TODO_c000_2ebe", 0),
+ MVI(0xc0002ebf, "TODO_c000_2ebf", 0),
+ MVI(0xc0002ec0, "TODO_c000_2ec0", 0),
+ MVI(0xc0002ec1, "TODO_c000_2ec1", 0),
+ MVI(0xc0002ec2, "TODO_c000_2ec2", 0),
+ MVI(0xc0002ec3, "TODO_c000_2ec3", 0),
+ MVI(0xc0002ec4, "TODO_c000_2ec4", 0),
+ MVI(0xc0002ec5, "TODO_c000_2ec5", 0),
+ MVI(0xc0002ec6, "TODO_c000_2ec6", 0),
+ MVI(0xc0002ec7, "TODO_c000_2ec7", 0),
+ MVI(0xc0002ec8, "TODO_c000_2ec8", 0),
+ MVI(0xc0002ec9, "TODO_c000_2ec9", 0),
+ MVI(0xc0002eca, "TODO_c000_2eca", 0),
+ MVI(0xc0002ecb, "TODO_c000_2ecb", 0),
+ MVI(0xc0002ecc, "TODO_c000_2ecc", 0),
+ MVI(0xc0002ecd, "TODO_c000_2ecd", 0),
+ MVI(0xc0002ece, "TODO_c000_2ece", 0),
+ MVI(0xc0002ecf, "TODO_c000_2ecf", 0),
+ MVI(0xc0002ed0, "TODO_c000_2ed0", 0),
+ MVI(0xc0002ed1, "TODO_c000_2ed1", 0),
+ MVI(0xc0002ed2, "TODO_c000_2ed2", 0),
+ MVI(0xc0002ed3, "TODO_c000_2ed3", 0),
+ MVI(0xc0002ed4, "TODO_c000_2ed4", 0),
+ MVI(0xc0002ed5, "TODO_c000_2ed5", 0),
+ MVI(0xc0002ed6, "TODO_c000_2ed6", 0),
+ MVI(0xc0002ed7, "TODO_c000_2ed7", 0),
+ MVI(0xc0002ed8, "TODO_c000_2ed8", 0),
+ MVI(0xc0002ed9, "TODO_c000_2ed9", 0),
+ MVI(0xc0002eda, "TODO_c000_2eda", 0),
+ MVI(0xc0002edb, "TODO_c000_2edb", 0),
+ MVI(0xc0002edc, "TODO_c000_2edc", 0),
+ MVI(0xc0002edd, "TODO_c000_2edd", 0),
+ MVI(0xc0002ede, "TODO_c000_2ede", 0),
+ MVI(0xc0002edf, "TODO_c000_2edf", 0),
+ MVI(0xc0002ee0, "TODO_c000_2ee0", 0),
+ MVI(0xc0002ee1, "TODO_c000_2ee1", 0),
+ MVI(0xc0002ee2, "TODO_c000_2ee2", 0),
+ MVI(0xc0002ee3, "TODO_c000_2ee3", 0),
+ MVI(0xc0002ee4, "TODO_c000_2ee4", 0),
+ MVI(0xc0002ee5, "TODO_c000_2ee5", 0),
+ MVI(0xc0002ee6, "TODO_c000_2ee6", 0),
+ MVI(0xc0002ee7, "TODO_c000_2ee7", 0),
+ MVI(0xc0002ee8, "TODO_c000_2ee8", 0),
+ MVI(0xc0002ee9, "TODO_c000_2ee9", 0),
+ MVI(0xc0002eea, "TODO_c000_2eea", 0),
+ MVI(0xc0002eeb, "TODO_c000_2eeb", 0),
+ MVI(0xc0002eec, "TODO_c000_2eec", 0),
+ MVI(0xc0002eed, "TODO_c000_2eed", 0),
+ MVI(0xc0002eee, "TODO_c000_2eee", 0),
+ MVI(0xc0002eef, "TODO_c000_2eef", 0),
+ MVI(0xc0002ef0, "TODO_c000_2ef0", 0),
+ MVI(0xc0002ef1, "TODO_c000_2ef1", 0),
+ MVI(0xc0002ef2, "TODO_c000_2ef2", 0),
+ MVI(0xc0002ef3, "TODO_c000_2ef3", 0),
+ MVI(0xc0002ef4, "TODO_c000_2ef4", 0),
+ MVI(0xc0002ef5, "TODO_c000_2ef5", 0),
+ MVI(0xc0002ef6, "TODO_c000_2ef6", 0),
+ MVI(0xc0002ef7, "TODO_c000_2ef7", 0),
+ MVI(0xc0002ef8, "TODO_c000_2ef8", 0),
+ MVI(0xc0002ef9, "TODO_c000_2ef9", 0),
+ MVI(0xc0002efa, "TODO_c000_2efa", 0),
+ MVI(0xc0002efb, "TODO_c000_2efb", 0),
+ MVI(0xc0002efc, "TODO_c000_2efc", 0),
+ MVI(0xc0002efd, "TODO_c000_2efd", 0),
+ MVI(0xc0002efe, "TODO_c000_2efe", 0),
+ MVI(0xc0002eff, "TODO_c000_2eff", 0),
+ MVI(0xc0002f00, "TODO_c000_2f00", 0),
+ MVI(0xc0002f01, "TODO_c000_2f01", 0),
+ MVI(0xc0002f02, "TODO_c000_2f02", 0),
+ MVI(0xc0002f03, "TODO_c000_2f03", 0),
+ MVI(0xc0002f04, "TODO_c000_2f04", 0),
+ MVI(0xc0002f05, "TODO_c000_2f05", 0),
+ MVI(0xc0002f06, "TODO_c000_2f06", 0),
+ MVI(0xc0002f07, "TODO_c000_2f07", 0),
+ MVI(0xc0002f08, "TODO_c000_2f08", 0),
+ MVI(0xc0002f09, "TODO_c000_2f09", 0),
+ MVI(0xc0002f0a, "TODO_c000_2f0a", 0),
+ MVI(0xc0002f0b, "TODO_c000_2f0b", 0),
+ MVI(0xc0002f0c, "TODO_c000_2f0c", 0),
+ MVI(0xc0002f0d, "TODO_c000_2f0d", 0),
+ MVI(0xc0002f0e, "TODO_c000_2f0e", 0),
+ MVI(0xc0002f0f, "TODO_c000_2f0f", 0),
+ MVI(0xc0002f10, "TODO_c000_2f10", 0),
+ MVI(0xc0002f11, "TODO_c000_2f11", 0),
+ MVI(0xc0002f12, "TODO_c000_2f12", 0),
+ MVI(0xc0002f13, "TODO_c000_2f13", 0),
+ MVI(0xc0002f14, "TODO_c000_2f14", 0),
+ MVI(0xc0002f15, "TODO_c000_2f15", 0),
+ MVI(0xc0002f16, "TODO_c000_2f16", 0),
+ MVI(0xc0002f17, "TODO_c000_2f17", 0),
+ MVI(0xc0002f18, "TODO_c000_2f18", 0),
+ MVI(0xc0002f19, "TODO_c000_2f19", 0),
+ MVI(0xc0002f1a, "TODO_c000_2f1a", 0),
+ MVI(0xc0002f1b, "TODO_c000_2f1b", 0),
+ MVI(0xc0002f1c, "TODO_c000_2f1c", 0),
+ MVI(0xc0002f1d, "TODO_c000_2f1d", 0),
+ MVI(0xc0002f1e, "TODO_c000_2f1e", 0),
+ MVI(0xc0002f1f, "TODO_c000_2f1f", 0),
+ MVI(0xc0002f20, "TODO_c000_2f20", 0),
+ MVI(0xc0002f21, "TODO_c000_2f21", 0),
+ MVI(0xc0002f22, "TODO_c000_2f22", 0),
+ MVI(0xc0002f23, "TODO_c000_2f23", 0),
+ MVI(0xc0002f24, "TODO_c000_2f24", 0),
+ MVI(0xc0002f25, "TODO_c000_2f25", 0),
+ MVI(0xc0002f26, "TODO_c000_2f26", 0),
+ MVI(0xc0002f27, "TODO_c000_2f27", 0),
+ MVI(0xc0002f28, "TODO_c000_2f28", 0),
+ MVI(0xc0002f29, "TODO_c000_2f29", 0),
+ MVI(0xc0002f2a, "TODO_c000_2f2a", 0),
+ MVI(0xc0002f2b, "TODO_c000_2f2b", 0),
+ MVI(0xc0002f2c, "TODO_c000_2f2c", 0),
+ MVI(0xc0002f2d, "TODO_c000_2f2d", 0),
+ MVI(0xc0002f2e, "TODO_c000_2f2e", 0),
+ MVI(0xc0002f2f, "TODO_c000_2f2f", 0),
+ MVI(0xc0002f30, "TODO_c000_2f30", 0),
+ MVI(0xc0002f31, "TODO_c000_2f31", 0),
+ MVI(0xc0002f32, "TODO_c000_2f32", 0),
+ MVI(0xc0002f33, "TODO_c000_2f33", 0),
+ MVI(0xc0002f34, "TODO_c000_2f34", 0),
+ MVI(0xc0002f35, "TODO_c000_2f35", 0),
+ MVI(0xc0002f36, "TODO_c000_2f36", 0),
+ MVI(0xc0002f37, "TODO_c000_2f37", 0),
+ MVI(0xc0002f38, "TODO_c000_2f38", 0),
+ MVI(0xc0002f39, "TODO_c000_2f39", 0),
+ MVI(0xc0002f3a, "TODO_c000_2f3a", 0),
+ MVI(0xc0002f3b, "TODO_c000_2f3b", 0),
+ MVI(0xc0002f3c, "TODO_c000_2f3c", 0),
+ MVI(0xc0002f3d, "TODO_c000_2f3d", 0),
+ MVI(0xc0002f3e, "TODO_c000_2f3e", 0),
+ MVI(0xc0002f3f, "TODO_c000_2f3f", 0),
+ MVI(0xc0002f40, "TODO_c000_2f40", 0),
+ MVI(0xc0002f41, "TODO_c000_2f41", 0),
+ MVI(0xc0002f42, "TODO_c000_2f42", 0),
+ MVI(0xc0002f43, "TODO_c000_2f43", 0),
+ MVI(0xc0002f44, "TODO_c000_2f44", 0),
+ MVI(0xc0002f45, "TODO_c000_2f45", 0),
+ MVI(0xc0002f46, "TODO_c000_2f46", 0),
+ MVI(0xc0002f47, "TODO_c000_2f47", 0),
+ MVI(0xc0002f48, "TODO_c000_2f48", 0),
+ MVI(0xc0002f49, "TODO_c000_2f49", 0),
+ MVI(0xc0002f4a, "TODO_c000_2f4a", 0),
+ MVI(0xc0002f4b, "TODO_c000_2f4b", 0),
+ MVI(0xc0002f4c, "TODO_c000_2f4c", 0),
+ MVI(0xc0002f4d, "TODO_c000_2f4d", 0),
+ MVI(0xc0002f4e, "TODO_c000_2f4e", 0),
+ MVI(0xc0002f4f, "TODO_c000_2f4f", 0),
+ MVI(0xc0002f50, "TODO_c000_2f50", 0),
+ MVI(0xc0002f51, "TODO_c000_2f51", 0),
+ MVI(0xc0002f52, "TODO_c000_2f52", 0),
+ MVI(0xc0002f53, "TODO_c000_2f53", 0),
+ MVI(0xc0002f54, "TODO_c000_2f54", 0),
+ MVI(0xc0002f55, "TODO_c000_2f55", 0),
+ MVI(0xc0002f56, "TODO_c000_2f56", 0),
+ MVI(0xc0002f57, "TODO_c000_2f57", 0),
+ MVI(0xc0002f58, "TODO_c000_2f58", 0),
+ MVI(0xc0002f59, "TODO_c000_2f59", 0),
+ MVI(0xc0002f5a, "TODO_c000_2f5a", 0),
+ MVI(0xc0002f5b, "TODO_c000_2f5b", 0),
+ MVI(0xc0002f5c, "TODO_c000_2f5c", 0),
+ MVI(0xc0002f5d, "TODO_c000_2f5d", 0),
+ MVI(0xc0002f5e, "TODO_c000_2f5e", 0),
+ MVI(0xc0002f5f, "TODO_c000_2f5f", 0),
+ MVI(0xc0002f60, "TODO_c000_2f60", 0),
+ MVI(0xc0002f61, "TODO_c000_2f61", 0),
+ MVI(0xc0002f62, "TODO_c000_2f62", 0),
+ MVI(0xc0002f63, "TODO_c000_2f63", 0),
+ MVI(0xc0002f64, "TODO_c000_2f64", 0),
+ MVI(0xc0002f65, "TODO_c000_2f65", 0),
+ MVI(0xc0002f66, "TODO_c000_2f66", 0),
+ MVI(0xc0002f67, "TODO_c000_2f67", 0),
+ MVI(0xc0002f68, "TODO_c000_2f68", 0),
+ MVI(0xc0002f69, "TODO_c000_2f69", 0),
+ MVI(0xc0002f6a, "TODO_c000_2f6a", 0),
+ MVI(0xc0002f6b, "TODO_c000_2f6b", 0),
+ MVI(0xc0002f6c, "TODO_c000_2f6c", 0),
+ MVI(0xc0002f6d, "TODO_c000_2f6d", 0),
+ MVI(0xc0002f6e, "TODO_c000_2f6e", 0),
+ MVI(0xc0002f6f, "TODO_c000_2f6f", 0),
+ MVI(0xc0002f70, "TODO_c000_2f70", 0),
+ MVI(0xc0002f71, "TODO_c000_2f71", 0),
+ MVI(0xc0002f72, "TODO_c000_2f72", 0),
+ MVI(0xc0002f73, "TODO_c000_2f73", 0),
+ MVI(0xc0002f74, "TODO_c000_2f74", 0),
+ MVI(0xc0002f75, "TODO_c000_2f75", 0),
+ MVI(0xc0002f76, "TODO_c000_2f76", 0),
+ MVI(0xc0002f77, "TODO_c000_2f77", 0),
+ MVI(0xc0002f78, "TODO_c000_2f78", 0),
+ MVI(0xc0002f79, "TODO_c000_2f79", 0),
+ MVI(0xc0002f7a, "TODO_c000_2f7a", 0),
+ MVI(0xc0002f7b, "TODO_c000_2f7b", 0),
+ MVI(0xc0002f7c, "TODO_c000_2f7c", 0),
+ MVI(0xc0002f7d, "TODO_c000_2f7d", 0),
+ MVI(0xc0002f7e, "TODO_c000_2f7e", 0),
+ MVI(0xc0002f7f, "TODO_c000_2f7f", 0),
+ MVI(0xc0002f80, "TODO_c000_2f80", 0),
+ MVI(0xc0002f81, "TODO_c000_2f81", 0),
+ MVI(0xc0002f82, "TODO_c000_2f82", 0),
+ MVI(0xc0002f83, "TODO_c000_2f83", 0),
+ MVI(0xc0002f84, "TODO_c000_2f84", 0),
+ MVI(0xc0002f85, "TODO_c000_2f85", 0),
+ MVI(0xc0002f86, "TODO_c000_2f86", 0),
+ MVI(0xc0002f87, "TODO_c000_2f87", 0),
+ MVI(0xc0002f88, "TODO_c000_2f88", 0),
+ MVI(0xc0002f89, "TODO_c000_2f89", 0),
+ MVI(0xc0002f8a, "TODO_c000_2f8a", 0),
+ MVI(0xc0002f8b, "TODO_c000_2f8b", 0),
+ MVI(0xc0002f8c, "TODO_c000_2f8c", 0),
+ MVI(0xc0002f8d, "TODO_c000_2f8d", 0),
+ MVI(0xc0002f8e, "TODO_c000_2f8e", 0),
+ MVI(0xc0002f8f, "TODO_c000_2f8f", 0),
+ MVI(0xc0002f90, "TODO_c000_2f90", 0),
+ MVI(0xc0002f91, "TODO_c000_2f91", 0),
+ MVI(0xc0002f92, "TODO_c000_2f92", 0),
+ MVI(0xc0002f93, "TODO_c000_2f93", 0),
+ MVI(0xc0002f94, "TODO_c000_2f94", 0),
+ MVI(0xc0002f95, "TODO_c000_2f95", 0),
+ MVI(0xc0002f96, "TODO_c000_2f96", 0),
+ MVI(0xc0002f97, "TODO_c000_2f97", 0),
+ MVI(0xc0002f98, "TODO_c000_2f98", 0),
+ MVI(0xc0002f99, "TODO_c000_2f99", 0),
+ MVI(0xc0002f9a, "TODO_c000_2f9a", 0),
+ MVI(0xc0002f9b, "TODO_c000_2f9b", 0),
+ MVI(0xc0002f9c, "TODO_c000_2f9c", 0),
+ MVI(0xc0002f9d, "TODO_c000_2f9d", 0),
+ MVI(0xc0002f9e, "TODO_c000_2f9e", 0),
+ MVI(0xc0002f9f, "TODO_c000_2f9f", 0),
+ MVI(0xc0002fa0, "TODO_c000_2fa0", 0),
+ MVI(0xc0002fa1, "TODO_c000_2fa1", 0),
+ MVI(0xc0002fa2, "TODO_c000_2fa2", 0),
+ MVI(0xc0002fa3, "TODO_c000_2fa3", 0),
+ MVI(0xc0002fa4, "TODO_c000_2fa4", 0),
+ MVI(0xc0002fa5, "TODO_c000_2fa5", 0),
+ MVI(0xc0002fa6, "TODO_c000_2fa6", 0),
+ MVI(0xc0002fa7, "TODO_c000_2fa7", 0),
+ MVI(0xc0002fa8, "TODO_c000_2fa8", 0),
+ MVI(0xc0002fa9, "TODO_c000_2fa9", 0),
+ MVI(0xc0002faa, "TODO_c000_2faa", 0),
+ MVI(0xc0002fab, "TODO_c000_2fab", 0),
+ MVI(0xc0002fac, "TODO_c000_2fac", 0),
+ MVI(0xc0002fad, "TODO_c000_2fad", 0),
+ MVI(0xc0002fae, "TODO_c000_2fae", 0),
+ MVI(0xc0002faf, "TODO_c000_2faf", 0),
+ MVI(0xc0002fb0, "TODO_c000_2fb0", 0),
+ MVI(0xc0002fb1, "TODO_c000_2fb1", 0),
+ MVI(0xc0002fb2, "TODO_c000_2fb2", 0),
+ MVI(0xc0002fb3, "TODO_c000_2fb3", 0),
+ MVI(0xc0002fb4, "TODO_c000_2fb4", 0),
+ MVI(0xc0002fb5, "TODO_c000_2fb5", 0),
+ MVI(0xc0002fb6, "TODO_c000_2fb6", 0),
+ MVI(0xc0002fb7, "TODO_c000_2fb7", 0),
+ MVI(0xc0002fb8, "TODO_c000_2fb8", 0),
+ MVI(0xc0002fb9, "TODO_c000_2fb9", 0),
+ MVI(0xc0002fba, "TODO_c000_2fba", 0),
+ MVI(0xc0002fbb, "TODO_c000_2fbb", 0),
+ MVI(0xc0002fbc, "TODO_c000_2fbc", 0),
+ MVI(0xc0002fbd, "TODO_c000_2fbd", 0),
+ MVI(0xc0002fbe, "TODO_c000_2fbe", 0),
+ MVI(0xc0002fbf, "TODO_c000_2fbf", 0),
+ MVI(0xc0002fc0, "TODO_c000_2fc0", 0),
+ MVI(0xc0002fc1, "TODO_c000_2fc1", 0),
+ MVI(0xc0002fc2, "TODO_c000_2fc2", 0),
+ MVI(0xc0002fc3, "TODO_c000_2fc3", 0),
+ MVI(0xc0002fc4, "TODO_c000_2fc4", 0),
+ MVI(0xc0002fc5, "TODO_c000_2fc5", 0),
+ MVI(0xc0002fc6, "TODO_c000_2fc6", 0),
+ MVI(0xc0002fc7, "TODO_c000_2fc7", 0),
+ MVI(0xc0002fc8, "TODO_c000_2fc8", 0),
+ MVI(0xc0002fc9, "TODO_c000_2fc9", 0),
+ MVI(0xc0002fca, "TODO_c000_2fca", 0),
+ MVI(0xc0002fcb, "TODO_c000_2fcb", 0),
+ MVI(0xc0002fcc, "TODO_c000_2fcc", 0),
+ MVI(0xc0002fcd, "TODO_c000_2fcd", 0),
+ MVI(0xc0002fce, "TODO_c000_2fce", 0),
+ MVI(0xc0002fcf, "TODO_c000_2fcf", 0),
+ MVI(0xc0002fd0, "TODO_c000_2fd0", 0),
+ MVI(0xc0002fd1, "TODO_c000_2fd1", 0),
+ MVI(0xc0002fd2, "TODO_c000_2fd2", 0),
+ MVI(0xc0002fd3, "TODO_c000_2fd3", 0),
+ MVI(0xc0002fd4, "TODO_c000_2fd4", 0),
+ MVI(0xc0002fd5, "TODO_c000_2fd5", 0),
+ MVI(0xc0002fd6, "TODO_c000_2fd6", 0),
+ MVI(0xc0002fd7, "TODO_c000_2fd7", 0),
+ MVI(0xc0002fd8, "TODO_c000_2fd8", 0),
+ MVI(0xc0002fd9, "TODO_c000_2fd9", 0),
+ MVI(0xc0002fda, "TODO_c000_2fda", 0),
+ MVI(0xc0002fdb, "TODO_c000_2fdb", 0),
+ MVI(0xc0002fdc, "TODO_c000_2fdc", 0),
+ MVI(0xc0002fdd, "TODO_c000_2fdd", 0),
+ MVI(0xc0002fde, "TODO_c000_2fde", 0),
+ MVI(0xc0002fdf, "TODO_c000_2fdf", 0),
+ MVI(0xc0002fe0, "TODO_c000_2fe0", 0),
+ MVI(0xc0002fe1, "TODO_c000_2fe1", 0),
+ MVI(0xc0002fe2, "TODO_c000_2fe2", 0),
+ MVI(0xc0002fe3, "TODO_c000_2fe3", 0),
+ MVI(0xc0002fe4, "TODO_c000_2fe4", 0),
+ MVI(0xc0002fe5, "TODO_c000_2fe5", 0),
+ MVI(0xc0002fe6, "TODO_c000_2fe6", 0),
+ MVI(0xc0002fe7, "TODO_c000_2fe7", 0),
+ MVI(0xc0002fe8, "TODO_c000_2fe8", 0),
+ MVI(0xc0002fe9, "TODO_c000_2fe9", 0),
+ MVI(0xc0002fea, "TODO_c000_2fea", 0),
+ MVI(0xc0002feb, "TODO_c000_2feb", 0),
+ MVI(0xc0002fec, "TODO_c000_2fec", 0),
+ MVI(0xc0002fed, "TODO_c000_2fed", 0),
+ MVI(0xc0002fee, "TODO_c000_2fee", 0),
+ MVI(0xc0002fef, "TODO_c000_2fef", 0),
+ MVI(0xc0002ff0, "TODO_c000_2ff0", 0),
+ MVI(0xc0002ff1, "TODO_c000_2ff1", 0),
+ MVI(0xc0002ff2, "TODO_c000_2ff2", 0),
+ MVI(0xc0002ff3, "TODO_c000_2ff3", 0),
+ MVI(0xc0002ff4, "TODO_c000_2ff4", 0),
+ MVI(0xc0002ff5, "TODO_c000_2ff5", 0),
+ MVI(0xc0002ff6, "TODO_c000_2ff6", 0),
+ MVI(0xc0002ff7, "TODO_c000_2ff7", 0),
+ MVI(0xc0002ff8, "TODO_c000_2ff8", 0),
+ MVI(0xc0002ff9, "TODO_c000_2ff9", 0),
+ MVI(0xc0002ffa, "TODO_c000_2ffa", 0),
+ MVI(0xc0002ffb, "TODO_c000_2ffb", 0),
+ MVI(0xc0002ffc, "TODO_c000_2ffc", 0),
+ MVI(0xc0002ffd, "TODO_c000_2ffd", 0),
+ MVI(0xc0002ffe, "TODO_c000_2ffe", 0),
+ MVI(0xc0002fff, "TODO_c000_2fff", 0),
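+ /* Note: the entries above are auto-generated placeholders for unrecognized
+  * MSRs, while the entries below are known AMD registers backed by CPUM
+  * worker functions. The macro conventions, as inferred from their usage in
+  * this table (an assumption based on the visible arguments, not quoted from
+  * the VirtualBox headers):
+  *   MVO/MVI/MVX   - value-only MSRs: read-only, write-ignored, or with
+  *                   explicit write-ignore and write-#GP masks;
+  *   MFN/MFI/MFW/MFX - MSRs dispatched to named rdmsr/wrmsr workers (e.g.
+  *                   AmdK8PerfCtlN), optionally with an initial value and
+  *                   write masks;
+  *   RSN/RFN       - a contiguous MSR range sharing one worker pair.
+  */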
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ MFX(0xc0010004, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000020415), 0), /* XXX: The range ended earlier than expected! */
+ MFX(0xc0010005, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010006, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010007, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0xf40000, UINT64_C(0xffffffffff80ffff), 0), /* value=0xf40000 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x9000011, UINT64_C(0xffffffff89006000), 0), /* value=0x9000011 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xe0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x2`20000000 */
+ MFI(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1), /* value=0x0 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFN(0xc0010021, "AMD_K8_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_MAX, 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFI(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage), /* value=0x0 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr), /* Villain? value=0xf8000019 */
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x20, 0, 0), /* value=0x20 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x2, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x2 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x2, 0, 0), /* value=0x2 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000000049080890), 0, 0), /* value=0x80000000`49080890 */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x80000000480b0880), 0, 0), /* value=0x80000000`480b0880 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x80000000459a0c84), 0, 0), /* value=0x80000000`459a0c84 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010069, "AMD_10H_P_ST_5", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc001006a, "AMD_10H_P_ST_6", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc001006b, "AMD_10H_P_ST_7", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr, AmdFam10hCStateIoBaseAddr, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x413 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x1 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xdfecc800 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0xde000000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fe006003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, UINT64_C(0xffffffff00000005), UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff780000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc001011b, "AMD_K8_UNK_c001_011b", WriteOnly, IgnoreWrite),
+ MVI(0xc0010120, "TODO_c001_0120", 0),
+ MVI(0xc0010121, "TODO_c001_0121", 0),
+ MVI(0xc0010122, "TODO_c001_0122", 0),
+ MVO(0xc0010131, "TODO_c001_0131", 0),
+ MFN(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength), /* value=0x0 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MVX(0xc0010188, "TODO_c001_0188", 0, UINT64_C(0xfffffcf000200000), 0),
+ MVX(0xc0010189, "TODO_c001_0189", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001018a, "TODO_c001_018a", 0, UINT64_C(0xfffffcf000200000), 0),
+ MVX(0xc001018b, "TODO_c001_018b", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010200, "AMD_K8_PERF_CTL_0", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0), /* value=0x530076 */
+ MFX(0xc0010201, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000008a1d), 0), /* value=0xfff8`f44e86f8 */
+ MFX(0xc0010202, "AMD_K8_PERF_CTL_1", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x1, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010203, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010204, "AMD_K8_PERF_CTL_2", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x2, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010205, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010206, "AMD_K8_PERF_CTL_3", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x3, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010207, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010208, "AMD_K8_PERF_CTL_4", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x4, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010209, "AMD_K8_PERF_CTR_4", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x4, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc001020a, "AMD_K8_PERF_CTL_5", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x5, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc001020b, "AMD_K8_PERF_CTR_5", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x5, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010230, "AMD_16H_L2I_PERF_CTL_0", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x0, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010231, "AMD_16H_L2I_PERF_CTR_0", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x0, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010232, "AMD_16H_L2I_PERF_CTL_1", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x1, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010233, "AMD_16H_L2I_PERF_CTR_1", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x1, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010234, "AMD_16H_L2I_PERF_CTL_2", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x2, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010235, "AMD_16H_L2I_PERF_CTR_2", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x2, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010236, "AMD_16H_L2I_PERF_CTL_3", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x3, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010237, "AMD_16H_L2I_PERF_CTR_3", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x3, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MVX(0xc0010238, "TODO_c001_0238", 0, UINT64_C(0xf0ffffffbf0000), 0),
+ MVX(0xc0010239, "TODO_c001_0239", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001023a, "TODO_c001_023a", 0, UINT64_C(0xf0ffffffbf0000), 0),
+ MVX(0xc001023b, "TODO_c001_023b", 0, UINT64_C(0xfffe000000000000), 0),
+ MFX(0xc0010240, "AMD_15H_NB_PERF_CTL_0", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x0, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010241, "AMD_15H_NB_PERF_CTR_0", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010242, "AMD_15H_NB_PERF_CTL_1", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x1, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010243, "AMD_15H_NB_PERF_CTR_1", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010244, "AMD_15H_NB_PERF_CTL_2", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x2, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010245, "AMD_15H_NB_PERF_CTR_2", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010246, "AMD_15H_NB_PERF_CTL_3", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x3, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010247, "AMD_15H_NB_PERF_CTR_3", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MVX(0xc0010290, "TODO_c001_0290", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0010292, "TODO_c001_0292", UINT64_C(0x100000052), UINT32_MAX, 0),
+ MVI(0xc0010293, "TODO_c001_0293", 0x9a4c84),
+ MVX(0xc0010294, "TODO_c001_0294", UINT64_C(0x1001f47f000f0912), UINT64_C(0xc0000000ffe00000), 0),
+ MVX(0xc0010296, "TODO_c001_0296", 0x484848, UINT64_C(0xffffffffff808080), 0),
+ MVI(0xc0010297, "TODO_c001_0297", UINT64_C(0x380000fc000)),
+ MVI(0xc0010299, "TODO_c001_0299", 0xa1003),
+ MVI(0xc001029a, "TODO_c001_029a", 0x1f9f2f),
+ MVI(0xc001029b, "TODO_c001_029b", 0xe9bfb38),
+ MVX(0xc0010400, "TODO_c001_0400", 0x600, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0xc0010401, "TODO_c001_0401", 0x2000, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0xc0010402, "TODO_c001_0402", 0x8, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc0010403, "TODO_c001_0403", 0x8, UINT64_C(0xfffffffffffffe00), 0),
+ MVI(0xc0010404, "TODO_c001_0404", 0),
+ MVX(0xc0010405, "TODO_c001_0405", 0, UINT64_C(0xfffffffffffff800), 0),
+ MVX(0xc0010406, "TODO_c001_0406", 0x40, UINT64_C(0xffffffffffffff80), 0),
+ MVI(0xc0010407, "TODO_c001_0407", 0),
+ MVI(0xc0010408, "TODO_c001_0408", 0),
+ MVI(0xc0010409, "TODO_c001_0409", 0),
+ MVI(0xc001040a, "TODO_c001_040a", 0),
+ MVI(0xc001040b, "TODO_c001_040b", 0),
+ MVI(0xc001040c, "TODO_c001_040c", 0),
+ MVI(0xc001040d, "TODO_c001_040d", 0),
+ MVI(0xc001040e, "TODO_c001_040e", 0),
+ MVI(0xc001040f, "TODO_c001_040f", 0),
+ MVI(0xc0010410, "TODO_c001_0410", 0),
+ MVI(0xc0010411, "TODO_c001_0411", 0),
+ MVI(0xc0010412, "TODO_c001_0412", 0),
+ MVI(0xc0010413, "TODO_c001_0413", 0),
+ MVI(0xc0010414, "TODO_c001_0414", 0),
+ MVI(0xc0010415, "TODO_c001_0415", 0),
+ MVI(0xc0010416, "TODO_c001_0416", 0),
+ MVI(0xc0010417, "TODO_c001_0417", 0),
+ MVI(0xc0010418, "TODO_c001_0418", 0),
+ MVI(0xc0010419, "TODO_c001_0419", 0),
+ MVI(0xc001041a, "TODO_c001_041a", 0),
+ MVI(0xc001041b, "TODO_c001_041b", 0),
+ MVI(0xc001041c, "TODO_c001_041c", 0),
+ MVI(0xc001041d, "TODO_c001_041d", 0),
+ MVI(0xc001041e, "TODO_c001_041e", 0),
+ MVI(0xc001041f, "TODO_c001_041f", 0),
+ MVI(0xc0010420, "TODO_c001_0420", 0),
+ MVI(0xc0010421, "TODO_c001_0421", 0),
+ MVI(0xc0010422, "TODO_c001_0422", 0),
+ MVI(0xc0010423, "TODO_c001_0423", 0),
+ MVI(0xc0010424, "TODO_c001_0424", 0),
+ MVI(0xc0010425, "TODO_c001_0425", 0),
+ MVI(0xc0010426, "TODO_c001_0426", 0),
+ MVI(0xc0010427, "TODO_c001_0427", 0),
+ MVI(0xc0010428, "TODO_c001_0428", 0),
+ MVI(0xc0010429, "TODO_c001_0429", 0),
+ MVI(0xc001042a, "TODO_c001_042a", 0),
+ MVI(0xc001042b, "TODO_c001_042b", 0),
+ MVI(0xc001042c, "TODO_c001_042c", 0),
+ MVI(0xc001042d, "TODO_c001_042d", 0),
+ MVI(0xc001042e, "TODO_c001_042e", 0),
+ MVI(0xc001042f, "TODO_c001_042f", 0),
+ MVI(0xc0010430, "TODO_c001_0430", 0),
+ MVI(0xc0010431, "TODO_c001_0431", 0),
+ MVI(0xc0010432, "TODO_c001_0432", 0),
+ MVI(0xc0010433, "TODO_c001_0433", 0),
+ MVI(0xc0010434, "TODO_c001_0434", 0),
+ MVI(0xc0010435, "TODO_c001_0435", 0),
+ MVI(0xc0010436, "TODO_c001_0436", 0),
+ MVI(0xc0010437, "TODO_c001_0437", 0),
+ MVI(0xc0010438, "TODO_c001_0438", 0),
+ MVI(0xc0010439, "TODO_c001_0439", 0),
+ MVI(0xc001043a, "TODO_c001_043a", 0),
+ MVI(0xc001043b, "TODO_c001_043b", 0),
+ MVI(0xc001043c, "TODO_c001_043c", 0),
+ MVI(0xc001043d, "TODO_c001_043d", 0),
+ MVI(0xc001043e, "TODO_c001_043e", 0),
+ MVI(0xc001043f, "TODO_c001_043f", 0),
+ MVI(0xc0010440, "TODO_c001_0440", 0),
+ MVI(0xc0010441, "TODO_c001_0441", 0),
+ MVI(0xc0010442, "TODO_c001_0442", 0),
+ MVI(0xc0010443, "TODO_c001_0443", 0),
+ MVI(0xc0010444, "TODO_c001_0444", 0),
+ MVI(0xc0010445, "TODO_c001_0445", 0),
+ MVI(0xc0010446, "TODO_c001_0446", 0),
+ MVI(0xc0010447, "TODO_c001_0447", 0),
+ MVI(0xc0010448, "TODO_c001_0448", 0),
+ MVI(0xc0010449, "TODO_c001_0449", 0),
+ MVI(0xc001044a, "TODO_c001_044a", 0),
+ MVI(0xc001044b, "TODO_c001_044b", 0),
+ MVI(0xc001044c, "TODO_c001_044c", 0),
+ MVI(0xc001044d, "TODO_c001_044d", 0),
+ MVI(0xc001044e, "TODO_c001_044e", 0),
+ MVI(0xc001044f, "TODO_c001_044f", 0),
+ MVI(0xc0010450, "TODO_c001_0450", 0),
+ MVI(0xc0010451, "TODO_c001_0451", 0),
+ MVI(0xc0010452, "TODO_c001_0452", 0),
+ MVI(0xc0010453, "TODO_c001_0453", 0),
+ MVI(0xc0010454, "TODO_c001_0454", 0),
+ MVI(0xc0010455, "TODO_c001_0455", 0),
+ MVI(0xc0010456, "TODO_c001_0456", 0),
+ MVI(0xc0010457, "TODO_c001_0457", 0),
+ MVI(0xc0010458, "TODO_c001_0458", 0),
+ MVI(0xc0010459, "TODO_c001_0459", 0),
+ MVI(0xc001045a, "TODO_c001_045a", 0),
+ MVI(0xc001045b, "TODO_c001_045b", 0),
+ MVI(0xc001045c, "TODO_c001_045c", 0),
+ MVI(0xc001045d, "TODO_c001_045d", 0),
+ MVI(0xc001045e, "TODO_c001_045e", 0),
+ MVI(0xc001045f, "TODO_c001_045f", 0),
+ MVI(0xc0010460, "TODO_c001_0460", 0),
+ MVI(0xc0010461, "TODO_c001_0461", 0),
+ MVI(0xc0010462, "TODO_c001_0462", 0),
+ MVI(0xc0010463, "TODO_c001_0463", 0),
+ MVI(0xc0010464, "TODO_c001_0464", 0),
+ MVI(0xc0010465, "TODO_c001_0465", 0),
+ MVI(0xc0010466, "TODO_c001_0466", 0),
+ MVI(0xc0010467, "TODO_c001_0467", 0),
+ MVI(0xc0010468, "TODO_c001_0468", 0),
+ MVI(0xc0010469, "TODO_c001_0469", 0),
+ MVI(0xc001046a, "TODO_c001_046a", 0),
+ MVI(0xc001046b, "TODO_c001_046b", 0),
+ MVI(0xc001046c, "TODO_c001_046c", 0),
+ MVI(0xc001046d, "TODO_c001_046d", 0),
+ MVI(0xc001046e, "TODO_c001_046e", 0),
+ MVI(0xc001046f, "TODO_c001_046f", 0),
+ MVI(0xc0010470, "TODO_c001_0470", 0),
+ MVI(0xc0010471, "TODO_c001_0471", 0),
+ MVI(0xc0010472, "TODO_c001_0472", 0),
+ MVI(0xc0010473, "TODO_c001_0473", 0),
+ MVI(0xc0010474, "TODO_c001_0474", 0),
+ MVI(0xc0010475, "TODO_c001_0475", 0),
+ MVI(0xc0010476, "TODO_c001_0476", 0),
+ MVI(0xc0010477, "TODO_c001_0477", 0),
+ MVI(0xc0010478, "TODO_c001_0478", 0),
+ MVI(0xc0010479, "TODO_c001_0479", 0),
+ MVI(0xc001047a, "TODO_c001_047a", 0),
+ MVI(0xc001047b, "TODO_c001_047b", 0),
+ MVI(0xc001047c, "TODO_c001_047c", 0),
+ MVI(0xc001047d, "TODO_c001_047d", 0),
+ MVI(0xc001047e, "TODO_c001_047e", 0),
+ MVI(0xc001047f, "TODO_c001_047f", 0),
+ MVI(0xc0010480, "TODO_c001_0480", 0),
+ MVI(0xc0010481, "TODO_c001_0481", 0),
+ MVI(0xc0010482, "TODO_c001_0482", 0),
+ MVI(0xc0010483, "TODO_c001_0483", 0),
+ MVI(0xc0010484, "TODO_c001_0484", 0),
+ MVI(0xc0010485, "TODO_c001_0485", 0),
+ MVI(0xc0010486, "TODO_c001_0486", 0),
+ MVI(0xc0010487, "TODO_c001_0487", 0),
+ MVI(0xc0010488, "TODO_c001_0488", 0),
+ MVI(0xc0010489, "TODO_c001_0489", 0),
+ MVI(0xc001048a, "TODO_c001_048a", 0),
+ MVI(0xc001048b, "TODO_c001_048b", 0),
+ MVI(0xc001048c, "TODO_c001_048c", 0),
+ MVI(0xc001048d, "TODO_c001_048d", 0),
+ MVI(0xc001048e, "TODO_c001_048e", 0),
+ MVI(0xc001048f, "TODO_c001_048f", 0),
+ MVI(0xc0010490, "TODO_c001_0490", 0),
+ MVI(0xc0010491, "TODO_c001_0491", 0),
+ MVI(0xc0010492, "TODO_c001_0492", 0),
+ MVI(0xc0010493, "TODO_c001_0493", 0),
+ MVI(0xc0010494, "TODO_c001_0494", 0),
+ MVI(0xc0010495, "TODO_c001_0495", 0),
+ MVI(0xc0010496, "TODO_c001_0496", 0),
+ MVI(0xc0010497, "TODO_c001_0497", 0),
+ MVI(0xc0010498, "TODO_c001_0498", 0),
+ MVI(0xc0010499, "TODO_c001_0499", 0),
+ MVI(0xc001049a, "TODO_c001_049a", 0),
+ MVI(0xc001049b, "TODO_c001_049b", 0),
+ MVI(0xc001049c, "TODO_c001_049c", 0),
+ MVI(0xc001049d, "TODO_c001_049d", 0),
+ MVI(0xc001049e, "TODO_c001_049e", 0),
+ MVI(0xc001049f, "TODO_c001_049f", 0),
+ MVI(0xc00104a0, "TODO_c001_04a0", 0),
+ MVI(0xc00104a1, "TODO_c001_04a1", 0),
+ MVI(0xc00104a2, "TODO_c001_04a2", 0),
+ MVI(0xc00104a3, "TODO_c001_04a3", 0),
+ MVI(0xc00104a4, "TODO_c001_04a4", 0),
+ MVI(0xc00104a5, "TODO_c001_04a5", 0),
+ MVI(0xc00104a6, "TODO_c001_04a6", 0),
+ MVI(0xc00104a7, "TODO_c001_04a7", 0),
+ MVI(0xc00104a8, "TODO_c001_04a8", 0),
+ MVI(0xc00104a9, "TODO_c001_04a9", 0),
+ MVI(0xc00104aa, "TODO_c001_04aa", 0),
+ MVI(0xc00104ab, "TODO_c001_04ab", 0),
+ MVI(0xc00104ac, "TODO_c001_04ac", 0),
+ MVI(0xc00104ad, "TODO_c001_04ad", 0),
+ MVI(0xc00104ae, "TODO_c001_04ae", 0),
+ MVI(0xc00104af, "TODO_c001_04af", 0),
+ MVI(0xc00104b0, "TODO_c001_04b0", 0),
+ MVI(0xc00104b1, "TODO_c001_04b1", 0),
+ MVI(0xc00104b2, "TODO_c001_04b2", 0),
+ MVI(0xc00104b3, "TODO_c001_04b3", 0),
+ MVI(0xc00104b4, "TODO_c001_04b4", 0),
+ MVI(0xc00104b5, "TODO_c001_04b5", 0),
+ MVI(0xc00104b6, "TODO_c001_04b6", 0),
+ MVI(0xc00104b7, "TODO_c001_04b7", 0),
+ MVI(0xc00104b8, "TODO_c001_04b8", 0),
+ MVI(0xc00104b9, "TODO_c001_04b9", 0),
+ MVI(0xc00104ba, "TODO_c001_04ba", 0),
+ MVI(0xc00104bb, "TODO_c001_04bb", 0),
+ MVI(0xc00104bc, "TODO_c001_04bc", 0),
+ MVI(0xc00104bd, "TODO_c001_04bd", 0),
+ MVI(0xc00104be, "TODO_c001_04be", 0),
+ MVI(0xc00104bf, "TODO_c001_04bf", 0),
+ MVI(0xc00104c0, "TODO_c001_04c0", 0),
+ MVI(0xc00104c1, "TODO_c001_04c1", 0),
+ MVI(0xc00104c2, "TODO_c001_04c2", 0),
+ MVI(0xc00104c3, "TODO_c001_04c3", 0),
+ MVI(0xc00104c4, "TODO_c001_04c4", 0),
+ MVI(0xc00104c5, "TODO_c001_04c5", 0),
+ MVI(0xc00104c6, "TODO_c001_04c6", 0),
+ MVI(0xc00104c7, "TODO_c001_04c7", 0),
+ MVI(0xc00104c8, "TODO_c001_04c8", 0),
+ MVI(0xc00104c9, "TODO_c001_04c9", 0),
+ MVI(0xc00104ca, "TODO_c001_04ca", 0),
+ MVI(0xc00104cb, "TODO_c001_04cb", 0),
+ MVI(0xc00104cc, "TODO_c001_04cc", 0),
+ MVI(0xc00104cd, "TODO_c001_04cd", 0),
+ MVI(0xc00104ce, "TODO_c001_04ce", 0),
+ MVI(0xc00104cf, "TODO_c001_04cf", 0),
+ MVI(0xc00104d0, "TODO_c001_04d0", 0),
+ MVI(0xc00104d1, "TODO_c001_04d1", 0),
+ MVI(0xc00104d2, "TODO_c001_04d2", 0),
+ MVI(0xc00104d3, "TODO_c001_04d3", 0),
+ MVI(0xc00104d4, "TODO_c001_04d4", 0),
+ MVI(0xc00104d5, "TODO_c001_04d5", 0),
+ MVI(0xc00104d6, "TODO_c001_04d6", 0),
+ MVI(0xc00104d7, "TODO_c001_04d7", 0),
+ MVI(0xc00104d8, "TODO_c001_04d8", 0),
+ MVI(0xc00104d9, "TODO_c001_04d9", 0),
+ MVI(0xc00104da, "TODO_c001_04da", 0),
+ MVI(0xc00104db, "TODO_c001_04db", 0),
+ MVI(0xc00104dc, "TODO_c001_04dc", 0),
+ MVI(0xc00104dd, "TODO_c001_04dd", 0),
+ MVI(0xc00104de, "TODO_c001_04de", 0),
+ MVI(0xc00104df, "TODO_c001_04df", 0),
+ MVI(0xc00104e0, "TODO_c001_04e0", 0),
+ MVI(0xc00104e1, "TODO_c001_04e1", 0),
+ MVI(0xc00104e2, "TODO_c001_04e2", 0),
+ MVI(0xc00104e3, "TODO_c001_04e3", 0),
+ MVI(0xc00104e4, "TODO_c001_04e4", 0),
+ MVI(0xc00104e5, "TODO_c001_04e5", 0),
+ MVI(0xc00104e6, "TODO_c001_04e6", 0),
+ MVI(0xc00104e7, "TODO_c001_04e7", 0),
+ MVI(0xc00104e8, "TODO_c001_04e8", 0),
+ MVI(0xc00104e9, "TODO_c001_04e9", 0),
+ MVI(0xc00104ea, "TODO_c001_04ea", 0),
+ MVI(0xc00104eb, "TODO_c001_04eb", 0),
+ MVI(0xc00104ec, "TODO_c001_04ec", 0),
+ MVI(0xc00104ed, "TODO_c001_04ed", 0),
+ MVI(0xc00104ee, "TODO_c001_04ee", 0),
+ MVI(0xc00104ef, "TODO_c001_04ef", 0),
+ MVI(0xc00104f0, "TODO_c001_04f0", 0),
+ MVI(0xc00104f1, "TODO_c001_04f1", 0),
+ MVI(0xc00104f2, "TODO_c001_04f2", 0),
+ MVI(0xc00104f3, "TODO_c001_04f3", 0),
+ MVI(0xc00104f4, "TODO_c001_04f4", 0),
+ MVI(0xc00104f5, "TODO_c001_04f5", 0),
+ MVI(0xc00104f6, "TODO_c001_04f6", 0),
+ MVI(0xc00104f7, "TODO_c001_04f7", 0),
+ MVI(0xc00104f8, "TODO_c001_04f8", 0),
+ MVI(0xc00104f9, "TODO_c001_04f9", 0),
+ MVI(0xc00104fa, "TODO_c001_04fa", 0),
+ MVI(0xc00104fb, "TODO_c001_04fb", 0),
+ MVI(0xc00104fc, "TODO_c001_04fc", 0),
+ MVI(0xc00104fd, "TODO_c001_04fd", 0),
+ MVI(0xc00104fe, "TODO_c001_04fe", 0),
+ MVI(0xc00104ff, "TODO_c001_04ff", 0),
+ MVI(0xc0010500, "TODO_c001_0500", 0),
+ MVI(0xc0010501, "TODO_c001_0501", 0),
+ MVX(0xc0010502, "TODO_c001_0502", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0010503, "TODO_c001_0503", 0),
+ MVI(0xc0010504, "TODO_c001_0504", 0),
+ MVI(0xc0010505, "TODO_c001_0505", 0),
+ MVI(0xc0010506, "TODO_c001_0506", 0),
+ MVI(0xc0010507, "TODO_c001_0507", 0),
+ MVI(0xc0010508, "TODO_c001_0508", 0),
+ MVI(0xc0010509, "TODO_c001_0509", 0),
+ MVI(0xc001050a, "TODO_c001_050a", 0),
+ MVI(0xc001050b, "TODO_c001_050b", 0),
+ MVI(0xc001050c, "TODO_c001_050c", 0),
+ MVI(0xc001050d, "TODO_c001_050d", 0),
+ MVI(0xc001050e, "TODO_c001_050e", 0),
+ MVI(0xc001050f, "TODO_c001_050f", 0),
+ MVI(0xc0010510, "TODO_c001_0510", 0),
+ MVI(0xc0010511, "TODO_c001_0511", 0),
+ MVI(0xc0010512, "TODO_c001_0512", 0),
+ MVI(0xc0010513, "TODO_c001_0513", 0),
+ MVI(0xc0010514, "TODO_c001_0514", 0),
+ MVI(0xc0010515, "TODO_c001_0515", 0),
+ MVI(0xc0010516, "TODO_c001_0516", 0),
+ MVI(0xc0010517, "TODO_c001_0517", 0),
+ MVI(0xc0010518, "TODO_c001_0518", 0),
+ MVI(0xc0010519, "TODO_c001_0519", 0),
+ MVI(0xc001051a, "TODO_c001_051a", 0),
+ MVI(0xc001051b, "TODO_c001_051b", 0),
+ MVI(0xc001051c, "TODO_c001_051c", 0),
+ MVI(0xc001051d, "TODO_c001_051d", 0),
+ MVI(0xc001051e, "TODO_c001_051e", 0),
+ MVI(0xc001051f, "TODO_c001_051f", 0),
+ MVI(0xc0010520, "TODO_c001_0520", 0),
+ MVI(0xc0010521, "TODO_c001_0521", 0),
+ MVI(0xc0010522, "TODO_c001_0522", 0),
+ MVI(0xc0010523, "TODO_c001_0523", 0),
+ MVI(0xc0010524, "TODO_c001_0524", 0),
+ MVI(0xc0010525, "TODO_c001_0525", 0),
+ MVI(0xc0010526, "TODO_c001_0526", 0),
+ MVI(0xc0010527, "TODO_c001_0527", 0),
+ MVI(0xc0010528, "TODO_c001_0528", 0),
+ MVI(0xc0010529, "TODO_c001_0529", 0),
+ MVI(0xc001052a, "TODO_c001_052a", 0),
+ MVI(0xc001052b, "TODO_c001_052b", 0),
+ MVI(0xc001052c, "TODO_c001_052c", 0),
+ MVI(0xc001052d, "TODO_c001_052d", 0),
+ MVI(0xc001052e, "TODO_c001_052e", 0),
+ MVI(0xc001052f, "TODO_c001_052f", 0),
+ MVI(0xc0010530, "TODO_c001_0530", 0),
+ MVI(0xc0010531, "TODO_c001_0531", 0),
+ MVI(0xc0010532, "TODO_c001_0532", 0),
+ MVI(0xc0010533, "TODO_c001_0533", 0),
+ MVI(0xc0010534, "TODO_c001_0534", 0),
+ MVI(0xc0010535, "TODO_c001_0535", 0),
+ MVI(0xc0010536, "TODO_c001_0536", 0),
+ MVI(0xc0010537, "TODO_c001_0537", 0),
+ MVI(0xc0010538, "TODO_c001_0538", 0),
+ MVI(0xc0010539, "TODO_c001_0539", 0),
+ MVI(0xc001053a, "TODO_c001_053a", 0),
+ MVI(0xc001053b, "TODO_c001_053b", 0),
+ MVI(0xc001053c, "TODO_c001_053c", 0),
+ MVI(0xc001053d, "TODO_c001_053d", 0),
+ MVI(0xc001053e, "TODO_c001_053e", 0),
+ MVI(0xc001053f, "TODO_c001_053f", 0),
+ MVI(0xc0010540, "TODO_c001_0540", 0),
+ MVI(0xc0010541, "TODO_c001_0541", 0),
+ MVI(0xc0010542, "TODO_c001_0542", 0),
+ MVI(0xc0010543, "TODO_c001_0543", 0),
+ MVI(0xc0010544, "TODO_c001_0544", 0),
+ MVI(0xc0010545, "TODO_c001_0545", 0),
+ MVI(0xc0010546, "TODO_c001_0546", 0),
+ MVI(0xc0010547, "TODO_c001_0547", 0),
+ MVI(0xc0010548, "TODO_c001_0548", 0),
+ MVI(0xc0010549, "TODO_c001_0549", 0),
+ MVI(0xc001054a, "TODO_c001_054a", 0),
+ MVI(0xc001054b, "TODO_c001_054b", 0),
+ MVI(0xc001054c, "TODO_c001_054c", 0),
+ MVI(0xc001054d, "TODO_c001_054d", 0),
+ MVI(0xc001054e, "TODO_c001_054e", 0),
+ MVI(0xc001054f, "TODO_c001_054f", 0),
+ MVI(0xc0010550, "TODO_c001_0550", 0),
+ MVI(0xc0010551, "TODO_c001_0551", 0),
+ MVI(0xc0010552, "TODO_c001_0552", 0),
+ MVI(0xc0010553, "TODO_c001_0553", 0),
+ MVI(0xc0010554, "TODO_c001_0554", 0),
+ MVI(0xc0010555, "TODO_c001_0555", 0),
+ MVI(0xc0010556, "TODO_c001_0556", 0),
+ MVI(0xc0010557, "TODO_c001_0557", 0),
+ MVI(0xc0010558, "TODO_c001_0558", 0),
+ MVI(0xc0010559, "TODO_c001_0559", 0),
+ MVI(0xc001055a, "TODO_c001_055a", 0),
+ MVI(0xc001055b, "TODO_c001_055b", 0),
+ MVI(0xc001055c, "TODO_c001_055c", 0),
+ MVI(0xc001055d, "TODO_c001_055d", 0),
+ MVI(0xc001055e, "TODO_c001_055e", 0),
+ MVI(0xc001055f, "TODO_c001_055f", 0),
+ MVI(0xc0010560, "TODO_c001_0560", 0),
+ MVI(0xc0010561, "TODO_c001_0561", 0),
+ MVI(0xc0010562, "TODO_c001_0562", 0),
+ MVI(0xc0010563, "TODO_c001_0563", 0),
+ MVI(0xc0010564, "TODO_c001_0564", 0),
+ MVI(0xc0010565, "TODO_c001_0565", 0),
+ MVI(0xc0010566, "TODO_c001_0566", 0),
+ MVI(0xc0010567, "TODO_c001_0567", 0),
+ MVI(0xc0010568, "TODO_c001_0568", 0),
+ MVI(0xc0010569, "TODO_c001_0569", 0),
+ MVI(0xc001056a, "TODO_c001_056a", 0),
+ MVI(0xc001056b, "TODO_c001_056b", 0),
+ MVI(0xc001056c, "TODO_c001_056c", 0),
+ MVI(0xc001056d, "TODO_c001_056d", 0),
+ MVI(0xc001056e, "TODO_c001_056e", 0),
+ MVI(0xc001056f, "TODO_c001_056f", 0),
+ MVI(0xc0010570, "TODO_c001_0570", 0),
+ MVI(0xc0010571, "TODO_c001_0571", 0),
+ MVI(0xc0010572, "TODO_c001_0572", 0),
+ MVI(0xc0010573, "TODO_c001_0573", 0),
+ MVI(0xc0010574, "TODO_c001_0574", 0),
+ MVI(0xc0010575, "TODO_c001_0575", 0),
+ MVI(0xc0010576, "TODO_c001_0576", 0),
+ MVI(0xc0010577, "TODO_c001_0577", 0),
+ MVI(0xc0010578, "TODO_c001_0578", 0),
+ MVI(0xc0010579, "TODO_c001_0579", 0),
+ MVI(0xc001057a, "TODO_c001_057a", 0),
+ MVI(0xc001057b, "TODO_c001_057b", 0),
+ MVI(0xc001057c, "TODO_c001_057c", 0),
+ MVI(0xc001057d, "TODO_c001_057d", 0),
+ MVI(0xc001057e, "TODO_c001_057e", 0),
+ MVI(0xc001057f, "TODO_c001_057f", 0),
+ MVI(0xc0010580, "TODO_c001_0580", 0),
+ MVI(0xc0010581, "TODO_c001_0581", 0),
+ MVI(0xc0010582, "TODO_c001_0582", 0),
+ MVI(0xc0010583, "TODO_c001_0583", 0),
+ MVI(0xc0010584, "TODO_c001_0584", 0),
+ MVI(0xc0010585, "TODO_c001_0585", 0),
+ MVI(0xc0010586, "TODO_c001_0586", 0),
+ MVI(0xc0010587, "TODO_c001_0587", 0),
+ MVI(0xc0010588, "TODO_c001_0588", 0),
+ MVI(0xc0010589, "TODO_c001_0589", 0),
+ MVI(0xc001058a, "TODO_c001_058a", 0),
+ MVI(0xc001058b, "TODO_c001_058b", 0),
+ MVI(0xc001058c, "TODO_c001_058c", 0),
+ MVI(0xc001058d, "TODO_c001_058d", 0),
+ MVI(0xc001058e, "TODO_c001_058e", 0),
+ MVI(0xc001058f, "TODO_c001_058f", 0),
+ MVI(0xc0010590, "TODO_c001_0590", 0),
+ MVI(0xc0010591, "TODO_c001_0591", 0),
+ MVI(0xc0010592, "TODO_c001_0592", 0),
+ MVI(0xc0010593, "TODO_c001_0593", 0),
+ MVI(0xc0010594, "TODO_c001_0594", 0),
+ MVI(0xc0010595, "TODO_c001_0595", 0),
+ MVI(0xc0010596, "TODO_c001_0596", 0),
+ MVI(0xc0010597, "TODO_c001_0597", 0),
+ MVI(0xc0010598, "TODO_c001_0598", 0),
+ MVI(0xc0010599, "TODO_c001_0599", 0),
+ MVI(0xc001059a, "TODO_c001_059a", 0),
+ MVI(0xc001059b, "TODO_c001_059b", 0),
+ MVI(0xc001059c, "TODO_c001_059c", 0),
+ MVI(0xc001059d, "TODO_c001_059d", 0),
+ MVI(0xc001059e, "TODO_c001_059e", 0),
+ MVI(0xc001059f, "TODO_c001_059f", 0),
+ MVI(0xc00105a0, "TODO_c001_05a0", 0),
+ MVI(0xc00105a1, "TODO_c001_05a1", 0),
+ MVI(0xc00105a2, "TODO_c001_05a2", 0),
+ MVI(0xc00105a3, "TODO_c001_05a3", 0),
+ MVI(0xc00105a4, "TODO_c001_05a4", 0),
+ MVI(0xc00105a5, "TODO_c001_05a5", 0),
+ MVI(0xc00105a6, "TODO_c001_05a6", 0),
+ MVI(0xc00105a7, "TODO_c001_05a7", 0),
+ MVI(0xc00105a8, "TODO_c001_05a8", 0),
+ MVI(0xc00105a9, "TODO_c001_05a9", 0),
+ MVI(0xc00105aa, "TODO_c001_05aa", 0),
+ MVI(0xc00105ab, "TODO_c001_05ab", 0),
+ MVI(0xc00105ac, "TODO_c001_05ac", 0),
+ MVI(0xc00105ad, "TODO_c001_05ad", 0),
+ MVI(0xc00105ae, "TODO_c001_05ae", 0),
+ MVI(0xc00105af, "TODO_c001_05af", 0),
+ MVI(0xc00105b0, "TODO_c001_05b0", 0),
+ MVI(0xc00105b1, "TODO_c001_05b1", 0),
+ MVI(0xc00105b2, "TODO_c001_05b2", 0),
+ MVI(0xc00105b3, "TODO_c001_05b3", 0),
+ MVI(0xc00105b4, "TODO_c001_05b4", 0),
+ MVI(0xc00105b5, "TODO_c001_05b5", 0),
+ MVI(0xc00105b6, "TODO_c001_05b6", 0),
+ MVI(0xc00105b7, "TODO_c001_05b7", 0),
+ MVI(0xc00105b8, "TODO_c001_05b8", 0),
+ MVI(0xc00105b9, "TODO_c001_05b9", 0),
+ MVI(0xc00105ba, "TODO_c001_05ba", 0),
+ MVI(0xc00105bb, "TODO_c001_05bb", 0),
+ MVI(0xc00105bc, "TODO_c001_05bc", 0),
+ MVI(0xc00105bd, "TODO_c001_05bd", 0),
+ MVI(0xc00105be, "TODO_c001_05be", 0),
+ MVI(0xc00105bf, "TODO_c001_05bf", 0),
+ MVI(0xc00105c0, "TODO_c001_05c0", 0),
+ MVI(0xc00105c1, "TODO_c001_05c1", 0),
+ MVI(0xc00105c2, "TODO_c001_05c2", 0),
+ MVI(0xc00105c3, "TODO_c001_05c3", 0),
+ MVI(0xc00105c4, "TODO_c001_05c4", 0),
+ MVI(0xc00105c5, "TODO_c001_05c5", 0),
+ MVI(0xc00105c6, "TODO_c001_05c6", 0),
+ MVI(0xc00105c7, "TODO_c001_05c7", 0),
+ MVI(0xc00105c8, "TODO_c001_05c8", 0),
+ MVI(0xc00105c9, "TODO_c001_05c9", 0),
+ MVI(0xc00105ca, "TODO_c001_05ca", 0),
+ MVI(0xc00105cb, "TODO_c001_05cb", 0),
+ MVI(0xc00105cc, "TODO_c001_05cc", 0),
+ MVI(0xc00105cd, "TODO_c001_05cd", 0),
+ MVI(0xc00105ce, "TODO_c001_05ce", 0),
+ MVI(0xc00105cf, "TODO_c001_05cf", 0),
+ MVI(0xc00105d0, "TODO_c001_05d0", 0),
+ MVI(0xc00105d1, "TODO_c001_05d1", 0),
+ MVI(0xc00105d2, "TODO_c001_05d2", 0),
+ MVI(0xc00105d3, "TODO_c001_05d3", 0),
+ MVI(0xc00105d4, "TODO_c001_05d4", 0),
+ MVI(0xc00105d5, "TODO_c001_05d5", 0),
+ MVI(0xc00105d6, "TODO_c001_05d6", 0),
+ MVI(0xc00105d7, "TODO_c001_05d7", 0),
+ MVI(0xc00105d8, "TODO_c001_05d8", 0),
+ MVI(0xc00105d9, "TODO_c001_05d9", 0),
+ MVI(0xc00105da, "TODO_c001_05da", 0),
+ MVI(0xc00105db, "TODO_c001_05db", 0),
+ MVI(0xc00105dc, "TODO_c001_05dc", 0),
+ MVI(0xc00105dd, "TODO_c001_05dd", 0),
+ MVI(0xc00105de, "TODO_c001_05de", 0),
+ MVI(0xc00105df, "TODO_c001_05df", 0),
+ MVI(0xc00105e0, "TODO_c001_05e0", 0),
+ MVI(0xc00105e1, "TODO_c001_05e1", 0),
+ MVI(0xc00105e2, "TODO_c001_05e2", 0),
+ MVI(0xc00105e3, "TODO_c001_05e3", 0),
+ MVI(0xc00105e4, "TODO_c001_05e4", 0),
+ MVI(0xc00105e5, "TODO_c001_05e5", 0),
+ MVI(0xc00105e6, "TODO_c001_05e6", 0),
+ MVI(0xc00105e7, "TODO_c001_05e7", 0),
+ MVI(0xc00105e8, "TODO_c001_05e8", 0),
+ MVI(0xc00105e9, "TODO_c001_05e9", 0),
+ MVI(0xc00105ea, "TODO_c001_05ea", 0),
+ MVI(0xc00105eb, "TODO_c001_05eb", 0),
+ MVI(0xc00105ec, "TODO_c001_05ec", 0),
+ MVI(0xc00105ed, "TODO_c001_05ed", 0),
+ MVI(0xc00105ee, "TODO_c001_05ee", 0),
+ MVI(0xc00105ef, "TODO_c001_05ef", 0),
+ MVI(0xc00105f0, "TODO_c001_05f0", 0),
+ MVI(0xc00105f1, "TODO_c001_05f1", 0),
+ MVI(0xc00105f2, "TODO_c001_05f2", 0),
+ MVI(0xc00105f3, "TODO_c001_05f3", 0),
+ MVI(0xc00105f4, "TODO_c001_05f4", 0),
+ MVI(0xc00105f5, "TODO_c001_05f5", 0),
+ MVI(0xc00105f6, "TODO_c001_05f6", 0),
+ MVI(0xc00105f7, "TODO_c001_05f7", 0),
+ MVI(0xc00105f8, "TODO_c001_05f8", 0),
+ MVI(0xc00105f9, "TODO_c001_05f9", 0),
+ MVI(0xc00105fa, "TODO_c001_05fa", 0),
+ MVI(0xc00105fb, "TODO_c001_05fb", 0),
+ MVI(0xc00105fc, "TODO_c001_05fc", 0),
+ MVI(0xc00105fd, "TODO_c001_05fd", 0),
+ MVI(0xc00105fe, "TODO_c001_05fe", 0),
+ MVI(0xc00105ff, "TODO_c001_05ff", 0),
+ MVI(0xc0010600, "TODO_c001_0600", 0),
+ MVI(0xc0010601, "TODO_c001_0601", 0),
+ MVX(0xc0010602, "TODO_c001_0602", 0, 0, 0),
+ MVI(0xc0010603, "TODO_c001_0603", 0),
+ MVI(0xc0010604, "TODO_c001_0604", 0),
+ MVI(0xc0010605, "TODO_c001_0605", 0),
+ MVI(0xc0010606, "TODO_c001_0606", 0),
+ MVI(0xc0010607, "TODO_c001_0607", 0),
+ MVI(0xc0010608, "TODO_c001_0608", 0),
+ MVI(0xc0010609, "TODO_c001_0609", 0),
+ MVI(0xc001060a, "TODO_c001_060a", 0),
+ MVI(0xc001060b, "TODO_c001_060b", 0),
+ MVI(0xc001060c, "TODO_c001_060c", 0),
+ MVI(0xc001060d, "TODO_c001_060d", 0),
+ MVI(0xc001060e, "TODO_c001_060e", 0),
+ MVI(0xc001060f, "TODO_c001_060f", 0),
+ MVI(0xc0010610, "TODO_c001_0610", 0),
+ MVI(0xc0010611, "TODO_c001_0611", 0),
+ MVI(0xc0010612, "TODO_c001_0612", 0),
+ MVI(0xc0010613, "TODO_c001_0613", 0),
+ MVI(0xc0010614, "TODO_c001_0614", 0),
+ MVI(0xc0010615, "TODO_c001_0615", 0),
+ MVI(0xc0010616, "TODO_c001_0616", 0),
+ MVI(0xc0010617, "TODO_c001_0617", 0),
+ MVI(0xc0010618, "TODO_c001_0618", 0),
+ MVI(0xc0010619, "TODO_c001_0619", 0),
+ MVI(0xc001061a, "TODO_c001_061a", 0),
+ MVI(0xc001061b, "TODO_c001_061b", 0),
+ MVI(0xc001061c, "TODO_c001_061c", 0),
+ MVI(0xc001061d, "TODO_c001_061d", 0),
+ MVI(0xc001061e, "TODO_c001_061e", 0),
+ MVI(0xc001061f, "TODO_c001_061f", 0),
+ MVI(0xc0010620, "TODO_c001_0620", 0),
+ MVI(0xc0010621, "TODO_c001_0621", 0),
+ MVI(0xc0010622, "TODO_c001_0622", 0),
+ MVI(0xc0010623, "TODO_c001_0623", 0),
+ MVI(0xc0010624, "TODO_c001_0624", 0),
+ MVI(0xc0010625, "TODO_c001_0625", 0),
+ MVI(0xc0010626, "TODO_c001_0626", 0),
+ MVI(0xc0010627, "TODO_c001_0627", 0),
+ MVI(0xc0010628, "TODO_c001_0628", 0),
+ MVI(0xc0010629, "TODO_c001_0629", 0),
+ MVI(0xc001062a, "TODO_c001_062a", 0),
+ MVI(0xc001062b, "TODO_c001_062b", 0),
+ MVI(0xc001062c, "TODO_c001_062c", 0),
+ MVI(0xc001062d, "TODO_c001_062d", 0),
+ MVI(0xc001062e, "TODO_c001_062e", 0),
+ MVI(0xc001062f, "TODO_c001_062f", 0),
+ MVI(0xc0010630, "TODO_c001_0630", 0),
+ MVI(0xc0010631, "TODO_c001_0631", 0),
+ MVI(0xc0010632, "TODO_c001_0632", 0),
+ MVI(0xc0010633, "TODO_c001_0633", 0),
+ MVI(0xc0010634, "TODO_c001_0634", 0),
+ MVI(0xc0010635, "TODO_c001_0635", 0),
+ MVI(0xc0010636, "TODO_c001_0636", 0),
+ MVI(0xc0010637, "TODO_c001_0637", 0),
+ MVI(0xc0010638, "TODO_c001_0638", 0),
+ MVI(0xc0010639, "TODO_c001_0639", 0),
+ MVI(0xc001063a, "TODO_c001_063a", 0),
+ MVI(0xc001063b, "TODO_c001_063b", 0),
+ MVI(0xc001063c, "TODO_c001_063c", 0),
+ MVI(0xc001063d, "TODO_c001_063d", 0),
+ MVI(0xc001063e, "TODO_c001_063e", 0),
+ MVI(0xc001063f, "TODO_c001_063f", 0),
+ MVI(0xc0010640, "TODO_c001_0640", 0),
+ MVI(0xc0010641, "TODO_c001_0641", 0),
+ MVI(0xc0010642, "TODO_c001_0642", 0),
+ MVI(0xc0010643, "TODO_c001_0643", 0),
+ MVI(0xc0010644, "TODO_c001_0644", 0),
+ MVI(0xc0010645, "TODO_c001_0645", 0),
+ MVI(0xc0010646, "TODO_c001_0646", 0),
+ MVI(0xc0010647, "TODO_c001_0647", 0),
+ MVI(0xc0010648, "TODO_c001_0648", 0),
+ MVI(0xc0010649, "TODO_c001_0649", 0),
+ MVI(0xc001064a, "TODO_c001_064a", 0),
+ MVI(0xc001064b, "TODO_c001_064b", 0),
+ MVI(0xc001064c, "TODO_c001_064c", 0),
+ MVI(0xc001064d, "TODO_c001_064d", 0),
+ MVI(0xc001064e, "TODO_c001_064e", 0),
+ MVI(0xc001064f, "TODO_c001_064f", 0),
+ MVI(0xc0010650, "TODO_c001_0650", 0),
+ MVI(0xc0010651, "TODO_c001_0651", 0),
+ MVI(0xc0010652, "TODO_c001_0652", 0),
+ MVI(0xc0010653, "TODO_c001_0653", 0),
+ MVI(0xc0010654, "TODO_c001_0654", 0),
+ MVI(0xc0010655, "TODO_c001_0655", 0),
+ MVI(0xc0010656, "TODO_c001_0656", 0),
+ MVI(0xc0010657, "TODO_c001_0657", 0),
+ MVI(0xc0010658, "TODO_c001_0658", 0),
+ MVI(0xc0010659, "TODO_c001_0659", 0),
+ MVI(0xc001065a, "TODO_c001_065a", 0),
+ MVI(0xc001065b, "TODO_c001_065b", 0),
+ MVI(0xc001065c, "TODO_c001_065c", 0),
+ MVI(0xc001065d, "TODO_c001_065d", 0),
+ MVI(0xc001065e, "TODO_c001_065e", 0),
+ MVI(0xc001065f, "TODO_c001_065f", 0),
+ MVI(0xc0010660, "TODO_c001_0660", 0),
+ MVI(0xc0010661, "TODO_c001_0661", 0),
+ MVI(0xc0010662, "TODO_c001_0662", 0),
+ MVI(0xc0010663, "TODO_c001_0663", 0),
+ MVI(0xc0010664, "TODO_c001_0664", 0),
+ MVI(0xc0010665, "TODO_c001_0665", 0),
+ MVI(0xc0010666, "TODO_c001_0666", 0),
+ MVI(0xc0010667, "TODO_c001_0667", 0),
+ MVI(0xc0010668, "TODO_c001_0668", 0),
+ MVI(0xc0010669, "TODO_c001_0669", 0),
+ MVI(0xc001066a, "TODO_c001_066a", 0),
+ MVI(0xc001066b, "TODO_c001_066b", 0),
+ MVI(0xc001066c, "TODO_c001_066c", 0),
+ MVI(0xc001066d, "TODO_c001_066d", 0),
+ MVI(0xc001066e, "TODO_c001_066e", 0),
+ MVI(0xc001066f, "TODO_c001_066f", 0),
+ MVI(0xc0010670, "TODO_c001_0670", 0),
+ MVI(0xc0010671, "TODO_c001_0671", 0),
+ MVI(0xc0010672, "TODO_c001_0672", 0),
+ MVI(0xc0010673, "TODO_c001_0673", 0),
+ MVI(0xc0010674, "TODO_c001_0674", 0),
+ MVI(0xc0010675, "TODO_c001_0675", 0),
+ MVI(0xc0010676, "TODO_c001_0676", 0),
+ MVI(0xc0010677, "TODO_c001_0677", 0),
+ MVI(0xc0010678, "TODO_c001_0678", 0),
+ MVI(0xc0010679, "TODO_c001_0679", 0),
+ MVI(0xc001067a, "TODO_c001_067a", 0),
+ MVI(0xc001067b, "TODO_c001_067b", 0),
+ MVI(0xc001067c, "TODO_c001_067c", 0),
+ MVI(0xc001067d, "TODO_c001_067d", 0),
+ MVI(0xc001067e, "TODO_c001_067e", 0),
+ MVI(0xc001067f, "TODO_c001_067f", 0),
+ MVI(0xc0010680, "TODO_c001_0680", 0),
+ MVI(0xc0010681, "TODO_c001_0681", 0),
+ MVI(0xc0010682, "TODO_c001_0682", 0),
+ MVI(0xc0010683, "TODO_c001_0683", 0),
+ MVI(0xc0010684, "TODO_c001_0684", 0),
+ MVI(0xc0010685, "TODO_c001_0685", 0),
+ MVI(0xc0010686, "TODO_c001_0686", 0),
+ MVI(0xc0010687, "TODO_c001_0687", 0),
+ MVI(0xc0010688, "TODO_c001_0688", 0),
+ MVI(0xc0010689, "TODO_c001_0689", 0),
+ MVI(0xc001068a, "TODO_c001_068a", 0),
+ MVI(0xc001068b, "TODO_c001_068b", 0),
+ MVI(0xc001068c, "TODO_c001_068c", 0),
+ MVI(0xc001068d, "TODO_c001_068d", 0),
+ MVI(0xc001068e, "TODO_c001_068e", 0),
+ MVI(0xc001068f, "TODO_c001_068f", 0),
+ MVI(0xc0010690, "TODO_c001_0690", 0),
+ MVI(0xc0010691, "TODO_c001_0691", 0),
+ MVI(0xc0010692, "TODO_c001_0692", 0),
+ MVI(0xc0010693, "TODO_c001_0693", 0),
+ MVI(0xc0010694, "TODO_c001_0694", 0),
+ MVI(0xc0010695, "TODO_c001_0695", 0),
+ MVI(0xc0010696, "TODO_c001_0696", 0),
+ MVI(0xc0010697, "TODO_c001_0697", 0),
+ MVI(0xc0010698, "TODO_c001_0698", 0),
+ MVI(0xc0010699, "TODO_c001_0699", 0),
+ MVI(0xc001069a, "TODO_c001_069a", 0),
+ MVI(0xc001069b, "TODO_c001_069b", 0),
+ MVI(0xc001069c, "TODO_c001_069c", 0),
+ MVI(0xc001069d, "TODO_c001_069d", 0),
+ MVI(0xc001069e, "TODO_c001_069e", 0),
+ MVI(0xc001069f, "TODO_c001_069f", 0),
+ MVI(0xc00106a0, "TODO_c001_06a0", 0),
+ MVI(0xc00106a1, "TODO_c001_06a1", 0),
+ MVI(0xc00106a2, "TODO_c001_06a2", 0),
+ MVI(0xc00106a3, "TODO_c001_06a3", 0),
+ MVI(0xc00106a4, "TODO_c001_06a4", 0),
+ MVI(0xc00106a5, "TODO_c001_06a5", 0),
+ MVI(0xc00106a6, "TODO_c001_06a6", 0),
+ MVI(0xc00106a7, "TODO_c001_06a7", 0),
+ MVI(0xc00106a8, "TODO_c001_06a8", 0),
+ MVI(0xc00106a9, "TODO_c001_06a9", 0),
+ MVI(0xc00106aa, "TODO_c001_06aa", 0),
+ MVI(0xc00106ab, "TODO_c001_06ab", 0),
+ MVI(0xc00106ac, "TODO_c001_06ac", 0),
+ MVI(0xc00106ad, "TODO_c001_06ad", 0),
+ MVI(0xc00106ae, "TODO_c001_06ae", 0),
+ MVI(0xc00106af, "TODO_c001_06af", 0),
+ MVI(0xc00106b0, "TODO_c001_06b0", 0),
+ MVI(0xc00106b1, "TODO_c001_06b1", 0),
+ MVI(0xc00106b2, "TODO_c001_06b2", 0),
+ MVI(0xc00106b3, "TODO_c001_06b3", 0),
+ MVI(0xc00106b4, "TODO_c001_06b4", 0),
+ MVI(0xc00106b5, "TODO_c001_06b5", 0),
+ MVI(0xc00106b6, "TODO_c001_06b6", 0),
+ MVI(0xc00106b7, "TODO_c001_06b7", 0),
+ MVI(0xc00106b8, "TODO_c001_06b8", 0),
+ MVI(0xc00106b9, "TODO_c001_06b9", 0),
+ MVI(0xc00106ba, "TODO_c001_06ba", 0),
+ MVI(0xc00106bb, "TODO_c001_06bb", 0),
+ MVI(0xc00106bc, "TODO_c001_06bc", 0),
+ MVI(0xc00106bd, "TODO_c001_06bd", 0),
+ MVI(0xc00106be, "TODO_c001_06be", 0),
+ MVI(0xc00106bf, "TODO_c001_06bf", 0),
+ MVI(0xc00106c0, "TODO_c001_06c0", 0),
+ MVI(0xc00106c1, "TODO_c001_06c1", 0),
+ MVI(0xc00106c2, "TODO_c001_06c2", 0),
+ MVI(0xc00106c3, "TODO_c001_06c3", 0),
+ MVI(0xc00106c4, "TODO_c001_06c4", 0),
+ MVI(0xc00106c5, "TODO_c001_06c5", 0),
+ MVI(0xc00106c6, "TODO_c001_06c6", 0),
+ MVI(0xc00106c7, "TODO_c001_06c7", 0),
+ MVI(0xc00106c8, "TODO_c001_06c8", 0),
+ MVI(0xc00106c9, "TODO_c001_06c9", 0),
+ MVI(0xc00106ca, "TODO_c001_06ca", 0),
+ MVI(0xc00106cb, "TODO_c001_06cb", 0),
+ MVI(0xc00106cc, "TODO_c001_06cc", 0),
+ MVI(0xc00106cd, "TODO_c001_06cd", 0),
+ MVI(0xc00106ce, "TODO_c001_06ce", 0),
+ MVI(0xc00106cf, "TODO_c001_06cf", 0),
+ MVI(0xc00106d0, "TODO_c001_06d0", 0),
+ MVI(0xc00106d1, "TODO_c001_06d1", 0),
+ MVI(0xc00106d2, "TODO_c001_06d2", 0),
+ MVI(0xc00106d3, "TODO_c001_06d3", 0),
+ MVI(0xc00106d4, "TODO_c001_06d4", 0),
+ MVI(0xc00106d5, "TODO_c001_06d5", 0),
+ MVI(0xc00106d6, "TODO_c001_06d6", 0),
+ MVI(0xc00106d7, "TODO_c001_06d7", 0),
+ MVI(0xc00106d8, "TODO_c001_06d8", 0),
+ MVI(0xc00106d9, "TODO_c001_06d9", 0),
+ MVI(0xc00106da, "TODO_c001_06da", 0),
+ MVI(0xc00106db, "TODO_c001_06db", 0),
+ MVI(0xc00106dc, "TODO_c001_06dc", 0),
+ MVI(0xc00106dd, "TODO_c001_06dd", 0),
+ MVI(0xc00106de, "TODO_c001_06de", 0),
+ MVI(0xc00106df, "TODO_c001_06df", 0),
+ MVI(0xc00106e0, "TODO_c001_06e0", 0),
+ MVI(0xc00106e1, "TODO_c001_06e1", 0),
+ MVI(0xc00106e2, "TODO_c001_06e2", 0),
+ MVI(0xc00106e3, "TODO_c001_06e3", 0),
+ MVI(0xc00106e4, "TODO_c001_06e4", 0),
+ MVI(0xc00106e5, "TODO_c001_06e5", 0),
+ MVI(0xc00106e6, "TODO_c001_06e6", 0),
+ MVI(0xc00106e7, "TODO_c001_06e7", 0),
+ MVI(0xc00106e8, "TODO_c001_06e8", 0),
+ MVI(0xc00106e9, "TODO_c001_06e9", 0),
+ MVI(0xc00106ea, "TODO_c001_06ea", 0),
+ MVI(0xc00106eb, "TODO_c001_06eb", 0),
+ MVI(0xc00106ec, "TODO_c001_06ec", 0),
+ MVI(0xc00106ed, "TODO_c001_06ed", 0),
+ MVI(0xc00106ee, "TODO_c001_06ee", 0),
+ MVI(0xc00106ef, "TODO_c001_06ef", 0),
+ MVI(0xc00106f0, "TODO_c001_06f0", 0),
+ MVI(0xc00106f1, "TODO_c001_06f1", 0),
+ MVI(0xc00106f2, "TODO_c001_06f2", 0),
+ MVI(0xc00106f3, "TODO_c001_06f3", 0),
+ MVI(0xc00106f4, "TODO_c001_06f4", 0),
+ MVI(0xc00106f5, "TODO_c001_06f5", 0),
+ MVI(0xc00106f6, "TODO_c001_06f6", 0),
+ MVI(0xc00106f7, "TODO_c001_06f7", 0),
+ MVI(0xc00106f8, "TODO_c001_06f8", 0),
+ MVI(0xc00106f9, "TODO_c001_06f9", 0),
+ MVI(0xc00106fa, "TODO_c001_06fa", 0),
+ MVI(0xc00106fb, "TODO_c001_06fb", 0),
+ MVI(0xc00106fc, "TODO_c001_06fc", 0),
+ MVI(0xc00106fd, "TODO_c001_06fd", 0),
+ MVI(0xc00106fe, "TODO_c001_06fe", 0),
+ MVI(0xc00106ff, "TODO_c001_06ff", 0),
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x4), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011002, "AMD_K8_CPUID_CTL_STD07", AmdK8CpuIdCtlStd07hEbax, AmdK8CpuIdCtlStd07hEbax), /* value=0x209c01a9 */
+ MFX(0xc0011003, "AMD_K8_CPUID_CTL_STD06", AmdK8CpuIdCtlStd06hEcx, AmdK8CpuIdCtlStd06hEcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x7ed8320b`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x35c233ff`2fd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFI(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, UINT64_C(0xffffffff00010000), 0),
+ MVI(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0), /* value=0x0 */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, 0, 0),
+ RSN(0xc0011019, 0xc001101a, "AMD_16H_DR1_ADDR_MASKn", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001101b, "AMD_16H_DR3_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0x7c000fffc000), 0), /* value=0x2068000`00000110 */
+ MFX(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, 0x200000, 0, 0), /* value=0x200000 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xfffffffff000000), 0), /* value=0x500000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x22000`00000100 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011028, "AMD_15H_FP_CFG", AmdFam15hFpuCfg, AmdFam15hFpuCfg, 0, UINT64_C(0xff80f0fc0000fc00), 0), /* value=0x140200`248000d4 */
+ MFX(0xc0011029, "AMD_15H_DC_CFG", AmdFam15hDecoderCfg, AmdFam15hDecoderCfg, 0, 0x18001, 0), /* value=0xe2e002 */
+ MFN(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2), /* value=0x2800000`00028080 */
+ MVX(0xc001102b, "TODO_c001_102b", 0x1808cc17, 0, 0),
+ MVI(0xc001102c, "TODO_c001_102c", UINT64_C(0x168000000000000)), /* Villain? */
+ MVX(0xc001102d, "TODO_c001_102d", UINT64_C(0x1000000500010000), UINT64_C(0x1ffc0ffe00000), 0),
+ MVX(0xc001102e, "TODO_c001_102e", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001102f, "TODO_c001_102f", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfeffffffff0000), 0), /* value=0x0 */
+ MFX(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr, AmdFam10hIbsFetchLinAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr, AmdFam10hIbsFetchPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xf8000000f8010000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x0 */
+ MFX(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData, AmdFam10hIbsOpData, 0, UINT64_C(0xfffffe0300000000), 0), /* value=0x0 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFX(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3, AmdFam10hIbsOpData3, 0, 0x1e00, 0), /* value=0x0 */
+ MFN(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr), /* value=0x0 */
+ MFX(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr, AmdFam10hIbsDcPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+ MFN(0xc001103b, "AMD_14H_IBS_BR_TARGET", AmdFam14hIbsBrTarget, AmdFam14hIbsBrTarget), /* value=0x0 */
+ MVI(0xc001103c, "TODO_c001_103c", 0),
+ MVX(0xc0011041, "AMD_15H_UNK_c001_1041", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0011042, "AMD_15H_UNK_c001_1042", 0),
+ MVX(0xc0011074, "TODO_c001_1074", UINT64_C(0x8000000000000000), UINT64_C(0x8fffffffffffffff), 0),
+ MVX(0xc0011075, "TODO_c001_1075", 0, UINT64_C(0xfffffffff0000000), 0),
+ MVX(0xc0011076, "TODO_c001_1076", 0x14, UINT64_C(0xffffffffffffffe0), 0),
+ MVI(0xc0011077, "TODO_c001_1077", UINT64_C(0x7100000000000000)),
+ MVI(0xc0011078, "TODO_c001_1078", 0),
+ MVX(0xc0011083, "TODO_c001_1083", UINT64_C(0x1ad6b5ad1ad6b5ad), UINT64_C(0xffffffffffffffe0), 0),
+ MVX(0xc0011092, "TODO_c001_1092", 0x57854a00, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011093, "TODO_c001_1093", 0xe860e0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011094, "TODO_c001_1094", 0x11fd, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011095, "TODO_c001_1095", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011096, "TODO_c001_1096", 0x78000000, UINT64_C(0xffffffff87ff0000), 0),
+ MVX(0xc0011097, "TODO_c001_1097", 0xff6, UINT64_C(0xffffffffffffc000), 0),
+ MVO(0xc00110a2, "TODO_c001_10a2", UINT32_C(0xfd100000)),
+};
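+/* A rough key to the entry macros above, inferred from their use here and
+ * from the definitions in CPUMR3Db.cpp (treat the exact field order as an
+ * approximation rather than authoritative documentation):
+ *   MFN(msr, name, rdFn, wrFn)                       - backed by read/write handlers.
+ *   MFX(msr, name, rdFn, wrFn, val, ignMask, gpMask) - handler-backed with an initial
+ *       value (doubling as the index for *N handlers), a mask of written bits that
+ *       are silently ignored, and a mask of written bits that raise \#GP.
+ *   MVI(msr, name, val)                              - constant value, writes ignored.
+ *   MVX(msr, name, val, ignMask, gpMask)             - variable value with write masks.
+ *   MVO(msr, name, val)                              - read-only constant value.
+ *   RSN/RFN/RVI(first, last, ...)                    - ranges sharing one handler/value.
+ * The trailing "value=..." comments are what VBoxCpuReport observed on the host;
+ * "TODO_*" names mark MSRs the report could not identify. */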
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD Ryzen 7 1800X Eight-Core Processor.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Ryzen_7_1800X_Eight_Core =
+{
+ /*.pszName = */ "AMD Ryzen 7 1800X Eight-Core",
+ /*.pszFullName = */ "AMD Ryzen 7 1800X Eight-Core Processor",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 23,
+ /*.uModel = */ 1,
+ /*.uStepping = */ 1,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_Zen_Ryzen,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.fMxCsrMask = */ 0x0002ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Ryzen_7_1800X_Eight_Core),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Ryzen_7_1800X_Eight_Core)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Ryzen_7_1800X_Eight_Core)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Ryzen_7_1800X_Eight_Core),
+};
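+/* Orientation notes on the fields above: uFamily=23 is 0x17, the AMD Zen
+ * family, and together with uModel=1/uStepping=1 it matches the Ryzen 7 1800X
+ * CPUID signature; cMaxPhysAddrWidth=48 mirrors the 48-bit physical address
+ * width Zen reports in CPUID leaf 0x80000008.  fMsrMask=UINT32_MAX appears
+ * to mean the guest MSR index is used unmasked when searching the range
+ * table above (an inference from CPUM's lookup code, not a guarantee). */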
+
+#endif /* !VBOX_CPUDB_AMD_Ryzen_7_1800X_Eight_Core_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Hygon_C86_7185_32_core.h b/src/VBox/VMM/VMMR3/cpus/Hygon_C86_7185_32_core.h
new file mode 100644
index 00000000..86bd0b30
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Hygon_C86_7185_32_core.h
@@ -0,0 +1,5234 @@
+/* $Id: Hygon_C86_7185_32_core.h $ */
+/** @file
+ * CPU database entry "Hygon C86 7185 32-core".
+ * Generated at 2019-09-25T11:07:33Z by VBoxCpuReport v6.1.0_BETA1r80830 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Hygon_C86_7185_32_core_h
+#define VBOX_CPUDB_Hygon_C86_7185_32_core_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Hygon C86 7185 32-core Processor.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Hygon_C86_7185_32_core[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x6f677948, 0x656e6975, 0x6e65476e, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00900f01, 0x3b400800, 0x7cd83209, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000011, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x009c01a9, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x0000000f, 0x00000340, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001f, 0x6f677948, 0x656e6975, 0x6e65476e, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00900f01, 0x40000000, 0x35c233ff, 0x2fd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x6f677948, 0x3843206e, 0x31372036, 0x33203538, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x6f632d32, 0x50206572, 0x65636f72, 0x726f7373, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x00202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff40ff40, 0xff40ff40, 0x20080140, 0x40040140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x36006400, 0x56006400, 0x02006140, 0x0200c140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x0000001b, 0x00000000, 0x00006599, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00001007, 0x0000603f, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00008000, 0x00000000, 0x0001bcff, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf040f040, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x000003ff, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001d, 0x00000000, UINT32_MAX, 0x00004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x8000001d, 0x00000001, UINT32_MAX, 0x00004122, 0x00c0003f, 0x000000ff, 0x00000000, 0 },
+ { 0x8000001d, 0x00000002, UINT32_MAX, 0x00004143, 0x01c0003f, 0x000003ff, 0x00000002, 0 },
+ { 0x8000001d, 0x00000003, UINT32_MAX, 0x0001c163, 0x03c0003f, 0x00001fff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001e, 0x00000000, 0x00000000, 0x0000003b, 0x0000011d, 0x00000303, 0x00000000, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x8000001f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
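+/* How to read the rows above, assuming the CPUMCPUIDLEAF field order evident
+ * from the table (uLeaf, uSubLeaf, fSubLeafMask, uEax, uEbx, uEcx, uEdx,
+ * fFlags; UINT32_MAX in the third column marks leaves whose ECX sub-leaf
+ * selects distinct data): leaf 0x00000000 returns EAX=0x0000000d (highest
+ * standard leaf) and the vendor string "HygonGenuine" in EBX/EDX/ECX
+ * (0x6f677948="Hygo", 0x6e65476e="nGen", 0x656e6975="uine", little-endian).
+ * Leaf 0x00000001 EAX=0x00900f01 decodes to family 0xf+0x9=0x18 (24),
+ * model 0, stepping 1, i.e. the Zen-derived Hygon Dhyana core. */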
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Hygon C86 7185 32-core Processor.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Hygon_C86_7185_32_core[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* Villain? value=0x1284`714a5328 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x00000049, "MSR_LASTBRANCH_9", WriteOnly, IgnoreWrite),
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x900006 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x11c5`3efacda4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2e`1e9ecd0c */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x9e578a50 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x117, 0, 0), /* value=0x117 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, 0x10, 0), /* value=0xffffffff`ffffffef */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x7c000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fc000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x0000041b, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVX(0x00000da0, "TODO_0000_0da0", 0, 0, UINT64_MAX),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff0200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* Might bite. value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* Might bite. value=0xffffffff`9e574c70 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* Might bite. value=0xffffffff`9e578de0 */
+ /** @todo r=bird: This seems wrong, all other CPUs \#GP if any of the high
+  * bits are set. */
+ MFN(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask), /* Might bite. value=0x43700 */
+ MVO(0xc00000e7, "TODO_c000_00e7", UINT64_C(0x11c7b9402a58)),
+ MVO(0xc00000e8, "TODO_c000_00e8", UINT64_C(0x2f70233a93)),
+ MVO(0xc00000e9, "TODO_c000_00e9", 0),
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* Might bite. value=0x7f4f`f293d700 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* Might bite. value=0xffff8a48`dd400000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* Might bite. value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0000104, "AMD_15H_TSC_RATE", AmdFam15hTscRate, AmdFam15hTscRate, 0, 0, UINT64_C(0xffffff0000000000)), /* value=0x1`00000000 */
+ RVI(0xc0000408, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ MVX(0xc0000410, "TODO_c000_0410", 0x100102a, UINT64_C(0xfffffffffe000f01), 0),
+ MVX(0xc0002000, "TODO_c000_2000", 0x1fffff, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0xc0002001, "TODO_c000_2001", 0, 0, UINT64_MAX),
+ MVX(0xc0002002, "TODO_c000_2002", 0, 0, 0),
+ MVX(0xc0002003, "TODO_c000_2003", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002004, "TODO_c000_2004", UINT64_C(0x2300000035), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002005, "TODO_c000_2005", UINT64_C(0xb000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002006, "TODO_c000_2006", 0, UINT64_C(0xffffff8000000000), 0),
+ MVI(0xc0002007, "TODO_c000_2007", 0),
+ MVX(0xc0002008, "TODO_c000_2008", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002009, "TODO_c000_2009", 0, 0, 0),
+ MVI(0xc000200a, "TODO_c000_200a", 0),
+ MVI(0xc000200b, "TODO_c000_200b", 0),
+ MVI(0xc000200c, "TODO_c000_200c", 0),
+ MVI(0xc000200d, "TODO_c000_200d", 0),
+ MVI(0xc000200e, "TODO_c000_200e", 0),
+ MVI(0xc000200f, "TODO_c000_200f", 0),
+ MVX(0xc0002010, "TODO_c000_2010", 0x3fff, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0xc0002011, "TODO_c000_2011", 0, 0, UINT64_MAX),
+ MVX(0xc0002012, "TODO_c000_2012", 0, 0, 0),
+ MVX(0xc0002013, "TODO_c000_2013", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002014, "TODO_c000_2014", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002015, "TODO_c000_2015", UINT64_C(0x100b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002016, "TODO_c000_2016", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002017, "TODO_c000_2017", 0),
+ MVI(0xc0002018, "TODO_c000_2018", 0),
+ MVI(0xc0002019, "TODO_c000_2019", 0),
+ MVI(0xc000201a, "TODO_c000_201a", 0),
+ MVI(0xc000201b, "TODO_c000_201b", 0),
+ MVI(0xc000201c, "TODO_c000_201c", 0),
+ MVI(0xc000201d, "TODO_c000_201d", 0),
+ MVI(0xc000201e, "TODO_c000_201e", 0),
+ MVI(0xc000201f, "TODO_c000_201f", 0),
+ MVX(0xc0002020, "TODO_c000_2020", 0xf, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc0002021, "TODO_c000_2021", 0, 0, UINT64_MAX),
+ MVX(0xc0002022, "TODO_c000_2022", 0, 0, 0),
+ MVX(0xc0002023, "TODO_c000_2023", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002024, "TODO_c000_2024", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002025, "TODO_c000_2025", UINT64_C(0x200b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002026, "TODO_c000_2026", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0002027, "TODO_c000_2027", 0),
+ MVX(0xc0002028, "TODO_c000_2028", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002029, "TODO_c000_2029", 0, 0, 0),
+ MVI(0xc000202a, "TODO_c000_202a", 0),
+ MVI(0xc000202b, "TODO_c000_202b", 0),
+ MVI(0xc000202c, "TODO_c000_202c", 0),
+ MVI(0xc000202d, "TODO_c000_202d", 0),
+ MVI(0xc000202e, "TODO_c000_202e", 0),
+ MVI(0xc000202f, "TODO_c000_202f", 0),
+ MVX(0xc0002030, "TODO_c000_2030", 0x1ff, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0002031, "TODO_c000_2031", 0, 0, UINT64_MAX),
+ MVX(0xc0002032, "TODO_c000_2032", 0, 0, 0),
+ MVX(0xc0002033, "TODO_c000_2033", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002034, "TODO_c000_2034", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002035, "TODO_c000_2035", UINT64_C(0x300b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002036, "TODO_c000_2036", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002037, "TODO_c000_2037", 0),
+ MVI(0xc0002038, "TODO_c000_2038", 0),
+ MVI(0xc0002039, "TODO_c000_2039", 0),
+ MVI(0xc000203a, "TODO_c000_203a", 0),
+ MVI(0xc000203b, "TODO_c000_203b", 0),
+ MVI(0xc000203c, "TODO_c000_203c", 0),
+ MVI(0xc000203d, "TODO_c000_203d", 0),
+ MVI(0xc000203e, "TODO_c000_203e", 0),
+ MVI(0xc000203f, "TODO_c000_203f", 0),
+ MVI(0xc0002040, "TODO_c000_2040", 0),
+ MVX(0xc0002041, "TODO_c000_2041", 0, 0, UINT64_MAX),
+ MVI(0xc0002042, "TODO_c000_2042", 0),
+ MVI(0xc0002043, "TODO_c000_2043", 0),
+ MVI(0xc0002044, "TODO_c000_2044", 0),
+ MVI(0xc0002045, "TODO_c000_2045", 0),
+ MVI(0xc0002046, "TODO_c000_2046", 0),
+ MVI(0xc0002047, "TODO_c000_2047", 0),
+ MVI(0xc0002048, "TODO_c000_2048", 0),
+ MVI(0xc0002049, "TODO_c000_2049", 0),
+ MVI(0xc000204a, "TODO_c000_204a", 0),
+ MVI(0xc000204b, "TODO_c000_204b", 0),
+ MVI(0xc000204c, "TODO_c000_204c", 0),
+ MVI(0xc000204d, "TODO_c000_204d", 0),
+ MVI(0xc000204e, "TODO_c000_204e", 0),
+ MVI(0xc000204f, "TODO_c000_204f", 0),
+ MVX(0xc0002050, "TODO_c000_2050", 0x7ff, UINT64_C(0xfffffffffffff800), 0),
+ MVX(0xc0002051, "TODO_c000_2051", 0, 0, UINT64_MAX),
+ MVX(0xc0002052, "TODO_c000_2052", 0, 0, 0),
+ MVX(0xc0002053, "TODO_c000_2053", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002054, "TODO_c000_2054", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002055, "TODO_c000_2055", UINT64_C(0x500b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002056, "TODO_c000_2056", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002057, "TODO_c000_2057", 0),
+ MVI(0xc0002058, "TODO_c000_2058", 0),
+ MVI(0xc0002059, "TODO_c000_2059", 0),
+ MVI(0xc000205a, "TODO_c000_205a", 0),
+ MVI(0xc000205b, "TODO_c000_205b", 0),
+ MVI(0xc000205c, "TODO_c000_205c", 0),
+ MVI(0xc000205d, "TODO_c000_205d", 0),
+ MVI(0xc000205e, "TODO_c000_205e", 0),
+ MVI(0xc000205f, "TODO_c000_205f", 0),
+ MVX(0xc0002060, "TODO_c000_2060", 0x7f, UINT64_C(0xffffffffffffff80), 0),
+ MVX(0xc0002061, "TODO_c000_2061", 0, 0, UINT64_MAX),
+ MVI(0xc0002062, "TODO_c000_2062", 0),
+ MVX(0xc0002063, "TODO_c000_2063", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002064, "TODO_c000_2064", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002065, "TODO_c000_2065", UINT64_C(0x600b000000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002066, "TODO_c000_2066", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002067, "TODO_c000_2067", 0),
+ MVI(0xc0002068, "TODO_c000_2068", 0),
+ MVI(0xc0002069, "TODO_c000_2069", 0),
+ MVI(0xc000206a, "TODO_c000_206a", 0),
+ MVI(0xc000206b, "TODO_c000_206b", 0),
+ MVI(0xc000206c, "TODO_c000_206c", 0),
+ MVI(0xc000206d, "TODO_c000_206d", 0),
+ MVI(0xc000206e, "TODO_c000_206e", 0),
+ MVI(0xc000206f, "TODO_c000_206f", 0),
+ MVX(0xc0002070, "TODO_c000_2070", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc0002071, "TODO_c000_2071", 0, 0, UINT64_MAX),
+ MVX(0xc0002072, "TODO_c000_2072", 0, 0, 0),
+ MVX(0xc0002073, "TODO_c000_2073", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002074, "TODO_c000_2074", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002075, "TODO_c000_2075", UINT64_C(0x700b018090100), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002076, "TODO_c000_2076", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0002077, "TODO_c000_2077", 0),
+ MVX(0xc0002078, "TODO_c000_2078", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002079, "TODO_c000_2079", 0, 0, 0),
+ MVI(0xc000207a, "TODO_c000_207a", 0),
+ MVI(0xc000207b, "TODO_c000_207b", 0),
+ MVI(0xc000207c, "TODO_c000_207c", 0),
+ MVI(0xc000207d, "TODO_c000_207d", 0),
+ MVI(0xc000207e, "TODO_c000_207e", 0),
+ MVI(0xc000207f, "TODO_c000_207f", 0),
+ MVX(0xc0002080, "TODO_c000_2080", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc0002081, "TODO_c000_2081", 0, 0, UINT64_MAX),
+ MVX(0xc0002082, "TODO_c000_2082", 0, 0, 0),
+ MVX(0xc0002083, "TODO_c000_2083", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002084, "TODO_c000_2084", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002085, "TODO_c000_2085", UINT64_C(0x700b018090200), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002086, "TODO_c000_2086", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0002087, "TODO_c000_2087", 0),
+ MVX(0xc0002088, "TODO_c000_2088", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002089, "TODO_c000_2089", 0, 0, 0),
+ MVI(0xc000208a, "TODO_c000_208a", 0),
+ MVI(0xc000208b, "TODO_c000_208b", 0),
+ MVI(0xc000208c, "TODO_c000_208c", 0),
+ MVI(0xc000208d, "TODO_c000_208d", 0),
+ MVI(0xc000208e, "TODO_c000_208e", 0),
+ MVI(0xc000208f, "TODO_c000_208f", 0),
+ MVX(0xc0002090, "TODO_c000_2090", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc0002091, "TODO_c000_2091", 0, 0, UINT64_MAX),
+ MVX(0xc0002092, "TODO_c000_2092", 0, 0, 0),
+ MVX(0xc0002093, "TODO_c000_2093", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002094, "TODO_c000_2094", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002095, "TODO_c000_2095", UINT64_C(0x700b018090300), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002096, "TODO_c000_2096", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0002097, "TODO_c000_2097", 0),
+ MVX(0xc0002098, "TODO_c000_2098", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002099, "TODO_c000_2099", 0, 0, 0),
+ MVI(0xc000209a, "TODO_c000_209a", 0),
+ MVI(0xc000209b, "TODO_c000_209b", 0),
+ MVI(0xc000209c, "TODO_c000_209c", 0),
+ MVI(0xc000209d, "TODO_c000_209d", 0),
+ MVI(0xc000209e, "TODO_c000_209e", 0),
+ MVI(0xc000209f, "TODO_c000_209f", 0),
+ MVX(0xc00020a0, "TODO_c000_20a0", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc00020a1, "TODO_c000_20a1", 0, 0, UINT64_MAX),
+ MVX(0xc00020a2, "TODO_c000_20a2", 0, 0, 0),
+ MVX(0xc00020a3, "TODO_c000_20a3", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020a4, "TODO_c000_20a4", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020a5, "TODO_c000_20a5", UINT64_C(0x700b018090400), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020a6, "TODO_c000_20a6", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc00020a7, "TODO_c000_20a7", 0),
+ MVX(0xc00020a8, "TODO_c000_20a8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020a9, "TODO_c000_20a9", 0, 0, 0),
+ MVI(0xc00020aa, "TODO_c000_20aa", 0),
+ MVI(0xc00020ab, "TODO_c000_20ab", 0),
+ MVI(0xc00020ac, "TODO_c000_20ac", 0),
+ MVI(0xc00020ad, "TODO_c000_20ad", 0),
+ MVI(0xc00020ae, "TODO_c000_20ae", 0),
+ MVI(0xc00020af, "TODO_c000_20af", 0),
+ MVX(0xc00020b0, "TODO_c000_20b0", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc00020b1, "TODO_c000_20b1", 0, 0, UINT64_MAX),
+ MVX(0xc00020b2, "TODO_c000_20b2", 0, 0, 0),
+ MVX(0xc00020b3, "TODO_c000_20b3", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020b4, "TODO_c000_20b4", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020b5, "TODO_c000_20b5", UINT64_C(0x700b018490100), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020b6, "TODO_c000_20b6", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc00020b7, "TODO_c000_20b7", 0),
+ MVX(0xc00020b8, "TODO_c000_20b8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020b9, "TODO_c000_20b9", 0, 0, 0),
+ MVI(0xc00020ba, "TODO_c000_20ba", 0),
+ MVI(0xc00020bb, "TODO_c000_20bb", 0),
+ MVI(0xc00020bc, "TODO_c000_20bc", 0),
+ MVI(0xc00020bd, "TODO_c000_20bd", 0),
+ MVI(0xc00020be, "TODO_c000_20be", 0),
+ MVI(0xc00020bf, "TODO_c000_20bf", 0),
+ MVX(0xc00020c0, "TODO_c000_20c0", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc00020c1, "TODO_c000_20c1", 0, 0, UINT64_MAX),
+ MVX(0xc00020c2, "TODO_c000_20c2", 0, 0, 0),
+ MVX(0xc00020c3, "TODO_c000_20c3", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020c4, "TODO_c000_20c4", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020c5, "TODO_c000_20c5", UINT64_C(0x700b018490200), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020c6, "TODO_c000_20c6", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc00020c7, "TODO_c000_20c7", 0),
+ MVX(0xc00020c8, "TODO_c000_20c8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020c9, "TODO_c000_20c9", 0, 0, 0),
+ MVI(0xc00020ca, "TODO_c000_20ca", 0),
+ MVI(0xc00020cb, "TODO_c000_20cb", 0),
+ MVI(0xc00020cc, "TODO_c000_20cc", 0),
+ MVI(0xc00020cd, "TODO_c000_20cd", 0),
+ MVI(0xc00020ce, "TODO_c000_20ce", 0),
+ MVI(0xc00020cf, "TODO_c000_20cf", 0),
+ MVX(0xc00020d0, "TODO_c000_20d0", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc00020d1, "TODO_c000_20d1", 0, 0, UINT64_MAX),
+ MVX(0xc00020d2, "TODO_c000_20d2", 0, 0, 0),
+ MVX(0xc00020d3, "TODO_c000_20d3", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020d4, "TODO_c000_20d4", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020d5, "TODO_c000_20d5", UINT64_C(0x700b018490300), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020d6, "TODO_c000_20d6", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc00020d7, "TODO_c000_20d7", 0),
+ MVX(0xc00020d8, "TODO_c000_20d8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020d9, "TODO_c000_20d9", 0, 0, 0),
+ MVI(0xc00020da, "TODO_c000_20da", 0),
+ MVI(0xc00020db, "TODO_c000_20db", 0),
+ MVI(0xc00020dc, "TODO_c000_20dc", 0),
+ MVI(0xc00020dd, "TODO_c000_20dd", 0),
+ MVI(0xc00020de, "TODO_c000_20de", 0),
+ MVI(0xc00020df, "TODO_c000_20df", 0),
+ MVX(0xc00020e0, "TODO_c000_20e0", 0xff, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc00020e1, "TODO_c000_20e1", 0, 0, UINT64_MAX),
+ MVX(0xc00020e2, "TODO_c000_20e2", 0, 0, 0),
+ MVX(0xc00020e3, "TODO_c000_20e3", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020e4, "TODO_c000_20e4", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020e5, "TODO_c000_20e5", UINT64_C(0x700b018490400), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020e6, "TODO_c000_20e6", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc00020e7, "TODO_c000_20e7", 0),
+ MVX(0xc00020e8, "TODO_c000_20e8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020e9, "TODO_c000_20e9", 0, 0, 0),
+ MVI(0xc00020ea, "TODO_c000_20ea", 0),
+ MVI(0xc00020eb, "TODO_c000_20eb", 0),
+ MVI(0xc00020ec, "TODO_c000_20ec", 0),
+ MVI(0xc00020ed, "TODO_c000_20ed", 0),
+ MVI(0xc00020ee, "TODO_c000_20ee", 0),
+ MVI(0xc00020ef, "TODO_c000_20ef", 0),
+ MVX(0xc00020f0, "TODO_c000_20f0", 0x3f, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0xc00020f1, "TODO_c000_20f1", 0, 0, UINT64_MAX),
+ MVX(0xc00020f2, "TODO_c000_20f2", 0, 0, 0),
+ MVX(0xc00020f3, "TODO_c000_20f3", UINT64_C(0xd01a000001000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc00020f4, "TODO_c000_20f4", UINT64_C(0x2300000035), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc00020f5, "TODO_c000_20f5", UINT64_C(0x9600050f00), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc00020f6, "TODO_c000_20f6", 0, UINT64_C(0xffff000000000000), 0),
+ MVI(0xc00020f7, "TODO_c000_20f7", 0),
+ MVX(0xc00020f8, "TODO_c000_20f8", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc00020f9, "TODO_c000_20f9", 0, 0, 0),
+ MVX(0xc00020fa, "TODO_c000_20fa", UINT64_C(0xd00a000000000000), UINT64_C(0xff0f00000ffffff), 0),
+ MVI(0xc00020fb, "TODO_c000_20fb", 0),
+ MVI(0xc00020fc, "TODO_c000_20fc", 0),
+ MVI(0xc00020fd, "TODO_c000_20fd", 0),
+ MVI(0xc00020fe, "TODO_c000_20fe", 0),
+ MVI(0xc00020ff, "TODO_c000_20ff", 0),
+ MVX(0xc0002100, "TODO_c000_2100", 0x3f, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0xc0002101, "TODO_c000_2101", 0, 0, UINT64_MAX),
+ MVX(0xc0002102, "TODO_c000_2102", 0, 0, 0),
+ MVX(0xc0002103, "TODO_c000_2103", UINT64_C(0xd01a000001000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002104, "TODO_c000_2104", UINT64_C(0x2300000035), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002105, "TODO_c000_2105", UINT64_C(0x9600150f00), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002106, "TODO_c000_2106", 0, UINT64_C(0xffff000000000000), 0),
+ MVI(0xc0002107, "TODO_c000_2107", 0),
+ MVX(0xc0002108, "TODO_c000_2108", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002109, "TODO_c000_2109", 0, 0, 0),
+ MVX(0xc000210a, "TODO_c000_210a", UINT64_C(0xd00a000000000000), UINT64_C(0xff0f00000ffffff), 0),
+ MVI(0xc000210b, "TODO_c000_210b", 0),
+ MVI(0xc000210c, "TODO_c000_210c", 0),
+ MVI(0xc000210d, "TODO_c000_210d", 0),
+ MVI(0xc000210e, "TODO_c000_210e", 0),
+ MVI(0xc000210f, "TODO_c000_210f", 0),
+ MVX(0xc0002110, "TODO_c000_2110", 0x1, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0002111, "TODO_c000_2111", 0, 0, UINT64_MAX),
+ MVI(0xc0002112, "TODO_c000_2112", 0),
+ MVX(0xc0002113, "TODO_c000_2113", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002114, "TODO_c000_2114", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002115, "TODO_c000_2115", UINT64_C(0x103b30400), ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0002116, "TODO_c000_2116", 0),
+ MVI(0xc0002117, "TODO_c000_2117", 0),
+ MVI(0xc0002118, "TODO_c000_2118", 0),
+ MVI(0xc0002119, "TODO_c000_2119", 0),
+ MVI(0xc000211a, "TODO_c000_211a", 0),
+ MVI(0xc000211b, "TODO_c000_211b", 0),
+ MVI(0xc000211c, "TODO_c000_211c", 0),
+ MVI(0xc000211d, "TODO_c000_211d", 0),
+ MVI(0xc000211e, "TODO_c000_211e", 0),
+ MVI(0xc000211f, "TODO_c000_211f", 0),
+ MVX(0xc0002120, "TODO_c000_2120", 0x1, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0002121, "TODO_c000_2121", 0, 0, UINT64_MAX),
+ MVI(0xc0002122, "TODO_c000_2122", 0),
+ MVX(0xc0002123, "TODO_c000_2123", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002124, "TODO_c000_2124", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002125, "TODO_c000_2125", UINT64_C(0xff03830400), ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0002126, "TODO_c000_2126", 0),
+ MVI(0xc0002127, "TODO_c000_2127", 0),
+ MVI(0xc0002128, "TODO_c000_2128", 0),
+ MVI(0xc0002129, "TODO_c000_2129", 0),
+ MVI(0xc000212a, "TODO_c000_212a", 0),
+ MVI(0xc000212b, "TODO_c000_212b", 0),
+ MVI(0xc000212c, "TODO_c000_212c", 0),
+ MVI(0xc000212d, "TODO_c000_212d", 0),
+ MVI(0xc000212e, "TODO_c000_212e", 0),
+ MVI(0xc000212f, "TODO_c000_212f", 0),
+ MVX(0xc0002130, "TODO_c000_2130", 0x1, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0002131, "TODO_c000_2131", 0, 0, UINT64_MAX),
+ MVI(0xc0002132, "TODO_c000_2132", 0),
+ MVX(0xc0002133, "TODO_c000_2133", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002134, "TODO_c000_2134", UINT64_C(0x2300000031), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002135, "TODO_c000_2135", UINT64_C(0x500000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002136, "TODO_c000_2136", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002137, "TODO_c000_2137", 0),
+ MVI(0xc0002138, "TODO_c000_2138", 0),
+ MVI(0xc0002139, "TODO_c000_2139", 0),
+ MVI(0xc000213a, "TODO_c000_213a", 0),
+ MVI(0xc000213b, "TODO_c000_213b", 0),
+ MVI(0xc000213c, "TODO_c000_213c", 0),
+ MVI(0xc000213d, "TODO_c000_213d", 0),
+ MVI(0xc000213e, "TODO_c000_213e", 0),
+ MVI(0xc000213f, "TODO_c000_213f", 0),
+ MVX(0xc0002140, "TODO_c000_2140", 0x1ff, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0002141, "TODO_c000_2141", 0, 0, UINT64_MAX),
+ MVX(0xc0002142, "TODO_c000_2142", 0, 0, 0),
+ MVX(0xc0002143, "TODO_c000_2143", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002144, "TODO_c000_2144", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002145, "TODO_c000_2145", UINT64_C(0x2e00000000), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002146, "TODO_c000_2146", 0, UINT64_C(0xfffffe0000000000), 0),
+ MVI(0xc0002147, "TODO_c000_2147", 0),
+ MVX(0xc0002148, "TODO_c000_2148", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002149, "TODO_c000_2149", 0, 0, 0),
+ MVI(0xc000214a, "TODO_c000_214a", 0),
+ MVI(0xc000214b, "TODO_c000_214b", 0),
+ MVI(0xc000214c, "TODO_c000_214c", 0),
+ MVI(0xc000214d, "TODO_c000_214d", 0),
+ MVI(0xc000214e, "TODO_c000_214e", 0),
+ MVI(0xc000214f, "TODO_c000_214f", 0),
+ MVX(0xc0002150, "TODO_c000_2150", 0x1ff, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0002151, "TODO_c000_2151", 0, 0, UINT64_MAX),
+ MVX(0xc0002152, "TODO_c000_2152", 0, 0, 0),
+ MVX(0xc0002153, "TODO_c000_2153", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002154, "TODO_c000_2154", UINT64_C(0x2100000037), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002155, "TODO_c000_2155", UINT64_C(0x2e00000001), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002156, "TODO_c000_2156", 0, UINT64_C(0xfffffe0000000000), 0),
+ MVI(0xc0002157, "TODO_c000_2157", 0),
+ MVX(0xc0002158, "TODO_c000_2158", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVX(0xc0002159, "TODO_c000_2159", 0, 0, 0),
+ MVI(0xc000215a, "TODO_c000_215a", 0),
+ MVI(0xc000215b, "TODO_c000_215b", 0),
+ MVI(0xc000215c, "TODO_c000_215c", 0),
+ MVI(0xc000215d, "TODO_c000_215d", 0),
+ MVI(0xc000215e, "TODO_c000_215e", 0),
+ MVI(0xc000215f, "TODO_c000_215f", 0),
+ MVX(0xc0002160, "TODO_c000_2160", 0xf, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc0002161, "TODO_c000_2161", 0, 0, UINT64_MAX),
+ MVI(0xc0002162, "TODO_c000_2162", 0),
+ MVX(0xc0002163, "TODO_c000_2163", UINT64_C(0xd01a000000000000), UINT64_C(0xef00f00000ffffff), 0),
+ MVX(0xc0002164, "TODO_c000_2164", UINT64_C(0x2300000035), UINT64_C(0xffffff80ffffffff), 0),
+ MVX(0xc0002165, "TODO_c000_2165", UINT64_C(0x1002e00000002), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0002166, "TODO_c000_2166", 0, UINT64_C(0xfffffffe00000000), 0),
+ MVI(0xc0002167, "TODO_c000_2167", 0),
+ MVX(0xc0002168, "TODO_c000_2168", 0, UINT64_C(0x3bdfefffffffffff), 0),
+ MVI(0xc0002169, "TODO_c000_2169", 0),
+ MVI(0xc000216a, "TODO_c000_216a", 0),
+ MVI(0xc000216b, "TODO_c000_216b", 0),
+ MVI(0xc000216c, "TODO_c000_216c", 0),
+ MVI(0xc000216d, "TODO_c000_216d", 0),
+ MVI(0xc000216e, "TODO_c000_216e", 0),
+ MVI(0xc000216f, "TODO_c000_216f", 0),
+ MVI(0xc0002170, "TODO_c000_2170", 0),
+ MVI(0xc0002171, "TODO_c000_2171", 0),
+ MVI(0xc0002172, "TODO_c000_2172", 0),
+ MVI(0xc0002173, "TODO_c000_2173", 0),
+ MVI(0xc0002174, "TODO_c000_2174", 0),
+ MVI(0xc0002175, "TODO_c000_2175", 0),
+ MVI(0xc0002176, "TODO_c000_2176", 0),
+ MVI(0xc0002177, "TODO_c000_2177", 0),
+ MVI(0xc0002178, "TODO_c000_2178", 0),
+ MVI(0xc0002179, "TODO_c000_2179", 0),
+ MVI(0xc000217a, "TODO_c000_217a", 0),
+ MVI(0xc000217b, "TODO_c000_217b", 0),
+ MVI(0xc000217c, "TODO_c000_217c", 0),
+ MVI(0xc000217d, "TODO_c000_217d", 0),
+ MVI(0xc000217e, "TODO_c000_217e", 0),
+ MVI(0xc000217f, "TODO_c000_217f", 0),
+ MVI(0xc0002180, "TODO_c000_2180", 0),
+ MVI(0xc0002181, "TODO_c000_2181", 0),
+ MVI(0xc0002182, "TODO_c000_2182", 0),
+ MVI(0xc0002183, "TODO_c000_2183", 0),
+ MVI(0xc0002184, "TODO_c000_2184", 0),
+ MVI(0xc0002185, "TODO_c000_2185", 0),
+ MVI(0xc0002186, "TODO_c000_2186", 0),
+ MVI(0xc0002187, "TODO_c000_2187", 0),
+ MVI(0xc0002188, "TODO_c000_2188", 0),
+ MVI(0xc0002189, "TODO_c000_2189", 0),
+ MVI(0xc000218a, "TODO_c000_218a", 0),
+ MVI(0xc000218b, "TODO_c000_218b", 0),
+ MVI(0xc000218c, "TODO_c000_218c", 0),
+ MVI(0xc000218d, "TODO_c000_218d", 0),
+ MVI(0xc000218e, "TODO_c000_218e", 0),
+ MVI(0xc000218f, "TODO_c000_218f", 0),
+ MVI(0xc0002190, "TODO_c000_2190", 0),
+ MVI(0xc0002191, "TODO_c000_2191", 0),
+ MVI(0xc0002192, "TODO_c000_2192", 0),
+ MVI(0xc0002193, "TODO_c000_2193", 0),
+ MVI(0xc0002194, "TODO_c000_2194", 0),
+ MVI(0xc0002195, "TODO_c000_2195", 0),
+ MVI(0xc0002196, "TODO_c000_2196", 0),
+ MVI(0xc0002197, "TODO_c000_2197", 0),
+ MVI(0xc0002198, "TODO_c000_2198", 0),
+ MVI(0xc0002199, "TODO_c000_2199", 0),
+ MVI(0xc000219a, "TODO_c000_219a", 0),
+ MVI(0xc000219b, "TODO_c000_219b", 0),
+ MVI(0xc000219c, "TODO_c000_219c", 0),
+ MVI(0xc000219d, "TODO_c000_219d", 0),
+ MVI(0xc000219e, "TODO_c000_219e", 0),
+ MVI(0xc000219f, "TODO_c000_219f", 0),
+ MVI(0xc00021a0, "TODO_c000_21a0", 0),
+ MVI(0xc00021a1, "TODO_c000_21a1", 0),
+ MVI(0xc00021a2, "TODO_c000_21a2", 0),
+ MVI(0xc00021a3, "TODO_c000_21a3", 0),
+ MVI(0xc00021a4, "TODO_c000_21a4", 0),
+ MVI(0xc00021a5, "TODO_c000_21a5", 0),
+ MVI(0xc00021a6, "TODO_c000_21a6", 0),
+ MVI(0xc00021a7, "TODO_c000_21a7", 0),
+ MVI(0xc00021a8, "TODO_c000_21a8", 0),
+ MVI(0xc00021a9, "TODO_c000_21a9", 0),
+ MVI(0xc00021aa, "TODO_c000_21aa", 0),
+ MVI(0xc00021ab, "TODO_c000_21ab", 0),
+ MVI(0xc00021ac, "TODO_c000_21ac", 0),
+ MVI(0xc00021ad, "TODO_c000_21ad", 0),
+ MVI(0xc00021ae, "TODO_c000_21ae", 0),
+ MVI(0xc00021af, "TODO_c000_21af", 0),
+ MVI(0xc00021b0, "TODO_c000_21b0", 0),
+ MVI(0xc00021b1, "TODO_c000_21b1", 0),
+ MVI(0xc00021b2, "TODO_c000_21b2", 0),
+ MVI(0xc00021b3, "TODO_c000_21b3", 0),
+ MVI(0xc00021b4, "TODO_c000_21b4", 0),
+ MVI(0xc00021b5, "TODO_c000_21b5", 0),
+ MVI(0xc00021b6, "TODO_c000_21b6", 0),
+ MVI(0xc00021b7, "TODO_c000_21b7", 0),
+ MVI(0xc00021b8, "TODO_c000_21b8", 0),
+ MVI(0xc00021b9, "TODO_c000_21b9", 0),
+ MVI(0xc00021ba, "TODO_c000_21ba", 0),
+ MVI(0xc00021bb, "TODO_c000_21bb", 0),
+ MVI(0xc00021bc, "TODO_c000_21bc", 0),
+ MVI(0xc00021bd, "TODO_c000_21bd", 0),
+ MVI(0xc00021be, "TODO_c000_21be", 0),
+ MVI(0xc00021bf, "TODO_c000_21bf", 0),
+ MVI(0xc00021c0, "TODO_c000_21c0", 0),
+ MVI(0xc00021c1, "TODO_c000_21c1", 0),
+ MVI(0xc00021c2, "TODO_c000_21c2", 0),
+ MVI(0xc00021c3, "TODO_c000_21c3", 0),
+ MVI(0xc00021c4, "TODO_c000_21c4", 0),
+ MVI(0xc00021c5, "TODO_c000_21c5", 0),
+ MVI(0xc00021c6, "TODO_c000_21c6", 0),
+ MVI(0xc00021c7, "TODO_c000_21c7", 0),
+ MVI(0xc00021c8, "TODO_c000_21c8", 0),
+ MVI(0xc00021c9, "TODO_c000_21c9", 0),
+ MVI(0xc00021ca, "TODO_c000_21ca", 0),
+ MVI(0xc00021cb, "TODO_c000_21cb", 0),
+ MVI(0xc00021cc, "TODO_c000_21cc", 0),
+ MVI(0xc00021cd, "TODO_c000_21cd", 0),
+ MVI(0xc00021ce, "TODO_c000_21ce", 0),
+ MVI(0xc00021cf, "TODO_c000_21cf", 0),
+ MVI(0xc00021d0, "TODO_c000_21d0", 0),
+ MVI(0xc00021d1, "TODO_c000_21d1", 0),
+ MVI(0xc00021d2, "TODO_c000_21d2", 0),
+ MVI(0xc00021d3, "TODO_c000_21d3", 0),
+ MVI(0xc00021d4, "TODO_c000_21d4", 0),
+ MVI(0xc00021d5, "TODO_c000_21d5", 0),
+ MVI(0xc00021d6, "TODO_c000_21d6", 0),
+ MVI(0xc00021d7, "TODO_c000_21d7", 0),
+ MVI(0xc00021d8, "TODO_c000_21d8", 0),
+ MVI(0xc00021d9, "TODO_c000_21d9", 0),
+ MVI(0xc00021da, "TODO_c000_21da", 0),
+ MVI(0xc00021db, "TODO_c000_21db", 0),
+ MVI(0xc00021dc, "TODO_c000_21dc", 0),
+ MVI(0xc00021dd, "TODO_c000_21dd", 0),
+ MVI(0xc00021de, "TODO_c000_21de", 0),
+ MVI(0xc00021df, "TODO_c000_21df", 0),
+ MVI(0xc00021e0, "TODO_c000_21e0", 0),
+ MVI(0xc00021e1, "TODO_c000_21e1", 0),
+ MVI(0xc00021e2, "TODO_c000_21e2", 0),
+ MVI(0xc00021e3, "TODO_c000_21e3", 0),
+ MVI(0xc00021e4, "TODO_c000_21e4", 0),
+ MVI(0xc00021e5, "TODO_c000_21e5", 0),
+ MVI(0xc00021e6, "TODO_c000_21e6", 0),
+ MVI(0xc00021e7, "TODO_c000_21e7", 0),
+ MVI(0xc00021e8, "TODO_c000_21e8", 0),
+ MVI(0xc00021e9, "TODO_c000_21e9", 0),
+ MVI(0xc00021ea, "TODO_c000_21ea", 0),
+ MVI(0xc00021eb, "TODO_c000_21eb", 0),
+ MVI(0xc00021ec, "TODO_c000_21ec", 0),
+ MVI(0xc00021ed, "TODO_c000_21ed", 0),
+ MVI(0xc00021ee, "TODO_c000_21ee", 0),
+ MVI(0xc00021ef, "TODO_c000_21ef", 0),
+ MVI(0xc00021f0, "TODO_c000_21f0", 0),
+ MVI(0xc00021f1, "TODO_c000_21f1", 0),
+ MVI(0xc00021f2, "TODO_c000_21f2", 0),
+ MVI(0xc00021f3, "TODO_c000_21f3", 0),
+ MVI(0xc00021f4, "TODO_c000_21f4", 0),
+ MVI(0xc00021f5, "TODO_c000_21f5", 0),
+ MVI(0xc00021f6, "TODO_c000_21f6", 0),
+ MVI(0xc00021f7, "TODO_c000_21f7", 0),
+ MVI(0xc00021f8, "TODO_c000_21f8", 0),
+ MVI(0xc00021f9, "TODO_c000_21f9", 0),
+ MVI(0xc00021fa, "TODO_c000_21fa", 0),
+ MVI(0xc00021fb, "TODO_c000_21fb", 0),
+ MVI(0xc00021fc, "TODO_c000_21fc", 0),
+ MVI(0xc00021fd, "TODO_c000_21fd", 0),
+ MVI(0xc00021fe, "TODO_c000_21fe", 0),
+ MVI(0xc00021ff, "TODO_c000_21ff", 0),
+ MVI(0xc0002200, "TODO_c000_2200", 0),
+ MVI(0xc0002201, "TODO_c000_2201", 0),
+ MVI(0xc0002202, "TODO_c000_2202", 0),
+ MVI(0xc0002203, "TODO_c000_2203", 0),
+ MVI(0xc0002204, "TODO_c000_2204", 0),
+ MVI(0xc0002205, "TODO_c000_2205", 0),
+ MVI(0xc0002206, "TODO_c000_2206", 0),
+ MVI(0xc0002207, "TODO_c000_2207", 0),
+ MVI(0xc0002208, "TODO_c000_2208", 0),
+ MVI(0xc0002209, "TODO_c000_2209", 0),
+ MVI(0xc000220a, "TODO_c000_220a", 0),
+ MVI(0xc000220b, "TODO_c000_220b", 0),
+ MVI(0xc000220c, "TODO_c000_220c", 0),
+ MVI(0xc000220d, "TODO_c000_220d", 0),
+ MVI(0xc000220e, "TODO_c000_220e", 0),
+ MVI(0xc000220f, "TODO_c000_220f", 0),
+ MVI(0xc0002210, "TODO_c000_2210", 0),
+ MVI(0xc0002211, "TODO_c000_2211", 0),
+ MVI(0xc0002212, "TODO_c000_2212", 0),
+ MVI(0xc0002213, "TODO_c000_2213", 0),
+ MVI(0xc0002214, "TODO_c000_2214", 0),
+ MVI(0xc0002215, "TODO_c000_2215", 0),
+ MVI(0xc0002216, "TODO_c000_2216", 0),
+ MVI(0xc0002217, "TODO_c000_2217", 0),
+ MVI(0xc0002218, "TODO_c000_2218", 0),
+ MVI(0xc0002219, "TODO_c000_2219", 0),
+ MVI(0xc000221a, "TODO_c000_221a", 0),
+ MVI(0xc000221b, "TODO_c000_221b", 0),
+ MVI(0xc000221c, "TODO_c000_221c", 0),
+ MVI(0xc000221d, "TODO_c000_221d", 0),
+ MVI(0xc000221e, "TODO_c000_221e", 0),
+ MVI(0xc000221f, "TODO_c000_221f", 0),
+ MVI(0xc0002220, "TODO_c000_2220", 0),
+ MVI(0xc0002221, "TODO_c000_2221", 0),
+ MVI(0xc0002222, "TODO_c000_2222", 0),
+ MVI(0xc0002223, "TODO_c000_2223", 0),
+ MVI(0xc0002224, "TODO_c000_2224", 0),
+ MVI(0xc0002225, "TODO_c000_2225", 0),
+ MVI(0xc0002226, "TODO_c000_2226", 0),
+ MVI(0xc0002227, "TODO_c000_2227", 0),
+ MVI(0xc0002228, "TODO_c000_2228", 0),
+ MVI(0xc0002229, "TODO_c000_2229", 0),
+ MVI(0xc000222a, "TODO_c000_222a", 0),
+ MVI(0xc000222b, "TODO_c000_222b", 0),
+ MVI(0xc000222c, "TODO_c000_222c", 0),
+ MVI(0xc000222d, "TODO_c000_222d", 0),
+ MVI(0xc000222e, "TODO_c000_222e", 0),
+ MVI(0xc000222f, "TODO_c000_222f", 0),
+ MVI(0xc0002230, "TODO_c000_2230", 0),
+ MVI(0xc0002231, "TODO_c000_2231", 0),
+ MVI(0xc0002232, "TODO_c000_2232", 0),
+ MVI(0xc0002233, "TODO_c000_2233", 0),
+ MVI(0xc0002234, "TODO_c000_2234", 0),
+ MVI(0xc0002235, "TODO_c000_2235", 0),
+ MVI(0xc0002236, "TODO_c000_2236", 0),
+ MVI(0xc0002237, "TODO_c000_2237", 0),
+ MVI(0xc0002238, "TODO_c000_2238", 0),
+ MVI(0xc0002239, "TODO_c000_2239", 0),
+ MVI(0xc000223a, "TODO_c000_223a", 0),
+ MVI(0xc000223b, "TODO_c000_223b", 0),
+ MVI(0xc000223c, "TODO_c000_223c", 0),
+ MVI(0xc000223d, "TODO_c000_223d", 0),
+ MVI(0xc000223e, "TODO_c000_223e", 0),
+ MVI(0xc000223f, "TODO_c000_223f", 0),
+ MVI(0xc0002240, "TODO_c000_2240", 0),
+ MVI(0xc0002241, "TODO_c000_2241", 0),
+ MVI(0xc0002242, "TODO_c000_2242", 0),
+ MVI(0xc0002243, "TODO_c000_2243", 0),
+ MVI(0xc0002244, "TODO_c000_2244", 0),
+ MVI(0xc0002245, "TODO_c000_2245", 0),
+ MVI(0xc0002246, "TODO_c000_2246", 0),
+ MVI(0xc0002247, "TODO_c000_2247", 0),
+ MVI(0xc0002248, "TODO_c000_2248", 0),
+ MVI(0xc0002249, "TODO_c000_2249", 0),
+ MVI(0xc000224a, "TODO_c000_224a", 0),
+ MVI(0xc000224b, "TODO_c000_224b", 0),
+ MVI(0xc000224c, "TODO_c000_224c", 0),
+ MVI(0xc000224d, "TODO_c000_224d", 0),
+ MVI(0xc000224e, "TODO_c000_224e", 0),
+ MVI(0xc000224f, "TODO_c000_224f", 0),
+ MVI(0xc0002250, "TODO_c000_2250", 0),
+ MVI(0xc0002251, "TODO_c000_2251", 0),
+ MVI(0xc0002252, "TODO_c000_2252", 0),
+ MVI(0xc0002253, "TODO_c000_2253", 0),
+ MVI(0xc0002254, "TODO_c000_2254", 0),
+ MVI(0xc0002255, "TODO_c000_2255", 0),
+ MVI(0xc0002256, "TODO_c000_2256", 0),
+ MVI(0xc0002257, "TODO_c000_2257", 0),
+ MVI(0xc0002258, "TODO_c000_2258", 0),
+ MVI(0xc0002259, "TODO_c000_2259", 0),
+ MVI(0xc000225a, "TODO_c000_225a", 0),
+ MVI(0xc000225b, "TODO_c000_225b", 0),
+ MVI(0xc000225c, "TODO_c000_225c", 0),
+ MVI(0xc000225d, "TODO_c000_225d", 0),
+ MVI(0xc000225e, "TODO_c000_225e", 0),
+ MVI(0xc000225f, "TODO_c000_225f", 0),
+ MVI(0xc0002260, "TODO_c000_2260", 0),
+ MVI(0xc0002261, "TODO_c000_2261", 0),
+ MVI(0xc0002262, "TODO_c000_2262", 0),
+ MVI(0xc0002263, "TODO_c000_2263", 0),
+ MVI(0xc0002264, "TODO_c000_2264", 0),
+ MVI(0xc0002265, "TODO_c000_2265", 0),
+ MVI(0xc0002266, "TODO_c000_2266", 0),
+ MVI(0xc0002267, "TODO_c000_2267", 0),
+ MVI(0xc0002268, "TODO_c000_2268", 0),
+ MVI(0xc0002269, "TODO_c000_2269", 0),
+ MVI(0xc000226a, "TODO_c000_226a", 0),
+ MVI(0xc000226b, "TODO_c000_226b", 0),
+ MVI(0xc000226c, "TODO_c000_226c", 0),
+ MVI(0xc000226d, "TODO_c000_226d", 0),
+ MVI(0xc000226e, "TODO_c000_226e", 0),
+ MVI(0xc000226f, "TODO_c000_226f", 0),
+ MVI(0xc0002270, "TODO_c000_2270", 0),
+ MVI(0xc0002271, "TODO_c000_2271", 0),
+ MVI(0xc0002272, "TODO_c000_2272", 0),
+ MVI(0xc0002273, "TODO_c000_2273", 0),
+ MVI(0xc0002274, "TODO_c000_2274", 0),
+ MVI(0xc0002275, "TODO_c000_2275", 0),
+ MVI(0xc0002276, "TODO_c000_2276", 0),
+ MVI(0xc0002277, "TODO_c000_2277", 0),
+ MVI(0xc0002278, "TODO_c000_2278", 0),
+ MVI(0xc0002279, "TODO_c000_2279", 0),
+ MVI(0xc000227a, "TODO_c000_227a", 0),
+ MVI(0xc000227b, "TODO_c000_227b", 0),
+ MVI(0xc000227c, "TODO_c000_227c", 0),
+ MVI(0xc000227d, "TODO_c000_227d", 0),
+ MVI(0xc000227e, "TODO_c000_227e", 0),
+ MVI(0xc000227f, "TODO_c000_227f", 0),
+ MVI(0xc0002280, "TODO_c000_2280", 0),
+ MVI(0xc0002281, "TODO_c000_2281", 0),
+ MVI(0xc0002282, "TODO_c000_2282", 0),
+ MVI(0xc0002283, "TODO_c000_2283", 0),
+ MVI(0xc0002284, "TODO_c000_2284", 0),
+ MVI(0xc0002285, "TODO_c000_2285", 0),
+ MVI(0xc0002286, "TODO_c000_2286", 0),
+ MVI(0xc0002287, "TODO_c000_2287", 0),
+ MVI(0xc0002288, "TODO_c000_2288", 0),
+ MVI(0xc0002289, "TODO_c000_2289", 0),
+ MVI(0xc000228a, "TODO_c000_228a", 0),
+ MVI(0xc000228b, "TODO_c000_228b", 0),
+ MVI(0xc000228c, "TODO_c000_228c", 0),
+ MVI(0xc000228d, "TODO_c000_228d", 0),
+ MVI(0xc000228e, "TODO_c000_228e", 0),
+ MVI(0xc000228f, "TODO_c000_228f", 0),
+ MVI(0xc0002290, "TODO_c000_2290", 0),
+ MVI(0xc0002291, "TODO_c000_2291", 0),
+ MVI(0xc0002292, "TODO_c000_2292", 0),
+ MVI(0xc0002293, "TODO_c000_2293", 0),
+ MVI(0xc0002294, "TODO_c000_2294", 0),
+ MVI(0xc0002295, "TODO_c000_2295", 0),
+ MVI(0xc0002296, "TODO_c000_2296", 0),
+ MVI(0xc0002297, "TODO_c000_2297", 0),
+ MVI(0xc0002298, "TODO_c000_2298", 0),
+ MVI(0xc0002299, "TODO_c000_2299", 0),
+ MVI(0xc000229a, "TODO_c000_229a", 0),
+ MVI(0xc000229b, "TODO_c000_229b", 0),
+ MVI(0xc000229c, "TODO_c000_229c", 0),
+ MVI(0xc000229d, "TODO_c000_229d", 0),
+ MVI(0xc000229e, "TODO_c000_229e", 0),
+ MVI(0xc000229f, "TODO_c000_229f", 0),
+ MVI(0xc00022a0, "TODO_c000_22a0", 0),
+ MVI(0xc00022a1, "TODO_c000_22a1", 0),
+ MVI(0xc00022a2, "TODO_c000_22a2", 0),
+ MVI(0xc00022a3, "TODO_c000_22a3", 0),
+ MVI(0xc00022a4, "TODO_c000_22a4", 0),
+ MVI(0xc00022a5, "TODO_c000_22a5", 0),
+ MVI(0xc00022a6, "TODO_c000_22a6", 0),
+ MVI(0xc00022a7, "TODO_c000_22a7", 0),
+ MVI(0xc00022a8, "TODO_c000_22a8", 0),
+ MVI(0xc00022a9, "TODO_c000_22a9", 0),
+ MVI(0xc00022aa, "TODO_c000_22aa", 0),
+ MVI(0xc00022ab, "TODO_c000_22ab", 0),
+ MVI(0xc00022ac, "TODO_c000_22ac", 0),
+ MVI(0xc00022ad, "TODO_c000_22ad", 0),
+ MVI(0xc00022ae, "TODO_c000_22ae", 0),
+ MVI(0xc00022af, "TODO_c000_22af", 0),
+ MVI(0xc00022b0, "TODO_c000_22b0", 0),
+ MVI(0xc00022b1, "TODO_c000_22b1", 0),
+ MVI(0xc00022b2, "TODO_c000_22b2", 0),
+ MVI(0xc00022b3, "TODO_c000_22b3", 0),
+ MVI(0xc00022b4, "TODO_c000_22b4", 0),
+ MVI(0xc00022b5, "TODO_c000_22b5", 0),
+ MVI(0xc00022b6, "TODO_c000_22b6", 0),
+ MVI(0xc00022b7, "TODO_c000_22b7", 0),
+ MVI(0xc00022b8, "TODO_c000_22b8", 0),
+ MVI(0xc00022b9, "TODO_c000_22b9", 0),
+ MVI(0xc00022ba, "TODO_c000_22ba", 0),
+ MVI(0xc00022bb, "TODO_c000_22bb", 0),
+ MVI(0xc00022bc, "TODO_c000_22bc", 0),
+ MVI(0xc00022bd, "TODO_c000_22bd", 0),
+ MVI(0xc00022be, "TODO_c000_22be", 0),
+ MVI(0xc00022bf, "TODO_c000_22bf", 0),
+ MVI(0xc00022c0, "TODO_c000_22c0", 0),
+ MVI(0xc00022c1, "TODO_c000_22c1", 0),
+ MVI(0xc00022c2, "TODO_c000_22c2", 0),
+ MVI(0xc00022c3, "TODO_c000_22c3", 0),
+ MVI(0xc00022c4, "TODO_c000_22c4", 0),
+ MVI(0xc00022c5, "TODO_c000_22c5", 0),
+ MVI(0xc00022c6, "TODO_c000_22c6", 0),
+ MVI(0xc00022c7, "TODO_c000_22c7", 0),
+ MVI(0xc00022c8, "TODO_c000_22c8", 0),
+ MVI(0xc00022c9, "TODO_c000_22c9", 0),
+ MVI(0xc00022ca, "TODO_c000_22ca", 0),
+ MVI(0xc00022cb, "TODO_c000_22cb", 0),
+ MVI(0xc00022cc, "TODO_c000_22cc", 0),
+ MVI(0xc00022cd, "TODO_c000_22cd", 0),
+ MVI(0xc00022ce, "TODO_c000_22ce", 0),
+ MVI(0xc00022cf, "TODO_c000_22cf", 0),
+ MVI(0xc00022d0, "TODO_c000_22d0", 0),
+ MVI(0xc00022d1, "TODO_c000_22d1", 0),
+ MVI(0xc00022d2, "TODO_c000_22d2", 0),
+ MVI(0xc00022d3, "TODO_c000_22d3", 0),
+ MVI(0xc00022d4, "TODO_c000_22d4", 0),
+ MVI(0xc00022d5, "TODO_c000_22d5", 0),
+ MVI(0xc00022d6, "TODO_c000_22d6", 0),
+ MVI(0xc00022d7, "TODO_c000_22d7", 0),
+ MVI(0xc00022d8, "TODO_c000_22d8", 0),
+ MVI(0xc00022d9, "TODO_c000_22d9", 0),
+ MVI(0xc00022da, "TODO_c000_22da", 0),
+ MVI(0xc00022db, "TODO_c000_22db", 0),
+ MVI(0xc00022dc, "TODO_c000_22dc", 0),
+ MVI(0xc00022dd, "TODO_c000_22dd", 0),
+ MVI(0xc00022de, "TODO_c000_22de", 0),
+ MVI(0xc00022df, "TODO_c000_22df", 0),
+ MVI(0xc00022e0, "TODO_c000_22e0", 0),
+ MVI(0xc00022e1, "TODO_c000_22e1", 0),
+ MVI(0xc00022e2, "TODO_c000_22e2", 0),
+ MVI(0xc00022e3, "TODO_c000_22e3", 0),
+ MVI(0xc00022e4, "TODO_c000_22e4", 0),
+ MVI(0xc00022e5, "TODO_c000_22e5", 0),
+ MVI(0xc00022e6, "TODO_c000_22e6", 0),
+ MVI(0xc00022e7, "TODO_c000_22e7", 0),
+ MVI(0xc00022e8, "TODO_c000_22e8", 0),
+ MVI(0xc00022e9, "TODO_c000_22e9", 0),
+ MVI(0xc00022ea, "TODO_c000_22ea", 0),
+ MVI(0xc00022eb, "TODO_c000_22eb", 0),
+ MVI(0xc00022ec, "TODO_c000_22ec", 0),
+ MVI(0xc00022ed, "TODO_c000_22ed", 0),
+ MVI(0xc00022ee, "TODO_c000_22ee", 0),
+ MVI(0xc00022ef, "TODO_c000_22ef", 0),
+ MVI(0xc00022f0, "TODO_c000_22f0", 0),
+ MVI(0xc00022f1, "TODO_c000_22f1", 0),
+ MVI(0xc00022f2, "TODO_c000_22f2", 0),
+ MVI(0xc00022f3, "TODO_c000_22f3", 0),
+ MVI(0xc00022f4, "TODO_c000_22f4", 0),
+ MVI(0xc00022f5, "TODO_c000_22f5", 0),
+ MVI(0xc00022f6, "TODO_c000_22f6", 0),
+ MVI(0xc00022f7, "TODO_c000_22f7", 0),
+ MVI(0xc00022f8, "TODO_c000_22f8", 0),
+ MVI(0xc00022f9, "TODO_c000_22f9", 0),
+ MVI(0xc00022fa, "TODO_c000_22fa", 0),
+ MVI(0xc00022fb, "TODO_c000_22fb", 0),
+ MVI(0xc00022fc, "TODO_c000_22fc", 0),
+ MVI(0xc00022fd, "TODO_c000_22fd", 0),
+ MVI(0xc00022fe, "TODO_c000_22fe", 0),
+ MVI(0xc00022ff, "TODO_c000_22ff", 0),
+ MVI(0xc0002300, "TODO_c000_2300", 0),
+ MVI(0xc0002301, "TODO_c000_2301", 0),
+ MVI(0xc0002302, "TODO_c000_2302", 0),
+ MVI(0xc0002303, "TODO_c000_2303", 0),
+ MVI(0xc0002304, "TODO_c000_2304", 0),
+ MVI(0xc0002305, "TODO_c000_2305", 0),
+ MVI(0xc0002306, "TODO_c000_2306", 0),
+ MVI(0xc0002307, "TODO_c000_2307", 0),
+ MVI(0xc0002308, "TODO_c000_2308", 0),
+ MVI(0xc0002309, "TODO_c000_2309", 0),
+ MVI(0xc000230a, "TODO_c000_230a", 0),
+ MVI(0xc000230b, "TODO_c000_230b", 0),
+ MVI(0xc000230c, "TODO_c000_230c", 0),
+ MVI(0xc000230d, "TODO_c000_230d", 0),
+ MVI(0xc000230e, "TODO_c000_230e", 0),
+ MVI(0xc000230f, "TODO_c000_230f", 0),
+ MVI(0xc0002310, "TODO_c000_2310", 0),
+ MVI(0xc0002311, "TODO_c000_2311", 0),
+ MVI(0xc0002312, "TODO_c000_2312", 0),
+ MVI(0xc0002313, "TODO_c000_2313", 0),
+ MVI(0xc0002314, "TODO_c000_2314", 0),
+ MVI(0xc0002315, "TODO_c000_2315", 0),
+ MVI(0xc0002316, "TODO_c000_2316", 0),
+ MVI(0xc0002317, "TODO_c000_2317", 0),
+ MVI(0xc0002318, "TODO_c000_2318", 0),
+ MVI(0xc0002319, "TODO_c000_2319", 0),
+ MVI(0xc000231a, "TODO_c000_231a", 0),
+ MVI(0xc000231b, "TODO_c000_231b", 0),
+ MVI(0xc000231c, "TODO_c000_231c", 0),
+ MVI(0xc000231d, "TODO_c000_231d", 0),
+ MVI(0xc000231e, "TODO_c000_231e", 0),
+ MVI(0xc000231f, "TODO_c000_231f", 0),
+ MVI(0xc0002320, "TODO_c000_2320", 0),
+ MVI(0xc0002321, "TODO_c000_2321", 0),
+ MVI(0xc0002322, "TODO_c000_2322", 0),
+ MVI(0xc0002323, "TODO_c000_2323", 0),
+ MVI(0xc0002324, "TODO_c000_2324", 0),
+ MVI(0xc0002325, "TODO_c000_2325", 0),
+ MVI(0xc0002326, "TODO_c000_2326", 0),
+ MVI(0xc0002327, "TODO_c000_2327", 0),
+ MVI(0xc0002328, "TODO_c000_2328", 0),
+ MVI(0xc0002329, "TODO_c000_2329", 0),
+ MVI(0xc000232a, "TODO_c000_232a", 0),
+ MVI(0xc000232b, "TODO_c000_232b", 0),
+ MVI(0xc000232c, "TODO_c000_232c", 0),
+ MVI(0xc000232d, "TODO_c000_232d", 0),
+ MVI(0xc000232e, "TODO_c000_232e", 0),
+ MVI(0xc000232f, "TODO_c000_232f", 0),
+ MVI(0xc0002330, "TODO_c000_2330", 0),
+ MVI(0xc0002331, "TODO_c000_2331", 0),
+ MVI(0xc0002332, "TODO_c000_2332", 0),
+ MVI(0xc0002333, "TODO_c000_2333", 0),
+ MVI(0xc0002334, "TODO_c000_2334", 0),
+ MVI(0xc0002335, "TODO_c000_2335", 0),
+ MVI(0xc0002336, "TODO_c000_2336", 0),
+ MVI(0xc0002337, "TODO_c000_2337", 0),
+ MVI(0xc0002338, "TODO_c000_2338", 0),
+ MVI(0xc0002339, "TODO_c000_2339", 0),
+ MVI(0xc000233a, "TODO_c000_233a", 0),
+ MVI(0xc000233b, "TODO_c000_233b", 0),
+ MVI(0xc000233c, "TODO_c000_233c", 0),
+ MVI(0xc000233d, "TODO_c000_233d", 0),
+ MVI(0xc000233e, "TODO_c000_233e", 0),
+ MVI(0xc000233f, "TODO_c000_233f", 0),
+ MVI(0xc0002340, "TODO_c000_2340", 0),
+ MVI(0xc0002341, "TODO_c000_2341", 0),
+ MVI(0xc0002342, "TODO_c000_2342", 0),
+ MVI(0xc0002343, "TODO_c000_2343", 0),
+ MVI(0xc0002344, "TODO_c000_2344", 0),
+ MVI(0xc0002345, "TODO_c000_2345", 0),
+ MVI(0xc0002346, "TODO_c000_2346", 0),
+ MVI(0xc0002347, "TODO_c000_2347", 0),
+ MVI(0xc0002348, "TODO_c000_2348", 0),
+ MVI(0xc0002349, "TODO_c000_2349", 0),
+ MVI(0xc000234a, "TODO_c000_234a", 0),
+ MVI(0xc000234b, "TODO_c000_234b", 0),
+ MVI(0xc000234c, "TODO_c000_234c", 0),
+ MVI(0xc000234d, "TODO_c000_234d", 0),
+ MVI(0xc000234e, "TODO_c000_234e", 0),
+ MVI(0xc000234f, "TODO_c000_234f", 0),
+ MVI(0xc0002350, "TODO_c000_2350", 0),
+ MVI(0xc0002351, "TODO_c000_2351", 0),
+ MVI(0xc0002352, "TODO_c000_2352", 0),
+ MVI(0xc0002353, "TODO_c000_2353", 0),
+ MVI(0xc0002354, "TODO_c000_2354", 0),
+ MVI(0xc0002355, "TODO_c000_2355", 0),
+ MVI(0xc0002356, "TODO_c000_2356", 0),
+ MVI(0xc0002357, "TODO_c000_2357", 0),
+ MVI(0xc0002358, "TODO_c000_2358", 0),
+ MVI(0xc0002359, "TODO_c000_2359", 0),
+ MVI(0xc000235a, "TODO_c000_235a", 0),
+ MVI(0xc000235b, "TODO_c000_235b", 0),
+ MVI(0xc000235c, "TODO_c000_235c", 0),
+ MVI(0xc000235d, "TODO_c000_235d", 0),
+ MVI(0xc000235e, "TODO_c000_235e", 0),
+ MVI(0xc000235f, "TODO_c000_235f", 0),
+ MVI(0xc0002360, "TODO_c000_2360", 0),
+ MVI(0xc0002361, "TODO_c000_2361", 0),
+ MVI(0xc0002362, "TODO_c000_2362", 0),
+ MVI(0xc0002363, "TODO_c000_2363", 0),
+ MVI(0xc0002364, "TODO_c000_2364", 0),
+ MVI(0xc0002365, "TODO_c000_2365", 0),
+ MVI(0xc0002366, "TODO_c000_2366", 0),
+ MVI(0xc0002367, "TODO_c000_2367", 0),
+ MVI(0xc0002368, "TODO_c000_2368", 0),
+ MVI(0xc0002369, "TODO_c000_2369", 0),
+ MVI(0xc000236a, "TODO_c000_236a", 0),
+ MVI(0xc000236b, "TODO_c000_236b", 0),
+ MVI(0xc000236c, "TODO_c000_236c", 0),
+ MVI(0xc000236d, "TODO_c000_236d", 0),
+ MVI(0xc000236e, "TODO_c000_236e", 0),
+ MVI(0xc000236f, "TODO_c000_236f", 0),
+ MVI(0xc0002370, "TODO_c000_2370", 0),
+ MVI(0xc0002371, "TODO_c000_2371", 0),
+ MVI(0xc0002372, "TODO_c000_2372", 0),
+ MVI(0xc0002373, "TODO_c000_2373", 0),
+ MVI(0xc0002374, "TODO_c000_2374", 0),
+ MVI(0xc0002375, "TODO_c000_2375", 0),
+ MVI(0xc0002376, "TODO_c000_2376", 0),
+ MVI(0xc0002377, "TODO_c000_2377", 0),
+ MVI(0xc0002378, "TODO_c000_2378", 0),
+ MVI(0xc0002379, "TODO_c000_2379", 0),
+ MVI(0xc000237a, "TODO_c000_237a", 0),
+ MVI(0xc000237b, "TODO_c000_237b", 0),
+ MVI(0xc000237c, "TODO_c000_237c", 0),
+ MVI(0xc000237d, "TODO_c000_237d", 0),
+ MVI(0xc000237e, "TODO_c000_237e", 0),
+ MVI(0xc000237f, "TODO_c000_237f", 0),
+ MVI(0xc0002380, "TODO_c000_2380", 0),
+ MVI(0xc0002381, "TODO_c000_2381", 0),
+ MVI(0xc0002382, "TODO_c000_2382", 0),
+ MVI(0xc0002383, "TODO_c000_2383", 0),
+ MVI(0xc0002384, "TODO_c000_2384", 0),
+ MVI(0xc0002385, "TODO_c000_2385", 0),
+ MVI(0xc0002386, "TODO_c000_2386", 0),
+ MVI(0xc0002387, "TODO_c000_2387", 0),
+ MVI(0xc0002388, "TODO_c000_2388", 0),
+ MVI(0xc0002389, "TODO_c000_2389", 0),
+ MVI(0xc000238a, "TODO_c000_238a", 0),
+ MVI(0xc000238b, "TODO_c000_238b", 0),
+ MVI(0xc000238c, "TODO_c000_238c", 0),
+ MVI(0xc000238d, "TODO_c000_238d", 0),
+ MVI(0xc000238e, "TODO_c000_238e", 0),
+ MVI(0xc000238f, "TODO_c000_238f", 0),
+ MVI(0xc0002390, "TODO_c000_2390", 0),
+ MVI(0xc0002391, "TODO_c000_2391", 0),
+ MVI(0xc0002392, "TODO_c000_2392", 0),
+ MVI(0xc0002393, "TODO_c000_2393", 0),
+ MVI(0xc0002394, "TODO_c000_2394", 0),
+ MVI(0xc0002395, "TODO_c000_2395", 0),
+ MVI(0xc0002396, "TODO_c000_2396", 0),
+ MVI(0xc0002397, "TODO_c000_2397", 0),
+ MVI(0xc0002398, "TODO_c000_2398", 0),
+ MVI(0xc0002399, "TODO_c000_2399", 0),
+ MVI(0xc000239a, "TODO_c000_239a", 0),
+ MVI(0xc000239b, "TODO_c000_239b", 0),
+ MVI(0xc000239c, "TODO_c000_239c", 0),
+ MVI(0xc000239d, "TODO_c000_239d", 0),
+ MVI(0xc000239e, "TODO_c000_239e", 0),
+ MVI(0xc000239f, "TODO_c000_239f", 0),
+ MVI(0xc00023a0, "TODO_c000_23a0", 0),
+ MVI(0xc00023a1, "TODO_c000_23a1", 0),
+ MVI(0xc00023a2, "TODO_c000_23a2", 0),
+ MVI(0xc00023a3, "TODO_c000_23a3", 0),
+ MVI(0xc00023a4, "TODO_c000_23a4", 0),
+ MVI(0xc00023a5, "TODO_c000_23a5", 0),
+ MVI(0xc00023a6, "TODO_c000_23a6", 0),
+ MVI(0xc00023a7, "TODO_c000_23a7", 0),
+ MVI(0xc00023a8, "TODO_c000_23a8", 0),
+ MVI(0xc00023a9, "TODO_c000_23a9", 0),
+ MVI(0xc00023aa, "TODO_c000_23aa", 0),
+ MVI(0xc00023ab, "TODO_c000_23ab", 0),
+ MVI(0xc00023ac, "TODO_c000_23ac", 0),
+ MVI(0xc00023ad, "TODO_c000_23ad", 0),
+ MVI(0xc00023ae, "TODO_c000_23ae", 0),
+ MVI(0xc00023af, "TODO_c000_23af", 0),
+ MVI(0xc00023b0, "TODO_c000_23b0", 0),
+ MVI(0xc00023b1, "TODO_c000_23b1", 0),
+ MVI(0xc00023b2, "TODO_c000_23b2", 0),
+ MVI(0xc00023b3, "TODO_c000_23b3", 0),
+ MVI(0xc00023b4, "TODO_c000_23b4", 0),
+ MVI(0xc00023b5, "TODO_c000_23b5", 0),
+ MVI(0xc00023b6, "TODO_c000_23b6", 0),
+ MVI(0xc00023b7, "TODO_c000_23b7", 0),
+ MVI(0xc00023b8, "TODO_c000_23b8", 0),
+ MVI(0xc00023b9, "TODO_c000_23b9", 0),
+ MVI(0xc00023ba, "TODO_c000_23ba", 0),
+ MVI(0xc00023bb, "TODO_c000_23bb", 0),
+ MVI(0xc00023bc, "TODO_c000_23bc", 0),
+ MVI(0xc00023bd, "TODO_c000_23bd", 0),
+ MVI(0xc00023be, "TODO_c000_23be", 0),
+ MVI(0xc00023bf, "TODO_c000_23bf", 0),
+ MVI(0xc00023c0, "TODO_c000_23c0", 0),
+ MVI(0xc00023c1, "TODO_c000_23c1", 0),
+ MVI(0xc00023c2, "TODO_c000_23c2", 0),
+ MVI(0xc00023c3, "TODO_c000_23c3", 0),
+ MVI(0xc00023c4, "TODO_c000_23c4", 0),
+ MVI(0xc00023c5, "TODO_c000_23c5", 0),
+ MVI(0xc00023c6, "TODO_c000_23c6", 0),
+ MVI(0xc00023c7, "TODO_c000_23c7", 0),
+ MVI(0xc00023c8, "TODO_c000_23c8", 0),
+ MVI(0xc00023c9, "TODO_c000_23c9", 0),
+ MVI(0xc00023ca, "TODO_c000_23ca", 0),
+ MVI(0xc00023cb, "TODO_c000_23cb", 0),
+ MVI(0xc00023cc, "TODO_c000_23cc", 0),
+ MVI(0xc00023cd, "TODO_c000_23cd", 0),
+ MVI(0xc00023ce, "TODO_c000_23ce", 0),
+ MVI(0xc00023cf, "TODO_c000_23cf", 0),
+ MVI(0xc00023d0, "TODO_c000_23d0", 0),
+ MVI(0xc00023d1, "TODO_c000_23d1", 0),
+ MVI(0xc00023d2, "TODO_c000_23d2", 0),
+ MVI(0xc00023d3, "TODO_c000_23d3", 0),
+ MVI(0xc00023d4, "TODO_c000_23d4", 0),
+ MVI(0xc00023d5, "TODO_c000_23d5", 0),
+ MVI(0xc00023d6, "TODO_c000_23d6", 0),
+ MVI(0xc00023d7, "TODO_c000_23d7", 0),
+ MVI(0xc00023d8, "TODO_c000_23d8", 0),
+ MVI(0xc00023d9, "TODO_c000_23d9", 0),
+ MVI(0xc00023da, "TODO_c000_23da", 0),
+ MVI(0xc00023db, "TODO_c000_23db", 0),
+ MVI(0xc00023dc, "TODO_c000_23dc", 0),
+ MVI(0xc00023dd, "TODO_c000_23dd", 0),
+ MVI(0xc00023de, "TODO_c000_23de", 0),
+ MVI(0xc00023df, "TODO_c000_23df", 0),
+ MVI(0xc00023e0, "TODO_c000_23e0", 0),
+ MVI(0xc00023e1, "TODO_c000_23e1", 0),
+ MVI(0xc00023e2, "TODO_c000_23e2", 0),
+ MVI(0xc00023e3, "TODO_c000_23e3", 0),
+ MVI(0xc00023e4, "TODO_c000_23e4", 0),
+ MVI(0xc00023e5, "TODO_c000_23e5", 0),
+ MVI(0xc00023e6, "TODO_c000_23e6", 0),
+ MVI(0xc00023e7, "TODO_c000_23e7", 0),
+ MVI(0xc00023e8, "TODO_c000_23e8", 0),
+ MVI(0xc00023e9, "TODO_c000_23e9", 0),
+ MVI(0xc00023ea, "TODO_c000_23ea", 0),
+ MVI(0xc00023eb, "TODO_c000_23eb", 0),
+ MVI(0xc00023ec, "TODO_c000_23ec", 0),
+ MVI(0xc00023ed, "TODO_c000_23ed", 0),
+ MVI(0xc00023ee, "TODO_c000_23ee", 0),
+ MVI(0xc00023ef, "TODO_c000_23ef", 0),
+ MVI(0xc00023f0, "TODO_c000_23f0", 0),
+ MVI(0xc00023f1, "TODO_c000_23f1", 0),
+ MVI(0xc00023f2, "TODO_c000_23f2", 0),
+ MVI(0xc00023f3, "TODO_c000_23f3", 0),
+ MVI(0xc00023f4, "TODO_c000_23f4", 0),
+ MVI(0xc00023f5, "TODO_c000_23f5", 0),
+ MVI(0xc00023f6, "TODO_c000_23f6", 0),
+ MVI(0xc00023f7, "TODO_c000_23f7", 0),
+ MVI(0xc00023f8, "TODO_c000_23f8", 0),
+ MVI(0xc00023f9, "TODO_c000_23f9", 0),
+ MVI(0xc00023fa, "TODO_c000_23fa", 0),
+ MVI(0xc00023fb, "TODO_c000_23fb", 0),
+ MVI(0xc00023fc, "TODO_c000_23fc", 0),
+ MVI(0xc00023fd, "TODO_c000_23fd", 0),
+ MVI(0xc00023fe, "TODO_c000_23fe", 0),
+ MVI(0xc00023ff, "TODO_c000_23ff", 0),
+ MVI(0xc0002400, "TODO_c000_2400", 0),
+ MVI(0xc0002401, "TODO_c000_2401", 0),
+ MVI(0xc0002402, "TODO_c000_2402", 0),
+ MVI(0xc0002403, "TODO_c000_2403", 0),
+ MVI(0xc0002404, "TODO_c000_2404", 0),
+ MVI(0xc0002405, "TODO_c000_2405", 0),
+ MVI(0xc0002406, "TODO_c000_2406", 0),
+ MVI(0xc0002407, "TODO_c000_2407", 0),
+ MVI(0xc0002408, "TODO_c000_2408", 0),
+ MVI(0xc0002409, "TODO_c000_2409", 0),
+ MVI(0xc000240a, "TODO_c000_240a", 0),
+ MVI(0xc000240b, "TODO_c000_240b", 0),
+ MVI(0xc000240c, "TODO_c000_240c", 0),
+ MVI(0xc000240d, "TODO_c000_240d", 0),
+ MVI(0xc000240e, "TODO_c000_240e", 0),
+ MVI(0xc000240f, "TODO_c000_240f", 0),
+ MVI(0xc0002410, "TODO_c000_2410", 0),
+ MVI(0xc0002411, "TODO_c000_2411", 0),
+ MVI(0xc0002412, "TODO_c000_2412", 0),
+ MVI(0xc0002413, "TODO_c000_2413", 0),
+ MVI(0xc0002414, "TODO_c000_2414", 0),
+ MVI(0xc0002415, "TODO_c000_2415", 0),
+ MVI(0xc0002416, "TODO_c000_2416", 0),
+ MVI(0xc0002417, "TODO_c000_2417", 0),
+ MVI(0xc0002418, "TODO_c000_2418", 0),
+ MVI(0xc0002419, "TODO_c000_2419", 0),
+ MVI(0xc000241a, "TODO_c000_241a", 0),
+ MVI(0xc000241b, "TODO_c000_241b", 0),
+ MVI(0xc000241c, "TODO_c000_241c", 0),
+ MVI(0xc000241d, "TODO_c000_241d", 0),
+ MVI(0xc000241e, "TODO_c000_241e", 0),
+ MVI(0xc000241f, "TODO_c000_241f", 0),
+ MVI(0xc0002420, "TODO_c000_2420", 0),
+ MVI(0xc0002421, "TODO_c000_2421", 0),
+ MVI(0xc0002422, "TODO_c000_2422", 0),
+ MVI(0xc0002423, "TODO_c000_2423", 0),
+ MVI(0xc0002424, "TODO_c000_2424", 0),
+ MVI(0xc0002425, "TODO_c000_2425", 0),
+ MVI(0xc0002426, "TODO_c000_2426", 0),
+ MVI(0xc0002427, "TODO_c000_2427", 0),
+ MVI(0xc0002428, "TODO_c000_2428", 0),
+ MVI(0xc0002429, "TODO_c000_2429", 0),
+ MVI(0xc000242a, "TODO_c000_242a", 0),
+ MVI(0xc000242b, "TODO_c000_242b", 0),
+ MVI(0xc000242c, "TODO_c000_242c", 0),
+ MVI(0xc000242d, "TODO_c000_242d", 0),
+ MVI(0xc000242e, "TODO_c000_242e", 0),
+ MVI(0xc000242f, "TODO_c000_242f", 0),
+ MVI(0xc0002430, "TODO_c000_2430", 0),
+ MVI(0xc0002431, "TODO_c000_2431", 0),
+ MVI(0xc0002432, "TODO_c000_2432", 0),
+ MVI(0xc0002433, "TODO_c000_2433", 0),
+ MVI(0xc0002434, "TODO_c000_2434", 0),
+ MVI(0xc0002435, "TODO_c000_2435", 0),
+ MVI(0xc0002436, "TODO_c000_2436", 0),
+ MVI(0xc0002437, "TODO_c000_2437", 0),
+ MVI(0xc0002438, "TODO_c000_2438", 0),
+ MVI(0xc0002439, "TODO_c000_2439", 0),
+ MVI(0xc000243a, "TODO_c000_243a", 0),
+ MVI(0xc000243b, "TODO_c000_243b", 0),
+ MVI(0xc000243c, "TODO_c000_243c", 0),
+ MVI(0xc000243d, "TODO_c000_243d", 0),
+ MVI(0xc000243e, "TODO_c000_243e", 0),
+ MVI(0xc000243f, "TODO_c000_243f", 0),
+ MVI(0xc0002440, "TODO_c000_2440", 0),
+ MVI(0xc0002441, "TODO_c000_2441", 0),
+ MVI(0xc0002442, "TODO_c000_2442", 0),
+ MVI(0xc0002443, "TODO_c000_2443", 0),
+ MVI(0xc0002444, "TODO_c000_2444", 0),
+ MVI(0xc0002445, "TODO_c000_2445", 0),
+ MVI(0xc0002446, "TODO_c000_2446", 0),
+ MVI(0xc0002447, "TODO_c000_2447", 0),
+ MVI(0xc0002448, "TODO_c000_2448", 0),
+ MVI(0xc0002449, "TODO_c000_2449", 0),
+ MVI(0xc000244a, "TODO_c000_244a", 0),
+ MVI(0xc000244b, "TODO_c000_244b", 0),
+ MVI(0xc000244c, "TODO_c000_244c", 0),
+ MVI(0xc000244d, "TODO_c000_244d", 0),
+ MVI(0xc000244e, "TODO_c000_244e", 0),
+ MVI(0xc000244f, "TODO_c000_244f", 0),
+ MVI(0xc0002450, "TODO_c000_2450", 0),
+ MVI(0xc0002451, "TODO_c000_2451", 0),
+ MVI(0xc0002452, "TODO_c000_2452", 0),
+ MVI(0xc0002453, "TODO_c000_2453", 0),
+ MVI(0xc0002454, "TODO_c000_2454", 0),
+ MVI(0xc0002455, "TODO_c000_2455", 0),
+ MVI(0xc0002456, "TODO_c000_2456", 0),
+ MVI(0xc0002457, "TODO_c000_2457", 0),
+ MVI(0xc0002458, "TODO_c000_2458", 0),
+ MVI(0xc0002459, "TODO_c000_2459", 0),
+ MVI(0xc000245a, "TODO_c000_245a", 0),
+ MVI(0xc000245b, "TODO_c000_245b", 0),
+ MVI(0xc000245c, "TODO_c000_245c", 0),
+ MVI(0xc000245d, "TODO_c000_245d", 0),
+ MVI(0xc000245e, "TODO_c000_245e", 0),
+ MVI(0xc000245f, "TODO_c000_245f", 0),
+ MVI(0xc0002460, "TODO_c000_2460", 0),
+ MVI(0xc0002461, "TODO_c000_2461", 0),
+ MVI(0xc0002462, "TODO_c000_2462", 0),
+ MVI(0xc0002463, "TODO_c000_2463", 0),
+ MVI(0xc0002464, "TODO_c000_2464", 0),
+ MVI(0xc0002465, "TODO_c000_2465", 0),
+ MVI(0xc0002466, "TODO_c000_2466", 0),
+ MVI(0xc0002467, "TODO_c000_2467", 0),
+ MVI(0xc0002468, "TODO_c000_2468", 0),
+ MVI(0xc0002469, "TODO_c000_2469", 0),
+ MVI(0xc000246a, "TODO_c000_246a", 0),
+ MVI(0xc000246b, "TODO_c000_246b", 0),
+ MVI(0xc000246c, "TODO_c000_246c", 0),
+ MVI(0xc000246d, "TODO_c000_246d", 0),
+ MVI(0xc000246e, "TODO_c000_246e", 0),
+ MVI(0xc000246f, "TODO_c000_246f", 0),
+ MVI(0xc0002470, "TODO_c000_2470", 0),
+ MVI(0xc0002471, "TODO_c000_2471", 0),
+ MVI(0xc0002472, "TODO_c000_2472", 0),
+ MVI(0xc0002473, "TODO_c000_2473", 0),
+ MVI(0xc0002474, "TODO_c000_2474", 0),
+ MVI(0xc0002475, "TODO_c000_2475", 0),
+ MVI(0xc0002476, "TODO_c000_2476", 0),
+ MVI(0xc0002477, "TODO_c000_2477", 0),
+ MVI(0xc0002478, "TODO_c000_2478", 0),
+ MVI(0xc0002479, "TODO_c000_2479", 0),
+ MVI(0xc000247a, "TODO_c000_247a", 0),
+ MVI(0xc000247b, "TODO_c000_247b", 0),
+ MVI(0xc000247c, "TODO_c000_247c", 0),
+ MVI(0xc000247d, "TODO_c000_247d", 0),
+ MVI(0xc000247e, "TODO_c000_247e", 0),
+ MVI(0xc000247f, "TODO_c000_247f", 0),
+ MVI(0xc0002480, "TODO_c000_2480", 0),
+ MVI(0xc0002481, "TODO_c000_2481", 0),
+ MVI(0xc0002482, "TODO_c000_2482", 0),
+ MVI(0xc0002483, "TODO_c000_2483", 0),
+ MVI(0xc0002484, "TODO_c000_2484", 0),
+ MVI(0xc0002485, "TODO_c000_2485", 0),
+ MVI(0xc0002486, "TODO_c000_2486", 0),
+ MVI(0xc0002487, "TODO_c000_2487", 0),
+ MVI(0xc0002488, "TODO_c000_2488", 0),
+ MVI(0xc0002489, "TODO_c000_2489", 0),
+ MVI(0xc000248a, "TODO_c000_248a", 0),
+ MVI(0xc000248b, "TODO_c000_248b", 0),
+ MVI(0xc000248c, "TODO_c000_248c", 0),
+ MVI(0xc000248d, "TODO_c000_248d", 0),
+ MVI(0xc000248e, "TODO_c000_248e", 0),
+ MVI(0xc000248f, "TODO_c000_248f", 0),
+ MVI(0xc0002490, "TODO_c000_2490", 0),
+ MVI(0xc0002491, "TODO_c000_2491", 0),
+ MVI(0xc0002492, "TODO_c000_2492", 0),
+ MVI(0xc0002493, "TODO_c000_2493", 0),
+ MVI(0xc0002494, "TODO_c000_2494", 0),
+ MVI(0xc0002495, "TODO_c000_2495", 0),
+ MVI(0xc0002496, "TODO_c000_2496", 0),
+ MVI(0xc0002497, "TODO_c000_2497", 0),
+ MVI(0xc0002498, "TODO_c000_2498", 0),
+ MVI(0xc0002499, "TODO_c000_2499", 0),
+ MVI(0xc000249a, "TODO_c000_249a", 0),
+ MVI(0xc000249b, "TODO_c000_249b", 0),
+ MVI(0xc000249c, "TODO_c000_249c", 0),
+ MVI(0xc000249d, "TODO_c000_249d", 0),
+ MVI(0xc000249e, "TODO_c000_249e", 0),
+ MVI(0xc000249f, "TODO_c000_249f", 0),
+ MVI(0xc00024a0, "TODO_c000_24a0", 0),
+ MVI(0xc00024a1, "TODO_c000_24a1", 0),
+ MVI(0xc00024a2, "TODO_c000_24a2", 0),
+ MVI(0xc00024a3, "TODO_c000_24a3", 0),
+ MVI(0xc00024a4, "TODO_c000_24a4", 0),
+ MVI(0xc00024a5, "TODO_c000_24a5", 0),
+ MVI(0xc00024a6, "TODO_c000_24a6", 0),
+ MVI(0xc00024a7, "TODO_c000_24a7", 0),
+ MVI(0xc00024a8, "TODO_c000_24a8", 0),
+ MVI(0xc00024a9, "TODO_c000_24a9", 0),
+ MVI(0xc00024aa, "TODO_c000_24aa", 0),
+ MVI(0xc00024ab, "TODO_c000_24ab", 0),
+ MVI(0xc00024ac, "TODO_c000_24ac", 0),
+ MVI(0xc00024ad, "TODO_c000_24ad", 0),
+ MVI(0xc00024ae, "TODO_c000_24ae", 0),
+ MVI(0xc00024af, "TODO_c000_24af", 0),
+ MVI(0xc00024b0, "TODO_c000_24b0", 0),
+ MVI(0xc00024b1, "TODO_c000_24b1", 0),
+ MVI(0xc00024b2, "TODO_c000_24b2", 0),
+ MVI(0xc00024b3, "TODO_c000_24b3", 0),
+ MVI(0xc00024b4, "TODO_c000_24b4", 0),
+ MVI(0xc00024b5, "TODO_c000_24b5", 0),
+ MVI(0xc00024b6, "TODO_c000_24b6", 0),
+ MVI(0xc00024b7, "TODO_c000_24b7", 0),
+ MVI(0xc00024b8, "TODO_c000_24b8", 0),
+ MVI(0xc00024b9, "TODO_c000_24b9", 0),
+ MVI(0xc00024ba, "TODO_c000_24ba", 0),
+ MVI(0xc00024bb, "TODO_c000_24bb", 0),
+ MVI(0xc00024bc, "TODO_c000_24bc", 0),
+ MVI(0xc00024bd, "TODO_c000_24bd", 0),
+ MVI(0xc00024be, "TODO_c000_24be", 0),
+ MVI(0xc00024bf, "TODO_c000_24bf", 0),
+ MVI(0xc00024c0, "TODO_c000_24c0", 0),
+ MVI(0xc00024c1, "TODO_c000_24c1", 0),
+ MVI(0xc00024c2, "TODO_c000_24c2", 0),
+ MVI(0xc00024c3, "TODO_c000_24c3", 0),
+ MVI(0xc00024c4, "TODO_c000_24c4", 0),
+ MVI(0xc00024c5, "TODO_c000_24c5", 0),
+ MVI(0xc00024c6, "TODO_c000_24c6", 0),
+ MVI(0xc00024c7, "TODO_c000_24c7", 0),
+ MVI(0xc00024c8, "TODO_c000_24c8", 0),
+ MVI(0xc00024c9, "TODO_c000_24c9", 0),
+ MVI(0xc00024ca, "TODO_c000_24ca", 0),
+ MVI(0xc00024cb, "TODO_c000_24cb", 0),
+ MVI(0xc00024cc, "TODO_c000_24cc", 0),
+ MVI(0xc00024cd, "TODO_c000_24cd", 0),
+ MVI(0xc00024ce, "TODO_c000_24ce", 0),
+ MVI(0xc00024cf, "TODO_c000_24cf", 0),
+ MVI(0xc00024d0, "TODO_c000_24d0", 0),
+ MVI(0xc00024d1, "TODO_c000_24d1", 0),
+ MVI(0xc00024d2, "TODO_c000_24d2", 0),
+ MVI(0xc00024d3, "TODO_c000_24d3", 0),
+ MVI(0xc00024d4, "TODO_c000_24d4", 0),
+ MVI(0xc00024d5, "TODO_c000_24d5", 0),
+ MVI(0xc00024d6, "TODO_c000_24d6", 0),
+ MVI(0xc00024d7, "TODO_c000_24d7", 0),
+ MVI(0xc00024d8, "TODO_c000_24d8", 0),
+ MVI(0xc00024d9, "TODO_c000_24d9", 0),
+ MVI(0xc00024da, "TODO_c000_24da", 0),
+ MVI(0xc00024db, "TODO_c000_24db", 0),
+ MVI(0xc00024dc, "TODO_c000_24dc", 0),
+ MVI(0xc00024dd, "TODO_c000_24dd", 0),
+ MVI(0xc00024de, "TODO_c000_24de", 0),
+ MVI(0xc00024df, "TODO_c000_24df", 0),
+ MVI(0xc00024e0, "TODO_c000_24e0", 0),
+ MVI(0xc00024e1, "TODO_c000_24e1", 0),
+ MVI(0xc00024e2, "TODO_c000_24e2", 0),
+ MVI(0xc00024e3, "TODO_c000_24e3", 0),
+ MVI(0xc00024e4, "TODO_c000_24e4", 0),
+ MVI(0xc00024e5, "TODO_c000_24e5", 0),
+ MVI(0xc00024e6, "TODO_c000_24e6", 0),
+ MVI(0xc00024e7, "TODO_c000_24e7", 0),
+ MVI(0xc00024e8, "TODO_c000_24e8", 0),
+ MVI(0xc00024e9, "TODO_c000_24e9", 0),
+ MVI(0xc00024ea, "TODO_c000_24ea", 0),
+ MVI(0xc00024eb, "TODO_c000_24eb", 0),
+ MVI(0xc00024ec, "TODO_c000_24ec", 0),
+ MVI(0xc00024ed, "TODO_c000_24ed", 0),
+ MVI(0xc00024ee, "TODO_c000_24ee", 0),
+ MVI(0xc00024ef, "TODO_c000_24ef", 0),
+ MVI(0xc00024f0, "TODO_c000_24f0", 0),
+ MVI(0xc00024f1, "TODO_c000_24f1", 0),
+ MVI(0xc00024f2, "TODO_c000_24f2", 0),
+ MVI(0xc00024f3, "TODO_c000_24f3", 0),
+ MVI(0xc00024f4, "TODO_c000_24f4", 0),
+ MVI(0xc00024f5, "TODO_c000_24f5", 0),
+ MVI(0xc00024f6, "TODO_c000_24f6", 0),
+ MVI(0xc00024f7, "TODO_c000_24f7", 0),
+ MVI(0xc00024f8, "TODO_c000_24f8", 0),
+ MVI(0xc00024f9, "TODO_c000_24f9", 0),
+ MVI(0xc00024fa, "TODO_c000_24fa", 0),
+ MVI(0xc00024fb, "TODO_c000_24fb", 0),
+ MVI(0xc00024fc, "TODO_c000_24fc", 0),
+ MVI(0xc00024fd, "TODO_c000_24fd", 0),
+ MVI(0xc00024fe, "TODO_c000_24fe", 0),
+ MVI(0xc00024ff, "TODO_c000_24ff", 0),
+ MVI(0xc0002500, "TODO_c000_2500", 0),
+ MVI(0xc0002501, "TODO_c000_2501", 0),
+ MVI(0xc0002502, "TODO_c000_2502", 0),
+ MVI(0xc0002503, "TODO_c000_2503", 0),
+ MVI(0xc0002504, "TODO_c000_2504", 0),
+ MVI(0xc0002505, "TODO_c000_2505", 0),
+ MVI(0xc0002506, "TODO_c000_2506", 0),
+ MVI(0xc0002507, "TODO_c000_2507", 0),
+ MVI(0xc0002508, "TODO_c000_2508", 0),
+ MVI(0xc0002509, "TODO_c000_2509", 0),
+ MVI(0xc000250a, "TODO_c000_250a", 0),
+ MVI(0xc000250b, "TODO_c000_250b", 0),
+ MVI(0xc000250c, "TODO_c000_250c", 0),
+ MVI(0xc000250d, "TODO_c000_250d", 0),
+ MVI(0xc000250e, "TODO_c000_250e", 0),
+ MVI(0xc000250f, "TODO_c000_250f", 0),
+ MVI(0xc0002510, "TODO_c000_2510", 0),
+ MVI(0xc0002511, "TODO_c000_2511", 0),
+ MVI(0xc0002512, "TODO_c000_2512", 0),
+ MVI(0xc0002513, "TODO_c000_2513", 0),
+ MVI(0xc0002514, "TODO_c000_2514", 0),
+ MVI(0xc0002515, "TODO_c000_2515", 0),
+ MVI(0xc0002516, "TODO_c000_2516", 0),
+ MVI(0xc0002517, "TODO_c000_2517", 0),
+ MVI(0xc0002518, "TODO_c000_2518", 0),
+ MVI(0xc0002519, "TODO_c000_2519", 0),
+ MVI(0xc000251a, "TODO_c000_251a", 0),
+ MVI(0xc000251b, "TODO_c000_251b", 0),
+ MVI(0xc000251c, "TODO_c000_251c", 0),
+ MVI(0xc000251d, "TODO_c000_251d", 0),
+ MVI(0xc000251e, "TODO_c000_251e", 0),
+ MVI(0xc000251f, "TODO_c000_251f", 0),
+ MVI(0xc0002520, "TODO_c000_2520", 0),
+ MVI(0xc0002521, "TODO_c000_2521", 0),
+ MVI(0xc0002522, "TODO_c000_2522", 0),
+ MVI(0xc0002523, "TODO_c000_2523", 0),
+ MVI(0xc0002524, "TODO_c000_2524", 0),
+ MVI(0xc0002525, "TODO_c000_2525", 0),
+ MVI(0xc0002526, "TODO_c000_2526", 0),
+ MVI(0xc0002527, "TODO_c000_2527", 0),
+ MVI(0xc0002528, "TODO_c000_2528", 0),
+ MVI(0xc0002529, "TODO_c000_2529", 0),
+ MVI(0xc000252a, "TODO_c000_252a", 0),
+ MVI(0xc000252b, "TODO_c000_252b", 0),
+ MVI(0xc000252c, "TODO_c000_252c", 0),
+ MVI(0xc000252d, "TODO_c000_252d", 0),
+ MVI(0xc000252e, "TODO_c000_252e", 0),
+ MVI(0xc000252f, "TODO_c000_252f", 0),
+ MVI(0xc0002530, "TODO_c000_2530", 0),
+ MVI(0xc0002531, "TODO_c000_2531", 0),
+ MVI(0xc0002532, "TODO_c000_2532", 0),
+ MVI(0xc0002533, "TODO_c000_2533", 0),
+ MVI(0xc0002534, "TODO_c000_2534", 0),
+ MVI(0xc0002535, "TODO_c000_2535", 0),
+ MVI(0xc0002536, "TODO_c000_2536", 0),
+ MVI(0xc0002537, "TODO_c000_2537", 0),
+ MVI(0xc0002538, "TODO_c000_2538", 0),
+ MVI(0xc0002539, "TODO_c000_2539", 0),
+ MVI(0xc000253a, "TODO_c000_253a", 0),
+ MVI(0xc000253b, "TODO_c000_253b", 0),
+ MVI(0xc000253c, "TODO_c000_253c", 0),
+ MVI(0xc000253d, "TODO_c000_253d", 0),
+ MVI(0xc000253e, "TODO_c000_253e", 0),
+ MVI(0xc000253f, "TODO_c000_253f", 0),
+ MVI(0xc0002540, "TODO_c000_2540", 0),
+ MVI(0xc0002541, "TODO_c000_2541", 0),
+ MVI(0xc0002542, "TODO_c000_2542", 0),
+ MVI(0xc0002543, "TODO_c000_2543", 0),
+ MVI(0xc0002544, "TODO_c000_2544", 0),
+ MVI(0xc0002545, "TODO_c000_2545", 0),
+ MVI(0xc0002546, "TODO_c000_2546", 0),
+ MVI(0xc0002547, "TODO_c000_2547", 0),
+ MVI(0xc0002548, "TODO_c000_2548", 0),
+ MVI(0xc0002549, "TODO_c000_2549", 0),
+ MVI(0xc000254a, "TODO_c000_254a", 0),
+ MVI(0xc000254b, "TODO_c000_254b", 0),
+ MVI(0xc000254c, "TODO_c000_254c", 0),
+ MVI(0xc000254d, "TODO_c000_254d", 0),
+ MVI(0xc000254e, "TODO_c000_254e", 0),
+ MVI(0xc000254f, "TODO_c000_254f", 0),
+ MVI(0xc0002550, "TODO_c000_2550", 0),
+ MVI(0xc0002551, "TODO_c000_2551", 0),
+ MVI(0xc0002552, "TODO_c000_2552", 0),
+ MVI(0xc0002553, "TODO_c000_2553", 0),
+ MVI(0xc0002554, "TODO_c000_2554", 0),
+ MVI(0xc0002555, "TODO_c000_2555", 0),
+ MVI(0xc0002556, "TODO_c000_2556", 0),
+ MVI(0xc0002557, "TODO_c000_2557", 0),
+ MVI(0xc0002558, "TODO_c000_2558", 0),
+ MVI(0xc0002559, "TODO_c000_2559", 0),
+ MVI(0xc000255a, "TODO_c000_255a", 0),
+ MVI(0xc000255b, "TODO_c000_255b", 0),
+ MVI(0xc000255c, "TODO_c000_255c", 0),
+ MVI(0xc000255d, "TODO_c000_255d", 0),
+ MVI(0xc000255e, "TODO_c000_255e", 0),
+ MVI(0xc000255f, "TODO_c000_255f", 0),
+ MVI(0xc0002560, "TODO_c000_2560", 0),
+ MVI(0xc0002561, "TODO_c000_2561", 0),
+ MVI(0xc0002562, "TODO_c000_2562", 0),
+ MVI(0xc0002563, "TODO_c000_2563", 0),
+ MVI(0xc0002564, "TODO_c000_2564", 0),
+ MVI(0xc0002565, "TODO_c000_2565", 0),
+ MVI(0xc0002566, "TODO_c000_2566", 0),
+ MVI(0xc0002567, "TODO_c000_2567", 0),
+ MVI(0xc0002568, "TODO_c000_2568", 0),
+ MVI(0xc0002569, "TODO_c000_2569", 0),
+ MVI(0xc000256a, "TODO_c000_256a", 0),
+ MVI(0xc000256b, "TODO_c000_256b", 0),
+ MVI(0xc000256c, "TODO_c000_256c", 0),
+ MVI(0xc000256d, "TODO_c000_256d", 0),
+ MVI(0xc000256e, "TODO_c000_256e", 0),
+ MVI(0xc000256f, "TODO_c000_256f", 0),
+ MVI(0xc0002570, "TODO_c000_2570", 0),
+ MVI(0xc0002571, "TODO_c000_2571", 0),
+ MVI(0xc0002572, "TODO_c000_2572", 0),
+ MVI(0xc0002573, "TODO_c000_2573", 0),
+ MVI(0xc0002574, "TODO_c000_2574", 0),
+ MVI(0xc0002575, "TODO_c000_2575", 0),
+ MVI(0xc0002576, "TODO_c000_2576", 0),
+ MVI(0xc0002577, "TODO_c000_2577", 0),
+ MVI(0xc0002578, "TODO_c000_2578", 0),
+ MVI(0xc0002579, "TODO_c000_2579", 0),
+ MVI(0xc000257a, "TODO_c000_257a", 0),
+ MVI(0xc000257b, "TODO_c000_257b", 0),
+ MVI(0xc000257c, "TODO_c000_257c", 0),
+ MVI(0xc000257d, "TODO_c000_257d", 0),
+ MVI(0xc000257e, "TODO_c000_257e", 0),
+ MVI(0xc000257f, "TODO_c000_257f", 0),
+ MVI(0xc0002580, "TODO_c000_2580", 0),
+ MVI(0xc0002581, "TODO_c000_2581", 0),
+ MVI(0xc0002582, "TODO_c000_2582", 0),
+ MVI(0xc0002583, "TODO_c000_2583", 0),
+ MVI(0xc0002584, "TODO_c000_2584", 0),
+ MVI(0xc0002585, "TODO_c000_2585", 0),
+ MVI(0xc0002586, "TODO_c000_2586", 0),
+ MVI(0xc0002587, "TODO_c000_2587", 0),
+ MVI(0xc0002588, "TODO_c000_2588", 0),
+ MVI(0xc0002589, "TODO_c000_2589", 0),
+ MVI(0xc000258a, "TODO_c000_258a", 0),
+ MVI(0xc000258b, "TODO_c000_258b", 0),
+ MVI(0xc000258c, "TODO_c000_258c", 0),
+ MVI(0xc000258d, "TODO_c000_258d", 0),
+ MVI(0xc000258e, "TODO_c000_258e", 0),
+ MVI(0xc000258f, "TODO_c000_258f", 0),
+ MVI(0xc0002590, "TODO_c000_2590", 0),
+ MVI(0xc0002591, "TODO_c000_2591", 0),
+ MVI(0xc0002592, "TODO_c000_2592", 0),
+ MVI(0xc0002593, "TODO_c000_2593", 0),
+ MVI(0xc0002594, "TODO_c000_2594", 0),
+ MVI(0xc0002595, "TODO_c000_2595", 0),
+ MVI(0xc0002596, "TODO_c000_2596", 0),
+ MVI(0xc0002597, "TODO_c000_2597", 0),
+ MVI(0xc0002598, "TODO_c000_2598", 0),
+ MVI(0xc0002599, "TODO_c000_2599", 0),
+ MVI(0xc000259a, "TODO_c000_259a", 0),
+ MVI(0xc000259b, "TODO_c000_259b", 0),
+ MVI(0xc000259c, "TODO_c000_259c", 0),
+ MVI(0xc000259d, "TODO_c000_259d", 0),
+ MVI(0xc000259e, "TODO_c000_259e", 0),
+ MVI(0xc000259f, "TODO_c000_259f", 0),
+ MVI(0xc00025a0, "TODO_c000_25a0", 0),
+ MVI(0xc00025a1, "TODO_c000_25a1", 0),
+ MVI(0xc00025a2, "TODO_c000_25a2", 0),
+ MVI(0xc00025a3, "TODO_c000_25a3", 0),
+ MVI(0xc00025a4, "TODO_c000_25a4", 0),
+ MVI(0xc00025a5, "TODO_c000_25a5", 0),
+ MVI(0xc00025a6, "TODO_c000_25a6", 0),
+ MVI(0xc00025a7, "TODO_c000_25a7", 0),
+ MVI(0xc00025a8, "TODO_c000_25a8", 0),
+ MVI(0xc00025a9, "TODO_c000_25a9", 0),
+ MVI(0xc00025aa, "TODO_c000_25aa", 0),
+ MVI(0xc00025ab, "TODO_c000_25ab", 0),
+ MVI(0xc00025ac, "TODO_c000_25ac", 0),
+ MVI(0xc00025ad, "TODO_c000_25ad", 0),
+ MVI(0xc00025ae, "TODO_c000_25ae", 0),
+ MVI(0xc00025af, "TODO_c000_25af", 0),
+ MVI(0xc00025b0, "TODO_c000_25b0", 0),
+ MVI(0xc00025b1, "TODO_c000_25b1", 0),
+ MVI(0xc00025b2, "TODO_c000_25b2", 0),
+ MVI(0xc00025b3, "TODO_c000_25b3", 0),
+ MVI(0xc00025b4, "TODO_c000_25b4", 0),
+ MVI(0xc00025b5, "TODO_c000_25b5", 0),
+ MVI(0xc00025b6, "TODO_c000_25b6", 0),
+ MVI(0xc00025b7, "TODO_c000_25b7", 0),
+ MVI(0xc00025b8, "TODO_c000_25b8", 0),
+ MVI(0xc00025b9, "TODO_c000_25b9", 0),
+ MVI(0xc00025ba, "TODO_c000_25ba", 0),
+ MVI(0xc00025bb, "TODO_c000_25bb", 0),
+ MVI(0xc00025bc, "TODO_c000_25bc", 0),
+ MVI(0xc00025bd, "TODO_c000_25bd", 0),
+ MVI(0xc00025be, "TODO_c000_25be", 0),
+ MVI(0xc00025bf, "TODO_c000_25bf", 0),
+ MVI(0xc00025c0, "TODO_c000_25c0", 0),
+ MVI(0xc00025c1, "TODO_c000_25c1", 0),
+ MVI(0xc00025c2, "TODO_c000_25c2", 0),
+ MVI(0xc00025c3, "TODO_c000_25c3", 0),
+ MVI(0xc00025c4, "TODO_c000_25c4", 0),
+ MVI(0xc00025c5, "TODO_c000_25c5", 0),
+ MVI(0xc00025c6, "TODO_c000_25c6", 0),
+ MVI(0xc00025c7, "TODO_c000_25c7", 0),
+ MVI(0xc00025c8, "TODO_c000_25c8", 0),
+ MVI(0xc00025c9, "TODO_c000_25c9", 0),
+ MVI(0xc00025ca, "TODO_c000_25ca", 0),
+ MVI(0xc00025cb, "TODO_c000_25cb", 0),
+ MVI(0xc00025cc, "TODO_c000_25cc", 0),
+ MVI(0xc00025cd, "TODO_c000_25cd", 0),
+ MVI(0xc00025ce, "TODO_c000_25ce", 0),
+ MVI(0xc00025cf, "TODO_c000_25cf", 0),
+ MVI(0xc00025d0, "TODO_c000_25d0", 0),
+ MVI(0xc00025d1, "TODO_c000_25d1", 0),
+ MVI(0xc00025d2, "TODO_c000_25d2", 0),
+ MVI(0xc00025d3, "TODO_c000_25d3", 0),
+ MVI(0xc00025d4, "TODO_c000_25d4", 0),
+ MVI(0xc00025d5, "TODO_c000_25d5", 0),
+ MVI(0xc00025d6, "TODO_c000_25d6", 0),
+ MVI(0xc00025d7, "TODO_c000_25d7", 0),
+ MVI(0xc00025d8, "TODO_c000_25d8", 0),
+ MVI(0xc00025d9, "TODO_c000_25d9", 0),
+ MVI(0xc00025da, "TODO_c000_25da", 0),
+ MVI(0xc00025db, "TODO_c000_25db", 0),
+ MVI(0xc00025dc, "TODO_c000_25dc", 0),
+ MVI(0xc00025dd, "TODO_c000_25dd", 0),
+ MVI(0xc00025de, "TODO_c000_25de", 0),
+ MVI(0xc00025df, "TODO_c000_25df", 0),
+ MVI(0xc00025e0, "TODO_c000_25e0", 0),
+ MVI(0xc00025e1, "TODO_c000_25e1", 0),
+ MVI(0xc00025e2, "TODO_c000_25e2", 0),
+ MVI(0xc00025e3, "TODO_c000_25e3", 0),
+ MVI(0xc00025e4, "TODO_c000_25e4", 0),
+ MVI(0xc00025e5, "TODO_c000_25e5", 0),
+ MVI(0xc00025e6, "TODO_c000_25e6", 0),
+ MVI(0xc00025e7, "TODO_c000_25e7", 0),
+ MVI(0xc00025e8, "TODO_c000_25e8", 0),
+ MVI(0xc00025e9, "TODO_c000_25e9", 0),
+ MVI(0xc00025ea, "TODO_c000_25ea", 0),
+ MVI(0xc00025eb, "TODO_c000_25eb", 0),
+ MVI(0xc00025ec, "TODO_c000_25ec", 0),
+ MVI(0xc00025ed, "TODO_c000_25ed", 0),
+ MVI(0xc00025ee, "TODO_c000_25ee", 0),
+ MVI(0xc00025ef, "TODO_c000_25ef", 0),
+ MVI(0xc00025f0, "TODO_c000_25f0", 0),
+ MVI(0xc00025f1, "TODO_c000_25f1", 0),
+ MVI(0xc00025f2, "TODO_c000_25f2", 0),
+ MVI(0xc00025f3, "TODO_c000_25f3", 0),
+ MVI(0xc00025f4, "TODO_c000_25f4", 0),
+ MVI(0xc00025f5, "TODO_c000_25f5", 0),
+ MVI(0xc00025f6, "TODO_c000_25f6", 0),
+ MVI(0xc00025f7, "TODO_c000_25f7", 0),
+ MVI(0xc00025f8, "TODO_c000_25f8", 0),
+ MVI(0xc00025f9, "TODO_c000_25f9", 0),
+ MVI(0xc00025fa, "TODO_c000_25fa", 0),
+ MVI(0xc00025fb, "TODO_c000_25fb", 0),
+ MVI(0xc00025fc, "TODO_c000_25fc", 0),
+ MVI(0xc00025fd, "TODO_c000_25fd", 0),
+ MVI(0xc00025fe, "TODO_c000_25fe", 0),
+ MVI(0xc00025ff, "TODO_c000_25ff", 0),
+ MVI(0xc0002600, "TODO_c000_2600", 0),
+ MVI(0xc0002601, "TODO_c000_2601", 0),
+ MVI(0xc0002602, "TODO_c000_2602", 0),
+ MVI(0xc0002603, "TODO_c000_2603", 0),
+ MVI(0xc0002604, "TODO_c000_2604", 0),
+ MVI(0xc0002605, "TODO_c000_2605", 0),
+ MVI(0xc0002606, "TODO_c000_2606", 0),
+ MVI(0xc0002607, "TODO_c000_2607", 0),
+ MVI(0xc0002608, "TODO_c000_2608", 0),
+ MVI(0xc0002609, "TODO_c000_2609", 0),
+ MVI(0xc000260a, "TODO_c000_260a", 0),
+ MVI(0xc000260b, "TODO_c000_260b", 0),
+ MVI(0xc000260c, "TODO_c000_260c", 0),
+ MVI(0xc000260d, "TODO_c000_260d", 0),
+ MVI(0xc000260e, "TODO_c000_260e", 0),
+ MVI(0xc000260f, "TODO_c000_260f", 0),
+ MVI(0xc0002610, "TODO_c000_2610", 0),
+ MVI(0xc0002611, "TODO_c000_2611", 0),
+ MVI(0xc0002612, "TODO_c000_2612", 0),
+ MVI(0xc0002613, "TODO_c000_2613", 0),
+ MVI(0xc0002614, "TODO_c000_2614", 0),
+ MVI(0xc0002615, "TODO_c000_2615", 0),
+ MVI(0xc0002616, "TODO_c000_2616", 0),
+ MVI(0xc0002617, "TODO_c000_2617", 0),
+ MVI(0xc0002618, "TODO_c000_2618", 0),
+ MVI(0xc0002619, "TODO_c000_2619", 0),
+ MVI(0xc000261a, "TODO_c000_261a", 0),
+ MVI(0xc000261b, "TODO_c000_261b", 0),
+ MVI(0xc000261c, "TODO_c000_261c", 0),
+ MVI(0xc000261d, "TODO_c000_261d", 0),
+ MVI(0xc000261e, "TODO_c000_261e", 0),
+ MVI(0xc000261f, "TODO_c000_261f", 0),
+ MVI(0xc0002620, "TODO_c000_2620", 0),
+ MVI(0xc0002621, "TODO_c000_2621", 0),
+ MVI(0xc0002622, "TODO_c000_2622", 0),
+ MVI(0xc0002623, "TODO_c000_2623", 0),
+ MVI(0xc0002624, "TODO_c000_2624", 0),
+ MVI(0xc0002625, "TODO_c000_2625", 0),
+ MVI(0xc0002626, "TODO_c000_2626", 0),
+ MVI(0xc0002627, "TODO_c000_2627", 0),
+ MVI(0xc0002628, "TODO_c000_2628", 0),
+ MVI(0xc0002629, "TODO_c000_2629", 0),
+ MVI(0xc000262a, "TODO_c000_262a", 0),
+ MVI(0xc000262b, "TODO_c000_262b", 0),
+ MVI(0xc000262c, "TODO_c000_262c", 0),
+ MVI(0xc000262d, "TODO_c000_262d", 0),
+ MVI(0xc000262e, "TODO_c000_262e", 0),
+ MVI(0xc000262f, "TODO_c000_262f", 0),
+ MVI(0xc0002630, "TODO_c000_2630", 0),
+ MVI(0xc0002631, "TODO_c000_2631", 0),
+ MVI(0xc0002632, "TODO_c000_2632", 0),
+ MVI(0xc0002633, "TODO_c000_2633", 0),
+ MVI(0xc0002634, "TODO_c000_2634", 0),
+ MVI(0xc0002635, "TODO_c000_2635", 0),
+ MVI(0xc0002636, "TODO_c000_2636", 0),
+ MVI(0xc0002637, "TODO_c000_2637", 0),
+ MVI(0xc0002638, "TODO_c000_2638", 0),
+ MVI(0xc0002639, "TODO_c000_2639", 0),
+ MVI(0xc000263a, "TODO_c000_263a", 0),
+ MVI(0xc000263b, "TODO_c000_263b", 0),
+ MVI(0xc000263c, "TODO_c000_263c", 0),
+ MVI(0xc000263d, "TODO_c000_263d", 0),
+ MVI(0xc000263e, "TODO_c000_263e", 0),
+ MVI(0xc000263f, "TODO_c000_263f", 0),
+ MVI(0xc0002640, "TODO_c000_2640", 0),
+ MVI(0xc0002641, "TODO_c000_2641", 0),
+ MVI(0xc0002642, "TODO_c000_2642", 0),
+ MVI(0xc0002643, "TODO_c000_2643", 0),
+ MVI(0xc0002644, "TODO_c000_2644", 0),
+ MVI(0xc0002645, "TODO_c000_2645", 0),
+ MVI(0xc0002646, "TODO_c000_2646", 0),
+ MVI(0xc0002647, "TODO_c000_2647", 0),
+ MVI(0xc0002648, "TODO_c000_2648", 0),
+ MVI(0xc0002649, "TODO_c000_2649", 0),
+ MVI(0xc000264a, "TODO_c000_264a", 0),
+ MVI(0xc000264b, "TODO_c000_264b", 0),
+ MVI(0xc000264c, "TODO_c000_264c", 0),
+ MVI(0xc000264d, "TODO_c000_264d", 0),
+ MVI(0xc000264e, "TODO_c000_264e", 0),
+ MVI(0xc000264f, "TODO_c000_264f", 0),
+ MVI(0xc0002650, "TODO_c000_2650", 0),
+ MVI(0xc0002651, "TODO_c000_2651", 0),
+ MVI(0xc0002652, "TODO_c000_2652", 0),
+ MVI(0xc0002653, "TODO_c000_2653", 0),
+ MVI(0xc0002654, "TODO_c000_2654", 0),
+ MVI(0xc0002655, "TODO_c000_2655", 0),
+ MVI(0xc0002656, "TODO_c000_2656", 0),
+ MVI(0xc0002657, "TODO_c000_2657", 0),
+ MVI(0xc0002658, "TODO_c000_2658", 0),
+ MVI(0xc0002659, "TODO_c000_2659", 0),
+ MVI(0xc000265a, "TODO_c000_265a", 0),
+ MVI(0xc000265b, "TODO_c000_265b", 0),
+ MVI(0xc000265c, "TODO_c000_265c", 0),
+ MVI(0xc000265d, "TODO_c000_265d", 0),
+ MVI(0xc000265e, "TODO_c000_265e", 0),
+ MVI(0xc000265f, "TODO_c000_265f", 0),
+ MVI(0xc0002660, "TODO_c000_2660", 0),
+ MVI(0xc0002661, "TODO_c000_2661", 0),
+ MVI(0xc0002662, "TODO_c000_2662", 0),
+ MVI(0xc0002663, "TODO_c000_2663", 0),
+ MVI(0xc0002664, "TODO_c000_2664", 0),
+ MVI(0xc0002665, "TODO_c000_2665", 0),
+ MVI(0xc0002666, "TODO_c000_2666", 0),
+ MVI(0xc0002667, "TODO_c000_2667", 0),
+ MVI(0xc0002668, "TODO_c000_2668", 0),
+ MVI(0xc0002669, "TODO_c000_2669", 0),
+ MVI(0xc000266a, "TODO_c000_266a", 0),
+ MVI(0xc000266b, "TODO_c000_266b", 0),
+ MVI(0xc000266c, "TODO_c000_266c", 0),
+ MVI(0xc000266d, "TODO_c000_266d", 0),
+ MVI(0xc000266e, "TODO_c000_266e", 0),
+ MVI(0xc000266f, "TODO_c000_266f", 0),
+ MVI(0xc0002670, "TODO_c000_2670", 0),
+ MVI(0xc0002671, "TODO_c000_2671", 0),
+ MVI(0xc0002672, "TODO_c000_2672", 0),
+ MVI(0xc0002673, "TODO_c000_2673", 0),
+ MVI(0xc0002674, "TODO_c000_2674", 0),
+ MVI(0xc0002675, "TODO_c000_2675", 0),
+ MVI(0xc0002676, "TODO_c000_2676", 0),
+ MVI(0xc0002677, "TODO_c000_2677", 0),
+ MVI(0xc0002678, "TODO_c000_2678", 0),
+ MVI(0xc0002679, "TODO_c000_2679", 0),
+ MVI(0xc000267a, "TODO_c000_267a", 0),
+ MVI(0xc000267b, "TODO_c000_267b", 0),
+ MVI(0xc000267c, "TODO_c000_267c", 0),
+ MVI(0xc000267d, "TODO_c000_267d", 0),
+ MVI(0xc000267e, "TODO_c000_267e", 0),
+ MVI(0xc000267f, "TODO_c000_267f", 0),
+ MVI(0xc0002680, "TODO_c000_2680", 0),
+ MVI(0xc0002681, "TODO_c000_2681", 0),
+ MVI(0xc0002682, "TODO_c000_2682", 0),
+ MVI(0xc0002683, "TODO_c000_2683", 0),
+ MVI(0xc0002684, "TODO_c000_2684", 0),
+ MVI(0xc0002685, "TODO_c000_2685", 0),
+ MVI(0xc0002686, "TODO_c000_2686", 0),
+ MVI(0xc0002687, "TODO_c000_2687", 0),
+ MVI(0xc0002688, "TODO_c000_2688", 0),
+ MVI(0xc0002689, "TODO_c000_2689", 0),
+ MVI(0xc000268a, "TODO_c000_268a", 0),
+ MVI(0xc000268b, "TODO_c000_268b", 0),
+ MVI(0xc000268c, "TODO_c000_268c", 0),
+ MVI(0xc000268d, "TODO_c000_268d", 0),
+ MVI(0xc000268e, "TODO_c000_268e", 0),
+ MVI(0xc000268f, "TODO_c000_268f", 0),
+ MVI(0xc0002690, "TODO_c000_2690", 0),
+ MVI(0xc0002691, "TODO_c000_2691", 0),
+ MVI(0xc0002692, "TODO_c000_2692", 0),
+ MVI(0xc0002693, "TODO_c000_2693", 0),
+ MVI(0xc0002694, "TODO_c000_2694", 0),
+ MVI(0xc0002695, "TODO_c000_2695", 0),
+ MVI(0xc0002696, "TODO_c000_2696", 0),
+ MVI(0xc0002697, "TODO_c000_2697", 0),
+ MVI(0xc0002698, "TODO_c000_2698", 0),
+ MVI(0xc0002699, "TODO_c000_2699", 0),
+ MVI(0xc000269a, "TODO_c000_269a", 0),
+ MVI(0xc000269b, "TODO_c000_269b", 0),
+ MVI(0xc000269c, "TODO_c000_269c", 0),
+ MVI(0xc000269d, "TODO_c000_269d", 0),
+ MVI(0xc000269e, "TODO_c000_269e", 0),
+ MVI(0xc000269f, "TODO_c000_269f", 0),
+ MVI(0xc00026a0, "TODO_c000_26a0", 0),
+ MVI(0xc00026a1, "TODO_c000_26a1", 0),
+ MVI(0xc00026a2, "TODO_c000_26a2", 0),
+ MVI(0xc00026a3, "TODO_c000_26a3", 0),
+ MVI(0xc00026a4, "TODO_c000_26a4", 0),
+ MVI(0xc00026a5, "TODO_c000_26a5", 0),
+ MVI(0xc00026a6, "TODO_c000_26a6", 0),
+ MVI(0xc00026a7, "TODO_c000_26a7", 0),
+ MVI(0xc00026a8, "TODO_c000_26a8", 0),
+ MVI(0xc00026a9, "TODO_c000_26a9", 0),
+ MVI(0xc00026aa, "TODO_c000_26aa", 0),
+ MVI(0xc00026ab, "TODO_c000_26ab", 0),
+ MVI(0xc00026ac, "TODO_c000_26ac", 0),
+ MVI(0xc00026ad, "TODO_c000_26ad", 0),
+ MVI(0xc00026ae, "TODO_c000_26ae", 0),
+ MVI(0xc00026af, "TODO_c000_26af", 0),
+ MVI(0xc00026b0, "TODO_c000_26b0", 0),
+ MVI(0xc00026b1, "TODO_c000_26b1", 0),
+ MVI(0xc00026b2, "TODO_c000_26b2", 0),
+ MVI(0xc00026b3, "TODO_c000_26b3", 0),
+ MVI(0xc00026b4, "TODO_c000_26b4", 0),
+ MVI(0xc00026b5, "TODO_c000_26b5", 0),
+ MVI(0xc00026b6, "TODO_c000_26b6", 0),
+ MVI(0xc00026b7, "TODO_c000_26b7", 0),
+ MVI(0xc00026b8, "TODO_c000_26b8", 0),
+ MVI(0xc00026b9, "TODO_c000_26b9", 0),
+ MVI(0xc00026ba, "TODO_c000_26ba", 0),
+ MVI(0xc00026bb, "TODO_c000_26bb", 0),
+ MVI(0xc00026bc, "TODO_c000_26bc", 0),
+ MVI(0xc00026bd, "TODO_c000_26bd", 0),
+ MVI(0xc00026be, "TODO_c000_26be", 0),
+ MVI(0xc00026bf, "TODO_c000_26bf", 0),
+ MVI(0xc00026c0, "TODO_c000_26c0", 0),
+ MVI(0xc00026c1, "TODO_c000_26c1", 0),
+ MVI(0xc00026c2, "TODO_c000_26c2", 0),
+ MVI(0xc00026c3, "TODO_c000_26c3", 0),
+ MVI(0xc00026c4, "TODO_c000_26c4", 0),
+ MVI(0xc00026c5, "TODO_c000_26c5", 0),
+ MVI(0xc00026c6, "TODO_c000_26c6", 0),
+ MVI(0xc00026c7, "TODO_c000_26c7", 0),
+ MVI(0xc00026c8, "TODO_c000_26c8", 0),
+ MVI(0xc00026c9, "TODO_c000_26c9", 0),
+ MVI(0xc00026ca, "TODO_c000_26ca", 0),
+ MVI(0xc00026cb, "TODO_c000_26cb", 0),
+ MVI(0xc00026cc, "TODO_c000_26cc", 0),
+ MVI(0xc00026cd, "TODO_c000_26cd", 0),
+ MVI(0xc00026ce, "TODO_c000_26ce", 0),
+ MVI(0xc00026cf, "TODO_c000_26cf", 0),
+ MVI(0xc00026d0, "TODO_c000_26d0", 0),
+ MVI(0xc00026d1, "TODO_c000_26d1", 0),
+ MVI(0xc00026d2, "TODO_c000_26d2", 0),
+ MVI(0xc00026d3, "TODO_c000_26d3", 0),
+ MVI(0xc00026d4, "TODO_c000_26d4", 0),
+ MVI(0xc00026d5, "TODO_c000_26d5", 0),
+ MVI(0xc00026d6, "TODO_c000_26d6", 0),
+ MVI(0xc00026d7, "TODO_c000_26d7", 0),
+ MVI(0xc00026d8, "TODO_c000_26d8", 0),
+ MVI(0xc00026d9, "TODO_c000_26d9", 0),
+ MVI(0xc00026da, "TODO_c000_26da", 0),
+ MVI(0xc00026db, "TODO_c000_26db", 0),
+ MVI(0xc00026dc, "TODO_c000_26dc", 0),
+ MVI(0xc00026dd, "TODO_c000_26dd", 0),
+ MVI(0xc00026de, "TODO_c000_26de", 0),
+ MVI(0xc00026df, "TODO_c000_26df", 0),
+ MVI(0xc00026e0, "TODO_c000_26e0", 0),
+ MVI(0xc00026e1, "TODO_c000_26e1", 0),
+ MVI(0xc00026e2, "TODO_c000_26e2", 0),
+ MVI(0xc00026e3, "TODO_c000_26e3", 0),
+ MVI(0xc00026e4, "TODO_c000_26e4", 0),
+ MVI(0xc00026e5, "TODO_c000_26e5", 0),
+ MVI(0xc00026e6, "TODO_c000_26e6", 0),
+ MVI(0xc00026e7, "TODO_c000_26e7", 0),
+ MVI(0xc00026e8, "TODO_c000_26e8", 0),
+ MVI(0xc00026e9, "TODO_c000_26e9", 0),
+ MVI(0xc00026ea, "TODO_c000_26ea", 0),
+ MVI(0xc00026eb, "TODO_c000_26eb", 0),
+ MVI(0xc00026ec, "TODO_c000_26ec", 0),
+ MVI(0xc00026ed, "TODO_c000_26ed", 0),
+ MVI(0xc00026ee, "TODO_c000_26ee", 0),
+ MVI(0xc00026ef, "TODO_c000_26ef", 0),
+ MVI(0xc00026f0, "TODO_c000_26f0", 0),
+ MVI(0xc00026f1, "TODO_c000_26f1", 0),
+ MVI(0xc00026f2, "TODO_c000_26f2", 0),
+ MVI(0xc00026f3, "TODO_c000_26f3", 0),
+ MVI(0xc00026f4, "TODO_c000_26f4", 0),
+ MVI(0xc00026f5, "TODO_c000_26f5", 0),
+ MVI(0xc00026f6, "TODO_c000_26f6", 0),
+ MVI(0xc00026f7, "TODO_c000_26f7", 0),
+ MVI(0xc00026f8, "TODO_c000_26f8", 0),
+ MVI(0xc00026f9, "TODO_c000_26f9", 0),
+ MVI(0xc00026fa, "TODO_c000_26fa", 0),
+ MVI(0xc00026fb, "TODO_c000_26fb", 0),
+ MVI(0xc00026fc, "TODO_c000_26fc", 0),
+ MVI(0xc00026fd, "TODO_c000_26fd", 0),
+ MVI(0xc00026fe, "TODO_c000_26fe", 0),
+ MVI(0xc00026ff, "TODO_c000_26ff", 0),
+ MVI(0xc0002700, "TODO_c000_2700", 0),
+ MVI(0xc0002701, "TODO_c000_2701", 0),
+ MVI(0xc0002702, "TODO_c000_2702", 0),
+ MVI(0xc0002703, "TODO_c000_2703", 0),
+ MVI(0xc0002704, "TODO_c000_2704", 0),
+ MVI(0xc0002705, "TODO_c000_2705", 0),
+ MVI(0xc0002706, "TODO_c000_2706", 0),
+ MVI(0xc0002707, "TODO_c000_2707", 0),
+ MVI(0xc0002708, "TODO_c000_2708", 0),
+ MVI(0xc0002709, "TODO_c000_2709", 0),
+ MVI(0xc000270a, "TODO_c000_270a", 0),
+ MVI(0xc000270b, "TODO_c000_270b", 0),
+ MVI(0xc000270c, "TODO_c000_270c", 0),
+ MVI(0xc000270d, "TODO_c000_270d", 0),
+ MVI(0xc000270e, "TODO_c000_270e", 0),
+ MVI(0xc000270f, "TODO_c000_270f", 0),
+ MVI(0xc0002710, "TODO_c000_2710", 0),
+ MVI(0xc0002711, "TODO_c000_2711", 0),
+ MVI(0xc0002712, "TODO_c000_2712", 0),
+ MVI(0xc0002713, "TODO_c000_2713", 0),
+ MVI(0xc0002714, "TODO_c000_2714", 0),
+ MVI(0xc0002715, "TODO_c000_2715", 0),
+ MVI(0xc0002716, "TODO_c000_2716", 0),
+ MVI(0xc0002717, "TODO_c000_2717", 0),
+ MVI(0xc0002718, "TODO_c000_2718", 0),
+ MVI(0xc0002719, "TODO_c000_2719", 0),
+ MVI(0xc000271a, "TODO_c000_271a", 0),
+ MVI(0xc000271b, "TODO_c000_271b", 0),
+ MVI(0xc000271c, "TODO_c000_271c", 0),
+ MVI(0xc000271d, "TODO_c000_271d", 0),
+ MVI(0xc000271e, "TODO_c000_271e", 0),
+ MVI(0xc000271f, "TODO_c000_271f", 0),
+ MVI(0xc0002720, "TODO_c000_2720", 0),
+ MVI(0xc0002721, "TODO_c000_2721", 0),
+ MVI(0xc0002722, "TODO_c000_2722", 0),
+ MVI(0xc0002723, "TODO_c000_2723", 0),
+ MVI(0xc0002724, "TODO_c000_2724", 0),
+ MVI(0xc0002725, "TODO_c000_2725", 0),
+ MVI(0xc0002726, "TODO_c000_2726", 0),
+ MVI(0xc0002727, "TODO_c000_2727", 0),
+ MVI(0xc0002728, "TODO_c000_2728", 0),
+ MVI(0xc0002729, "TODO_c000_2729", 0),
+ MVI(0xc000272a, "TODO_c000_272a", 0),
+ MVI(0xc000272b, "TODO_c000_272b", 0),
+ MVI(0xc000272c, "TODO_c000_272c", 0),
+ MVI(0xc000272d, "TODO_c000_272d", 0),
+ MVI(0xc000272e, "TODO_c000_272e", 0),
+ MVI(0xc000272f, "TODO_c000_272f", 0),
+ MVI(0xc0002730, "TODO_c000_2730", 0),
+ MVI(0xc0002731, "TODO_c000_2731", 0),
+ MVI(0xc0002732, "TODO_c000_2732", 0),
+ MVI(0xc0002733, "TODO_c000_2733", 0),
+ MVI(0xc0002734, "TODO_c000_2734", 0),
+ MVI(0xc0002735, "TODO_c000_2735", 0),
+ MVI(0xc0002736, "TODO_c000_2736", 0),
+ MVI(0xc0002737, "TODO_c000_2737", 0),
+ MVI(0xc0002738, "TODO_c000_2738", 0),
+ MVI(0xc0002739, "TODO_c000_2739", 0),
+ MVI(0xc000273a, "TODO_c000_273a", 0),
+ MVI(0xc000273b, "TODO_c000_273b", 0),
+ MVI(0xc000273c, "TODO_c000_273c", 0),
+ MVI(0xc000273d, "TODO_c000_273d", 0),
+ MVI(0xc000273e, "TODO_c000_273e", 0),
+ MVI(0xc000273f, "TODO_c000_273f", 0),
+ MVI(0xc0002740, "TODO_c000_2740", 0),
+ MVI(0xc0002741, "TODO_c000_2741", 0),
+ MVI(0xc0002742, "TODO_c000_2742", 0),
+ MVI(0xc0002743, "TODO_c000_2743", 0),
+ MVI(0xc0002744, "TODO_c000_2744", 0),
+ MVI(0xc0002745, "TODO_c000_2745", 0),
+ MVI(0xc0002746, "TODO_c000_2746", 0),
+ MVI(0xc0002747, "TODO_c000_2747", 0),
+ MVI(0xc0002748, "TODO_c000_2748", 0),
+ MVI(0xc0002749, "TODO_c000_2749", 0),
+ MVI(0xc000274a, "TODO_c000_274a", 0),
+ MVI(0xc000274b, "TODO_c000_274b", 0),
+ MVI(0xc000274c, "TODO_c000_274c", 0),
+ MVI(0xc000274d, "TODO_c000_274d", 0),
+ MVI(0xc000274e, "TODO_c000_274e", 0),
+ MVI(0xc000274f, "TODO_c000_274f", 0),
+ MVI(0xc0002750, "TODO_c000_2750", 0),
+ MVI(0xc0002751, "TODO_c000_2751", 0),
+ MVI(0xc0002752, "TODO_c000_2752", 0),
+ MVI(0xc0002753, "TODO_c000_2753", 0),
+ MVI(0xc0002754, "TODO_c000_2754", 0),
+ MVI(0xc0002755, "TODO_c000_2755", 0),
+ MVI(0xc0002756, "TODO_c000_2756", 0),
+ MVI(0xc0002757, "TODO_c000_2757", 0),
+ MVI(0xc0002758, "TODO_c000_2758", 0),
+ MVI(0xc0002759, "TODO_c000_2759", 0),
+ MVI(0xc000275a, "TODO_c000_275a", 0),
+ MVI(0xc000275b, "TODO_c000_275b", 0),
+ MVI(0xc000275c, "TODO_c000_275c", 0),
+ MVI(0xc000275d, "TODO_c000_275d", 0),
+ MVI(0xc000275e, "TODO_c000_275e", 0),
+ MVI(0xc000275f, "TODO_c000_275f", 0),
+ MVI(0xc0002760, "TODO_c000_2760", 0),
+ MVI(0xc0002761, "TODO_c000_2761", 0),
+ MVI(0xc0002762, "TODO_c000_2762", 0),
+ MVI(0xc0002763, "TODO_c000_2763", 0),
+ MVI(0xc0002764, "TODO_c000_2764", 0),
+ MVI(0xc0002765, "TODO_c000_2765", 0),
+ MVI(0xc0002766, "TODO_c000_2766", 0),
+ MVI(0xc0002767, "TODO_c000_2767", 0),
+ MVI(0xc0002768, "TODO_c000_2768", 0),
+ MVI(0xc0002769, "TODO_c000_2769", 0),
+ MVI(0xc000276a, "TODO_c000_276a", 0),
+ MVI(0xc000276b, "TODO_c000_276b", 0),
+ MVI(0xc000276c, "TODO_c000_276c", 0),
+ MVI(0xc000276d, "TODO_c000_276d", 0),
+ MVI(0xc000276e, "TODO_c000_276e", 0),
+ MVI(0xc000276f, "TODO_c000_276f", 0),
+ MVI(0xc0002770, "TODO_c000_2770", 0),
+ MVI(0xc0002771, "TODO_c000_2771", 0),
+ MVI(0xc0002772, "TODO_c000_2772", 0),
+ MVI(0xc0002773, "TODO_c000_2773", 0),
+ MVI(0xc0002774, "TODO_c000_2774", 0),
+ MVI(0xc0002775, "TODO_c000_2775", 0),
+ MVI(0xc0002776, "TODO_c000_2776", 0),
+ MVI(0xc0002777, "TODO_c000_2777", 0),
+ MVI(0xc0002778, "TODO_c000_2778", 0),
+ MVI(0xc0002779, "TODO_c000_2779", 0),
+ MVI(0xc000277a, "TODO_c000_277a", 0),
+ MVI(0xc000277b, "TODO_c000_277b", 0),
+ MVI(0xc000277c, "TODO_c000_277c", 0),
+ MVI(0xc000277d, "TODO_c000_277d", 0),
+ MVI(0xc000277e, "TODO_c000_277e", 0),
+ MVI(0xc000277f, "TODO_c000_277f", 0),
+ MVI(0xc0002780, "TODO_c000_2780", 0),
+ MVI(0xc0002781, "TODO_c000_2781", 0),
+ MVI(0xc0002782, "TODO_c000_2782", 0),
+ MVI(0xc0002783, "TODO_c000_2783", 0),
+ MVI(0xc0002784, "TODO_c000_2784", 0),
+ MVI(0xc0002785, "TODO_c000_2785", 0),
+ MVI(0xc0002786, "TODO_c000_2786", 0),
+ MVI(0xc0002787, "TODO_c000_2787", 0),
+ MVI(0xc0002788, "TODO_c000_2788", 0),
+ MVI(0xc0002789, "TODO_c000_2789", 0),
+ MVI(0xc000278a, "TODO_c000_278a", 0),
+ MVI(0xc000278b, "TODO_c000_278b", 0),
+ MVI(0xc000278c, "TODO_c000_278c", 0),
+ MVI(0xc000278d, "TODO_c000_278d", 0),
+ MVI(0xc000278e, "TODO_c000_278e", 0),
+ MVI(0xc000278f, "TODO_c000_278f", 0),
+ MVI(0xc0002790, "TODO_c000_2790", 0),
+ MVI(0xc0002791, "TODO_c000_2791", 0),
+ MVI(0xc0002792, "TODO_c000_2792", 0),
+ MVI(0xc0002793, "TODO_c000_2793", 0),
+ MVI(0xc0002794, "TODO_c000_2794", 0),
+ MVI(0xc0002795, "TODO_c000_2795", 0),
+ MVI(0xc0002796, "TODO_c000_2796", 0),
+ MVI(0xc0002797, "TODO_c000_2797", 0),
+ MVI(0xc0002798, "TODO_c000_2798", 0),
+ MVI(0xc0002799, "TODO_c000_2799", 0),
+ MVI(0xc000279a, "TODO_c000_279a", 0),
+ MVI(0xc000279b, "TODO_c000_279b", 0),
+ MVI(0xc000279c, "TODO_c000_279c", 0),
+ MVI(0xc000279d, "TODO_c000_279d", 0),
+ MVI(0xc000279e, "TODO_c000_279e", 0),
+ MVI(0xc000279f, "TODO_c000_279f", 0),
+ MVI(0xc00027a0, "TODO_c000_27a0", 0),
+ MVI(0xc00027a1, "TODO_c000_27a1", 0),
+ MVI(0xc00027a2, "TODO_c000_27a2", 0),
+ MVI(0xc00027a3, "TODO_c000_27a3", 0),
+ MVI(0xc00027a4, "TODO_c000_27a4", 0),
+ MVI(0xc00027a5, "TODO_c000_27a5", 0),
+ MVI(0xc00027a6, "TODO_c000_27a6", 0),
+ MVI(0xc00027a7, "TODO_c000_27a7", 0),
+ MVI(0xc00027a8, "TODO_c000_27a8", 0),
+ MVI(0xc00027a9, "TODO_c000_27a9", 0),
+ MVI(0xc00027aa, "TODO_c000_27aa", 0),
+ MVI(0xc00027ab, "TODO_c000_27ab", 0),
+ MVI(0xc00027ac, "TODO_c000_27ac", 0),
+ MVI(0xc00027ad, "TODO_c000_27ad", 0),
+ MVI(0xc00027ae, "TODO_c000_27ae", 0),
+ MVI(0xc00027af, "TODO_c000_27af", 0),
+ MVI(0xc00027b0, "TODO_c000_27b0", 0),
+ MVI(0xc00027b1, "TODO_c000_27b1", 0),
+ MVI(0xc00027b2, "TODO_c000_27b2", 0),
+ MVI(0xc00027b3, "TODO_c000_27b3", 0),
+ MVI(0xc00027b4, "TODO_c000_27b4", 0),
+ MVI(0xc00027b5, "TODO_c000_27b5", 0),
+ MVI(0xc00027b6, "TODO_c000_27b6", 0),
+ MVI(0xc00027b7, "TODO_c000_27b7", 0),
+ MVI(0xc00027b8, "TODO_c000_27b8", 0),
+ MVI(0xc00027b9, "TODO_c000_27b9", 0),
+ MVI(0xc00027ba, "TODO_c000_27ba", 0),
+ MVI(0xc00027bb, "TODO_c000_27bb", 0),
+ MVI(0xc00027bc, "TODO_c000_27bc", 0),
+ MVI(0xc00027bd, "TODO_c000_27bd", 0),
+ MVI(0xc00027be, "TODO_c000_27be", 0),
+ MVI(0xc00027bf, "TODO_c000_27bf", 0),
+ MVI(0xc00027c0, "TODO_c000_27c0", 0),
+ MVI(0xc00027c1, "TODO_c000_27c1", 0),
+ MVI(0xc00027c2, "TODO_c000_27c2", 0),
+ MVI(0xc00027c3, "TODO_c000_27c3", 0),
+ MVI(0xc00027c4, "TODO_c000_27c4", 0),
+ MVI(0xc00027c5, "TODO_c000_27c5", 0),
+ MVI(0xc00027c6, "TODO_c000_27c6", 0),
+ MVI(0xc00027c7, "TODO_c000_27c7", 0),
+ MVI(0xc00027c8, "TODO_c000_27c8", 0),
+ MVI(0xc00027c9, "TODO_c000_27c9", 0),
+ MVI(0xc00027ca, "TODO_c000_27ca", 0),
+ MVI(0xc00027cb, "TODO_c000_27cb", 0),
+ MVI(0xc00027cc, "TODO_c000_27cc", 0),
+ MVI(0xc00027cd, "TODO_c000_27cd", 0),
+ MVI(0xc00027ce, "TODO_c000_27ce", 0),
+ MVI(0xc00027cf, "TODO_c000_27cf", 0),
+ MVI(0xc00027d0, "TODO_c000_27d0", 0),
+ MVI(0xc00027d1, "TODO_c000_27d1", 0),
+ MVI(0xc00027d2, "TODO_c000_27d2", 0),
+ MVI(0xc00027d3, "TODO_c000_27d3", 0),
+ MVI(0xc00027d4, "TODO_c000_27d4", 0),
+ MVI(0xc00027d5, "TODO_c000_27d5", 0),
+ MVI(0xc00027d6, "TODO_c000_27d6", 0),
+ MVI(0xc00027d7, "TODO_c000_27d7", 0),
+ MVI(0xc00027d8, "TODO_c000_27d8", 0),
+ MVI(0xc00027d9, "TODO_c000_27d9", 0),
+ MVI(0xc00027da, "TODO_c000_27da", 0),
+ MVI(0xc00027db, "TODO_c000_27db", 0),
+ MVI(0xc00027dc, "TODO_c000_27dc", 0),
+ MVI(0xc00027dd, "TODO_c000_27dd", 0),
+ MVI(0xc00027de, "TODO_c000_27de", 0),
+ MVI(0xc00027df, "TODO_c000_27df", 0),
+ MVI(0xc00027e0, "TODO_c000_27e0", 0),
+ MVI(0xc00027e1, "TODO_c000_27e1", 0),
+ MVI(0xc00027e2, "TODO_c000_27e2", 0),
+ MVI(0xc00027e3, "TODO_c000_27e3", 0),
+ MVI(0xc00027e4, "TODO_c000_27e4", 0),
+ MVI(0xc00027e5, "TODO_c000_27e5", 0),
+ MVI(0xc00027e6, "TODO_c000_27e6", 0),
+ MVI(0xc00027e7, "TODO_c000_27e7", 0),
+ MVI(0xc00027e8, "TODO_c000_27e8", 0),
+ MVI(0xc00027e9, "TODO_c000_27e9", 0),
+ MVI(0xc00027ea, "TODO_c000_27ea", 0),
+ MVI(0xc00027eb, "TODO_c000_27eb", 0),
+ MVI(0xc00027ec, "TODO_c000_27ec", 0),
+ MVI(0xc00027ed, "TODO_c000_27ed", 0),
+ MVI(0xc00027ee, "TODO_c000_27ee", 0),
+ MVI(0xc00027ef, "TODO_c000_27ef", 0),
+ MVI(0xc00027f0, "TODO_c000_27f0", 0),
+ MVI(0xc00027f1, "TODO_c000_27f1", 0),
+ MVI(0xc00027f2, "TODO_c000_27f2", 0),
+ MVI(0xc00027f3, "TODO_c000_27f3", 0),
+ MVI(0xc00027f4, "TODO_c000_27f4", 0),
+ MVI(0xc00027f5, "TODO_c000_27f5", 0),
+ MVI(0xc00027f6, "TODO_c000_27f6", 0),
+ MVI(0xc00027f7, "TODO_c000_27f7", 0),
+ MVI(0xc00027f8, "TODO_c000_27f8", 0),
+ MVI(0xc00027f9, "TODO_c000_27f9", 0),
+ MVI(0xc00027fa, "TODO_c000_27fa", 0),
+ MVI(0xc00027fb, "TODO_c000_27fb", 0),
+ MVI(0xc00027fc, "TODO_c000_27fc", 0),
+ MVI(0xc00027fd, "TODO_c000_27fd", 0),
+ MVI(0xc00027fe, "TODO_c000_27fe", 0),
+ MVI(0xc00027ff, "TODO_c000_27ff", 0),
+ MVI(0xc0002800, "TODO_c000_2800", 0),
+ MVI(0xc0002801, "TODO_c000_2801", 0),
+ MVI(0xc0002802, "TODO_c000_2802", 0),
+ MVI(0xc0002803, "TODO_c000_2803", 0),
+ MVI(0xc0002804, "TODO_c000_2804", 0),
+ MVI(0xc0002805, "TODO_c000_2805", 0),
+ MVI(0xc0002806, "TODO_c000_2806", 0),
+ MVI(0xc0002807, "TODO_c000_2807", 0),
+ MVI(0xc0002808, "TODO_c000_2808", 0),
+ MVI(0xc0002809, "TODO_c000_2809", 0),
+ MVI(0xc000280a, "TODO_c000_280a", 0),
+ MVI(0xc000280b, "TODO_c000_280b", 0),
+ MVI(0xc000280c, "TODO_c000_280c", 0),
+ MVI(0xc000280d, "TODO_c000_280d", 0),
+ MVI(0xc000280e, "TODO_c000_280e", 0),
+ MVI(0xc000280f, "TODO_c000_280f", 0),
+ MVI(0xc0002810, "TODO_c000_2810", 0),
+ MVI(0xc0002811, "TODO_c000_2811", 0),
+ MVI(0xc0002812, "TODO_c000_2812", 0),
+ MVI(0xc0002813, "TODO_c000_2813", 0),
+ MVI(0xc0002814, "TODO_c000_2814", 0),
+ MVI(0xc0002815, "TODO_c000_2815", 0),
+ MVI(0xc0002816, "TODO_c000_2816", 0),
+ MVI(0xc0002817, "TODO_c000_2817", 0),
+ MVI(0xc0002818, "TODO_c000_2818", 0),
+ MVI(0xc0002819, "TODO_c000_2819", 0),
+ MVI(0xc000281a, "TODO_c000_281a", 0),
+ MVI(0xc000281b, "TODO_c000_281b", 0),
+ MVI(0xc000281c, "TODO_c000_281c", 0),
+ MVI(0xc000281d, "TODO_c000_281d", 0),
+ MVI(0xc000281e, "TODO_c000_281e", 0),
+ MVI(0xc000281f, "TODO_c000_281f", 0),
+ MVI(0xc0002820, "TODO_c000_2820", 0),
+ MVI(0xc0002821, "TODO_c000_2821", 0),
+ MVI(0xc0002822, "TODO_c000_2822", 0),
+ MVI(0xc0002823, "TODO_c000_2823", 0),
+ MVI(0xc0002824, "TODO_c000_2824", 0),
+ MVI(0xc0002825, "TODO_c000_2825", 0),
+ MVI(0xc0002826, "TODO_c000_2826", 0),
+ MVI(0xc0002827, "TODO_c000_2827", 0),
+ MVI(0xc0002828, "TODO_c000_2828", 0),
+ MVI(0xc0002829, "TODO_c000_2829", 0),
+ MVI(0xc000282a, "TODO_c000_282a", 0),
+ MVI(0xc000282b, "TODO_c000_282b", 0),
+ MVI(0xc000282c, "TODO_c000_282c", 0),
+ MVI(0xc000282d, "TODO_c000_282d", 0),
+ MVI(0xc000282e, "TODO_c000_282e", 0),
+ MVI(0xc000282f, "TODO_c000_282f", 0),
+ MVI(0xc0002830, "TODO_c000_2830", 0),
+ MVI(0xc0002831, "TODO_c000_2831", 0),
+ MVI(0xc0002832, "TODO_c000_2832", 0),
+ MVI(0xc0002833, "TODO_c000_2833", 0),
+ MVI(0xc0002834, "TODO_c000_2834", 0),
+ MVI(0xc0002835, "TODO_c000_2835", 0),
+ MVI(0xc0002836, "TODO_c000_2836", 0),
+ MVI(0xc0002837, "TODO_c000_2837", 0),
+ MVI(0xc0002838, "TODO_c000_2838", 0),
+ MVI(0xc0002839, "TODO_c000_2839", 0),
+ MVI(0xc000283a, "TODO_c000_283a", 0),
+ MVI(0xc000283b, "TODO_c000_283b", 0),
+ MVI(0xc000283c, "TODO_c000_283c", 0),
+ MVI(0xc000283d, "TODO_c000_283d", 0),
+ MVI(0xc000283e, "TODO_c000_283e", 0),
+ MVI(0xc000283f, "TODO_c000_283f", 0),
+ MVI(0xc0002840, "TODO_c000_2840", 0),
+ MVI(0xc0002841, "TODO_c000_2841", 0),
+ MVI(0xc0002842, "TODO_c000_2842", 0),
+ MVI(0xc0002843, "TODO_c000_2843", 0),
+ MVI(0xc0002844, "TODO_c000_2844", 0),
+ MVI(0xc0002845, "TODO_c000_2845", 0),
+ MVI(0xc0002846, "TODO_c000_2846", 0),
+ MVI(0xc0002847, "TODO_c000_2847", 0),
+ MVI(0xc0002848, "TODO_c000_2848", 0),
+ MVI(0xc0002849, "TODO_c000_2849", 0),
+ MVI(0xc000284a, "TODO_c000_284a", 0),
+ MVI(0xc000284b, "TODO_c000_284b", 0),
+ MVI(0xc000284c, "TODO_c000_284c", 0),
+ MVI(0xc000284d, "TODO_c000_284d", 0),
+ MVI(0xc000284e, "TODO_c000_284e", 0),
+ MVI(0xc000284f, "TODO_c000_284f", 0),
+ MVI(0xc0002850, "TODO_c000_2850", 0),
+ MVI(0xc0002851, "TODO_c000_2851", 0),
+ MVI(0xc0002852, "TODO_c000_2852", 0),
+ MVI(0xc0002853, "TODO_c000_2853", 0),
+ MVI(0xc0002854, "TODO_c000_2854", 0),
+ MVI(0xc0002855, "TODO_c000_2855", 0),
+ MVI(0xc0002856, "TODO_c000_2856", 0),
+ MVI(0xc0002857, "TODO_c000_2857", 0),
+ MVI(0xc0002858, "TODO_c000_2858", 0),
+ MVI(0xc0002859, "TODO_c000_2859", 0),
+ MVI(0xc000285a, "TODO_c000_285a", 0),
+ MVI(0xc000285b, "TODO_c000_285b", 0),
+ MVI(0xc000285c, "TODO_c000_285c", 0),
+ MVI(0xc000285d, "TODO_c000_285d", 0),
+ MVI(0xc000285e, "TODO_c000_285e", 0),
+ MVI(0xc000285f, "TODO_c000_285f", 0),
+ MVI(0xc0002860, "TODO_c000_2860", 0),
+ MVI(0xc0002861, "TODO_c000_2861", 0),
+ MVI(0xc0002862, "TODO_c000_2862", 0),
+ MVI(0xc0002863, "TODO_c000_2863", 0),
+ MVI(0xc0002864, "TODO_c000_2864", 0),
+ MVI(0xc0002865, "TODO_c000_2865", 0),
+ MVI(0xc0002866, "TODO_c000_2866", 0),
+ MVI(0xc0002867, "TODO_c000_2867", 0),
+ MVI(0xc0002868, "TODO_c000_2868", 0),
+ MVI(0xc0002869, "TODO_c000_2869", 0),
+ MVI(0xc000286a, "TODO_c000_286a", 0),
+ MVI(0xc000286b, "TODO_c000_286b", 0),
+ MVI(0xc000286c, "TODO_c000_286c", 0),
+ MVI(0xc000286d, "TODO_c000_286d", 0),
+ MVI(0xc000286e, "TODO_c000_286e", 0),
+ MVI(0xc000286f, "TODO_c000_286f", 0),
+ MVI(0xc0002870, "TODO_c000_2870", 0),
+ MVI(0xc0002871, "TODO_c000_2871", 0),
+ MVI(0xc0002872, "TODO_c000_2872", 0),
+ MVI(0xc0002873, "TODO_c000_2873", 0),
+ MVI(0xc0002874, "TODO_c000_2874", 0),
+ MVI(0xc0002875, "TODO_c000_2875", 0),
+ MVI(0xc0002876, "TODO_c000_2876", 0),
+ MVI(0xc0002877, "TODO_c000_2877", 0),
+ MVI(0xc0002878, "TODO_c000_2878", 0),
+ MVI(0xc0002879, "TODO_c000_2879", 0),
+ MVI(0xc000287a, "TODO_c000_287a", 0),
+ MVI(0xc000287b, "TODO_c000_287b", 0),
+ MVI(0xc000287c, "TODO_c000_287c", 0),
+ MVI(0xc000287d, "TODO_c000_287d", 0),
+ MVI(0xc000287e, "TODO_c000_287e", 0),
+ MVI(0xc000287f, "TODO_c000_287f", 0),
+ MVI(0xc0002880, "TODO_c000_2880", 0),
+ MVI(0xc0002881, "TODO_c000_2881", 0),
+ MVI(0xc0002882, "TODO_c000_2882", 0),
+ MVI(0xc0002883, "TODO_c000_2883", 0),
+ MVI(0xc0002884, "TODO_c000_2884", 0),
+ MVI(0xc0002885, "TODO_c000_2885", 0),
+ MVI(0xc0002886, "TODO_c000_2886", 0),
+ MVI(0xc0002887, "TODO_c000_2887", 0),
+ MVI(0xc0002888, "TODO_c000_2888", 0),
+ MVI(0xc0002889, "TODO_c000_2889", 0),
+ MVI(0xc000288a, "TODO_c000_288a", 0),
+ MVI(0xc000288b, "TODO_c000_288b", 0),
+ MVI(0xc000288c, "TODO_c000_288c", 0),
+ MVI(0xc000288d, "TODO_c000_288d", 0),
+ MVI(0xc000288e, "TODO_c000_288e", 0),
+ MVI(0xc000288f, "TODO_c000_288f", 0),
+ MVI(0xc0002890, "TODO_c000_2890", 0),
+ MVI(0xc0002891, "TODO_c000_2891", 0),
+ MVI(0xc0002892, "TODO_c000_2892", 0),
+ MVI(0xc0002893, "TODO_c000_2893", 0),
+ MVI(0xc0002894, "TODO_c000_2894", 0),
+ MVI(0xc0002895, "TODO_c000_2895", 0),
+ MVI(0xc0002896, "TODO_c000_2896", 0),
+ MVI(0xc0002897, "TODO_c000_2897", 0),
+ MVI(0xc0002898, "TODO_c000_2898", 0),
+ MVI(0xc0002899, "TODO_c000_2899", 0),
+ MVI(0xc000289a, "TODO_c000_289a", 0),
+ MVI(0xc000289b, "TODO_c000_289b", 0),
+ MVI(0xc000289c, "TODO_c000_289c", 0),
+ MVI(0xc000289d, "TODO_c000_289d", 0),
+ MVI(0xc000289e, "TODO_c000_289e", 0),
+ MVI(0xc000289f, "TODO_c000_289f", 0),
+ MVI(0xc00028a0, "TODO_c000_28a0", 0),
+ MVI(0xc00028a1, "TODO_c000_28a1", 0),
+ MVI(0xc00028a2, "TODO_c000_28a2", 0),
+ MVI(0xc00028a3, "TODO_c000_28a3", 0),
+ MVI(0xc00028a4, "TODO_c000_28a4", 0),
+ MVI(0xc00028a5, "TODO_c000_28a5", 0),
+ MVI(0xc00028a6, "TODO_c000_28a6", 0),
+ MVI(0xc00028a7, "TODO_c000_28a7", 0),
+ MVI(0xc00028a8, "TODO_c000_28a8", 0),
+ MVI(0xc00028a9, "TODO_c000_28a9", 0),
+ MVI(0xc00028aa, "TODO_c000_28aa", 0),
+ MVI(0xc00028ab, "TODO_c000_28ab", 0),
+ MVI(0xc00028ac, "TODO_c000_28ac", 0),
+ MVI(0xc00028ad, "TODO_c000_28ad", 0),
+ MVI(0xc00028ae, "TODO_c000_28ae", 0),
+ MVI(0xc00028af, "TODO_c000_28af", 0),
+ MVI(0xc00028b0, "TODO_c000_28b0", 0),
+ MVI(0xc00028b1, "TODO_c000_28b1", 0),
+ MVI(0xc00028b2, "TODO_c000_28b2", 0),
+ MVI(0xc00028b3, "TODO_c000_28b3", 0),
+ MVI(0xc00028b4, "TODO_c000_28b4", 0),
+ MVI(0xc00028b5, "TODO_c000_28b5", 0),
+ MVI(0xc00028b6, "TODO_c000_28b6", 0),
+ MVI(0xc00028b7, "TODO_c000_28b7", 0),
+ MVI(0xc00028b8, "TODO_c000_28b8", 0),
+ MVI(0xc00028b9, "TODO_c000_28b9", 0),
+ MVI(0xc00028ba, "TODO_c000_28ba", 0),
+ MVI(0xc00028bb, "TODO_c000_28bb", 0),
+ MVI(0xc00028bc, "TODO_c000_28bc", 0),
+ MVI(0xc00028bd, "TODO_c000_28bd", 0),
+ MVI(0xc00028be, "TODO_c000_28be", 0),
+ MVI(0xc00028bf, "TODO_c000_28bf", 0),
+ MVI(0xc00028c0, "TODO_c000_28c0", 0),
+ MVI(0xc00028c1, "TODO_c000_28c1", 0),
+ MVI(0xc00028c2, "TODO_c000_28c2", 0),
+ MVI(0xc00028c3, "TODO_c000_28c3", 0),
+ MVI(0xc00028c4, "TODO_c000_28c4", 0),
+ MVI(0xc00028c5, "TODO_c000_28c5", 0),
+ MVI(0xc00028c6, "TODO_c000_28c6", 0),
+ MVI(0xc00028c7, "TODO_c000_28c7", 0),
+ MVI(0xc00028c8, "TODO_c000_28c8", 0),
+ MVI(0xc00028c9, "TODO_c000_28c9", 0),
+ MVI(0xc00028ca, "TODO_c000_28ca", 0),
+ MVI(0xc00028cb, "TODO_c000_28cb", 0),
+ MVI(0xc00028cc, "TODO_c000_28cc", 0),
+ MVI(0xc00028cd, "TODO_c000_28cd", 0),
+ MVI(0xc00028ce, "TODO_c000_28ce", 0),
+ MVI(0xc00028cf, "TODO_c000_28cf", 0),
+ MVI(0xc00028d0, "TODO_c000_28d0", 0),
+ MVI(0xc00028d1, "TODO_c000_28d1", 0),
+ MVI(0xc00028d2, "TODO_c000_28d2", 0),
+ MVI(0xc00028d3, "TODO_c000_28d3", 0),
+ MVI(0xc00028d4, "TODO_c000_28d4", 0),
+ MVI(0xc00028d5, "TODO_c000_28d5", 0),
+ MVI(0xc00028d6, "TODO_c000_28d6", 0),
+ MVI(0xc00028d7, "TODO_c000_28d7", 0),
+ MVI(0xc00028d8, "TODO_c000_28d8", 0),
+ MVI(0xc00028d9, "TODO_c000_28d9", 0),
+ MVI(0xc00028da, "TODO_c000_28da", 0),
+ MVI(0xc00028db, "TODO_c000_28db", 0),
+ MVI(0xc00028dc, "TODO_c000_28dc", 0),
+ MVI(0xc00028dd, "TODO_c000_28dd", 0),
+ MVI(0xc00028de, "TODO_c000_28de", 0),
+ MVI(0xc00028df, "TODO_c000_28df", 0),
+ MVI(0xc00028e0, "TODO_c000_28e0", 0),
+ MVI(0xc00028e1, "TODO_c000_28e1", 0),
+ MVI(0xc00028e2, "TODO_c000_28e2", 0),
+ MVI(0xc00028e3, "TODO_c000_28e3", 0),
+ MVI(0xc00028e4, "TODO_c000_28e4", 0),
+ MVI(0xc00028e5, "TODO_c000_28e5", 0),
+ MVI(0xc00028e6, "TODO_c000_28e6", 0),
+ MVI(0xc00028e7, "TODO_c000_28e7", 0),
+ MVI(0xc00028e8, "TODO_c000_28e8", 0),
+ MVI(0xc00028e9, "TODO_c000_28e9", 0),
+ MVI(0xc00028ea, "TODO_c000_28ea", 0),
+ MVI(0xc00028eb, "TODO_c000_28eb", 0),
+ MVI(0xc00028ec, "TODO_c000_28ec", 0),
+ MVI(0xc00028ed, "TODO_c000_28ed", 0),
+ MVI(0xc00028ee, "TODO_c000_28ee", 0),
+ MVI(0xc00028ef, "TODO_c000_28ef", 0),
+ MVI(0xc00028f0, "TODO_c000_28f0", 0),
+ MVI(0xc00028f1, "TODO_c000_28f1", 0),
+ MVI(0xc00028f2, "TODO_c000_28f2", 0),
+ MVI(0xc00028f3, "TODO_c000_28f3", 0),
+ MVI(0xc00028f4, "TODO_c000_28f4", 0),
+ MVI(0xc00028f5, "TODO_c000_28f5", 0),
+ MVI(0xc00028f6, "TODO_c000_28f6", 0),
+ MVI(0xc00028f7, "TODO_c000_28f7", 0),
+ MVI(0xc00028f8, "TODO_c000_28f8", 0),
+ MVI(0xc00028f9, "TODO_c000_28f9", 0),
+ MVI(0xc00028fa, "TODO_c000_28fa", 0),
+ MVI(0xc00028fb, "TODO_c000_28fb", 0),
+ MVI(0xc00028fc, "TODO_c000_28fc", 0),
+ MVI(0xc00028fd, "TODO_c000_28fd", 0),
+ MVI(0xc00028fe, "TODO_c000_28fe", 0),
+ MVI(0xc00028ff, "TODO_c000_28ff", 0),
+ MVI(0xc0002900, "TODO_c000_2900", 0),
+ MVI(0xc0002901, "TODO_c000_2901", 0),
+ MVI(0xc0002902, "TODO_c000_2902", 0),
+ MVI(0xc0002903, "TODO_c000_2903", 0),
+ MVI(0xc0002904, "TODO_c000_2904", 0),
+ MVI(0xc0002905, "TODO_c000_2905", 0),
+ MVI(0xc0002906, "TODO_c000_2906", 0),
+ MVI(0xc0002907, "TODO_c000_2907", 0),
+ MVI(0xc0002908, "TODO_c000_2908", 0),
+ MVI(0xc0002909, "TODO_c000_2909", 0),
+ MVI(0xc000290a, "TODO_c000_290a", 0),
+ MVI(0xc000290b, "TODO_c000_290b", 0),
+ MVI(0xc000290c, "TODO_c000_290c", 0),
+ MVI(0xc000290d, "TODO_c000_290d", 0),
+ MVI(0xc000290e, "TODO_c000_290e", 0),
+ MVI(0xc000290f, "TODO_c000_290f", 0),
+ MVI(0xc0002910, "TODO_c000_2910", 0),
+ MVI(0xc0002911, "TODO_c000_2911", 0),
+ MVI(0xc0002912, "TODO_c000_2912", 0),
+ MVI(0xc0002913, "TODO_c000_2913", 0),
+ MVI(0xc0002914, "TODO_c000_2914", 0),
+ MVI(0xc0002915, "TODO_c000_2915", 0),
+ MVI(0xc0002916, "TODO_c000_2916", 0),
+ MVI(0xc0002917, "TODO_c000_2917", 0),
+ MVI(0xc0002918, "TODO_c000_2918", 0),
+ MVI(0xc0002919, "TODO_c000_2919", 0),
+ MVI(0xc000291a, "TODO_c000_291a", 0),
+ MVI(0xc000291b, "TODO_c000_291b", 0),
+ MVI(0xc000291c, "TODO_c000_291c", 0),
+ MVI(0xc000291d, "TODO_c000_291d", 0),
+ MVI(0xc000291e, "TODO_c000_291e", 0),
+ MVI(0xc000291f, "TODO_c000_291f", 0),
+ MVI(0xc0002920, "TODO_c000_2920", 0),
+ MVI(0xc0002921, "TODO_c000_2921", 0),
+ MVI(0xc0002922, "TODO_c000_2922", 0),
+ MVI(0xc0002923, "TODO_c000_2923", 0),
+ MVI(0xc0002924, "TODO_c000_2924", 0),
+ MVI(0xc0002925, "TODO_c000_2925", 0),
+ MVI(0xc0002926, "TODO_c000_2926", 0),
+ MVI(0xc0002927, "TODO_c000_2927", 0),
+ MVI(0xc0002928, "TODO_c000_2928", 0),
+ MVI(0xc0002929, "TODO_c000_2929", 0),
+ MVI(0xc000292a, "TODO_c000_292a", 0),
+ MVI(0xc000292b, "TODO_c000_292b", 0),
+ MVI(0xc000292c, "TODO_c000_292c", 0),
+ MVI(0xc000292d, "TODO_c000_292d", 0),
+ MVI(0xc000292e, "TODO_c000_292e", 0),
+ MVI(0xc000292f, "TODO_c000_292f", 0),
+ MVI(0xc0002930, "TODO_c000_2930", 0),
+ MVI(0xc0002931, "TODO_c000_2931", 0),
+ MVI(0xc0002932, "TODO_c000_2932", 0),
+ MVI(0xc0002933, "TODO_c000_2933", 0),
+ MVI(0xc0002934, "TODO_c000_2934", 0),
+ MVI(0xc0002935, "TODO_c000_2935", 0),
+ MVI(0xc0002936, "TODO_c000_2936", 0),
+ MVI(0xc0002937, "TODO_c000_2937", 0),
+ MVI(0xc0002938, "TODO_c000_2938", 0),
+ MVI(0xc0002939, "TODO_c000_2939", 0),
+ MVI(0xc000293a, "TODO_c000_293a", 0),
+ MVI(0xc000293b, "TODO_c000_293b", 0),
+ MVI(0xc000293c, "TODO_c000_293c", 0),
+ MVI(0xc000293d, "TODO_c000_293d", 0),
+ MVI(0xc000293e, "TODO_c000_293e", 0),
+ MVI(0xc000293f, "TODO_c000_293f", 0),
+ MVI(0xc0002940, "TODO_c000_2940", 0),
+ MVI(0xc0002941, "TODO_c000_2941", 0),
+ MVI(0xc0002942, "TODO_c000_2942", 0),
+ MVI(0xc0002943, "TODO_c000_2943", 0),
+ MVI(0xc0002944, "TODO_c000_2944", 0),
+ MVI(0xc0002945, "TODO_c000_2945", 0),
+ MVI(0xc0002946, "TODO_c000_2946", 0),
+ MVI(0xc0002947, "TODO_c000_2947", 0),
+ MVI(0xc0002948, "TODO_c000_2948", 0),
+ MVI(0xc0002949, "TODO_c000_2949", 0),
+ MVI(0xc000294a, "TODO_c000_294a", 0),
+ MVI(0xc000294b, "TODO_c000_294b", 0),
+ MVI(0xc000294c, "TODO_c000_294c", 0),
+ MVI(0xc000294d, "TODO_c000_294d", 0),
+ MVI(0xc000294e, "TODO_c000_294e", 0),
+ MVI(0xc000294f, "TODO_c000_294f", 0),
+ MVI(0xc0002950, "TODO_c000_2950", 0),
+ MVI(0xc0002951, "TODO_c000_2951", 0),
+ MVI(0xc0002952, "TODO_c000_2952", 0),
+ MVI(0xc0002953, "TODO_c000_2953", 0),
+ MVI(0xc0002954, "TODO_c000_2954", 0),
+ MVI(0xc0002955, "TODO_c000_2955", 0),
+ MVI(0xc0002956, "TODO_c000_2956", 0),
+ MVI(0xc0002957, "TODO_c000_2957", 0),
+ MVI(0xc0002958, "TODO_c000_2958", 0),
+ MVI(0xc0002959, "TODO_c000_2959", 0),
+ MVI(0xc000295a, "TODO_c000_295a", 0),
+ MVI(0xc000295b, "TODO_c000_295b", 0),
+ MVI(0xc000295c, "TODO_c000_295c", 0),
+ MVI(0xc000295d, "TODO_c000_295d", 0),
+ MVI(0xc000295e, "TODO_c000_295e", 0),
+ MVI(0xc000295f, "TODO_c000_295f", 0),
+ MVI(0xc0002960, "TODO_c000_2960", 0),
+ MVI(0xc0002961, "TODO_c000_2961", 0),
+ MVI(0xc0002962, "TODO_c000_2962", 0),
+ MVI(0xc0002963, "TODO_c000_2963", 0),
+ MVI(0xc0002964, "TODO_c000_2964", 0),
+ MVI(0xc0002965, "TODO_c000_2965", 0),
+ MVI(0xc0002966, "TODO_c000_2966", 0),
+ MVI(0xc0002967, "TODO_c000_2967", 0),
+ MVI(0xc0002968, "TODO_c000_2968", 0),
+ MVI(0xc0002969, "TODO_c000_2969", 0),
+ MVI(0xc000296a, "TODO_c000_296a", 0),
+ MVI(0xc000296b, "TODO_c000_296b", 0),
+ MVI(0xc000296c, "TODO_c000_296c", 0),
+ MVI(0xc000296d, "TODO_c000_296d", 0),
+ MVI(0xc000296e, "TODO_c000_296e", 0),
+ MVI(0xc000296f, "TODO_c000_296f", 0),
+ MVI(0xc0002970, "TODO_c000_2970", 0),
+ MVI(0xc0002971, "TODO_c000_2971", 0),
+ MVI(0xc0002972, "TODO_c000_2972", 0),
+ MVI(0xc0002973, "TODO_c000_2973", 0),
+ MVI(0xc0002974, "TODO_c000_2974", 0),
+ MVI(0xc0002975, "TODO_c000_2975", 0),
+ MVI(0xc0002976, "TODO_c000_2976", 0),
+ MVI(0xc0002977, "TODO_c000_2977", 0),
+ MVI(0xc0002978, "TODO_c000_2978", 0),
+ MVI(0xc0002979, "TODO_c000_2979", 0),
+ MVI(0xc000297a, "TODO_c000_297a", 0),
+ MVI(0xc000297b, "TODO_c000_297b", 0),
+ MVI(0xc000297c, "TODO_c000_297c", 0),
+ MVI(0xc000297d, "TODO_c000_297d", 0),
+ MVI(0xc000297e, "TODO_c000_297e", 0),
+ MVI(0xc000297f, "TODO_c000_297f", 0),
+ MVI(0xc0002980, "TODO_c000_2980", 0),
+ MVI(0xc0002981, "TODO_c000_2981", 0),
+ MVI(0xc0002982, "TODO_c000_2982", 0),
+ MVI(0xc0002983, "TODO_c000_2983", 0),
+ MVI(0xc0002984, "TODO_c000_2984", 0),
+ MVI(0xc0002985, "TODO_c000_2985", 0),
+ MVI(0xc0002986, "TODO_c000_2986", 0),
+ MVI(0xc0002987, "TODO_c000_2987", 0),
+ MVI(0xc0002988, "TODO_c000_2988", 0),
+ MVI(0xc0002989, "TODO_c000_2989", 0),
+ MVI(0xc000298a, "TODO_c000_298a", 0),
+ MVI(0xc000298b, "TODO_c000_298b", 0),
+ MVI(0xc000298c, "TODO_c000_298c", 0),
+ MVI(0xc000298d, "TODO_c000_298d", 0),
+ MVI(0xc000298e, "TODO_c000_298e", 0),
+ MVI(0xc000298f, "TODO_c000_298f", 0),
+ MVI(0xc0002990, "TODO_c000_2990", 0),
+ MVI(0xc0002991, "TODO_c000_2991", 0),
+ MVI(0xc0002992, "TODO_c000_2992", 0),
+ MVI(0xc0002993, "TODO_c000_2993", 0),
+ MVI(0xc0002994, "TODO_c000_2994", 0),
+ MVI(0xc0002995, "TODO_c000_2995", 0),
+ MVI(0xc0002996, "TODO_c000_2996", 0),
+ MVI(0xc0002997, "TODO_c000_2997", 0),
+ MVI(0xc0002998, "TODO_c000_2998", 0),
+ MVI(0xc0002999, "TODO_c000_2999", 0),
+ MVI(0xc000299a, "TODO_c000_299a", 0),
+ MVI(0xc000299b, "TODO_c000_299b", 0),
+ MVI(0xc000299c, "TODO_c000_299c", 0),
+ MVI(0xc000299d, "TODO_c000_299d", 0),
+ MVI(0xc000299e, "TODO_c000_299e", 0),
+ MVI(0xc000299f, "TODO_c000_299f", 0),
+ MVI(0xc00029a0, "TODO_c000_29a0", 0),
+ MVI(0xc00029a1, "TODO_c000_29a1", 0),
+ MVI(0xc00029a2, "TODO_c000_29a2", 0),
+ MVI(0xc00029a3, "TODO_c000_29a3", 0),
+ MVI(0xc00029a4, "TODO_c000_29a4", 0),
+ MVI(0xc00029a5, "TODO_c000_29a5", 0),
+ MVI(0xc00029a6, "TODO_c000_29a6", 0),
+ MVI(0xc00029a7, "TODO_c000_29a7", 0),
+ MVI(0xc00029a8, "TODO_c000_29a8", 0),
+ MVI(0xc00029a9, "TODO_c000_29a9", 0),
+ MVI(0xc00029aa, "TODO_c000_29aa", 0),
+ MVI(0xc00029ab, "TODO_c000_29ab", 0),
+ MVI(0xc00029ac, "TODO_c000_29ac", 0),
+ MVI(0xc00029ad, "TODO_c000_29ad", 0),
+ MVI(0xc00029ae, "TODO_c000_29ae", 0),
+ MVI(0xc00029af, "TODO_c000_29af", 0),
+ MVI(0xc00029b0, "TODO_c000_29b0", 0),
+ MVI(0xc00029b1, "TODO_c000_29b1", 0),
+ MVI(0xc00029b2, "TODO_c000_29b2", 0),
+ MVI(0xc00029b3, "TODO_c000_29b3", 0),
+ MVI(0xc00029b4, "TODO_c000_29b4", 0),
+ MVI(0xc00029b5, "TODO_c000_29b5", 0),
+ MVI(0xc00029b6, "TODO_c000_29b6", 0),
+ MVI(0xc00029b7, "TODO_c000_29b7", 0),
+ MVI(0xc00029b8, "TODO_c000_29b8", 0),
+ MVI(0xc00029b9, "TODO_c000_29b9", 0),
+ MVI(0xc00029ba, "TODO_c000_29ba", 0),
+ MVI(0xc00029bb, "TODO_c000_29bb", 0),
+ MVI(0xc00029bc, "TODO_c000_29bc", 0),
+ MVI(0xc00029bd, "TODO_c000_29bd", 0),
+ MVI(0xc00029be, "TODO_c000_29be", 0),
+ MVI(0xc00029bf, "TODO_c000_29bf", 0),
+ MVI(0xc00029c0, "TODO_c000_29c0", 0),
+ MVI(0xc00029c1, "TODO_c000_29c1", 0),
+ MVI(0xc00029c2, "TODO_c000_29c2", 0),
+ MVI(0xc00029c3, "TODO_c000_29c3", 0),
+ MVI(0xc00029c4, "TODO_c000_29c4", 0),
+ MVI(0xc00029c5, "TODO_c000_29c5", 0),
+ MVI(0xc00029c6, "TODO_c000_29c6", 0),
+ MVI(0xc00029c7, "TODO_c000_29c7", 0),
+ MVI(0xc00029c8, "TODO_c000_29c8", 0),
+ MVI(0xc00029c9, "TODO_c000_29c9", 0),
+ MVI(0xc00029ca, "TODO_c000_29ca", 0),
+ MVI(0xc00029cb, "TODO_c000_29cb", 0),
+ MVI(0xc00029cc, "TODO_c000_29cc", 0),
+ MVI(0xc00029cd, "TODO_c000_29cd", 0),
+ MVI(0xc00029ce, "TODO_c000_29ce", 0),
+ MVI(0xc00029cf, "TODO_c000_29cf", 0),
+ MVI(0xc00029d0, "TODO_c000_29d0", 0),
+ MVI(0xc00029d1, "TODO_c000_29d1", 0),
+ MVI(0xc00029d2, "TODO_c000_29d2", 0),
+ MVI(0xc00029d3, "TODO_c000_29d3", 0),
+ MVI(0xc00029d4, "TODO_c000_29d4", 0),
+ MVI(0xc00029d5, "TODO_c000_29d5", 0),
+ MVI(0xc00029d6, "TODO_c000_29d6", 0),
+ MVI(0xc00029d7, "TODO_c000_29d7", 0),
+ MVI(0xc00029d8, "TODO_c000_29d8", 0),
+ MVI(0xc00029d9, "TODO_c000_29d9", 0),
+ MVI(0xc00029da, "TODO_c000_29da", 0),
+ MVI(0xc00029db, "TODO_c000_29db", 0),
+ MVI(0xc00029dc, "TODO_c000_29dc", 0),
+ MVI(0xc00029dd, "TODO_c000_29dd", 0),
+ MVI(0xc00029de, "TODO_c000_29de", 0),
+ MVI(0xc00029df, "TODO_c000_29df", 0),
+ MVI(0xc00029e0, "TODO_c000_29e0", 0),
+ MVI(0xc00029e1, "TODO_c000_29e1", 0),
+ MVI(0xc00029e2, "TODO_c000_29e2", 0),
+ MVI(0xc00029e3, "TODO_c000_29e3", 0),
+ MVI(0xc00029e4, "TODO_c000_29e4", 0),
+ MVI(0xc00029e5, "TODO_c000_29e5", 0),
+ MVI(0xc00029e6, "TODO_c000_29e6", 0),
+ MVI(0xc00029e7, "TODO_c000_29e7", 0),
+ MVI(0xc00029e8, "TODO_c000_29e8", 0),
+ MVI(0xc00029e9, "TODO_c000_29e9", 0),
+ MVI(0xc00029ea, "TODO_c000_29ea", 0),
+ MVI(0xc00029eb, "TODO_c000_29eb", 0),
+ MVI(0xc00029ec, "TODO_c000_29ec", 0),
+ MVI(0xc00029ed, "TODO_c000_29ed", 0),
+ MVI(0xc00029ee, "TODO_c000_29ee", 0),
+ MVI(0xc00029ef, "TODO_c000_29ef", 0),
+ MVI(0xc00029f0, "TODO_c000_29f0", 0),
+ MVI(0xc00029f1, "TODO_c000_29f1", 0),
+ MVI(0xc00029f2, "TODO_c000_29f2", 0),
+ MVI(0xc00029f3, "TODO_c000_29f3", 0),
+ MVI(0xc00029f4, "TODO_c000_29f4", 0),
+ MVI(0xc00029f5, "TODO_c000_29f5", 0),
+ MVI(0xc00029f6, "TODO_c000_29f6", 0),
+ MVI(0xc00029f7, "TODO_c000_29f7", 0),
+ MVI(0xc00029f8, "TODO_c000_29f8", 0),
+ MVI(0xc00029f9, "TODO_c000_29f9", 0),
+ MVI(0xc00029fa, "TODO_c000_29fa", 0),
+ MVI(0xc00029fb, "TODO_c000_29fb", 0),
+ MVI(0xc00029fc, "TODO_c000_29fc", 0),
+ MVI(0xc00029fd, "TODO_c000_29fd", 0),
+ MVI(0xc00029fe, "TODO_c000_29fe", 0),
+ MVI(0xc00029ff, "TODO_c000_29ff", 0),
+ MVI(0xc0002a00, "TODO_c000_2a00", 0),
+ MVI(0xc0002a01, "TODO_c000_2a01", 0),
+ MVI(0xc0002a02, "TODO_c000_2a02", 0),
+ MVI(0xc0002a03, "TODO_c000_2a03", 0),
+ MVI(0xc0002a04, "TODO_c000_2a04", 0),
+ MVI(0xc0002a05, "TODO_c000_2a05", 0),
+ MVI(0xc0002a06, "TODO_c000_2a06", 0),
+ MVI(0xc0002a07, "TODO_c000_2a07", 0),
+ MVI(0xc0002a08, "TODO_c000_2a08", 0),
+ MVI(0xc0002a09, "TODO_c000_2a09", 0),
+ MVI(0xc0002a0a, "TODO_c000_2a0a", 0),
+ MVI(0xc0002a0b, "TODO_c000_2a0b", 0),
+ MVI(0xc0002a0c, "TODO_c000_2a0c", 0),
+ MVI(0xc0002a0d, "TODO_c000_2a0d", 0),
+ MVI(0xc0002a0e, "TODO_c000_2a0e", 0),
+ MVI(0xc0002a0f, "TODO_c000_2a0f", 0),
+ MVI(0xc0002a10, "TODO_c000_2a10", 0),
+ MVI(0xc0002a11, "TODO_c000_2a11", 0),
+ MVI(0xc0002a12, "TODO_c000_2a12", 0),
+ MVI(0xc0002a13, "TODO_c000_2a13", 0),
+ MVI(0xc0002a14, "TODO_c000_2a14", 0),
+ MVI(0xc0002a15, "TODO_c000_2a15", 0),
+ MVI(0xc0002a16, "TODO_c000_2a16", 0),
+ MVI(0xc0002a17, "TODO_c000_2a17", 0),
+ MVI(0xc0002a18, "TODO_c000_2a18", 0),
+ MVI(0xc0002a19, "TODO_c000_2a19", 0),
+ MVI(0xc0002a1a, "TODO_c000_2a1a", 0),
+ MVI(0xc0002a1b, "TODO_c000_2a1b", 0),
+ MVI(0xc0002a1c, "TODO_c000_2a1c", 0),
+ MVI(0xc0002a1d, "TODO_c000_2a1d", 0),
+ MVI(0xc0002a1e, "TODO_c000_2a1e", 0),
+ MVI(0xc0002a1f, "TODO_c000_2a1f", 0),
+ MVI(0xc0002a20, "TODO_c000_2a20", 0),
+ MVI(0xc0002a21, "TODO_c000_2a21", 0),
+ MVI(0xc0002a22, "TODO_c000_2a22", 0),
+ MVI(0xc0002a23, "TODO_c000_2a23", 0),
+ MVI(0xc0002a24, "TODO_c000_2a24", 0),
+ MVI(0xc0002a25, "TODO_c000_2a25", 0),
+ MVI(0xc0002a26, "TODO_c000_2a26", 0),
+ MVI(0xc0002a27, "TODO_c000_2a27", 0),
+ MVI(0xc0002a28, "TODO_c000_2a28", 0),
+ MVI(0xc0002a29, "TODO_c000_2a29", 0),
+ MVI(0xc0002a2a, "TODO_c000_2a2a", 0),
+ MVI(0xc0002a2b, "TODO_c000_2a2b", 0),
+ MVI(0xc0002a2c, "TODO_c000_2a2c", 0),
+ MVI(0xc0002a2d, "TODO_c000_2a2d", 0),
+ MVI(0xc0002a2e, "TODO_c000_2a2e", 0),
+ MVI(0xc0002a2f, "TODO_c000_2a2f", 0),
+ MVI(0xc0002a30, "TODO_c000_2a30", 0),
+ MVI(0xc0002a31, "TODO_c000_2a31", 0),
+ MVI(0xc0002a32, "TODO_c000_2a32", 0),
+ MVI(0xc0002a33, "TODO_c000_2a33", 0),
+ MVI(0xc0002a34, "TODO_c000_2a34", 0),
+ MVI(0xc0002a35, "TODO_c000_2a35", 0),
+ MVI(0xc0002a36, "TODO_c000_2a36", 0),
+ MVI(0xc0002a37, "TODO_c000_2a37", 0),
+ MVI(0xc0002a38, "TODO_c000_2a38", 0),
+ MVI(0xc0002a39, "TODO_c000_2a39", 0),
+ MVI(0xc0002a3a, "TODO_c000_2a3a", 0),
+ MVI(0xc0002a3b, "TODO_c000_2a3b", 0),
+ MVI(0xc0002a3c, "TODO_c000_2a3c", 0),
+ MVI(0xc0002a3d, "TODO_c000_2a3d", 0),
+ MVI(0xc0002a3e, "TODO_c000_2a3e", 0),
+ MVI(0xc0002a3f, "TODO_c000_2a3f", 0),
+ MVI(0xc0002a40, "TODO_c000_2a40", 0),
+ MVI(0xc0002a41, "TODO_c000_2a41", 0),
+ MVI(0xc0002a42, "TODO_c000_2a42", 0),
+ MVI(0xc0002a43, "TODO_c000_2a43", 0),
+ MVI(0xc0002a44, "TODO_c000_2a44", 0),
+ MVI(0xc0002a45, "TODO_c000_2a45", 0),
+ MVI(0xc0002a46, "TODO_c000_2a46", 0),
+ MVI(0xc0002a47, "TODO_c000_2a47", 0),
+ MVI(0xc0002a48, "TODO_c000_2a48", 0),
+ MVI(0xc0002a49, "TODO_c000_2a49", 0),
+ MVI(0xc0002a4a, "TODO_c000_2a4a", 0),
+ MVI(0xc0002a4b, "TODO_c000_2a4b", 0),
+ MVI(0xc0002a4c, "TODO_c000_2a4c", 0),
+ MVI(0xc0002a4d, "TODO_c000_2a4d", 0),
+ MVI(0xc0002a4e, "TODO_c000_2a4e", 0),
+ MVI(0xc0002a4f, "TODO_c000_2a4f", 0),
+ MVI(0xc0002a50, "TODO_c000_2a50", 0),
+ MVI(0xc0002a51, "TODO_c000_2a51", 0),
+ MVI(0xc0002a52, "TODO_c000_2a52", 0),
+ MVI(0xc0002a53, "TODO_c000_2a53", 0),
+ MVI(0xc0002a54, "TODO_c000_2a54", 0),
+ MVI(0xc0002a55, "TODO_c000_2a55", 0),
+ MVI(0xc0002a56, "TODO_c000_2a56", 0),
+ MVI(0xc0002a57, "TODO_c000_2a57", 0),
+ MVI(0xc0002a58, "TODO_c000_2a58", 0),
+ MVI(0xc0002a59, "TODO_c000_2a59", 0),
+ MVI(0xc0002a5a, "TODO_c000_2a5a", 0),
+ MVI(0xc0002a5b, "TODO_c000_2a5b", 0),
+ MVI(0xc0002a5c, "TODO_c000_2a5c", 0),
+ MVI(0xc0002a5d, "TODO_c000_2a5d", 0),
+ MVI(0xc0002a5e, "TODO_c000_2a5e", 0),
+ MVI(0xc0002a5f, "TODO_c000_2a5f", 0),
+ MVI(0xc0002a60, "TODO_c000_2a60", 0),
+ MVI(0xc0002a61, "TODO_c000_2a61", 0),
+ MVI(0xc0002a62, "TODO_c000_2a62", 0),
+ MVI(0xc0002a63, "TODO_c000_2a63", 0),
+ MVI(0xc0002a64, "TODO_c000_2a64", 0),
+ MVI(0xc0002a65, "TODO_c000_2a65", 0),
+ MVI(0xc0002a66, "TODO_c000_2a66", 0),
+ MVI(0xc0002a67, "TODO_c000_2a67", 0),
+ MVI(0xc0002a68, "TODO_c000_2a68", 0),
+ MVI(0xc0002a69, "TODO_c000_2a69", 0),
+ MVI(0xc0002a6a, "TODO_c000_2a6a", 0),
+ MVI(0xc0002a6b, "TODO_c000_2a6b", 0),
+ MVI(0xc0002a6c, "TODO_c000_2a6c", 0),
+ MVI(0xc0002a6d, "TODO_c000_2a6d", 0),
+ MVI(0xc0002a6e, "TODO_c000_2a6e", 0),
+ MVI(0xc0002a6f, "TODO_c000_2a6f", 0),
+ MVI(0xc0002a70, "TODO_c000_2a70", 0),
+ MVI(0xc0002a71, "TODO_c000_2a71", 0),
+ MVI(0xc0002a72, "TODO_c000_2a72", 0),
+ MVI(0xc0002a73, "TODO_c000_2a73", 0),
+ MVI(0xc0002a74, "TODO_c000_2a74", 0),
+ MVI(0xc0002a75, "TODO_c000_2a75", 0),
+ MVI(0xc0002a76, "TODO_c000_2a76", 0),
+ MVI(0xc0002a77, "TODO_c000_2a77", 0),
+ MVI(0xc0002a78, "TODO_c000_2a78", 0),
+ MVI(0xc0002a79, "TODO_c000_2a79", 0),
+ MVI(0xc0002a7a, "TODO_c000_2a7a", 0),
+ MVI(0xc0002a7b, "TODO_c000_2a7b", 0),
+ MVI(0xc0002a7c, "TODO_c000_2a7c", 0),
+ MVI(0xc0002a7d, "TODO_c000_2a7d", 0),
+ MVI(0xc0002a7e, "TODO_c000_2a7e", 0),
+ MVI(0xc0002a7f, "TODO_c000_2a7f", 0),
+ MVI(0xc0002a80, "TODO_c000_2a80", 0),
+ MVI(0xc0002a81, "TODO_c000_2a81", 0),
+ MVI(0xc0002a82, "TODO_c000_2a82", 0),
+ MVI(0xc0002a83, "TODO_c000_2a83", 0),
+ MVI(0xc0002a84, "TODO_c000_2a84", 0),
+ MVI(0xc0002a85, "TODO_c000_2a85", 0),
+ MVI(0xc0002a86, "TODO_c000_2a86", 0),
+ MVI(0xc0002a87, "TODO_c000_2a87", 0),
+ MVI(0xc0002a88, "TODO_c000_2a88", 0),
+ MVI(0xc0002a89, "TODO_c000_2a89", 0),
+ MVI(0xc0002a8a, "TODO_c000_2a8a", 0),
+ MVI(0xc0002a8b, "TODO_c000_2a8b", 0),
+ MVI(0xc0002a8c, "TODO_c000_2a8c", 0),
+ MVI(0xc0002a8d, "TODO_c000_2a8d", 0),
+ MVI(0xc0002a8e, "TODO_c000_2a8e", 0),
+ MVI(0xc0002a8f, "TODO_c000_2a8f", 0),
+ MVI(0xc0002a90, "TODO_c000_2a90", 0),
+ MVI(0xc0002a91, "TODO_c000_2a91", 0),
+ MVI(0xc0002a92, "TODO_c000_2a92", 0),
+ MVI(0xc0002a93, "TODO_c000_2a93", 0),
+ MVI(0xc0002a94, "TODO_c000_2a94", 0),
+ MVI(0xc0002a95, "TODO_c000_2a95", 0),
+ MVI(0xc0002a96, "TODO_c000_2a96", 0),
+ MVI(0xc0002a97, "TODO_c000_2a97", 0),
+ MVI(0xc0002a98, "TODO_c000_2a98", 0),
+ MVI(0xc0002a99, "TODO_c000_2a99", 0),
+ MVI(0xc0002a9a, "TODO_c000_2a9a", 0),
+ MVI(0xc0002a9b, "TODO_c000_2a9b", 0),
+ MVI(0xc0002a9c, "TODO_c000_2a9c", 0),
+ MVI(0xc0002a9d, "TODO_c000_2a9d", 0),
+ MVI(0xc0002a9e, "TODO_c000_2a9e", 0),
+ MVI(0xc0002a9f, "TODO_c000_2a9f", 0),
+ MVI(0xc0002aa0, "TODO_c000_2aa0", 0),
+ MVI(0xc0002aa1, "TODO_c000_2aa1", 0),
+ MVI(0xc0002aa2, "TODO_c000_2aa2", 0),
+ MVI(0xc0002aa3, "TODO_c000_2aa3", 0),
+ MVI(0xc0002aa4, "TODO_c000_2aa4", 0),
+ MVI(0xc0002aa5, "TODO_c000_2aa5", 0),
+ MVI(0xc0002aa6, "TODO_c000_2aa6", 0),
+ MVI(0xc0002aa7, "TODO_c000_2aa7", 0),
+ MVI(0xc0002aa8, "TODO_c000_2aa8", 0),
+ MVI(0xc0002aa9, "TODO_c000_2aa9", 0),
+ MVI(0xc0002aaa, "TODO_c000_2aaa", 0),
+ MVI(0xc0002aab, "TODO_c000_2aab", 0),
+ MVI(0xc0002aac, "TODO_c000_2aac", 0),
+ MVI(0xc0002aad, "TODO_c000_2aad", 0),
+ MVI(0xc0002aae, "TODO_c000_2aae", 0),
+ MVI(0xc0002aaf, "TODO_c000_2aaf", 0),
+ MVI(0xc0002ab0, "TODO_c000_2ab0", 0),
+ MVI(0xc0002ab1, "TODO_c000_2ab1", 0),
+ MVI(0xc0002ab2, "TODO_c000_2ab2", 0),
+ MVI(0xc0002ab3, "TODO_c000_2ab3", 0),
+ MVI(0xc0002ab4, "TODO_c000_2ab4", 0),
+ MVI(0xc0002ab5, "TODO_c000_2ab5", 0),
+ MVI(0xc0002ab6, "TODO_c000_2ab6", 0),
+ MVI(0xc0002ab7, "TODO_c000_2ab7", 0),
+ MVI(0xc0002ab8, "TODO_c000_2ab8", 0),
+ MVI(0xc0002ab9, "TODO_c000_2ab9", 0),
+ MVI(0xc0002aba, "TODO_c000_2aba", 0),
+ MVI(0xc0002abb, "TODO_c000_2abb", 0),
+ MVI(0xc0002abc, "TODO_c000_2abc", 0),
+ MVI(0xc0002abd, "TODO_c000_2abd", 0),
+ MVI(0xc0002abe, "TODO_c000_2abe", 0),
+ MVI(0xc0002abf, "TODO_c000_2abf", 0),
+ MVI(0xc0002ac0, "TODO_c000_2ac0", 0),
+ MVI(0xc0002ac1, "TODO_c000_2ac1", 0),
+ MVI(0xc0002ac2, "TODO_c000_2ac2", 0),
+ MVI(0xc0002ac3, "TODO_c000_2ac3", 0),
+ MVI(0xc0002ac4, "TODO_c000_2ac4", 0),
+ MVI(0xc0002ac5, "TODO_c000_2ac5", 0),
+ MVI(0xc0002ac6, "TODO_c000_2ac6", 0),
+ MVI(0xc0002ac7, "TODO_c000_2ac7", 0),
+ MVI(0xc0002ac8, "TODO_c000_2ac8", 0),
+ MVI(0xc0002ac9, "TODO_c000_2ac9", 0),
+ MVI(0xc0002aca, "TODO_c000_2aca", 0),
+ MVI(0xc0002acb, "TODO_c000_2acb", 0),
+ MVI(0xc0002acc, "TODO_c000_2acc", 0),
+ MVI(0xc0002acd, "TODO_c000_2acd", 0),
+ MVI(0xc0002ace, "TODO_c000_2ace", 0),
+ MVI(0xc0002acf, "TODO_c000_2acf", 0),
+ MVI(0xc0002ad0, "TODO_c000_2ad0", 0),
+ MVI(0xc0002ad1, "TODO_c000_2ad1", 0),
+ MVI(0xc0002ad2, "TODO_c000_2ad2", 0),
+ MVI(0xc0002ad3, "TODO_c000_2ad3", 0),
+ MVI(0xc0002ad4, "TODO_c000_2ad4", 0),
+ MVI(0xc0002ad5, "TODO_c000_2ad5", 0),
+ MVI(0xc0002ad6, "TODO_c000_2ad6", 0),
+ MVI(0xc0002ad7, "TODO_c000_2ad7", 0),
+ MVI(0xc0002ad8, "TODO_c000_2ad8", 0),
+ MVI(0xc0002ad9, "TODO_c000_2ad9", 0),
+ MVI(0xc0002ada, "TODO_c000_2ada", 0),
+ MVI(0xc0002adb, "TODO_c000_2adb", 0),
+ MVI(0xc0002adc, "TODO_c000_2adc", 0),
+ MVI(0xc0002add, "TODO_c000_2add", 0),
+ MVI(0xc0002ade, "TODO_c000_2ade", 0),
+ MVI(0xc0002adf, "TODO_c000_2adf", 0),
+ MVI(0xc0002ae0, "TODO_c000_2ae0", 0),
+ MVI(0xc0002ae1, "TODO_c000_2ae1", 0),
+ MVI(0xc0002ae2, "TODO_c000_2ae2", 0),
+ MVI(0xc0002ae3, "TODO_c000_2ae3", 0),
+ MVI(0xc0002ae4, "TODO_c000_2ae4", 0),
+ MVI(0xc0002ae5, "TODO_c000_2ae5", 0),
+ MVI(0xc0002ae6, "TODO_c000_2ae6", 0),
+ MVI(0xc0002ae7, "TODO_c000_2ae7", 0),
+ MVI(0xc0002ae8, "TODO_c000_2ae8", 0),
+ MVI(0xc0002ae9, "TODO_c000_2ae9", 0),
+ MVI(0xc0002aea, "TODO_c000_2aea", 0),
+ MVI(0xc0002aeb, "TODO_c000_2aeb", 0),
+ MVI(0xc0002aec, "TODO_c000_2aec", 0),
+ MVI(0xc0002aed, "TODO_c000_2aed", 0),
+ MVI(0xc0002aee, "TODO_c000_2aee", 0),
+ MVI(0xc0002aef, "TODO_c000_2aef", 0),
+ MVI(0xc0002af0, "TODO_c000_2af0", 0),
+ MVI(0xc0002af1, "TODO_c000_2af1", 0),
+ MVI(0xc0002af2, "TODO_c000_2af2", 0),
+ MVI(0xc0002af3, "TODO_c000_2af3", 0),
+ MVI(0xc0002af4, "TODO_c000_2af4", 0),
+ MVI(0xc0002af5, "TODO_c000_2af5", 0),
+ MVI(0xc0002af6, "TODO_c000_2af6", 0),
+ MVI(0xc0002af7, "TODO_c000_2af7", 0),
+ MVI(0xc0002af8, "TODO_c000_2af8", 0),
+ MVI(0xc0002af9, "TODO_c000_2af9", 0),
+ MVI(0xc0002afa, "TODO_c000_2afa", 0),
+ MVI(0xc0002afb, "TODO_c000_2afb", 0),
+ MVI(0xc0002afc, "TODO_c000_2afc", 0),
+ MVI(0xc0002afd, "TODO_c000_2afd", 0),
+ MVI(0xc0002afe, "TODO_c000_2afe", 0),
+ MVI(0xc0002aff, "TODO_c000_2aff", 0),
+ MVI(0xc0002b00, "TODO_c000_2b00", 0),
+ MVI(0xc0002b01, "TODO_c000_2b01", 0),
+ MVI(0xc0002b02, "TODO_c000_2b02", 0),
+ MVI(0xc0002b03, "TODO_c000_2b03", 0),
+ MVI(0xc0002b04, "TODO_c000_2b04", 0),
+ MVI(0xc0002b05, "TODO_c000_2b05", 0),
+ MVI(0xc0002b06, "TODO_c000_2b06", 0),
+ MVI(0xc0002b07, "TODO_c000_2b07", 0),
+ MVI(0xc0002b08, "TODO_c000_2b08", 0),
+ MVI(0xc0002b09, "TODO_c000_2b09", 0),
+ MVI(0xc0002b0a, "TODO_c000_2b0a", 0),
+ MVI(0xc0002b0b, "TODO_c000_2b0b", 0),
+ MVI(0xc0002b0c, "TODO_c000_2b0c", 0),
+ MVI(0xc0002b0d, "TODO_c000_2b0d", 0),
+ MVI(0xc0002b0e, "TODO_c000_2b0e", 0),
+ MVI(0xc0002b0f, "TODO_c000_2b0f", 0),
+ MVI(0xc0002b10, "TODO_c000_2b10", 0),
+ MVI(0xc0002b11, "TODO_c000_2b11", 0),
+ MVI(0xc0002b12, "TODO_c000_2b12", 0),
+ MVI(0xc0002b13, "TODO_c000_2b13", 0),
+ MVI(0xc0002b14, "TODO_c000_2b14", 0),
+ MVI(0xc0002b15, "TODO_c000_2b15", 0),
+ MVI(0xc0002b16, "TODO_c000_2b16", 0),
+ MVI(0xc0002b17, "TODO_c000_2b17", 0),
+ MVI(0xc0002b18, "TODO_c000_2b18", 0),
+ MVI(0xc0002b19, "TODO_c000_2b19", 0),
+ MVI(0xc0002b1a, "TODO_c000_2b1a", 0),
+ MVI(0xc0002b1b, "TODO_c000_2b1b", 0),
+ MVI(0xc0002b1c, "TODO_c000_2b1c", 0),
+ MVI(0xc0002b1d, "TODO_c000_2b1d", 0),
+ MVI(0xc0002b1e, "TODO_c000_2b1e", 0),
+ MVI(0xc0002b1f, "TODO_c000_2b1f", 0),
+ MVI(0xc0002b20, "TODO_c000_2b20", 0),
+ MVI(0xc0002b21, "TODO_c000_2b21", 0),
+ MVI(0xc0002b22, "TODO_c000_2b22", 0),
+ MVI(0xc0002b23, "TODO_c000_2b23", 0),
+ MVI(0xc0002b24, "TODO_c000_2b24", 0),
+ MVI(0xc0002b25, "TODO_c000_2b25", 0),
+ MVI(0xc0002b26, "TODO_c000_2b26", 0),
+ MVI(0xc0002b27, "TODO_c000_2b27", 0),
+ MVI(0xc0002b28, "TODO_c000_2b28", 0),
+ MVI(0xc0002b29, "TODO_c000_2b29", 0),
+ MVI(0xc0002b2a, "TODO_c000_2b2a", 0),
+ MVI(0xc0002b2b, "TODO_c000_2b2b", 0),
+ MVI(0xc0002b2c, "TODO_c000_2b2c", 0),
+ MVI(0xc0002b2d, "TODO_c000_2b2d", 0),
+ MVI(0xc0002b2e, "TODO_c000_2b2e", 0),
+ MVI(0xc0002b2f, "TODO_c000_2b2f", 0),
+ MVI(0xc0002b30, "TODO_c000_2b30", 0),
+ MVI(0xc0002b31, "TODO_c000_2b31", 0),
+ MVI(0xc0002b32, "TODO_c000_2b32", 0),
+ MVI(0xc0002b33, "TODO_c000_2b33", 0),
+ MVI(0xc0002b34, "TODO_c000_2b34", 0),
+ MVI(0xc0002b35, "TODO_c000_2b35", 0),
+ MVI(0xc0002b36, "TODO_c000_2b36", 0),
+ MVI(0xc0002b37, "TODO_c000_2b37", 0),
+ MVI(0xc0002b38, "TODO_c000_2b38", 0),
+ MVI(0xc0002b39, "TODO_c000_2b39", 0),
+ MVI(0xc0002b3a, "TODO_c000_2b3a", 0),
+ MVI(0xc0002b3b, "TODO_c000_2b3b", 0),
+ MVI(0xc0002b3c, "TODO_c000_2b3c", 0),
+ MVI(0xc0002b3d, "TODO_c000_2b3d", 0),
+ MVI(0xc0002b3e, "TODO_c000_2b3e", 0),
+ MVI(0xc0002b3f, "TODO_c000_2b3f", 0),
+ MVI(0xc0002b40, "TODO_c000_2b40", 0),
+ MVI(0xc0002b41, "TODO_c000_2b41", 0),
+ MVI(0xc0002b42, "TODO_c000_2b42", 0),
+ MVI(0xc0002b43, "TODO_c000_2b43", 0),
+ MVI(0xc0002b44, "TODO_c000_2b44", 0),
+ MVI(0xc0002b45, "TODO_c000_2b45", 0),
+ MVI(0xc0002b46, "TODO_c000_2b46", 0),
+ MVI(0xc0002b47, "TODO_c000_2b47", 0),
+ MVI(0xc0002b48, "TODO_c000_2b48", 0),
+ MVI(0xc0002b49, "TODO_c000_2b49", 0),
+ MVI(0xc0002b4a, "TODO_c000_2b4a", 0),
+ MVI(0xc0002b4b, "TODO_c000_2b4b", 0),
+ MVI(0xc0002b4c, "TODO_c000_2b4c", 0),
+ MVI(0xc0002b4d, "TODO_c000_2b4d", 0),
+ MVI(0xc0002b4e, "TODO_c000_2b4e", 0),
+ MVI(0xc0002b4f, "TODO_c000_2b4f", 0),
+ MVI(0xc0002b50, "TODO_c000_2b50", 0),
+ MVI(0xc0002b51, "TODO_c000_2b51", 0),
+ MVI(0xc0002b52, "TODO_c000_2b52", 0),
+ MVI(0xc0002b53, "TODO_c000_2b53", 0),
+ MVI(0xc0002b54, "TODO_c000_2b54", 0),
+ MVI(0xc0002b55, "TODO_c000_2b55", 0),
+ MVI(0xc0002b56, "TODO_c000_2b56", 0),
+ MVI(0xc0002b57, "TODO_c000_2b57", 0),
+ MVI(0xc0002b58, "TODO_c000_2b58", 0),
+ MVI(0xc0002b59, "TODO_c000_2b59", 0),
+ MVI(0xc0002b5a, "TODO_c000_2b5a", 0),
+ MVI(0xc0002b5b, "TODO_c000_2b5b", 0),
+ MVI(0xc0002b5c, "TODO_c000_2b5c", 0),
+ MVI(0xc0002b5d, "TODO_c000_2b5d", 0),
+ MVI(0xc0002b5e, "TODO_c000_2b5e", 0),
+ MVI(0xc0002b5f, "TODO_c000_2b5f", 0),
+ MVI(0xc0002b60, "TODO_c000_2b60", 0),
+ MVI(0xc0002b61, "TODO_c000_2b61", 0),
+ MVI(0xc0002b62, "TODO_c000_2b62", 0),
+ MVI(0xc0002b63, "TODO_c000_2b63", 0),
+ MVI(0xc0002b64, "TODO_c000_2b64", 0),
+ MVI(0xc0002b65, "TODO_c000_2b65", 0),
+ MVI(0xc0002b66, "TODO_c000_2b66", 0),
+ MVI(0xc0002b67, "TODO_c000_2b67", 0),
+ MVI(0xc0002b68, "TODO_c000_2b68", 0),
+ MVI(0xc0002b69, "TODO_c000_2b69", 0),
+ MVI(0xc0002b6a, "TODO_c000_2b6a", 0),
+ MVI(0xc0002b6b, "TODO_c000_2b6b", 0),
+ MVI(0xc0002b6c, "TODO_c000_2b6c", 0),
+ MVI(0xc0002b6d, "TODO_c000_2b6d", 0),
+ MVI(0xc0002b6e, "TODO_c000_2b6e", 0),
+ MVI(0xc0002b6f, "TODO_c000_2b6f", 0),
+ MVI(0xc0002b70, "TODO_c000_2b70", 0),
+ MVI(0xc0002b71, "TODO_c000_2b71", 0),
+ MVI(0xc0002b72, "TODO_c000_2b72", 0),
+ MVI(0xc0002b73, "TODO_c000_2b73", 0),
+ MVI(0xc0002b74, "TODO_c000_2b74", 0),
+ MVI(0xc0002b75, "TODO_c000_2b75", 0),
+ MVI(0xc0002b76, "TODO_c000_2b76", 0),
+ MVI(0xc0002b77, "TODO_c000_2b77", 0),
+ MVI(0xc0002b78, "TODO_c000_2b78", 0),
+ MVI(0xc0002b79, "TODO_c000_2b79", 0),
+ MVI(0xc0002b7a, "TODO_c000_2b7a", 0),
+ MVI(0xc0002b7b, "TODO_c000_2b7b", 0),
+ MVI(0xc0002b7c, "TODO_c000_2b7c", 0),
+ MVI(0xc0002b7d, "TODO_c000_2b7d", 0),
+ MVI(0xc0002b7e, "TODO_c000_2b7e", 0),
+ MVI(0xc0002b7f, "TODO_c000_2b7f", 0),
+ MVI(0xc0002b80, "TODO_c000_2b80", 0),
+ MVI(0xc0002b81, "TODO_c000_2b81", 0),
+ MVI(0xc0002b82, "TODO_c000_2b82", 0),
+ MVI(0xc0002b83, "TODO_c000_2b83", 0),
+ MVI(0xc0002b84, "TODO_c000_2b84", 0),
+ MVI(0xc0002b85, "TODO_c000_2b85", 0),
+ MVI(0xc0002b86, "TODO_c000_2b86", 0),
+ MVI(0xc0002b87, "TODO_c000_2b87", 0),
+ MVI(0xc0002b88, "TODO_c000_2b88", 0),
+ MVI(0xc0002b89, "TODO_c000_2b89", 0),
+ MVI(0xc0002b8a, "TODO_c000_2b8a", 0),
+ MVI(0xc0002b8b, "TODO_c000_2b8b", 0),
+ MVI(0xc0002b8c, "TODO_c000_2b8c", 0),
+ MVI(0xc0002b8d, "TODO_c000_2b8d", 0),
+ MVI(0xc0002b8e, "TODO_c000_2b8e", 0),
+ MVI(0xc0002b8f, "TODO_c000_2b8f", 0),
+ MVI(0xc0002b90, "TODO_c000_2b90", 0),
+ MVI(0xc0002b91, "TODO_c000_2b91", 0),
+ MVI(0xc0002b92, "TODO_c000_2b92", 0),
+ MVI(0xc0002b93, "TODO_c000_2b93", 0),
+ MVI(0xc0002b94, "TODO_c000_2b94", 0),
+ MVI(0xc0002b95, "TODO_c000_2b95", 0),
+ MVI(0xc0002b96, "TODO_c000_2b96", 0),
+ MVI(0xc0002b97, "TODO_c000_2b97", 0),
+ MVI(0xc0002b98, "TODO_c000_2b98", 0),
+ MVI(0xc0002b99, "TODO_c000_2b99", 0),
+ MVI(0xc0002b9a, "TODO_c000_2b9a", 0),
+ MVI(0xc0002b9b, "TODO_c000_2b9b", 0),
+ MVI(0xc0002b9c, "TODO_c000_2b9c", 0),
+ MVI(0xc0002b9d, "TODO_c000_2b9d", 0),
+ MVI(0xc0002b9e, "TODO_c000_2b9e", 0),
+ MVI(0xc0002b9f, "TODO_c000_2b9f", 0),
+ MVI(0xc0002ba0, "TODO_c000_2ba0", 0),
+ MVI(0xc0002ba1, "TODO_c000_2ba1", 0),
+ MVI(0xc0002ba2, "TODO_c000_2ba2", 0),
+ MVI(0xc0002ba3, "TODO_c000_2ba3", 0),
+ MVI(0xc0002ba4, "TODO_c000_2ba4", 0),
+ MVI(0xc0002ba5, "TODO_c000_2ba5", 0),
+ MVI(0xc0002ba6, "TODO_c000_2ba6", 0),
+ MVI(0xc0002ba7, "TODO_c000_2ba7", 0),
+ MVI(0xc0002ba8, "TODO_c000_2ba8", 0),
+ MVI(0xc0002ba9, "TODO_c000_2ba9", 0),
+ MVI(0xc0002baa, "TODO_c000_2baa", 0),
+ MVI(0xc0002bab, "TODO_c000_2bab", 0),
+ MVI(0xc0002bac, "TODO_c000_2bac", 0),
+ MVI(0xc0002bad, "TODO_c000_2bad", 0),
+ MVI(0xc0002bae, "TODO_c000_2bae", 0),
+ MVI(0xc0002baf, "TODO_c000_2baf", 0),
+ MVI(0xc0002bb0, "TODO_c000_2bb0", 0),
+ MVI(0xc0002bb1, "TODO_c000_2bb1", 0),
+ MVI(0xc0002bb2, "TODO_c000_2bb2", 0),
+ MVI(0xc0002bb3, "TODO_c000_2bb3", 0),
+ MVI(0xc0002bb4, "TODO_c000_2bb4", 0),
+ MVI(0xc0002bb5, "TODO_c000_2bb5", 0),
+ MVI(0xc0002bb6, "TODO_c000_2bb6", 0),
+ MVI(0xc0002bb7, "TODO_c000_2bb7", 0),
+ MVI(0xc0002bb8, "TODO_c000_2bb8", 0),
+ MVI(0xc0002bb9, "TODO_c000_2bb9", 0),
+ MVI(0xc0002bba, "TODO_c000_2bba", 0),
+ MVI(0xc0002bbb, "TODO_c000_2bbb", 0),
+ MVI(0xc0002bbc, "TODO_c000_2bbc", 0),
+ MVI(0xc0002bbd, "TODO_c000_2bbd", 0),
+ MVI(0xc0002bbe, "TODO_c000_2bbe", 0),
+ MVI(0xc0002bbf, "TODO_c000_2bbf", 0),
+ MVI(0xc0002bc0, "TODO_c000_2bc0", 0),
+ MVI(0xc0002bc1, "TODO_c000_2bc1", 0),
+ MVI(0xc0002bc2, "TODO_c000_2bc2", 0),
+ MVI(0xc0002bc3, "TODO_c000_2bc3", 0),
+ MVI(0xc0002bc4, "TODO_c000_2bc4", 0),
+ MVI(0xc0002bc5, "TODO_c000_2bc5", 0),
+ MVI(0xc0002bc6, "TODO_c000_2bc6", 0),
+ MVI(0xc0002bc7, "TODO_c000_2bc7", 0),
+ MVI(0xc0002bc8, "TODO_c000_2bc8", 0),
+ MVI(0xc0002bc9, "TODO_c000_2bc9", 0),
+ MVI(0xc0002bca, "TODO_c000_2bca", 0),
+ MVI(0xc0002bcb, "TODO_c000_2bcb", 0),
+ MVI(0xc0002bcc, "TODO_c000_2bcc", 0),
+ MVI(0xc0002bcd, "TODO_c000_2bcd", 0),
+ MVI(0xc0002bce, "TODO_c000_2bce", 0),
+ MVI(0xc0002bcf, "TODO_c000_2bcf", 0),
+ MVI(0xc0002bd0, "TODO_c000_2bd0", 0),
+ MVI(0xc0002bd1, "TODO_c000_2bd1", 0),
+ MVI(0xc0002bd2, "TODO_c000_2bd2", 0),
+ MVI(0xc0002bd3, "TODO_c000_2bd3", 0),
+ MVI(0xc0002bd4, "TODO_c000_2bd4", 0),
+ MVI(0xc0002bd5, "TODO_c000_2bd5", 0),
+ MVI(0xc0002bd6, "TODO_c000_2bd6", 0),
+ MVI(0xc0002bd7, "TODO_c000_2bd7", 0),
+ MVI(0xc0002bd8, "TODO_c000_2bd8", 0),
+ MVI(0xc0002bd9, "TODO_c000_2bd9", 0),
+ MVI(0xc0002bda, "TODO_c000_2bda", 0),
+ MVI(0xc0002bdb, "TODO_c000_2bdb", 0),
+ MVI(0xc0002bdc, "TODO_c000_2bdc", 0),
+ MVI(0xc0002bdd, "TODO_c000_2bdd", 0),
+ MVI(0xc0002bde, "TODO_c000_2bde", 0),
+ MVI(0xc0002bdf, "TODO_c000_2bdf", 0),
+ MVI(0xc0002be0, "TODO_c000_2be0", 0),
+ MVI(0xc0002be1, "TODO_c000_2be1", 0),
+ MVI(0xc0002be2, "TODO_c000_2be2", 0),
+ MVI(0xc0002be3, "TODO_c000_2be3", 0),
+ MVI(0xc0002be4, "TODO_c000_2be4", 0),
+ MVI(0xc0002be5, "TODO_c000_2be5", 0),
+ MVI(0xc0002be6, "TODO_c000_2be6", 0),
+ MVI(0xc0002be7, "TODO_c000_2be7", 0),
+ MVI(0xc0002be8, "TODO_c000_2be8", 0),
+ MVI(0xc0002be9, "TODO_c000_2be9", 0),
+ MVI(0xc0002bea, "TODO_c000_2bea", 0),
+ MVI(0xc0002beb, "TODO_c000_2beb", 0),
+ MVI(0xc0002bec, "TODO_c000_2bec", 0),
+ MVI(0xc0002bed, "TODO_c000_2bed", 0),
+ MVI(0xc0002bee, "TODO_c000_2bee", 0),
+ MVI(0xc0002bef, "TODO_c000_2bef", 0),
+ MVI(0xc0002bf0, "TODO_c000_2bf0", 0),
+ MVI(0xc0002bf1, "TODO_c000_2bf1", 0),
+ MVI(0xc0002bf2, "TODO_c000_2bf2", 0),
+ MVI(0xc0002bf3, "TODO_c000_2bf3", 0),
+ MVI(0xc0002bf4, "TODO_c000_2bf4", 0),
+ MVI(0xc0002bf5, "TODO_c000_2bf5", 0),
+ MVI(0xc0002bf6, "TODO_c000_2bf6", 0),
+ MVI(0xc0002bf7, "TODO_c000_2bf7", 0),
+ MVI(0xc0002bf8, "TODO_c000_2bf8", 0),
+ MVI(0xc0002bf9, "TODO_c000_2bf9", 0),
+ MVI(0xc0002bfa, "TODO_c000_2bfa", 0),
+ MVI(0xc0002bfb, "TODO_c000_2bfb", 0),
+ MVI(0xc0002bfc, "TODO_c000_2bfc", 0),
+ MVI(0xc0002bfd, "TODO_c000_2bfd", 0),
+ MVI(0xc0002bfe, "TODO_c000_2bfe", 0),
+ MVI(0xc0002bff, "TODO_c000_2bff", 0),
+ MVI(0xc0002c00, "TODO_c000_2c00", 0),
+ MVI(0xc0002c01, "TODO_c000_2c01", 0),
+ MVI(0xc0002c02, "TODO_c000_2c02", 0),
+ MVI(0xc0002c03, "TODO_c000_2c03", 0),
+ MVI(0xc0002c04, "TODO_c000_2c04", 0),
+ MVI(0xc0002c05, "TODO_c000_2c05", 0),
+ MVI(0xc0002c06, "TODO_c000_2c06", 0),
+ MVI(0xc0002c07, "TODO_c000_2c07", 0),
+ MVI(0xc0002c08, "TODO_c000_2c08", 0),
+ MVI(0xc0002c09, "TODO_c000_2c09", 0),
+ MVI(0xc0002c0a, "TODO_c000_2c0a", 0),
+ MVI(0xc0002c0b, "TODO_c000_2c0b", 0),
+ MVI(0xc0002c0c, "TODO_c000_2c0c", 0),
+ MVI(0xc0002c0d, "TODO_c000_2c0d", 0),
+ MVI(0xc0002c0e, "TODO_c000_2c0e", 0),
+ MVI(0xc0002c0f, "TODO_c000_2c0f", 0),
+ MVI(0xc0002c10, "TODO_c000_2c10", 0),
+ MVI(0xc0002c11, "TODO_c000_2c11", 0),
+ MVI(0xc0002c12, "TODO_c000_2c12", 0),
+ MVI(0xc0002c13, "TODO_c000_2c13", 0),
+ MVI(0xc0002c14, "TODO_c000_2c14", 0),
+ MVI(0xc0002c15, "TODO_c000_2c15", 0),
+ MVI(0xc0002c16, "TODO_c000_2c16", 0),
+ MVI(0xc0002c17, "TODO_c000_2c17", 0),
+ MVI(0xc0002c18, "TODO_c000_2c18", 0),
+ MVI(0xc0002c19, "TODO_c000_2c19", 0),
+ MVI(0xc0002c1a, "TODO_c000_2c1a", 0),
+ MVI(0xc0002c1b, "TODO_c000_2c1b", 0),
+ MVI(0xc0002c1c, "TODO_c000_2c1c", 0),
+ MVI(0xc0002c1d, "TODO_c000_2c1d", 0),
+ MVI(0xc0002c1e, "TODO_c000_2c1e", 0),
+ MVI(0xc0002c1f, "TODO_c000_2c1f", 0),
+ MVI(0xc0002c20, "TODO_c000_2c20", 0),
+ MVI(0xc0002c21, "TODO_c000_2c21", 0),
+ MVI(0xc0002c22, "TODO_c000_2c22", 0),
+ MVI(0xc0002c23, "TODO_c000_2c23", 0),
+ MVI(0xc0002c24, "TODO_c000_2c24", 0),
+ MVI(0xc0002c25, "TODO_c000_2c25", 0),
+ MVI(0xc0002c26, "TODO_c000_2c26", 0),
+ MVI(0xc0002c27, "TODO_c000_2c27", 0),
+ MVI(0xc0002c28, "TODO_c000_2c28", 0),
+ MVI(0xc0002c29, "TODO_c000_2c29", 0),
+ MVI(0xc0002c2a, "TODO_c000_2c2a", 0),
+ MVI(0xc0002c2b, "TODO_c000_2c2b", 0),
+ MVI(0xc0002c2c, "TODO_c000_2c2c", 0),
+ MVI(0xc0002c2d, "TODO_c000_2c2d", 0),
+ MVI(0xc0002c2e, "TODO_c000_2c2e", 0),
+ MVI(0xc0002c2f, "TODO_c000_2c2f", 0),
+ MVI(0xc0002c30, "TODO_c000_2c30", 0),
+ MVI(0xc0002c31, "TODO_c000_2c31", 0),
+ MVI(0xc0002c32, "TODO_c000_2c32", 0),
+ MVI(0xc0002c33, "TODO_c000_2c33", 0),
+ MVI(0xc0002c34, "TODO_c000_2c34", 0),
+ MVI(0xc0002c35, "TODO_c000_2c35", 0),
+ MVI(0xc0002c36, "TODO_c000_2c36", 0),
+ MVI(0xc0002c37, "TODO_c000_2c37", 0),
+ MVI(0xc0002c38, "TODO_c000_2c38", 0),
+ MVI(0xc0002c39, "TODO_c000_2c39", 0),
+ MVI(0xc0002c3a, "TODO_c000_2c3a", 0),
+ MVI(0xc0002c3b, "TODO_c000_2c3b", 0),
+ MVI(0xc0002c3c, "TODO_c000_2c3c", 0),
+ MVI(0xc0002c3d, "TODO_c000_2c3d", 0),
+ MVI(0xc0002c3e, "TODO_c000_2c3e", 0),
+ MVI(0xc0002c3f, "TODO_c000_2c3f", 0),
+ MVI(0xc0002c40, "TODO_c000_2c40", 0),
+ MVI(0xc0002c41, "TODO_c000_2c41", 0),
+ MVI(0xc0002c42, "TODO_c000_2c42", 0),
+ MVI(0xc0002c43, "TODO_c000_2c43", 0),
+ MVI(0xc0002c44, "TODO_c000_2c44", 0),
+ MVI(0xc0002c45, "TODO_c000_2c45", 0),
+ MVI(0xc0002c46, "TODO_c000_2c46", 0),
+ MVI(0xc0002c47, "TODO_c000_2c47", 0),
+ MVI(0xc0002c48, "TODO_c000_2c48", 0),
+ MVI(0xc0002c49, "TODO_c000_2c49", 0),
+ MVI(0xc0002c4a, "TODO_c000_2c4a", 0),
+ MVI(0xc0002c4b, "TODO_c000_2c4b", 0),
+ MVI(0xc0002c4c, "TODO_c000_2c4c", 0),
+ MVI(0xc0002c4d, "TODO_c000_2c4d", 0),
+ MVI(0xc0002c4e, "TODO_c000_2c4e", 0),
+ MVI(0xc0002c4f, "TODO_c000_2c4f", 0),
+ MVI(0xc0002c50, "TODO_c000_2c50", 0),
+ MVI(0xc0002c51, "TODO_c000_2c51", 0),
+ MVI(0xc0002c52, "TODO_c000_2c52", 0),
+ MVI(0xc0002c53, "TODO_c000_2c53", 0),
+ MVI(0xc0002c54, "TODO_c000_2c54", 0),
+ MVI(0xc0002c55, "TODO_c000_2c55", 0),
+ MVI(0xc0002c56, "TODO_c000_2c56", 0),
+ MVI(0xc0002c57, "TODO_c000_2c57", 0),
+ MVI(0xc0002c58, "TODO_c000_2c58", 0),
+ MVI(0xc0002c59, "TODO_c000_2c59", 0),
+ MVI(0xc0002c5a, "TODO_c000_2c5a", 0),
+ MVI(0xc0002c5b, "TODO_c000_2c5b", 0),
+ MVI(0xc0002c5c, "TODO_c000_2c5c", 0),
+ MVI(0xc0002c5d, "TODO_c000_2c5d", 0),
+ MVI(0xc0002c5e, "TODO_c000_2c5e", 0),
+ MVI(0xc0002c5f, "TODO_c000_2c5f", 0),
+ MVI(0xc0002c60, "TODO_c000_2c60", 0),
+ MVI(0xc0002c61, "TODO_c000_2c61", 0),
+ MVI(0xc0002c62, "TODO_c000_2c62", 0),
+ MVI(0xc0002c63, "TODO_c000_2c63", 0),
+ MVI(0xc0002c64, "TODO_c000_2c64", 0),
+ MVI(0xc0002c65, "TODO_c000_2c65", 0),
+ MVI(0xc0002c66, "TODO_c000_2c66", 0),
+ MVI(0xc0002c67, "TODO_c000_2c67", 0),
+ MVI(0xc0002c68, "TODO_c000_2c68", 0),
+ MVI(0xc0002c69, "TODO_c000_2c69", 0),
+ MVI(0xc0002c6a, "TODO_c000_2c6a", 0),
+ MVI(0xc0002c6b, "TODO_c000_2c6b", 0),
+ MVI(0xc0002c6c, "TODO_c000_2c6c", 0),
+ MVI(0xc0002c6d, "TODO_c000_2c6d", 0),
+ MVI(0xc0002c6e, "TODO_c000_2c6e", 0),
+ MVI(0xc0002c6f, "TODO_c000_2c6f", 0),
+ MVI(0xc0002c70, "TODO_c000_2c70", 0),
+ MVI(0xc0002c71, "TODO_c000_2c71", 0),
+ MVI(0xc0002c72, "TODO_c000_2c72", 0),
+ MVI(0xc0002c73, "TODO_c000_2c73", 0),
+ MVI(0xc0002c74, "TODO_c000_2c74", 0),
+ MVI(0xc0002c75, "TODO_c000_2c75", 0),
+ MVI(0xc0002c76, "TODO_c000_2c76", 0),
+ MVI(0xc0002c77, "TODO_c000_2c77", 0),
+ MVI(0xc0002c78, "TODO_c000_2c78", 0),
+ MVI(0xc0002c79, "TODO_c000_2c79", 0),
+ MVI(0xc0002c7a, "TODO_c000_2c7a", 0),
+ MVI(0xc0002c7b, "TODO_c000_2c7b", 0),
+ MVI(0xc0002c7c, "TODO_c000_2c7c", 0),
+ MVI(0xc0002c7d, "TODO_c000_2c7d", 0),
+ MVI(0xc0002c7e, "TODO_c000_2c7e", 0),
+ MVI(0xc0002c7f, "TODO_c000_2c7f", 0),
+ MVI(0xc0002c80, "TODO_c000_2c80", 0),
+ MVI(0xc0002c81, "TODO_c000_2c81", 0),
+ MVI(0xc0002c82, "TODO_c000_2c82", 0),
+ MVI(0xc0002c83, "TODO_c000_2c83", 0),
+ MVI(0xc0002c84, "TODO_c000_2c84", 0),
+ MVI(0xc0002c85, "TODO_c000_2c85", 0),
+ MVI(0xc0002c86, "TODO_c000_2c86", 0),
+ MVI(0xc0002c87, "TODO_c000_2c87", 0),
+ MVI(0xc0002c88, "TODO_c000_2c88", 0),
+ MVI(0xc0002c89, "TODO_c000_2c89", 0),
+ MVI(0xc0002c8a, "TODO_c000_2c8a", 0),
+ MVI(0xc0002c8b, "TODO_c000_2c8b", 0),
+ MVI(0xc0002c8c, "TODO_c000_2c8c", 0),
+ MVI(0xc0002c8d, "TODO_c000_2c8d", 0),
+ MVI(0xc0002c8e, "TODO_c000_2c8e", 0),
+ MVI(0xc0002c8f, "TODO_c000_2c8f", 0),
+ MVI(0xc0002c90, "TODO_c000_2c90", 0),
+ MVI(0xc0002c91, "TODO_c000_2c91", 0),
+ MVI(0xc0002c92, "TODO_c000_2c92", 0),
+ MVI(0xc0002c93, "TODO_c000_2c93", 0),
+ MVI(0xc0002c94, "TODO_c000_2c94", 0),
+ MVI(0xc0002c95, "TODO_c000_2c95", 0),
+ MVI(0xc0002c96, "TODO_c000_2c96", 0),
+ MVI(0xc0002c97, "TODO_c000_2c97", 0),
+ MVI(0xc0002c98, "TODO_c000_2c98", 0),
+ MVI(0xc0002c99, "TODO_c000_2c99", 0),
+ MVI(0xc0002c9a, "TODO_c000_2c9a", 0),
+ MVI(0xc0002c9b, "TODO_c000_2c9b", 0),
+ MVI(0xc0002c9c, "TODO_c000_2c9c", 0),
+ MVI(0xc0002c9d, "TODO_c000_2c9d", 0),
+ MVI(0xc0002c9e, "TODO_c000_2c9e", 0),
+ MVI(0xc0002c9f, "TODO_c000_2c9f", 0),
+ MVI(0xc0002ca0, "TODO_c000_2ca0", 0),
+ MVI(0xc0002ca1, "TODO_c000_2ca1", 0),
+ MVI(0xc0002ca2, "TODO_c000_2ca2", 0),
+ MVI(0xc0002ca3, "TODO_c000_2ca3", 0),
+ MVI(0xc0002ca4, "TODO_c000_2ca4", 0),
+ MVI(0xc0002ca5, "TODO_c000_2ca5", 0),
+ MVI(0xc0002ca6, "TODO_c000_2ca6", 0),
+ MVI(0xc0002ca7, "TODO_c000_2ca7", 0),
+ MVI(0xc0002ca8, "TODO_c000_2ca8", 0),
+ MVI(0xc0002ca9, "TODO_c000_2ca9", 0),
+ MVI(0xc0002caa, "TODO_c000_2caa", 0),
+ MVI(0xc0002cab, "TODO_c000_2cab", 0),
+ MVI(0xc0002cac, "TODO_c000_2cac", 0),
+ MVI(0xc0002cad, "TODO_c000_2cad", 0),
+ MVI(0xc0002cae, "TODO_c000_2cae", 0),
+ MVI(0xc0002caf, "TODO_c000_2caf", 0),
+ MVI(0xc0002cb0, "TODO_c000_2cb0", 0),
+ MVI(0xc0002cb1, "TODO_c000_2cb1", 0),
+ MVI(0xc0002cb2, "TODO_c000_2cb2", 0),
+ MVI(0xc0002cb3, "TODO_c000_2cb3", 0),
+ MVI(0xc0002cb4, "TODO_c000_2cb4", 0),
+ MVI(0xc0002cb5, "TODO_c000_2cb5", 0),
+ MVI(0xc0002cb6, "TODO_c000_2cb6", 0),
+ MVI(0xc0002cb7, "TODO_c000_2cb7", 0),
+ MVI(0xc0002cb8, "TODO_c000_2cb8", 0),
+ MVI(0xc0002cb9, "TODO_c000_2cb9", 0),
+ MVI(0xc0002cba, "TODO_c000_2cba", 0),
+ MVI(0xc0002cbb, "TODO_c000_2cbb", 0),
+ MVI(0xc0002cbc, "TODO_c000_2cbc", 0),
+ MVI(0xc0002cbd, "TODO_c000_2cbd", 0),
+ MVI(0xc0002cbe, "TODO_c000_2cbe", 0),
+ MVI(0xc0002cbf, "TODO_c000_2cbf", 0),
+ MVI(0xc0002cc0, "TODO_c000_2cc0", 0),
+ MVI(0xc0002cc1, "TODO_c000_2cc1", 0),
+ MVI(0xc0002cc2, "TODO_c000_2cc2", 0),
+ MVI(0xc0002cc3, "TODO_c000_2cc3", 0),
+ MVI(0xc0002cc4, "TODO_c000_2cc4", 0),
+ MVI(0xc0002cc5, "TODO_c000_2cc5", 0),
+ MVI(0xc0002cc6, "TODO_c000_2cc6", 0),
+ MVI(0xc0002cc7, "TODO_c000_2cc7", 0),
+ MVI(0xc0002cc8, "TODO_c000_2cc8", 0),
+ MVI(0xc0002cc9, "TODO_c000_2cc9", 0),
+ MVI(0xc0002cca, "TODO_c000_2cca", 0),
+ MVI(0xc0002ccb, "TODO_c000_2ccb", 0),
+ MVI(0xc0002ccc, "TODO_c000_2ccc", 0),
+ MVI(0xc0002ccd, "TODO_c000_2ccd", 0),
+ MVI(0xc0002cce, "TODO_c000_2cce", 0),
+ MVI(0xc0002ccf, "TODO_c000_2ccf", 0),
+ MVI(0xc0002cd0, "TODO_c000_2cd0", 0),
+ MVI(0xc0002cd1, "TODO_c000_2cd1", 0),
+ MVI(0xc0002cd2, "TODO_c000_2cd2", 0),
+ MVI(0xc0002cd3, "TODO_c000_2cd3", 0),
+ MVI(0xc0002cd4, "TODO_c000_2cd4", 0),
+ MVI(0xc0002cd5, "TODO_c000_2cd5", 0),
+ MVI(0xc0002cd6, "TODO_c000_2cd6", 0),
+ MVI(0xc0002cd7, "TODO_c000_2cd7", 0),
+ MVI(0xc0002cd8, "TODO_c000_2cd8", 0),
+ MVI(0xc0002cd9, "TODO_c000_2cd9", 0),
+ MVI(0xc0002cda, "TODO_c000_2cda", 0),
+ MVI(0xc0002cdb, "TODO_c000_2cdb", 0),
+ MVI(0xc0002cdc, "TODO_c000_2cdc", 0),
+ MVI(0xc0002cdd, "TODO_c000_2cdd", 0),
+ MVI(0xc0002cde, "TODO_c000_2cde", 0),
+ MVI(0xc0002cdf, "TODO_c000_2cdf", 0),
+ MVI(0xc0002ce0, "TODO_c000_2ce0", 0),
+ MVI(0xc0002ce1, "TODO_c000_2ce1", 0),
+ MVI(0xc0002ce2, "TODO_c000_2ce2", 0),
+ MVI(0xc0002ce3, "TODO_c000_2ce3", 0),
+ MVI(0xc0002ce4, "TODO_c000_2ce4", 0),
+ MVI(0xc0002ce5, "TODO_c000_2ce5", 0),
+ MVI(0xc0002ce6, "TODO_c000_2ce6", 0),
+ MVI(0xc0002ce7, "TODO_c000_2ce7", 0),
+ MVI(0xc0002ce8, "TODO_c000_2ce8", 0),
+ MVI(0xc0002ce9, "TODO_c000_2ce9", 0),
+ MVI(0xc0002cea, "TODO_c000_2cea", 0),
+ MVI(0xc0002ceb, "TODO_c000_2ceb", 0),
+ MVI(0xc0002cec, "TODO_c000_2cec", 0),
+ MVI(0xc0002ced, "TODO_c000_2ced", 0),
+ MVI(0xc0002cee, "TODO_c000_2cee", 0),
+ MVI(0xc0002cef, "TODO_c000_2cef", 0),
+ MVI(0xc0002cf0, "TODO_c000_2cf0", 0),
+ MVI(0xc0002cf1, "TODO_c000_2cf1", 0),
+ MVI(0xc0002cf2, "TODO_c000_2cf2", 0),
+ MVI(0xc0002cf3, "TODO_c000_2cf3", 0),
+ MVI(0xc0002cf4, "TODO_c000_2cf4", 0),
+ MVI(0xc0002cf5, "TODO_c000_2cf5", 0),
+ MVI(0xc0002cf6, "TODO_c000_2cf6", 0),
+ MVI(0xc0002cf7, "TODO_c000_2cf7", 0),
+ MVI(0xc0002cf8, "TODO_c000_2cf8", 0),
+ MVI(0xc0002cf9, "TODO_c000_2cf9", 0),
+ MVI(0xc0002cfa, "TODO_c000_2cfa", 0),
+ MVI(0xc0002cfb, "TODO_c000_2cfb", 0),
+ MVI(0xc0002cfc, "TODO_c000_2cfc", 0),
+ MVI(0xc0002cfd, "TODO_c000_2cfd", 0),
+ MVI(0xc0002cfe, "TODO_c000_2cfe", 0),
+ MVI(0xc0002cff, "TODO_c000_2cff", 0),
+ MVI(0xc0002d00, "TODO_c000_2d00", 0),
+ MVI(0xc0002d01, "TODO_c000_2d01", 0),
+ MVI(0xc0002d02, "TODO_c000_2d02", 0),
+ MVI(0xc0002d03, "TODO_c000_2d03", 0),
+ MVI(0xc0002d04, "TODO_c000_2d04", 0),
+ MVI(0xc0002d05, "TODO_c000_2d05", 0),
+ MVI(0xc0002d06, "TODO_c000_2d06", 0),
+ MVI(0xc0002d07, "TODO_c000_2d07", 0),
+ MVI(0xc0002d08, "TODO_c000_2d08", 0),
+ MVI(0xc0002d09, "TODO_c000_2d09", 0),
+ MVI(0xc0002d0a, "TODO_c000_2d0a", 0),
+ MVI(0xc0002d0b, "TODO_c000_2d0b", 0),
+ MVI(0xc0002d0c, "TODO_c000_2d0c", 0),
+ MVI(0xc0002d0d, "TODO_c000_2d0d", 0),
+ MVI(0xc0002d0e, "TODO_c000_2d0e", 0),
+ MVI(0xc0002d0f, "TODO_c000_2d0f", 0),
+ MVI(0xc0002d10, "TODO_c000_2d10", 0),
+ MVI(0xc0002d11, "TODO_c000_2d11", 0),
+ MVI(0xc0002d12, "TODO_c000_2d12", 0),
+ MVI(0xc0002d13, "TODO_c000_2d13", 0),
+ MVI(0xc0002d14, "TODO_c000_2d14", 0),
+ MVI(0xc0002d15, "TODO_c000_2d15", 0),
+ MVI(0xc0002d16, "TODO_c000_2d16", 0),
+ MVI(0xc0002d17, "TODO_c000_2d17", 0),
+ MVI(0xc0002d18, "TODO_c000_2d18", 0),
+ MVI(0xc0002d19, "TODO_c000_2d19", 0),
+ MVI(0xc0002d1a, "TODO_c000_2d1a", 0),
+ MVI(0xc0002d1b, "TODO_c000_2d1b", 0),
+ MVI(0xc0002d1c, "TODO_c000_2d1c", 0),
+ MVI(0xc0002d1d, "TODO_c000_2d1d", 0),
+ MVI(0xc0002d1e, "TODO_c000_2d1e", 0),
+ MVI(0xc0002d1f, "TODO_c000_2d1f", 0),
+ MVI(0xc0002d20, "TODO_c000_2d20", 0),
+ MVI(0xc0002d21, "TODO_c000_2d21", 0),
+ MVI(0xc0002d22, "TODO_c000_2d22", 0),
+ MVI(0xc0002d23, "TODO_c000_2d23", 0),
+ MVI(0xc0002d24, "TODO_c000_2d24", 0),
+ MVI(0xc0002d25, "TODO_c000_2d25", 0),
+ MVI(0xc0002d26, "TODO_c000_2d26", 0),
+ MVI(0xc0002d27, "TODO_c000_2d27", 0),
+ MVI(0xc0002d28, "TODO_c000_2d28", 0),
+ MVI(0xc0002d29, "TODO_c000_2d29", 0),
+ MVI(0xc0002d2a, "TODO_c000_2d2a", 0),
+ MVI(0xc0002d2b, "TODO_c000_2d2b", 0),
+ MVI(0xc0002d2c, "TODO_c000_2d2c", 0),
+ MVI(0xc0002d2d, "TODO_c000_2d2d", 0),
+ MVI(0xc0002d2e, "TODO_c000_2d2e", 0),
+ MVI(0xc0002d2f, "TODO_c000_2d2f", 0),
+ MVI(0xc0002d30, "TODO_c000_2d30", 0),
+ MVI(0xc0002d31, "TODO_c000_2d31", 0),
+ MVI(0xc0002d32, "TODO_c000_2d32", 0),
+ MVI(0xc0002d33, "TODO_c000_2d33", 0),
+ MVI(0xc0002d34, "TODO_c000_2d34", 0),
+ MVI(0xc0002d35, "TODO_c000_2d35", 0),
+ MVI(0xc0002d36, "TODO_c000_2d36", 0),
+ MVI(0xc0002d37, "TODO_c000_2d37", 0),
+ MVI(0xc0002d38, "TODO_c000_2d38", 0),
+ MVI(0xc0002d39, "TODO_c000_2d39", 0),
+ MVI(0xc0002d3a, "TODO_c000_2d3a", 0),
+ MVI(0xc0002d3b, "TODO_c000_2d3b", 0),
+ MVI(0xc0002d3c, "TODO_c000_2d3c", 0),
+ MVI(0xc0002d3d, "TODO_c000_2d3d", 0),
+ MVI(0xc0002d3e, "TODO_c000_2d3e", 0),
+ MVI(0xc0002d3f, "TODO_c000_2d3f", 0),
+ MVI(0xc0002d40, "TODO_c000_2d40", 0),
+ MVI(0xc0002d41, "TODO_c000_2d41", 0),
+ MVI(0xc0002d42, "TODO_c000_2d42", 0),
+ MVI(0xc0002d43, "TODO_c000_2d43", 0),
+ MVI(0xc0002d44, "TODO_c000_2d44", 0),
+ MVI(0xc0002d45, "TODO_c000_2d45", 0),
+ MVI(0xc0002d46, "TODO_c000_2d46", 0),
+ MVI(0xc0002d47, "TODO_c000_2d47", 0),
+ MVI(0xc0002d48, "TODO_c000_2d48", 0),
+ MVI(0xc0002d49, "TODO_c000_2d49", 0),
+ MVI(0xc0002d4a, "TODO_c000_2d4a", 0),
+ MVI(0xc0002d4b, "TODO_c000_2d4b", 0),
+ MVI(0xc0002d4c, "TODO_c000_2d4c", 0),
+ MVI(0xc0002d4d, "TODO_c000_2d4d", 0),
+ MVI(0xc0002d4e, "TODO_c000_2d4e", 0),
+ MVI(0xc0002d4f, "TODO_c000_2d4f", 0),
+ MVI(0xc0002d50, "TODO_c000_2d50", 0),
+ MVI(0xc0002d51, "TODO_c000_2d51", 0),
+ MVI(0xc0002d52, "TODO_c000_2d52", 0),
+ MVI(0xc0002d53, "TODO_c000_2d53", 0),
+ MVI(0xc0002d54, "TODO_c000_2d54", 0),
+ MVI(0xc0002d55, "TODO_c000_2d55", 0),
+ MVI(0xc0002d56, "TODO_c000_2d56", 0),
+ MVI(0xc0002d57, "TODO_c000_2d57", 0),
+ MVI(0xc0002d58, "TODO_c000_2d58", 0),
+ MVI(0xc0002d59, "TODO_c000_2d59", 0),
+ MVI(0xc0002d5a, "TODO_c000_2d5a", 0),
+ MVI(0xc0002d5b, "TODO_c000_2d5b", 0),
+ MVI(0xc0002d5c, "TODO_c000_2d5c", 0),
+ MVI(0xc0002d5d, "TODO_c000_2d5d", 0),
+ MVI(0xc0002d5e, "TODO_c000_2d5e", 0),
+ MVI(0xc0002d5f, "TODO_c000_2d5f", 0),
+ MVI(0xc0002d60, "TODO_c000_2d60", 0),
+ MVI(0xc0002d61, "TODO_c000_2d61", 0),
+ MVI(0xc0002d62, "TODO_c000_2d62", 0),
+ MVI(0xc0002d63, "TODO_c000_2d63", 0),
+ MVI(0xc0002d64, "TODO_c000_2d64", 0),
+ MVI(0xc0002d65, "TODO_c000_2d65", 0),
+ MVI(0xc0002d66, "TODO_c000_2d66", 0),
+ MVI(0xc0002d67, "TODO_c000_2d67", 0),
+ MVI(0xc0002d68, "TODO_c000_2d68", 0),
+ MVI(0xc0002d69, "TODO_c000_2d69", 0),
+ MVI(0xc0002d6a, "TODO_c000_2d6a", 0),
+ MVI(0xc0002d6b, "TODO_c000_2d6b", 0),
+ MVI(0xc0002d6c, "TODO_c000_2d6c", 0),
+ MVI(0xc0002d6d, "TODO_c000_2d6d", 0),
+ MVI(0xc0002d6e, "TODO_c000_2d6e", 0),
+ MVI(0xc0002d6f, "TODO_c000_2d6f", 0),
+ MVI(0xc0002d70, "TODO_c000_2d70", 0),
+ MVI(0xc0002d71, "TODO_c000_2d71", 0),
+ MVI(0xc0002d72, "TODO_c000_2d72", 0),
+ MVI(0xc0002d73, "TODO_c000_2d73", 0),
+ MVI(0xc0002d74, "TODO_c000_2d74", 0),
+ MVI(0xc0002d75, "TODO_c000_2d75", 0),
+ MVI(0xc0002d76, "TODO_c000_2d76", 0),
+ MVI(0xc0002d77, "TODO_c000_2d77", 0),
+ MVI(0xc0002d78, "TODO_c000_2d78", 0),
+ MVI(0xc0002d79, "TODO_c000_2d79", 0),
+ MVI(0xc0002d7a, "TODO_c000_2d7a", 0),
+ MVI(0xc0002d7b, "TODO_c000_2d7b", 0),
+ MVI(0xc0002d7c, "TODO_c000_2d7c", 0),
+ MVI(0xc0002d7d, "TODO_c000_2d7d", 0),
+ MVI(0xc0002d7e, "TODO_c000_2d7e", 0),
+ MVI(0xc0002d7f, "TODO_c000_2d7f", 0),
+ MVI(0xc0002d80, "TODO_c000_2d80", 0),
+ MVI(0xc0002d81, "TODO_c000_2d81", 0),
+ MVI(0xc0002d82, "TODO_c000_2d82", 0),
+ MVI(0xc0002d83, "TODO_c000_2d83", 0),
+ MVI(0xc0002d84, "TODO_c000_2d84", 0),
+ MVI(0xc0002d85, "TODO_c000_2d85", 0),
+ MVI(0xc0002d86, "TODO_c000_2d86", 0),
+ MVI(0xc0002d87, "TODO_c000_2d87", 0),
+ MVI(0xc0002d88, "TODO_c000_2d88", 0),
+ MVI(0xc0002d89, "TODO_c000_2d89", 0),
+ MVI(0xc0002d8a, "TODO_c000_2d8a", 0),
+ MVI(0xc0002d8b, "TODO_c000_2d8b", 0),
+ MVI(0xc0002d8c, "TODO_c000_2d8c", 0),
+ MVI(0xc0002d8d, "TODO_c000_2d8d", 0),
+ MVI(0xc0002d8e, "TODO_c000_2d8e", 0),
+ MVI(0xc0002d8f, "TODO_c000_2d8f", 0),
+ MVI(0xc0002d90, "TODO_c000_2d90", 0),
+ MVI(0xc0002d91, "TODO_c000_2d91", 0),
+ MVI(0xc0002d92, "TODO_c000_2d92", 0),
+ MVI(0xc0002d93, "TODO_c000_2d93", 0),
+ MVI(0xc0002d94, "TODO_c000_2d94", 0),
+ MVI(0xc0002d95, "TODO_c000_2d95", 0),
+ MVI(0xc0002d96, "TODO_c000_2d96", 0),
+ MVI(0xc0002d97, "TODO_c000_2d97", 0),
+ MVI(0xc0002d98, "TODO_c000_2d98", 0),
+ MVI(0xc0002d99, "TODO_c000_2d99", 0),
+ MVI(0xc0002d9a, "TODO_c000_2d9a", 0),
+ MVI(0xc0002d9b, "TODO_c000_2d9b", 0),
+ MVI(0xc0002d9c, "TODO_c000_2d9c", 0),
+ MVI(0xc0002d9d, "TODO_c000_2d9d", 0),
+ MVI(0xc0002d9e, "TODO_c000_2d9e", 0),
+ MVI(0xc0002d9f, "TODO_c000_2d9f", 0),
+ MVI(0xc0002da0, "TODO_c000_2da0", 0),
+ MVI(0xc0002da1, "TODO_c000_2da1", 0),
+ MVI(0xc0002da2, "TODO_c000_2da2", 0),
+ MVI(0xc0002da3, "TODO_c000_2da3", 0),
+ MVI(0xc0002da4, "TODO_c000_2da4", 0),
+ MVI(0xc0002da5, "TODO_c000_2da5", 0),
+ MVI(0xc0002da6, "TODO_c000_2da6", 0),
+ MVI(0xc0002da7, "TODO_c000_2da7", 0),
+ MVI(0xc0002da8, "TODO_c000_2da8", 0),
+ MVI(0xc0002da9, "TODO_c000_2da9", 0),
+ MVI(0xc0002daa, "TODO_c000_2daa", 0),
+ MVI(0xc0002dab, "TODO_c000_2dab", 0),
+ MVI(0xc0002dac, "TODO_c000_2dac", 0),
+ MVI(0xc0002dad, "TODO_c000_2dad", 0),
+ MVI(0xc0002dae, "TODO_c000_2dae", 0),
+ MVI(0xc0002daf, "TODO_c000_2daf", 0),
+ MVI(0xc0002db0, "TODO_c000_2db0", 0),
+ MVI(0xc0002db1, "TODO_c000_2db1", 0),
+ MVI(0xc0002db2, "TODO_c000_2db2", 0),
+ MVI(0xc0002db3, "TODO_c000_2db3", 0),
+ MVI(0xc0002db4, "TODO_c000_2db4", 0),
+ MVI(0xc0002db5, "TODO_c000_2db5", 0),
+ MVI(0xc0002db6, "TODO_c000_2db6", 0),
+ MVI(0xc0002db7, "TODO_c000_2db7", 0),
+ MVI(0xc0002db8, "TODO_c000_2db8", 0),
+ MVI(0xc0002db9, "TODO_c000_2db9", 0),
+ MVI(0xc0002dba, "TODO_c000_2dba", 0),
+ MVI(0xc0002dbb, "TODO_c000_2dbb", 0),
+ MVI(0xc0002dbc, "TODO_c000_2dbc", 0),
+ MVI(0xc0002dbd, "TODO_c000_2dbd", 0),
+ MVI(0xc0002dbe, "TODO_c000_2dbe", 0),
+ MVI(0xc0002dbf, "TODO_c000_2dbf", 0),
+ MVI(0xc0002dc0, "TODO_c000_2dc0", 0),
+ MVI(0xc0002dc1, "TODO_c000_2dc1", 0),
+ MVI(0xc0002dc2, "TODO_c000_2dc2", 0),
+ MVI(0xc0002dc3, "TODO_c000_2dc3", 0),
+ MVI(0xc0002dc4, "TODO_c000_2dc4", 0),
+ MVI(0xc0002dc5, "TODO_c000_2dc5", 0),
+ MVI(0xc0002dc6, "TODO_c000_2dc6", 0),
+ MVI(0xc0002dc7, "TODO_c000_2dc7", 0),
+ MVI(0xc0002dc8, "TODO_c000_2dc8", 0),
+ MVI(0xc0002dc9, "TODO_c000_2dc9", 0),
+ MVI(0xc0002dca, "TODO_c000_2dca", 0),
+ MVI(0xc0002dcb, "TODO_c000_2dcb", 0),
+ MVI(0xc0002dcc, "TODO_c000_2dcc", 0),
+ MVI(0xc0002dcd, "TODO_c000_2dcd", 0),
+ MVI(0xc0002dce, "TODO_c000_2dce", 0),
+ MVI(0xc0002dcf, "TODO_c000_2dcf", 0),
+ MVI(0xc0002dd0, "TODO_c000_2dd0", 0),
+ MVI(0xc0002dd1, "TODO_c000_2dd1", 0),
+ MVI(0xc0002dd2, "TODO_c000_2dd2", 0),
+ MVI(0xc0002dd3, "TODO_c000_2dd3", 0),
+ MVI(0xc0002dd4, "TODO_c000_2dd4", 0),
+ MVI(0xc0002dd5, "TODO_c000_2dd5", 0),
+ MVI(0xc0002dd6, "TODO_c000_2dd6", 0),
+ MVI(0xc0002dd7, "TODO_c000_2dd7", 0),
+ MVI(0xc0002dd8, "TODO_c000_2dd8", 0),
+ MVI(0xc0002dd9, "TODO_c000_2dd9", 0),
+ MVI(0xc0002dda, "TODO_c000_2dda", 0),
+ MVI(0xc0002ddb, "TODO_c000_2ddb", 0),
+ MVI(0xc0002ddc, "TODO_c000_2ddc", 0),
+ MVI(0xc0002ddd, "TODO_c000_2ddd", 0),
+ MVI(0xc0002dde, "TODO_c000_2dde", 0),
+ MVI(0xc0002ddf, "TODO_c000_2ddf", 0),
+ MVI(0xc0002de0, "TODO_c000_2de0", 0),
+ MVI(0xc0002de1, "TODO_c000_2de1", 0),
+ MVI(0xc0002de2, "TODO_c000_2de2", 0),
+ MVI(0xc0002de3, "TODO_c000_2de3", 0),
+ MVI(0xc0002de4, "TODO_c000_2de4", 0),
+ MVI(0xc0002de5, "TODO_c000_2de5", 0),
+ MVI(0xc0002de6, "TODO_c000_2de6", 0),
+ MVI(0xc0002de7, "TODO_c000_2de7", 0),
+ MVI(0xc0002de8, "TODO_c000_2de8", 0),
+ MVI(0xc0002de9, "TODO_c000_2de9", 0),
+ MVI(0xc0002dea, "TODO_c000_2dea", 0),
+ MVI(0xc0002deb, "TODO_c000_2deb", 0),
+ MVI(0xc0002dec, "TODO_c000_2dec", 0),
+ MVI(0xc0002ded, "TODO_c000_2ded", 0),
+ MVI(0xc0002dee, "TODO_c000_2dee", 0),
+ MVI(0xc0002def, "TODO_c000_2def", 0),
+ MVI(0xc0002df0, "TODO_c000_2df0", 0),
+ MVI(0xc0002df1, "TODO_c000_2df1", 0),
+ MVI(0xc0002df2, "TODO_c000_2df2", 0),
+ MVI(0xc0002df3, "TODO_c000_2df3", 0),
+ MVI(0xc0002df4, "TODO_c000_2df4", 0),
+ MVI(0xc0002df5, "TODO_c000_2df5", 0),
+ MVI(0xc0002df6, "TODO_c000_2df6", 0),
+ MVI(0xc0002df7, "TODO_c000_2df7", 0),
+ MVI(0xc0002df8, "TODO_c000_2df8", 0),
+ MVI(0xc0002df9, "TODO_c000_2df9", 0),
+ MVI(0xc0002dfa, "TODO_c000_2dfa", 0),
+ MVI(0xc0002dfb, "TODO_c000_2dfb", 0),
+ MVI(0xc0002dfc, "TODO_c000_2dfc", 0),
+ MVI(0xc0002dfd, "TODO_c000_2dfd", 0),
+ MVI(0xc0002dfe, "TODO_c000_2dfe", 0),
+ MVI(0xc0002dff, "TODO_c000_2dff", 0),
+ MVI(0xc0002e00, "TODO_c000_2e00", 0),
+ MVI(0xc0002e01, "TODO_c000_2e01", 0),
+ MVI(0xc0002e02, "TODO_c000_2e02", 0),
+ MVI(0xc0002e03, "TODO_c000_2e03", 0),
+ MVI(0xc0002e04, "TODO_c000_2e04", 0),
+ MVI(0xc0002e05, "TODO_c000_2e05", 0),
+ MVI(0xc0002e06, "TODO_c000_2e06", 0),
+ MVI(0xc0002e07, "TODO_c000_2e07", 0),
+ MVI(0xc0002e08, "TODO_c000_2e08", 0),
+ MVI(0xc0002e09, "TODO_c000_2e09", 0),
+ MVI(0xc0002e0a, "TODO_c000_2e0a", 0),
+ MVI(0xc0002e0b, "TODO_c000_2e0b", 0),
+ MVI(0xc0002e0c, "TODO_c000_2e0c", 0),
+ MVI(0xc0002e0d, "TODO_c000_2e0d", 0),
+ MVI(0xc0002e0e, "TODO_c000_2e0e", 0),
+ MVI(0xc0002e0f, "TODO_c000_2e0f", 0),
+ MVI(0xc0002e10, "TODO_c000_2e10", 0),
+ MVI(0xc0002e11, "TODO_c000_2e11", 0),
+ MVI(0xc0002e12, "TODO_c000_2e12", 0),
+ MVI(0xc0002e13, "TODO_c000_2e13", 0),
+ MVI(0xc0002e14, "TODO_c000_2e14", 0),
+ MVI(0xc0002e15, "TODO_c000_2e15", 0),
+ MVI(0xc0002e16, "TODO_c000_2e16", 0),
+ MVI(0xc0002e17, "TODO_c000_2e17", 0),
+ MVI(0xc0002e18, "TODO_c000_2e18", 0),
+ MVI(0xc0002e19, "TODO_c000_2e19", 0),
+ MVI(0xc0002e1a, "TODO_c000_2e1a", 0),
+ MVI(0xc0002e1b, "TODO_c000_2e1b", 0),
+ MVI(0xc0002e1c, "TODO_c000_2e1c", 0),
+ MVI(0xc0002e1d, "TODO_c000_2e1d", 0),
+ MVI(0xc0002e1e, "TODO_c000_2e1e", 0),
+ MVI(0xc0002e1f, "TODO_c000_2e1f", 0),
+ MVI(0xc0002e20, "TODO_c000_2e20", 0),
+ MVI(0xc0002e21, "TODO_c000_2e21", 0),
+ MVI(0xc0002e22, "TODO_c000_2e22", 0),
+ MVI(0xc0002e23, "TODO_c000_2e23", 0),
+ MVI(0xc0002e24, "TODO_c000_2e24", 0),
+ MVI(0xc0002e25, "TODO_c000_2e25", 0),
+ MVI(0xc0002e26, "TODO_c000_2e26", 0),
+ MVI(0xc0002e27, "TODO_c000_2e27", 0),
+ MVI(0xc0002e28, "TODO_c000_2e28", 0),
+ MVI(0xc0002e29, "TODO_c000_2e29", 0),
+ MVI(0xc0002e2a, "TODO_c000_2e2a", 0),
+ MVI(0xc0002e2b, "TODO_c000_2e2b", 0),
+ MVI(0xc0002e2c, "TODO_c000_2e2c", 0),
+ MVI(0xc0002e2d, "TODO_c000_2e2d", 0),
+ MVI(0xc0002e2e, "TODO_c000_2e2e", 0),
+ MVI(0xc0002e2f, "TODO_c000_2e2f", 0),
+ MVI(0xc0002e30, "TODO_c000_2e30", 0),
+ MVI(0xc0002e31, "TODO_c000_2e31", 0),
+ MVI(0xc0002e32, "TODO_c000_2e32", 0),
+ MVI(0xc0002e33, "TODO_c000_2e33", 0),
+ MVI(0xc0002e34, "TODO_c000_2e34", 0),
+ MVI(0xc0002e35, "TODO_c000_2e35", 0),
+ MVI(0xc0002e36, "TODO_c000_2e36", 0),
+ MVI(0xc0002e37, "TODO_c000_2e37", 0),
+ MVI(0xc0002e38, "TODO_c000_2e38", 0),
+ MVI(0xc0002e39, "TODO_c000_2e39", 0),
+ MVI(0xc0002e3a, "TODO_c000_2e3a", 0),
+ MVI(0xc0002e3b, "TODO_c000_2e3b", 0),
+ MVI(0xc0002e3c, "TODO_c000_2e3c", 0),
+ MVI(0xc0002e3d, "TODO_c000_2e3d", 0),
+ MVI(0xc0002e3e, "TODO_c000_2e3e", 0),
+ MVI(0xc0002e3f, "TODO_c000_2e3f", 0),
+ MVI(0xc0002e40, "TODO_c000_2e40", 0),
+ MVI(0xc0002e41, "TODO_c000_2e41", 0),
+ MVI(0xc0002e42, "TODO_c000_2e42", 0),
+ MVI(0xc0002e43, "TODO_c000_2e43", 0),
+ MVI(0xc0002e44, "TODO_c000_2e44", 0),
+ MVI(0xc0002e45, "TODO_c000_2e45", 0),
+ MVI(0xc0002e46, "TODO_c000_2e46", 0),
+ MVI(0xc0002e47, "TODO_c000_2e47", 0),
+ MVI(0xc0002e48, "TODO_c000_2e48", 0),
+ MVI(0xc0002e49, "TODO_c000_2e49", 0),
+ MVI(0xc0002e4a, "TODO_c000_2e4a", 0),
+ MVI(0xc0002e4b, "TODO_c000_2e4b", 0),
+ MVI(0xc0002e4c, "TODO_c000_2e4c", 0),
+ MVI(0xc0002e4d, "TODO_c000_2e4d", 0),
+ MVI(0xc0002e4e, "TODO_c000_2e4e", 0),
+ MVI(0xc0002e4f, "TODO_c000_2e4f", 0),
+ MVI(0xc0002e50, "TODO_c000_2e50", 0),
+ MVI(0xc0002e51, "TODO_c000_2e51", 0),
+ MVI(0xc0002e52, "TODO_c000_2e52", 0),
+ MVI(0xc0002e53, "TODO_c000_2e53", 0),
+ MVI(0xc0002e54, "TODO_c000_2e54", 0),
+ MVI(0xc0002e55, "TODO_c000_2e55", 0),
+ MVI(0xc0002e56, "TODO_c000_2e56", 0),
+ MVI(0xc0002e57, "TODO_c000_2e57", 0),
+ MVI(0xc0002e58, "TODO_c000_2e58", 0),
+ MVI(0xc0002e59, "TODO_c000_2e59", 0),
+ MVI(0xc0002e5a, "TODO_c000_2e5a", 0),
+ MVI(0xc0002e5b, "TODO_c000_2e5b", 0),
+ MVI(0xc0002e5c, "TODO_c000_2e5c", 0),
+ MVI(0xc0002e5d, "TODO_c000_2e5d", 0),
+ MVI(0xc0002e5e, "TODO_c000_2e5e", 0),
+ MVI(0xc0002e5f, "TODO_c000_2e5f", 0),
+ MVI(0xc0002e60, "TODO_c000_2e60", 0),
+ MVI(0xc0002e61, "TODO_c000_2e61", 0),
+ MVI(0xc0002e62, "TODO_c000_2e62", 0),
+ MVI(0xc0002e63, "TODO_c000_2e63", 0),
+ MVI(0xc0002e64, "TODO_c000_2e64", 0),
+ MVI(0xc0002e65, "TODO_c000_2e65", 0),
+ MVI(0xc0002e66, "TODO_c000_2e66", 0),
+ MVI(0xc0002e67, "TODO_c000_2e67", 0),
+ MVI(0xc0002e68, "TODO_c000_2e68", 0),
+ MVI(0xc0002e69, "TODO_c000_2e69", 0),
+ MVI(0xc0002e6a, "TODO_c000_2e6a", 0),
+ MVI(0xc0002e6b, "TODO_c000_2e6b", 0),
+ MVI(0xc0002e6c, "TODO_c000_2e6c", 0),
+ MVI(0xc0002e6d, "TODO_c000_2e6d", 0),
+ MVI(0xc0002e6e, "TODO_c000_2e6e", 0),
+ MVI(0xc0002e6f, "TODO_c000_2e6f", 0),
+ MVI(0xc0002e70, "TODO_c000_2e70", 0),
+ MVI(0xc0002e71, "TODO_c000_2e71", 0),
+ MVI(0xc0002e72, "TODO_c000_2e72", 0),
+ MVI(0xc0002e73, "TODO_c000_2e73", 0),
+ MVI(0xc0002e74, "TODO_c000_2e74", 0),
+ MVI(0xc0002e75, "TODO_c000_2e75", 0),
+ MVI(0xc0002e76, "TODO_c000_2e76", 0),
+ MVI(0xc0002e77, "TODO_c000_2e77", 0),
+ MVI(0xc0002e78, "TODO_c000_2e78", 0),
+ MVI(0xc0002e79, "TODO_c000_2e79", 0),
+ MVI(0xc0002e7a, "TODO_c000_2e7a", 0),
+ MVI(0xc0002e7b, "TODO_c000_2e7b", 0),
+ MVI(0xc0002e7c, "TODO_c000_2e7c", 0),
+ MVI(0xc0002e7d, "TODO_c000_2e7d", 0),
+ MVI(0xc0002e7e, "TODO_c000_2e7e", 0),
+ MVI(0xc0002e7f, "TODO_c000_2e7f", 0),
+ MVI(0xc0002e80, "TODO_c000_2e80", 0),
+ MVI(0xc0002e81, "TODO_c000_2e81", 0),
+ MVI(0xc0002e82, "TODO_c000_2e82", 0),
+ MVI(0xc0002e83, "TODO_c000_2e83", 0),
+ MVI(0xc0002e84, "TODO_c000_2e84", 0),
+ MVI(0xc0002e85, "TODO_c000_2e85", 0),
+ MVI(0xc0002e86, "TODO_c000_2e86", 0),
+ MVI(0xc0002e87, "TODO_c000_2e87", 0),
+ MVI(0xc0002e88, "TODO_c000_2e88", 0),
+ MVI(0xc0002e89, "TODO_c000_2e89", 0),
+ MVI(0xc0002e8a, "TODO_c000_2e8a", 0),
+ MVI(0xc0002e8b, "TODO_c000_2e8b", 0),
+ MVI(0xc0002e8c, "TODO_c000_2e8c", 0),
+ MVI(0xc0002e8d, "TODO_c000_2e8d", 0),
+ MVI(0xc0002e8e, "TODO_c000_2e8e", 0),
+ MVI(0xc0002e8f, "TODO_c000_2e8f", 0),
+ MVI(0xc0002e90, "TODO_c000_2e90", 0),
+ MVI(0xc0002e91, "TODO_c000_2e91", 0),
+ MVI(0xc0002e92, "TODO_c000_2e92", 0),
+ MVI(0xc0002e93, "TODO_c000_2e93", 0),
+ MVI(0xc0002e94, "TODO_c000_2e94", 0),
+ MVI(0xc0002e95, "TODO_c000_2e95", 0),
+ MVI(0xc0002e96, "TODO_c000_2e96", 0),
+ MVI(0xc0002e97, "TODO_c000_2e97", 0),
+ MVI(0xc0002e98, "TODO_c000_2e98", 0),
+ MVI(0xc0002e99, "TODO_c000_2e99", 0),
+ MVI(0xc0002e9a, "TODO_c000_2e9a", 0),
+ MVI(0xc0002e9b, "TODO_c000_2e9b", 0),
+ MVI(0xc0002e9c, "TODO_c000_2e9c", 0),
+ MVI(0xc0002e9d, "TODO_c000_2e9d", 0),
+ MVI(0xc0002e9e, "TODO_c000_2e9e", 0),
+ MVI(0xc0002e9f, "TODO_c000_2e9f", 0),
+ MVI(0xc0002ea0, "TODO_c000_2ea0", 0),
+ MVI(0xc0002ea1, "TODO_c000_2ea1", 0),
+ MVI(0xc0002ea2, "TODO_c000_2ea2", 0),
+ MVI(0xc0002ea3, "TODO_c000_2ea3", 0),
+ MVI(0xc0002ea4, "TODO_c000_2ea4", 0),
+ MVI(0xc0002ea5, "TODO_c000_2ea5", 0),
+ MVI(0xc0002ea6, "TODO_c000_2ea6", 0),
+ MVI(0xc0002ea7, "TODO_c000_2ea7", 0),
+ MVI(0xc0002ea8, "TODO_c000_2ea8", 0),
+ MVI(0xc0002ea9, "TODO_c000_2ea9", 0),
+ MVI(0xc0002eaa, "TODO_c000_2eaa", 0),
+ MVI(0xc0002eab, "TODO_c000_2eab", 0),
+ MVI(0xc0002eac, "TODO_c000_2eac", 0),
+ MVI(0xc0002ead, "TODO_c000_2ead", 0),
+ MVI(0xc0002eae, "TODO_c000_2eae", 0),
+ MVI(0xc0002eaf, "TODO_c000_2eaf", 0),
+ MVI(0xc0002eb0, "TODO_c000_2eb0", 0),
+ MVI(0xc0002eb1, "TODO_c000_2eb1", 0),
+ MVI(0xc0002eb2, "TODO_c000_2eb2", 0),
+ MVI(0xc0002eb3, "TODO_c000_2eb3", 0),
+ MVI(0xc0002eb4, "TODO_c000_2eb4", 0),
+ MVI(0xc0002eb5, "TODO_c000_2eb5", 0),
+ MVI(0xc0002eb6, "TODO_c000_2eb6", 0),
+ MVI(0xc0002eb7, "TODO_c000_2eb7", 0),
+ MVI(0xc0002eb8, "TODO_c000_2eb8", 0),
+ MVI(0xc0002eb9, "TODO_c000_2eb9", 0),
+ MVI(0xc0002eba, "TODO_c000_2eba", 0),
+ MVI(0xc0002ebb, "TODO_c000_2ebb", 0),
+ MVI(0xc0002ebc, "TODO_c000_2ebc", 0),
+ MVI(0xc0002ebd, "TODO_c000_2ebd", 0),
+ MVI(0xc0002ebe, "TODO_c000_2ebe", 0),
+ MVI(0xc0002ebf, "TODO_c000_2ebf", 0),
+ MVI(0xc0002ec0, "TODO_c000_2ec0", 0),
+ MVI(0xc0002ec1, "TODO_c000_2ec1", 0),
+ MVI(0xc0002ec2, "TODO_c000_2ec2", 0),
+ MVI(0xc0002ec3, "TODO_c000_2ec3", 0),
+ MVI(0xc0002ec4, "TODO_c000_2ec4", 0),
+ MVI(0xc0002ec5, "TODO_c000_2ec5", 0),
+ MVI(0xc0002ec6, "TODO_c000_2ec6", 0),
+ MVI(0xc0002ec7, "TODO_c000_2ec7", 0),
+ MVI(0xc0002ec8, "TODO_c000_2ec8", 0),
+ MVI(0xc0002ec9, "TODO_c000_2ec9", 0),
+ MVI(0xc0002eca, "TODO_c000_2eca", 0),
+ MVI(0xc0002ecb, "TODO_c000_2ecb", 0),
+ MVI(0xc0002ecc, "TODO_c000_2ecc", 0),
+ MVI(0xc0002ecd, "TODO_c000_2ecd", 0),
+ MVI(0xc0002ece, "TODO_c000_2ece", 0),
+ MVI(0xc0002ecf, "TODO_c000_2ecf", 0),
+ MVI(0xc0002ed0, "TODO_c000_2ed0", 0),
+ MVI(0xc0002ed1, "TODO_c000_2ed1", 0),
+ MVI(0xc0002ed2, "TODO_c000_2ed2", 0),
+ MVI(0xc0002ed3, "TODO_c000_2ed3", 0),
+ MVI(0xc0002ed4, "TODO_c000_2ed4", 0),
+ MVI(0xc0002ed5, "TODO_c000_2ed5", 0),
+ MVI(0xc0002ed6, "TODO_c000_2ed6", 0),
+ MVI(0xc0002ed7, "TODO_c000_2ed7", 0),
+ MVI(0xc0002ed8, "TODO_c000_2ed8", 0),
+ MVI(0xc0002ed9, "TODO_c000_2ed9", 0),
+ MVI(0xc0002eda, "TODO_c000_2eda", 0),
+ MVI(0xc0002edb, "TODO_c000_2edb", 0),
+ MVI(0xc0002edc, "TODO_c000_2edc", 0),
+ MVI(0xc0002edd, "TODO_c000_2edd", 0),
+ MVI(0xc0002ede, "TODO_c000_2ede", 0),
+ MVI(0xc0002edf, "TODO_c000_2edf", 0),
+ MVI(0xc0002ee0, "TODO_c000_2ee0", 0),
+ MVI(0xc0002ee1, "TODO_c000_2ee1", 0),
+ MVI(0xc0002ee2, "TODO_c000_2ee2", 0),
+ MVI(0xc0002ee3, "TODO_c000_2ee3", 0),
+ MVI(0xc0002ee4, "TODO_c000_2ee4", 0),
+ MVI(0xc0002ee5, "TODO_c000_2ee5", 0),
+ MVI(0xc0002ee6, "TODO_c000_2ee6", 0),
+ MVI(0xc0002ee7, "TODO_c000_2ee7", 0),
+ MVI(0xc0002ee8, "TODO_c000_2ee8", 0),
+ MVI(0xc0002ee9, "TODO_c000_2ee9", 0),
+ MVI(0xc0002eea, "TODO_c000_2eea", 0),
+ MVI(0xc0002eeb, "TODO_c000_2eeb", 0),
+ MVI(0xc0002eec, "TODO_c000_2eec", 0),
+ MVI(0xc0002eed, "TODO_c000_2eed", 0),
+ MVI(0xc0002eee, "TODO_c000_2eee", 0),
+ MVI(0xc0002eef, "TODO_c000_2eef", 0),
+ MVI(0xc0002ef0, "TODO_c000_2ef0", 0),
+ MVI(0xc0002ef1, "TODO_c000_2ef1", 0),
+ MVI(0xc0002ef2, "TODO_c000_2ef2", 0),
+ MVI(0xc0002ef3, "TODO_c000_2ef3", 0),
+ MVI(0xc0002ef4, "TODO_c000_2ef4", 0),
+ MVI(0xc0002ef5, "TODO_c000_2ef5", 0),
+ MVI(0xc0002ef6, "TODO_c000_2ef6", 0),
+ MVI(0xc0002ef7, "TODO_c000_2ef7", 0),
+ MVI(0xc0002ef8, "TODO_c000_2ef8", 0),
+ MVI(0xc0002ef9, "TODO_c000_2ef9", 0),
+ MVI(0xc0002efa, "TODO_c000_2efa", 0),
+ MVI(0xc0002efb, "TODO_c000_2efb", 0),
+ MVI(0xc0002efc, "TODO_c000_2efc", 0),
+ MVI(0xc0002efd, "TODO_c000_2efd", 0),
+ MVI(0xc0002efe, "TODO_c000_2efe", 0),
+ MVI(0xc0002eff, "TODO_c000_2eff", 0),
+ MVI(0xc0002f00, "TODO_c000_2f00", 0),
+ MVI(0xc0002f01, "TODO_c000_2f01", 0),
+ MVI(0xc0002f02, "TODO_c000_2f02", 0),
+ MVI(0xc0002f03, "TODO_c000_2f03", 0),
+ MVI(0xc0002f04, "TODO_c000_2f04", 0),
+ MVI(0xc0002f05, "TODO_c000_2f05", 0),
+ MVI(0xc0002f06, "TODO_c000_2f06", 0),
+ MVI(0xc0002f07, "TODO_c000_2f07", 0),
+ MVI(0xc0002f08, "TODO_c000_2f08", 0),
+ MVI(0xc0002f09, "TODO_c000_2f09", 0),
+ MVI(0xc0002f0a, "TODO_c000_2f0a", 0),
+ MVI(0xc0002f0b, "TODO_c000_2f0b", 0),
+ MVI(0xc0002f0c, "TODO_c000_2f0c", 0),
+ MVI(0xc0002f0d, "TODO_c000_2f0d", 0),
+ MVI(0xc0002f0e, "TODO_c000_2f0e", 0),
+ MVI(0xc0002f0f, "TODO_c000_2f0f", 0),
+ MVI(0xc0002f10, "TODO_c000_2f10", 0),
+ MVI(0xc0002f11, "TODO_c000_2f11", 0),
+ MVI(0xc0002f12, "TODO_c000_2f12", 0),
+ MVI(0xc0002f13, "TODO_c000_2f13", 0),
+ MVI(0xc0002f14, "TODO_c000_2f14", 0),
+ MVI(0xc0002f15, "TODO_c000_2f15", 0),
+ MVI(0xc0002f16, "TODO_c000_2f16", 0),
+ MVI(0xc0002f17, "TODO_c000_2f17", 0),
+ MVI(0xc0002f18, "TODO_c000_2f18", 0),
+ MVI(0xc0002f19, "TODO_c000_2f19", 0),
+ MVI(0xc0002f1a, "TODO_c000_2f1a", 0),
+ MVI(0xc0002f1b, "TODO_c000_2f1b", 0),
+ MVI(0xc0002f1c, "TODO_c000_2f1c", 0),
+ MVI(0xc0002f1d, "TODO_c000_2f1d", 0),
+ MVI(0xc0002f1e, "TODO_c000_2f1e", 0),
+ MVI(0xc0002f1f, "TODO_c000_2f1f", 0),
+ MVI(0xc0002f20, "TODO_c000_2f20", 0),
+ MVI(0xc0002f21, "TODO_c000_2f21", 0),
+ MVI(0xc0002f22, "TODO_c000_2f22", 0),
+ MVI(0xc0002f23, "TODO_c000_2f23", 0),
+ MVI(0xc0002f24, "TODO_c000_2f24", 0),
+ MVI(0xc0002f25, "TODO_c000_2f25", 0),
+ MVI(0xc0002f26, "TODO_c000_2f26", 0),
+ MVI(0xc0002f27, "TODO_c000_2f27", 0),
+ MVI(0xc0002f28, "TODO_c000_2f28", 0),
+ MVI(0xc0002f29, "TODO_c000_2f29", 0),
+ MVI(0xc0002f2a, "TODO_c000_2f2a", 0),
+ MVI(0xc0002f2b, "TODO_c000_2f2b", 0),
+ MVI(0xc0002f2c, "TODO_c000_2f2c", 0),
+ MVI(0xc0002f2d, "TODO_c000_2f2d", 0),
+ MVI(0xc0002f2e, "TODO_c000_2f2e", 0),
+ MVI(0xc0002f2f, "TODO_c000_2f2f", 0),
+ MVI(0xc0002f30, "TODO_c000_2f30", 0),
+ MVI(0xc0002f31, "TODO_c000_2f31", 0),
+ MVI(0xc0002f32, "TODO_c000_2f32", 0),
+ MVI(0xc0002f33, "TODO_c000_2f33", 0),
+ MVI(0xc0002f34, "TODO_c000_2f34", 0),
+ MVI(0xc0002f35, "TODO_c000_2f35", 0),
+ MVI(0xc0002f36, "TODO_c000_2f36", 0),
+ MVI(0xc0002f37, "TODO_c000_2f37", 0),
+ MVI(0xc0002f38, "TODO_c000_2f38", 0),
+ MVI(0xc0002f39, "TODO_c000_2f39", 0),
+ MVI(0xc0002f3a, "TODO_c000_2f3a", 0),
+ MVI(0xc0002f3b, "TODO_c000_2f3b", 0),
+ MVI(0xc0002f3c, "TODO_c000_2f3c", 0),
+ MVI(0xc0002f3d, "TODO_c000_2f3d", 0),
+ MVI(0xc0002f3e, "TODO_c000_2f3e", 0),
+ MVI(0xc0002f3f, "TODO_c000_2f3f", 0),
+ MVI(0xc0002f40, "TODO_c000_2f40", 0),
+ MVI(0xc0002f41, "TODO_c000_2f41", 0),
+ MVI(0xc0002f42, "TODO_c000_2f42", 0),
+ MVI(0xc0002f43, "TODO_c000_2f43", 0),
+ MVI(0xc0002f44, "TODO_c000_2f44", 0),
+ MVI(0xc0002f45, "TODO_c000_2f45", 0),
+ MVI(0xc0002f46, "TODO_c000_2f46", 0),
+ MVI(0xc0002f47, "TODO_c000_2f47", 0),
+ MVI(0xc0002f48, "TODO_c000_2f48", 0),
+ MVI(0xc0002f49, "TODO_c000_2f49", 0),
+ MVI(0xc0002f4a, "TODO_c000_2f4a", 0),
+ MVI(0xc0002f4b, "TODO_c000_2f4b", 0),
+ MVI(0xc0002f4c, "TODO_c000_2f4c", 0),
+ MVI(0xc0002f4d, "TODO_c000_2f4d", 0),
+ MVI(0xc0002f4e, "TODO_c000_2f4e", 0),
+ MVI(0xc0002f4f, "TODO_c000_2f4f", 0),
+ MVI(0xc0002f50, "TODO_c000_2f50", 0),
+ MVI(0xc0002f51, "TODO_c000_2f51", 0),
+ MVI(0xc0002f52, "TODO_c000_2f52", 0),
+ MVI(0xc0002f53, "TODO_c000_2f53", 0),
+ MVI(0xc0002f54, "TODO_c000_2f54", 0),
+ MVI(0xc0002f55, "TODO_c000_2f55", 0),
+ MVI(0xc0002f56, "TODO_c000_2f56", 0),
+ MVI(0xc0002f57, "TODO_c000_2f57", 0),
+ MVI(0xc0002f58, "TODO_c000_2f58", 0),
+ MVI(0xc0002f59, "TODO_c000_2f59", 0),
+ MVI(0xc0002f5a, "TODO_c000_2f5a", 0),
+ MVI(0xc0002f5b, "TODO_c000_2f5b", 0),
+ MVI(0xc0002f5c, "TODO_c000_2f5c", 0),
+ MVI(0xc0002f5d, "TODO_c000_2f5d", 0),
+ MVI(0xc0002f5e, "TODO_c000_2f5e", 0),
+ MVI(0xc0002f5f, "TODO_c000_2f5f", 0),
+ MVI(0xc0002f60, "TODO_c000_2f60", 0),
+ MVI(0xc0002f61, "TODO_c000_2f61", 0),
+ MVI(0xc0002f62, "TODO_c000_2f62", 0),
+ MVI(0xc0002f63, "TODO_c000_2f63", 0),
+ MVI(0xc0002f64, "TODO_c000_2f64", 0),
+ MVI(0xc0002f65, "TODO_c000_2f65", 0),
+ MVI(0xc0002f66, "TODO_c000_2f66", 0),
+ MVI(0xc0002f67, "TODO_c000_2f67", 0),
+ MVI(0xc0002f68, "TODO_c000_2f68", 0),
+ MVI(0xc0002f69, "TODO_c000_2f69", 0),
+ MVI(0xc0002f6a, "TODO_c000_2f6a", 0),
+ MVI(0xc0002f6b, "TODO_c000_2f6b", 0),
+ MVI(0xc0002f6c, "TODO_c000_2f6c", 0),
+ MVI(0xc0002f6d, "TODO_c000_2f6d", 0),
+ MVI(0xc0002f6e, "TODO_c000_2f6e", 0),
+ MVI(0xc0002f6f, "TODO_c000_2f6f", 0),
+ MVI(0xc0002f70, "TODO_c000_2f70", 0),
+ MVI(0xc0002f71, "TODO_c000_2f71", 0),
+ MVI(0xc0002f72, "TODO_c000_2f72", 0),
+ MVI(0xc0002f73, "TODO_c000_2f73", 0),
+ MVI(0xc0002f74, "TODO_c000_2f74", 0),
+ MVI(0xc0002f75, "TODO_c000_2f75", 0),
+ MVI(0xc0002f76, "TODO_c000_2f76", 0),
+ MVI(0xc0002f77, "TODO_c000_2f77", 0),
+ MVI(0xc0002f78, "TODO_c000_2f78", 0),
+ MVI(0xc0002f79, "TODO_c000_2f79", 0),
+ MVI(0xc0002f7a, "TODO_c000_2f7a", 0),
+ MVI(0xc0002f7b, "TODO_c000_2f7b", 0),
+ MVI(0xc0002f7c, "TODO_c000_2f7c", 0),
+ MVI(0xc0002f7d, "TODO_c000_2f7d", 0),
+ MVI(0xc0002f7e, "TODO_c000_2f7e", 0),
+ MVI(0xc0002f7f, "TODO_c000_2f7f", 0),
+ MVI(0xc0002f80, "TODO_c000_2f80", 0),
+ MVI(0xc0002f81, "TODO_c000_2f81", 0),
+ MVI(0xc0002f82, "TODO_c000_2f82", 0),
+ MVI(0xc0002f83, "TODO_c000_2f83", 0),
+ MVI(0xc0002f84, "TODO_c000_2f84", 0),
+ MVI(0xc0002f85, "TODO_c000_2f85", 0),
+ MVI(0xc0002f86, "TODO_c000_2f86", 0),
+ MVI(0xc0002f87, "TODO_c000_2f87", 0),
+ MVI(0xc0002f88, "TODO_c000_2f88", 0),
+ MVI(0xc0002f89, "TODO_c000_2f89", 0),
+ MVI(0xc0002f8a, "TODO_c000_2f8a", 0),
+ MVI(0xc0002f8b, "TODO_c000_2f8b", 0),
+ MVI(0xc0002f8c, "TODO_c000_2f8c", 0),
+ MVI(0xc0002f8d, "TODO_c000_2f8d", 0),
+ MVI(0xc0002f8e, "TODO_c000_2f8e", 0),
+ MVI(0xc0002f8f, "TODO_c000_2f8f", 0),
+ MVI(0xc0002f90, "TODO_c000_2f90", 0),
+ MVI(0xc0002f91, "TODO_c000_2f91", 0),
+ MVI(0xc0002f92, "TODO_c000_2f92", 0),
+ MVI(0xc0002f93, "TODO_c000_2f93", 0),
+ MVI(0xc0002f94, "TODO_c000_2f94", 0),
+ MVI(0xc0002f95, "TODO_c000_2f95", 0),
+ MVI(0xc0002f96, "TODO_c000_2f96", 0),
+ MVI(0xc0002f97, "TODO_c000_2f97", 0),
+ MVI(0xc0002f98, "TODO_c000_2f98", 0),
+ MVI(0xc0002f99, "TODO_c000_2f99", 0),
+ MVI(0xc0002f9a, "TODO_c000_2f9a", 0),
+ MVI(0xc0002f9b, "TODO_c000_2f9b", 0),
+ MVI(0xc0002f9c, "TODO_c000_2f9c", 0),
+ MVI(0xc0002f9d, "TODO_c000_2f9d", 0),
+ MVI(0xc0002f9e, "TODO_c000_2f9e", 0),
+ MVI(0xc0002f9f, "TODO_c000_2f9f", 0),
+ MVI(0xc0002fa0, "TODO_c000_2fa0", 0),
+ MVI(0xc0002fa1, "TODO_c000_2fa1", 0),
+ MVI(0xc0002fa2, "TODO_c000_2fa2", 0),
+ MVI(0xc0002fa3, "TODO_c000_2fa3", 0),
+ MVI(0xc0002fa4, "TODO_c000_2fa4", 0),
+ MVI(0xc0002fa5, "TODO_c000_2fa5", 0),
+ MVI(0xc0002fa6, "TODO_c000_2fa6", 0),
+ MVI(0xc0002fa7, "TODO_c000_2fa7", 0),
+ MVI(0xc0002fa8, "TODO_c000_2fa8", 0),
+ MVI(0xc0002fa9, "TODO_c000_2fa9", 0),
+ MVI(0xc0002faa, "TODO_c000_2faa", 0),
+ MVI(0xc0002fab, "TODO_c000_2fab", 0),
+ MVI(0xc0002fac, "TODO_c000_2fac", 0),
+ MVI(0xc0002fad, "TODO_c000_2fad", 0),
+ MVI(0xc0002fae, "TODO_c000_2fae", 0),
+ MVI(0xc0002faf, "TODO_c000_2faf", 0),
+ MVI(0xc0002fb0, "TODO_c000_2fb0", 0),
+ MVI(0xc0002fb1, "TODO_c000_2fb1", 0),
+ MVI(0xc0002fb2, "TODO_c000_2fb2", 0),
+ MVI(0xc0002fb3, "TODO_c000_2fb3", 0),
+ MVI(0xc0002fb4, "TODO_c000_2fb4", 0),
+ MVI(0xc0002fb5, "TODO_c000_2fb5", 0),
+ MVI(0xc0002fb6, "TODO_c000_2fb6", 0),
+ MVI(0xc0002fb7, "TODO_c000_2fb7", 0),
+ MVI(0xc0002fb8, "TODO_c000_2fb8", 0),
+ MVI(0xc0002fb9, "TODO_c000_2fb9", 0),
+ MVI(0xc0002fba, "TODO_c000_2fba", 0),
+ MVI(0xc0002fbb, "TODO_c000_2fbb", 0),
+ MVI(0xc0002fbc, "TODO_c000_2fbc", 0),
+ MVI(0xc0002fbd, "TODO_c000_2fbd", 0),
+ MVI(0xc0002fbe, "TODO_c000_2fbe", 0),
+ MVI(0xc0002fbf, "TODO_c000_2fbf", 0),
+ MVI(0xc0002fc0, "TODO_c000_2fc0", 0),
+ MVI(0xc0002fc1, "TODO_c000_2fc1", 0),
+ MVI(0xc0002fc2, "TODO_c000_2fc2", 0),
+ MVI(0xc0002fc3, "TODO_c000_2fc3", 0),
+ MVI(0xc0002fc4, "TODO_c000_2fc4", 0),
+ MVI(0xc0002fc5, "TODO_c000_2fc5", 0),
+ MVI(0xc0002fc6, "TODO_c000_2fc6", 0),
+ MVI(0xc0002fc7, "TODO_c000_2fc7", 0),
+ MVI(0xc0002fc8, "TODO_c000_2fc8", 0),
+ MVI(0xc0002fc9, "TODO_c000_2fc9", 0),
+ MVI(0xc0002fca, "TODO_c000_2fca", 0),
+ MVI(0xc0002fcb, "TODO_c000_2fcb", 0),
+ MVI(0xc0002fcc, "TODO_c000_2fcc", 0),
+ MVI(0xc0002fcd, "TODO_c000_2fcd", 0),
+ MVI(0xc0002fce, "TODO_c000_2fce", 0),
+ MVI(0xc0002fcf, "TODO_c000_2fcf", 0),
+ MVI(0xc0002fd0, "TODO_c000_2fd0", 0),
+ MVI(0xc0002fd1, "TODO_c000_2fd1", 0),
+ MVI(0xc0002fd2, "TODO_c000_2fd2", 0),
+ MVI(0xc0002fd3, "TODO_c000_2fd3", 0),
+ MVI(0xc0002fd4, "TODO_c000_2fd4", 0),
+ MVI(0xc0002fd5, "TODO_c000_2fd5", 0),
+ MVI(0xc0002fd6, "TODO_c000_2fd6", 0),
+ MVI(0xc0002fd7, "TODO_c000_2fd7", 0),
+ MVI(0xc0002fd8, "TODO_c000_2fd8", 0),
+ MVI(0xc0002fd9, "TODO_c000_2fd9", 0),
+ MVI(0xc0002fda, "TODO_c000_2fda", 0),
+ MVI(0xc0002fdb, "TODO_c000_2fdb", 0),
+ MVI(0xc0002fdc, "TODO_c000_2fdc", 0),
+ MVI(0xc0002fdd, "TODO_c000_2fdd", 0),
+ MVI(0xc0002fde, "TODO_c000_2fde", 0),
+ MVI(0xc0002fdf, "TODO_c000_2fdf", 0),
+ MVI(0xc0002fe0, "TODO_c000_2fe0", 0),
+ MVI(0xc0002fe1, "TODO_c000_2fe1", 0),
+ MVI(0xc0002fe2, "TODO_c000_2fe2", 0),
+ MVI(0xc0002fe3, "TODO_c000_2fe3", 0),
+ MVI(0xc0002fe4, "TODO_c000_2fe4", 0),
+ MVI(0xc0002fe5, "TODO_c000_2fe5", 0),
+ MVI(0xc0002fe6, "TODO_c000_2fe6", 0),
+ MVI(0xc0002fe7, "TODO_c000_2fe7", 0),
+ MVI(0xc0002fe8, "TODO_c000_2fe8", 0),
+ MVI(0xc0002fe9, "TODO_c000_2fe9", 0),
+ MVI(0xc0002fea, "TODO_c000_2fea", 0),
+ MVI(0xc0002feb, "TODO_c000_2feb", 0),
+ MVI(0xc0002fec, "TODO_c000_2fec", 0),
+ MVI(0xc0002fed, "TODO_c000_2fed", 0),
+ MVI(0xc0002fee, "TODO_c000_2fee", 0),
+ MVI(0xc0002fef, "TODO_c000_2fef", 0),
+ MVI(0xc0002ff0, "TODO_c000_2ff0", 0),
+ MVI(0xc0002ff1, "TODO_c000_2ff1", 0),
+ MVI(0xc0002ff2, "TODO_c000_2ff2", 0),
+ MVI(0xc0002ff3, "TODO_c000_2ff3", 0),
+ MVI(0xc0002ff4, "TODO_c000_2ff4", 0),
+ MVI(0xc0002ff5, "TODO_c000_2ff5", 0),
+ MVI(0xc0002ff6, "TODO_c000_2ff6", 0),
+ MVI(0xc0002ff7, "TODO_c000_2ff7", 0),
+ MVI(0xc0002ff8, "TODO_c000_2ff8", 0),
+ MVI(0xc0002ff9, "TODO_c000_2ff9", 0),
+ MVI(0xc0002ffa, "TODO_c000_2ffa", 0),
+ MVI(0xc0002ffb, "TODO_c000_2ffb", 0),
+ MVI(0xc0002ffc, "TODO_c000_2ffc", 0),
+ MVI(0xc0002ffd, "TODO_c000_2ffd", 0),
+ MVI(0xc0002ffe, "TODO_c000_2ffe", 0),
+ MVI(0xc0002fff, "TODO_c000_2fff", 0),
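+ /* End of the TODO_c000_xxxx placeholder run (0xc0002a7b..0xc0002fff): these appear to be
+    auto-generated names for MSRs the probe tool could not identify.  The recognized
+    AMD K8/Fam10h MSR entries resume below. */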
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ MFX(0xc0010004, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff0000000022a0), 0), /* XXX: The range ended earlier than expected! */
+ MFX(0xc0010005, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010006, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010007, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x740000, UINT64_C(0xffffffffff00ffff), 0), /* value=0x740000 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x9000011, UINT64_C(0xffffffff89006000), 0), /* value=0x9000011 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0x80000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x8`80000000 */
+ MFI(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1), /* value=0x0 */
+ MFN(0xc0010021, "AMD_K8_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_MAX, 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFI(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage), /* value=0x0 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x20, 0, 0), /* value=0x20 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x2, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x2 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x2, 0, 0), /* value=0x2 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000000000178a64), 0, 0), /* value=0x80000000`00178a64 */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800000000018cc60), 0, 0), /* value=0x80000000`0018cc60 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x80000000001a5060), 0, 0), /* value=0x80000000`001a5060 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010069, "AMD_10H_P_ST_5", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc001006a, "AMD_10H_P_ST_6", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc001006b, "AMD_10H_P_ST_7", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr, AmdFam10hCStateIoBaseAddr, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x813 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x1 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7fbcd000 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0x7c000000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fc006003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, UINT64_C(0xffffffff00000005), UINT32_C(0xffffffe0)), /* value=0x0 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc001011b, "AMD_K8_UNK_c001_011b", WriteOnly, IgnoreWrite),
+ MVI(0xc0010120, "TODO_c001_0120", 0),
+ MVI(0xc0010121, "TODO_c001_0121", 0),
+ MVI(0xc0010122, "TODO_c001_0122", 0),
+ MFN(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength), /* value=0x0 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MVX(0xc0010188, "TODO_c001_0188", 0, UINT64_C(0xfffffcf000200000), 0),
+ MVX(0xc0010189, "TODO_c001_0189", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001018a, "TODO_c001_018a", 0, UINT64_C(0xfffffcf000200000), 0),
+ MVX(0xc001018b, "TODO_c001_018b", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010200, "AMD_K8_PERF_CTL_0", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0), /* value=0x530076 */
+ MFX(0xc0010201, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000002191), 0), /* value=0xffff`9124f0a3 */
+ MFX(0xc0010202, "AMD_K8_PERF_CTL_1", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x1, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010203, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010204, "AMD_K8_PERF_CTL_2", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x2, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010205, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010206, "AMD_K8_PERF_CTL_3", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x3, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010207, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010208, "AMD_K8_PERF_CTL_4", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x4, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010209, "AMD_K8_PERF_CTR_4", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x4, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc001020a, "AMD_K8_PERF_CTL_5", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x5, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc001020b, "AMD_K8_PERF_CTR_5", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x5, UINT64_C(0xffff000000000000), 0), /* value=0xffff */
+ MFX(0xc0010230, "AMD_16H_L2I_PERF_CTL_0", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x0, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010231, "AMD_16H_L2I_PERF_CTR_0", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x0, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010232, "AMD_16H_L2I_PERF_CTL_1", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x1, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010233, "AMD_16H_L2I_PERF_CTR_1", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x1, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010234, "AMD_16H_L2I_PERF_CTL_2", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x2, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010235, "AMD_16H_L2I_PERF_CTR_2", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x2, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MFX(0xc0010236, "AMD_16H_L2I_PERF_CTL_3", AmdFam16hL2IPerfCtlN, AmdFam16hL2IPerfCtlN, 0x3, UINT64_C(0xf0ffffffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010237, "AMD_16H_L2I_PERF_CTR_3", AmdFam16hL2IPerfCtrN, AmdFam16hL2IPerfCtrN, 0x3, UINT64_C(0xfffe000000000000), 0), /* value=0x0 */
+ MVX(0xc0010238, "TODO_c001_0238", 0, UINT64_C(0xf0ffffffbf0000), 0),
+ MVX(0xc0010239, "TODO_c001_0239", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001023a, "TODO_c001_023a", 0, UINT64_C(0xf0ffffffbf0000), 0),
+ MVX(0xc001023b, "TODO_c001_023b", 0, UINT64_C(0xfffe000000000000), 0),
+ MFX(0xc0010240, "AMD_15H_NB_PERF_CTL_0", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x0, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010241, "AMD_15H_NB_PERF_CTR_0", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010242, "AMD_15H_NB_PERF_CTL_1", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x1, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010243, "AMD_15H_NB_PERF_CTR_1", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010244, "AMD_15H_NB_PERF_CTL_2", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x2, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010245, "AMD_15H_NB_PERF_CTR_2", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010246, "AMD_15H_NB_PERF_CTL_3", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x3, UINT64_C(0x1ffffff0ff970000), 0), /* value=0x0 */
+ MFX(0xc0010247, "AMD_15H_NB_PERF_CTR_3", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MVX(0xc0010290, "TODO_c001_0290", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0010292, "TODO_c001_0292", 0xf0052, UINT32_MAX, 0),
+ MVI(0xc0010293, "TODO_c001_0293", 0xa01060),
+ MVX(0xc0010294, "TODO_c001_0294", UINT64_C(0x1001f47f000f0912), UINT64_C(0xc0000000ffe00000), 0),
+ MVX(0xc0010296, "TODO_c001_0296", 0x484848, UINT64_C(0xffffffffff808080), 0),
+ MVI(0xc0010297, "TODO_c001_0297", UINT64_C(0x380000fc000)),
+ MVI(0xc0010299, "TODO_c001_0299", 0xa1003),
+ MVI(0xc001029a, "TODO_c001_029a", 0x5382a5),
+ MVI(0xc001029b, "TODO_c001_029b", 0x7a66d6c3),
+ MVX(0xc0010400, "TODO_c001_0400", 0x600, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0xc0010401, "TODO_c001_0401", 0x2000, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0xc0010402, "TODO_c001_0402", 0x8, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc0010403, "TODO_c001_0403", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVI(0xc0010404, "TODO_c001_0404", 0),
+ MVX(0xc0010405, "TODO_c001_0405", 0, UINT64_C(0xfffffffffffff800), 0),
+ MVX(0xc0010406, "TODO_c001_0406", 0x40, UINT64_C(0xffffffffffffff80), 0),
+ MVX(0xc0010407, "TODO_c001_0407", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc0010408, "TODO_c001_0408", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc0010409, "TODO_c001_0409", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040a, "TODO_c001_040a", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040b, "TODO_c001_040b", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040c, "TODO_c001_040c", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040d, "TODO_c001_040d", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040e, "TODO_c001_040e", 0x80, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0xc001040f, "TODO_c001_040f", 0, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0xc0010410, "TODO_c001_0410", 0, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0xc0010411, "TODO_c001_0411", 0, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0010412, "TODO_c001_0412", 0, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0010413, "TODO_c001_0413", 0, UINT64_C(0xfffffffffffffffe), 0),
+ MVX(0xc0010414, "TODO_c001_0414", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0010415, "TODO_c001_0415", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0xc0010416, "TODO_c001_0416", 0, UINT64_C(0xfffffffffffffff0), 0),
+ MVI(0xc0010417, "TODO_c001_0417", 0),
+ MVI(0xc0010418, "TODO_c001_0418", 0),
+ MVI(0xc0010419, "TODO_c001_0419", 0),
+ MVI(0xc001041a, "TODO_c001_041a", 0),
+ MVI(0xc001041b, "TODO_c001_041b", 0),
+ MVI(0xc001041c, "TODO_c001_041c", 0),
+ MVI(0xc001041d, "TODO_c001_041d", 0),
+ MVI(0xc001041e, "TODO_c001_041e", 0),
+ MVI(0xc001041f, "TODO_c001_041f", 0),
+ MVI(0xc0010420, "TODO_c001_0420", 0),
+ MVI(0xc0010421, "TODO_c001_0421", 0),
+ MVI(0xc0010422, "TODO_c001_0422", 0),
+ MVI(0xc0010423, "TODO_c001_0423", 0),
+ MVI(0xc0010424, "TODO_c001_0424", 0),
+ MVI(0xc0010425, "TODO_c001_0425", 0),
+ MVI(0xc0010426, "TODO_c001_0426", 0),
+ MVI(0xc0010427, "TODO_c001_0427", 0),
+ MVI(0xc0010428, "TODO_c001_0428", 0),
+ MVI(0xc0010429, "TODO_c001_0429", 0),
+ MVI(0xc001042a, "TODO_c001_042a", 0),
+ MVI(0xc001042b, "TODO_c001_042b", 0),
+ MVI(0xc001042c, "TODO_c001_042c", 0),
+ MVI(0xc001042d, "TODO_c001_042d", 0),
+ MVI(0xc001042e, "TODO_c001_042e", 0),
+ MVI(0xc001042f, "TODO_c001_042f", 0),
+ MVI(0xc0010430, "TODO_c001_0430", 0),
+ MVI(0xc0010431, "TODO_c001_0431", 0),
+ MVI(0xc0010432, "TODO_c001_0432", 0),
+ MVI(0xc0010433, "TODO_c001_0433", 0),
+ MVI(0xc0010434, "TODO_c001_0434", 0),
+ MVI(0xc0010435, "TODO_c001_0435", 0),
+ MVI(0xc0010436, "TODO_c001_0436", 0),
+ MVI(0xc0010437, "TODO_c001_0437", 0),
+ MVI(0xc0010438, "TODO_c001_0438", 0),
+ MVI(0xc0010439, "TODO_c001_0439", 0),
+ MVI(0xc001043a, "TODO_c001_043a", 0),
+ MVI(0xc001043b, "TODO_c001_043b", 0),
+ MVI(0xc001043c, "TODO_c001_043c", 0),
+ MVI(0xc001043d, "TODO_c001_043d", 0),
+ MVI(0xc001043e, "TODO_c001_043e", 0),
+ MVI(0xc001043f, "TODO_c001_043f", 0),
+ MVI(0xc0010440, "TODO_c001_0440", 0),
+ MVI(0xc0010441, "TODO_c001_0441", 0),
+ MVI(0xc0010442, "TODO_c001_0442", 0),
+ MVI(0xc0010443, "TODO_c001_0443", 0),
+ MVI(0xc0010444, "TODO_c001_0444", 0),
+ MVI(0xc0010445, "TODO_c001_0445", 0),
+ MVI(0xc0010446, "TODO_c001_0446", 0),
+ MVI(0xc0010447, "TODO_c001_0447", 0),
+ MVI(0xc0010448, "TODO_c001_0448", 0),
+ MVI(0xc0010449, "TODO_c001_0449", 0),
+ MVI(0xc001044a, "TODO_c001_044a", 0),
+ MVI(0xc001044b, "TODO_c001_044b", 0),
+ MVI(0xc001044c, "TODO_c001_044c", 0),
+ MVI(0xc001044d, "TODO_c001_044d", 0),
+ MVI(0xc001044e, "TODO_c001_044e", 0),
+ MVI(0xc001044f, "TODO_c001_044f", 0),
+ MVI(0xc0010450, "TODO_c001_0450", 0),
+ MVI(0xc0010451, "TODO_c001_0451", 0),
+ MVI(0xc0010452, "TODO_c001_0452", 0),
+ MVI(0xc0010453, "TODO_c001_0453", 0),
+ MVI(0xc0010454, "TODO_c001_0454", 0),
+ MVI(0xc0010455, "TODO_c001_0455", 0),
+ MVI(0xc0010456, "TODO_c001_0456", 0),
+ MVI(0xc0010457, "TODO_c001_0457", 0),
+ MVI(0xc0010458, "TODO_c001_0458", 0),
+ MVI(0xc0010459, "TODO_c001_0459", 0),
+ MVI(0xc001045a, "TODO_c001_045a", 0),
+ MVI(0xc001045b, "TODO_c001_045b", 0),
+ MVI(0xc001045c, "TODO_c001_045c", 0),
+ MVI(0xc001045d, "TODO_c001_045d", 0),
+ MVI(0xc001045e, "TODO_c001_045e", 0),
+ MVI(0xc001045f, "TODO_c001_045f", 0),
+ MVI(0xc0010460, "TODO_c001_0460", 0),
+ MVI(0xc0010461, "TODO_c001_0461", 0),
+ MVI(0xc0010462, "TODO_c001_0462", 0),
+ MVI(0xc0010463, "TODO_c001_0463", 0),
+ MVI(0xc0010464, "TODO_c001_0464", 0),
+ MVI(0xc0010465, "TODO_c001_0465", 0),
+ MVI(0xc0010466, "TODO_c001_0466", 0),
+ MVI(0xc0010467, "TODO_c001_0467", 0),
+ MVI(0xc0010468, "TODO_c001_0468", 0),
+ MVI(0xc0010469, "TODO_c001_0469", 0),
+ MVI(0xc001046a, "TODO_c001_046a", 0),
+ MVI(0xc001046b, "TODO_c001_046b", 0),
+ MVI(0xc001046c, "TODO_c001_046c", 0),
+ MVI(0xc001046d, "TODO_c001_046d", 0),
+ MVI(0xc001046e, "TODO_c001_046e", 0),
+ MVI(0xc001046f, "TODO_c001_046f", 0),
+ MVI(0xc0010470, "TODO_c001_0470", 0),
+ MVI(0xc0010471, "TODO_c001_0471", 0),
+ MVI(0xc0010472, "TODO_c001_0472", 0),
+ MVI(0xc0010473, "TODO_c001_0473", 0),
+ MVI(0xc0010474, "TODO_c001_0474", 0),
+ MVI(0xc0010475, "TODO_c001_0475", 0),
+ MVI(0xc0010476, "TODO_c001_0476", 0),
+ MVI(0xc0010477, "TODO_c001_0477", 0),
+ MVI(0xc0010478, "TODO_c001_0478", 0),
+ MVI(0xc0010479, "TODO_c001_0479", 0),
+ MVI(0xc001047a, "TODO_c001_047a", 0),
+ MVI(0xc001047b, "TODO_c001_047b", 0),
+ MVI(0xc001047c, "TODO_c001_047c", 0),
+ MVI(0xc001047d, "TODO_c001_047d", 0),
+ MVI(0xc001047e, "TODO_c001_047e", 0),
+ MVI(0xc001047f, "TODO_c001_047f", 0),
+ MVI(0xc0010480, "TODO_c001_0480", 0),
+ MVI(0xc0010481, "TODO_c001_0481", 0),
+ MVI(0xc0010482, "TODO_c001_0482", 0),
+ MVI(0xc0010483, "TODO_c001_0483", 0),
+ MVI(0xc0010484, "TODO_c001_0484", 0),
+ MVI(0xc0010485, "TODO_c001_0485", 0),
+ MVI(0xc0010486, "TODO_c001_0486", 0),
+ MVI(0xc0010487, "TODO_c001_0487", 0),
+ MVI(0xc0010488, "TODO_c001_0488", 0),
+ MVI(0xc0010489, "TODO_c001_0489", 0),
+ MVI(0xc001048a, "TODO_c001_048a", 0),
+ MVI(0xc001048b, "TODO_c001_048b", 0),
+ MVI(0xc001048c, "TODO_c001_048c", 0),
+ MVI(0xc001048d, "TODO_c001_048d", 0),
+ MVI(0xc001048e, "TODO_c001_048e", 0),
+ MVI(0xc001048f, "TODO_c001_048f", 0),
+ MVI(0xc0010490, "TODO_c001_0490", 0),
+ MVI(0xc0010491, "TODO_c001_0491", 0),
+ MVI(0xc0010492, "TODO_c001_0492", 0),
+ MVI(0xc0010493, "TODO_c001_0493", 0),
+ MVI(0xc0010494, "TODO_c001_0494", 0),
+ MVI(0xc0010495, "TODO_c001_0495", 0),
+ MVI(0xc0010496, "TODO_c001_0496", 0),
+ MVI(0xc0010497, "TODO_c001_0497", 0),
+ MVI(0xc0010498, "TODO_c001_0498", 0),
+ MVI(0xc0010499, "TODO_c001_0499", 0),
+ MVI(0xc001049a, "TODO_c001_049a", 0),
+ MVI(0xc001049b, "TODO_c001_049b", 0),
+ MVI(0xc001049c, "TODO_c001_049c", 0),
+ MVI(0xc001049d, "TODO_c001_049d", 0),
+ MVI(0xc001049e, "TODO_c001_049e", 0),
+ MVI(0xc001049f, "TODO_c001_049f", 0),
+ MVI(0xc00104a0, "TODO_c001_04a0", 0),
+ MVI(0xc00104a1, "TODO_c001_04a1", 0),
+ MVI(0xc00104a2, "TODO_c001_04a2", 0),
+ MVI(0xc00104a3, "TODO_c001_04a3", 0),
+ MVI(0xc00104a4, "TODO_c001_04a4", 0),
+ MVI(0xc00104a5, "TODO_c001_04a5", 0),
+ MVI(0xc00104a6, "TODO_c001_04a6", 0),
+ MVI(0xc00104a7, "TODO_c001_04a7", 0),
+ MVI(0xc00104a8, "TODO_c001_04a8", 0),
+ MVI(0xc00104a9, "TODO_c001_04a9", 0),
+ MVI(0xc00104aa, "TODO_c001_04aa", 0),
+ MVI(0xc00104ab, "TODO_c001_04ab", 0),
+ MVI(0xc00104ac, "TODO_c001_04ac", 0),
+ MVI(0xc00104ad, "TODO_c001_04ad", 0),
+ MVI(0xc00104ae, "TODO_c001_04ae", 0),
+ MVI(0xc00104af, "TODO_c001_04af", 0),
+ MVI(0xc00104b0, "TODO_c001_04b0", 0),
+ MVI(0xc00104b1, "TODO_c001_04b1", 0),
+ MVI(0xc00104b2, "TODO_c001_04b2", 0),
+ MVI(0xc00104b3, "TODO_c001_04b3", 0),
+ MVI(0xc00104b4, "TODO_c001_04b4", 0),
+ MVI(0xc00104b5, "TODO_c001_04b5", 0),
+ MVI(0xc00104b6, "TODO_c001_04b6", 0),
+ MVI(0xc00104b7, "TODO_c001_04b7", 0),
+ MVI(0xc00104b8, "TODO_c001_04b8", 0),
+ MVI(0xc00104b9, "TODO_c001_04b9", 0),
+ MVI(0xc00104ba, "TODO_c001_04ba", 0),
+ MVI(0xc00104bb, "TODO_c001_04bb", 0),
+ MVI(0xc00104bc, "TODO_c001_04bc", 0),
+ MVI(0xc00104bd, "TODO_c001_04bd", 0),
+ MVI(0xc00104be, "TODO_c001_04be", 0),
+ MVI(0xc00104bf, "TODO_c001_04bf", 0),
+ MVI(0xc00104c0, "TODO_c001_04c0", 0),
+ MVI(0xc00104c1, "TODO_c001_04c1", 0),
+ MVI(0xc00104c2, "TODO_c001_04c2", 0),
+ MVI(0xc00104c3, "TODO_c001_04c3", 0),
+ MVI(0xc00104c4, "TODO_c001_04c4", 0),
+ MVI(0xc00104c5, "TODO_c001_04c5", 0),
+ MVI(0xc00104c6, "TODO_c001_04c6", 0),
+ MVI(0xc00104c7, "TODO_c001_04c7", 0),
+ MVI(0xc00104c8, "TODO_c001_04c8", 0),
+ MVI(0xc00104c9, "TODO_c001_04c9", 0),
+ MVI(0xc00104ca, "TODO_c001_04ca", 0),
+ MVI(0xc00104cb, "TODO_c001_04cb", 0),
+ MVI(0xc00104cc, "TODO_c001_04cc", 0),
+ MVI(0xc00104cd, "TODO_c001_04cd", 0),
+ MVI(0xc00104ce, "TODO_c001_04ce", 0),
+ MVI(0xc00104cf, "TODO_c001_04cf", 0),
+ MVI(0xc00104d0, "TODO_c001_04d0", 0),
+ MVI(0xc00104d1, "TODO_c001_04d1", 0),
+ MVI(0xc00104d2, "TODO_c001_04d2", 0),
+ MVI(0xc00104d3, "TODO_c001_04d3", 0),
+ MVI(0xc00104d4, "TODO_c001_04d4", 0),
+ MVI(0xc00104d5, "TODO_c001_04d5", 0),
+ MVI(0xc00104d6, "TODO_c001_04d6", 0),
+ MVI(0xc00104d7, "TODO_c001_04d7", 0),
+ MVI(0xc00104d8, "TODO_c001_04d8", 0),
+ MVI(0xc00104d9, "TODO_c001_04d9", 0),
+ MVI(0xc00104da, "TODO_c001_04da", 0),
+ MVI(0xc00104db, "TODO_c001_04db", 0),
+ MVI(0xc00104dc, "TODO_c001_04dc", 0),
+ MVI(0xc00104dd, "TODO_c001_04dd", 0),
+ MVI(0xc00104de, "TODO_c001_04de", 0),
+ MVI(0xc00104df, "TODO_c001_04df", 0),
+ MVI(0xc00104e0, "TODO_c001_04e0", 0),
+ MVI(0xc00104e1, "TODO_c001_04e1", 0),
+ MVI(0xc00104e2, "TODO_c001_04e2", 0),
+ MVI(0xc00104e3, "TODO_c001_04e3", 0),
+ MVI(0xc00104e4, "TODO_c001_04e4", 0),
+ MVI(0xc00104e5, "TODO_c001_04e5", 0),
+ MVI(0xc00104e6, "TODO_c001_04e6", 0),
+ MVI(0xc00104e7, "TODO_c001_04e7", 0),
+ MVI(0xc00104e8, "TODO_c001_04e8", 0),
+ MVI(0xc00104e9, "TODO_c001_04e9", 0),
+ MVI(0xc00104ea, "TODO_c001_04ea", 0),
+ MVI(0xc00104eb, "TODO_c001_04eb", 0),
+ MVI(0xc00104ec, "TODO_c001_04ec", 0),
+ MVI(0xc00104ed, "TODO_c001_04ed", 0),
+ MVI(0xc00104ee, "TODO_c001_04ee", 0),
+ MVI(0xc00104ef, "TODO_c001_04ef", 0),
+ MVI(0xc00104f0, "TODO_c001_04f0", 0),
+ MVI(0xc00104f1, "TODO_c001_04f1", 0),
+ MVI(0xc00104f2, "TODO_c001_04f2", 0),
+ MVI(0xc00104f3, "TODO_c001_04f3", 0),
+ MVI(0xc00104f4, "TODO_c001_04f4", 0),
+ MVI(0xc00104f5, "TODO_c001_04f5", 0),
+ MVI(0xc00104f6, "TODO_c001_04f6", 0),
+ MVI(0xc00104f7, "TODO_c001_04f7", 0),
+ MVI(0xc00104f8, "TODO_c001_04f8", 0),
+ MVI(0xc00104f9, "TODO_c001_04f9", 0),
+ MVI(0xc00104fa, "TODO_c001_04fa", 0),
+ MVI(0xc00104fb, "TODO_c001_04fb", 0),
+ MVI(0xc00104fc, "TODO_c001_04fc", 0),
+ MVI(0xc00104fd, "TODO_c001_04fd", 0),
+ MVI(0xc00104fe, "TODO_c001_04fe", 0),
+ MVI(0xc00104ff, "TODO_c001_04ff", 0),
+ MVI(0xc0010500, "TODO_c001_0500", 0),
+ MVI(0xc0010501, "TODO_c001_0501", 0),
+ MVX(0xc0010502, "TODO_c001_0502", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc0010503, "TODO_c001_0503", 0),
+ MVI(0xc0010504, "TODO_c001_0504", 0),
+ MVI(0xc0010505, "TODO_c001_0505", 0),
+ MVI(0xc0010506, "TODO_c001_0506", 0),
+ MVX(0xc0010507, "TODO_c001_0507", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc0010508, "TODO_c001_0508", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc0010509, "TODO_c001_0509", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001050a, "TODO_c001_050a", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001050b, "TODO_c001_050b", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001050c, "TODO_c001_050c", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001050d, "TODO_c001_050d", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0xc001050e, "TODO_c001_050e", 0, UINT64_C(0xfffe000000000000), 0),
+ MVI(0xc001050f, "TODO_c001_050f", 0),
+ MVI(0xc0010510, "TODO_c001_0510", 0),
+ MVI(0xc0010511, "TODO_c001_0511", 0),
+ MVI(0xc0010512, "TODO_c001_0512", 0),
+ MVI(0xc0010513, "TODO_c001_0513", 0),
+ MVX(0xc0010514, "TODO_c001_0514", 0, UINT64_C(0xfffffe0000000000), 0),
+ MVX(0xc0010515, "TODO_c001_0515", 0, UINT64_C(0xfffffe0000000000), 0),
+ MVI(0xc0010516, "TODO_c001_0516", 0),
+ MVI(0xc0010517, "TODO_c001_0517", 0),
+ MVI(0xc0010518, "TODO_c001_0518", 0),
+ MVI(0xc0010519, "TODO_c001_0519", 0),
+ MVI(0xc001051a, "TODO_c001_051a", 0),
+ MVI(0xc001051b, "TODO_c001_051b", 0),
+ MVI(0xc001051c, "TODO_c001_051c", 0),
+ MVI(0xc001051d, "TODO_c001_051d", 0),
+ MVI(0xc001051e, "TODO_c001_051e", 0),
+ MVI(0xc001051f, "TODO_c001_051f", 0),
+ MVI(0xc0010520, "TODO_c001_0520", 0),
+ MVI(0xc0010521, "TODO_c001_0521", 0),
+ MVI(0xc0010522, "TODO_c001_0522", 0),
+ MVI(0xc0010523, "TODO_c001_0523", 0),
+ MVI(0xc0010524, "TODO_c001_0524", 0),
+ MVI(0xc0010525, "TODO_c001_0525", 0),
+ MVI(0xc0010526, "TODO_c001_0526", 0),
+ MVI(0xc0010527, "TODO_c001_0527", 0),
+ MVI(0xc0010528, "TODO_c001_0528", 0),
+ MVI(0xc0010529, "TODO_c001_0529", 0),
+ MVI(0xc001052a, "TODO_c001_052a", 0),
+ MVI(0xc001052b, "TODO_c001_052b", 0),
+ MVI(0xc001052c, "TODO_c001_052c", 0),
+ MVI(0xc001052d, "TODO_c001_052d", 0),
+ MVI(0xc001052e, "TODO_c001_052e", 0),
+ MVI(0xc001052f, "TODO_c001_052f", 0),
+ MVI(0xc0010530, "TODO_c001_0530", 0),
+ MVI(0xc0010531, "TODO_c001_0531", 0),
+ MVI(0xc0010532, "TODO_c001_0532", 0),
+ MVI(0xc0010533, "TODO_c001_0533", 0),
+ MVI(0xc0010534, "TODO_c001_0534", 0),
+ MVI(0xc0010535, "TODO_c001_0535", 0),
+ MVI(0xc0010536, "TODO_c001_0536", 0),
+ MVI(0xc0010537, "TODO_c001_0537", 0),
+ MVI(0xc0010538, "TODO_c001_0538", 0),
+ MVI(0xc0010539, "TODO_c001_0539", 0),
+ MVI(0xc001053a, "TODO_c001_053a", 0),
+ MVI(0xc001053b, "TODO_c001_053b", 0),
+ MVI(0xc001053c, "TODO_c001_053c", 0),
+ MVI(0xc001053d, "TODO_c001_053d", 0),
+ MVI(0xc001053e, "TODO_c001_053e", 0),
+ MVI(0xc001053f, "TODO_c001_053f", 0),
+ MVI(0xc0010540, "TODO_c001_0540", 0),
+ MVI(0xc0010541, "TODO_c001_0541", 0),
+ MVI(0xc0010542, "TODO_c001_0542", 0),
+ MVI(0xc0010543, "TODO_c001_0543", 0),
+ MVI(0xc0010544, "TODO_c001_0544", 0),
+ MVI(0xc0010545, "TODO_c001_0545", 0),
+ MVI(0xc0010546, "TODO_c001_0546", 0),
+ MVI(0xc0010547, "TODO_c001_0547", 0),
+ MVI(0xc0010548, "TODO_c001_0548", 0),
+ MVI(0xc0010549, "TODO_c001_0549", 0),
+ MVI(0xc001054a, "TODO_c001_054a", 0),
+ MVI(0xc001054b, "TODO_c001_054b", 0),
+ MVI(0xc001054c, "TODO_c001_054c", 0),
+ MVI(0xc001054d, "TODO_c001_054d", 0),
+ MVI(0xc001054e, "TODO_c001_054e", 0),
+ MVI(0xc001054f, "TODO_c001_054f", 0),
+ MVI(0xc0010550, "TODO_c001_0550", 0),
+ MVI(0xc0010551, "TODO_c001_0551", 0),
+ MVI(0xc0010552, "TODO_c001_0552", 0),
+ MVI(0xc0010553, "TODO_c001_0553", 0),
+ MVI(0xc0010554, "TODO_c001_0554", 0),
+ MVI(0xc0010555, "TODO_c001_0555", 0),
+ MVI(0xc0010556, "TODO_c001_0556", 0),
+ MVI(0xc0010557, "TODO_c001_0557", 0),
+ MVI(0xc0010558, "TODO_c001_0558", 0),
+ MVI(0xc0010559, "TODO_c001_0559", 0),
+ MVI(0xc001055a, "TODO_c001_055a", 0),
+ MVI(0xc001055b, "TODO_c001_055b", 0),
+ MVI(0xc001055c, "TODO_c001_055c", 0),
+ MVI(0xc001055d, "TODO_c001_055d", 0),
+ MVI(0xc001055e, "TODO_c001_055e", 0),
+ MVI(0xc001055f, "TODO_c001_055f", 0),
+ MVI(0xc0010560, "TODO_c001_0560", 0),
+ MVI(0xc0010561, "TODO_c001_0561", 0),
+ MVI(0xc0010562, "TODO_c001_0562", 0),
+ MVI(0xc0010563, "TODO_c001_0563", 0),
+ MVI(0xc0010564, "TODO_c001_0564", 0),
+ MVI(0xc0010565, "TODO_c001_0565", 0),
+ MVI(0xc0010566, "TODO_c001_0566", 0),
+ MVI(0xc0010567, "TODO_c001_0567", 0),
+ MVI(0xc0010568, "TODO_c001_0568", 0),
+ MVI(0xc0010569, "TODO_c001_0569", 0),
+ MVI(0xc001056a, "TODO_c001_056a", 0),
+ MVI(0xc001056b, "TODO_c001_056b", 0),
+ MVI(0xc001056c, "TODO_c001_056c", 0),
+ MVI(0xc001056d, "TODO_c001_056d", 0),
+ MVI(0xc001056e, "TODO_c001_056e", 0),
+ MVI(0xc001056f, "TODO_c001_056f", 0),
+ MVI(0xc0010570, "TODO_c001_0570", 0),
+ MVI(0xc0010571, "TODO_c001_0571", 0),
+ MVI(0xc0010572, "TODO_c001_0572", 0),
+ MVI(0xc0010573, "TODO_c001_0573", 0),
+ MVI(0xc0010574, "TODO_c001_0574", 0),
+ MVI(0xc0010575, "TODO_c001_0575", 0),
+ MVI(0xc0010576, "TODO_c001_0576", 0),
+ MVI(0xc0010577, "TODO_c001_0577", 0),
+ MVI(0xc0010578, "TODO_c001_0578", 0),
+ MVI(0xc0010579, "TODO_c001_0579", 0),
+ MVI(0xc001057a, "TODO_c001_057a", 0),
+ MVI(0xc001057b, "TODO_c001_057b", 0),
+ MVI(0xc001057c, "TODO_c001_057c", 0),
+ MVI(0xc001057d, "TODO_c001_057d", 0),
+ MVI(0xc001057e, "TODO_c001_057e", 0),
+ MVI(0xc001057f, "TODO_c001_057f", 0),
+ MVI(0xc0010580, "TODO_c001_0580", 0),
+ MVI(0xc0010581, "TODO_c001_0581", 0),
+ MVI(0xc0010582, "TODO_c001_0582", 0),
+ MVI(0xc0010583, "TODO_c001_0583", 0),
+ MVI(0xc0010584, "TODO_c001_0584", 0),
+ MVI(0xc0010585, "TODO_c001_0585", 0),
+ MVI(0xc0010586, "TODO_c001_0586", 0),
+ MVI(0xc0010587, "TODO_c001_0587", 0),
+ MVI(0xc0010588, "TODO_c001_0588", 0),
+ MVI(0xc0010589, "TODO_c001_0589", 0),
+ MVI(0xc001058a, "TODO_c001_058a", 0),
+ MVI(0xc001058b, "TODO_c001_058b", 0),
+ MVI(0xc001058c, "TODO_c001_058c", 0),
+ MVI(0xc001058d, "TODO_c001_058d", 0),
+ MVI(0xc001058e, "TODO_c001_058e", 0),
+ MVI(0xc001058f, "TODO_c001_058f", 0),
+ MVI(0xc0010590, "TODO_c001_0590", 0),
+ MVI(0xc0010591, "TODO_c001_0591", 0),
+ MVI(0xc0010592, "TODO_c001_0592", 0),
+ MVI(0xc0010593, "TODO_c001_0593", 0),
+ MVI(0xc0010594, "TODO_c001_0594", 0),
+ MVI(0xc0010595, "TODO_c001_0595", 0),
+ MVI(0xc0010596, "TODO_c001_0596", 0),
+ MVI(0xc0010597, "TODO_c001_0597", 0),
+ MVI(0xc0010598, "TODO_c001_0598", 0),
+ MVI(0xc0010599, "TODO_c001_0599", 0),
+ MVI(0xc001059a, "TODO_c001_059a", 0),
+ MVI(0xc001059b, "TODO_c001_059b", 0),
+ MVI(0xc001059c, "TODO_c001_059c", 0),
+ MVI(0xc001059d, "TODO_c001_059d", 0),
+ MVI(0xc001059e, "TODO_c001_059e", 0),
+ MVI(0xc001059f, "TODO_c001_059f", 0),
+ MVI(0xc00105a0, "TODO_c001_05a0", 0),
+ MVI(0xc00105a1, "TODO_c001_05a1", 0),
+ MVI(0xc00105a2, "TODO_c001_05a2", 0),
+ MVI(0xc00105a3, "TODO_c001_05a3", 0),
+ MVI(0xc00105a4, "TODO_c001_05a4", 0),
+ MVI(0xc00105a5, "TODO_c001_05a5", 0),
+ MVI(0xc00105a6, "TODO_c001_05a6", 0),
+ MVI(0xc00105a7, "TODO_c001_05a7", 0),
+ MVI(0xc00105a8, "TODO_c001_05a8", 0),
+ MVI(0xc00105a9, "TODO_c001_05a9", 0),
+ MVI(0xc00105aa, "TODO_c001_05aa", 0),
+ MVI(0xc00105ab, "TODO_c001_05ab", 0),
+ MVI(0xc00105ac, "TODO_c001_05ac", 0),
+ MVI(0xc00105ad, "TODO_c001_05ad", 0),
+ MVI(0xc00105ae, "TODO_c001_05ae", 0),
+ MVI(0xc00105af, "TODO_c001_05af", 0),
+ MVI(0xc00105b0, "TODO_c001_05b0", 0),
+ MVI(0xc00105b1, "TODO_c001_05b1", 0),
+ MVI(0xc00105b2, "TODO_c001_05b2", 0),
+ MVI(0xc00105b3, "TODO_c001_05b3", 0),
+ MVI(0xc00105b4, "TODO_c001_05b4", 0),
+ MVI(0xc00105b5, "TODO_c001_05b5", 0),
+ MVI(0xc00105b6, "TODO_c001_05b6", 0),
+ MVI(0xc00105b7, "TODO_c001_05b7", 0),
+ MVI(0xc00105b8, "TODO_c001_05b8", 0),
+ MVI(0xc00105b9, "TODO_c001_05b9", 0),
+ MVI(0xc00105ba, "TODO_c001_05ba", 0),
+ MVI(0xc00105bb, "TODO_c001_05bb", 0),
+ MVI(0xc00105bc, "TODO_c001_05bc", 0),
+ MVI(0xc00105bd, "TODO_c001_05bd", 0),
+ MVI(0xc00105be, "TODO_c001_05be", 0),
+ MVI(0xc00105bf, "TODO_c001_05bf", 0),
+ MVI(0xc00105c0, "TODO_c001_05c0", 0),
+ MVI(0xc00105c1, "TODO_c001_05c1", 0),
+ MVI(0xc00105c2, "TODO_c001_05c2", 0),
+ MVI(0xc00105c3, "TODO_c001_05c3", 0),
+ MVI(0xc00105c4, "TODO_c001_05c4", 0),
+ MVI(0xc00105c5, "TODO_c001_05c5", 0),
+ MVI(0xc00105c6, "TODO_c001_05c6", 0),
+ MVI(0xc00105c7, "TODO_c001_05c7", 0),
+ MVI(0xc00105c8, "TODO_c001_05c8", 0),
+ MVI(0xc00105c9, "TODO_c001_05c9", 0),
+ MVI(0xc00105ca, "TODO_c001_05ca", 0),
+ MVI(0xc00105cb, "TODO_c001_05cb", 0),
+ MVI(0xc00105cc, "TODO_c001_05cc", 0),
+ MVI(0xc00105cd, "TODO_c001_05cd", 0),
+ MVI(0xc00105ce, "TODO_c001_05ce", 0),
+ MVI(0xc00105cf, "TODO_c001_05cf", 0),
+ MVI(0xc00105d0, "TODO_c001_05d0", 0),
+ MVI(0xc00105d1, "TODO_c001_05d1", 0),
+ MVI(0xc00105d2, "TODO_c001_05d2", 0),
+ MVI(0xc00105d3, "TODO_c001_05d3", 0),
+ MVI(0xc00105d4, "TODO_c001_05d4", 0),
+ MVI(0xc00105d5, "TODO_c001_05d5", 0),
+ MVI(0xc00105d6, "TODO_c001_05d6", 0),
+ MVI(0xc00105d7, "TODO_c001_05d7", 0),
+ MVI(0xc00105d8, "TODO_c001_05d8", 0),
+ MVI(0xc00105d9, "TODO_c001_05d9", 0),
+ MVI(0xc00105da, "TODO_c001_05da", 0),
+ MVI(0xc00105db, "TODO_c001_05db", 0),
+ MVI(0xc00105dc, "TODO_c001_05dc", 0),
+ MVI(0xc00105dd, "TODO_c001_05dd", 0),
+ MVI(0xc00105de, "TODO_c001_05de", 0),
+ MVI(0xc00105df, "TODO_c001_05df", 0),
+ MVI(0xc00105e0, "TODO_c001_05e0", 0),
+ MVI(0xc00105e1, "TODO_c001_05e1", 0),
+ MVI(0xc00105e2, "TODO_c001_05e2", 0),
+ MVI(0xc00105e3, "TODO_c001_05e3", 0),
+ MVI(0xc00105e4, "TODO_c001_05e4", 0),
+ MVI(0xc00105e5, "TODO_c001_05e5", 0),
+ MVI(0xc00105e6, "TODO_c001_05e6", 0),
+ MVI(0xc00105e7, "TODO_c001_05e7", 0),
+ MVI(0xc00105e8, "TODO_c001_05e8", 0),
+ MVI(0xc00105e9, "TODO_c001_05e9", 0),
+ MVI(0xc00105ea, "TODO_c001_05ea", 0),
+ MVI(0xc00105eb, "TODO_c001_05eb", 0),
+ MVI(0xc00105ec, "TODO_c001_05ec", 0),
+ MVI(0xc00105ed, "TODO_c001_05ed", 0),
+ MVI(0xc00105ee, "TODO_c001_05ee", 0),
+ MVI(0xc00105ef, "TODO_c001_05ef", 0),
+ MVI(0xc00105f0, "TODO_c001_05f0", 0),
+ MVI(0xc00105f1, "TODO_c001_05f1", 0),
+ MVI(0xc00105f2, "TODO_c001_05f2", 0),
+ MVI(0xc00105f3, "TODO_c001_05f3", 0),
+ MVI(0xc00105f4, "TODO_c001_05f4", 0),
+ MVI(0xc00105f5, "TODO_c001_05f5", 0),
+ MVI(0xc00105f6, "TODO_c001_05f6", 0),
+ MVI(0xc00105f7, "TODO_c001_05f7", 0),
+ MVI(0xc00105f8, "TODO_c001_05f8", 0),
+ MVI(0xc00105f9, "TODO_c001_05f9", 0),
+ MVI(0xc00105fa, "TODO_c001_05fa", 0),
+ MVI(0xc00105fb, "TODO_c001_05fb", 0),
+ MVI(0xc00105fc, "TODO_c001_05fc", 0),
+ MVI(0xc00105fd, "TODO_c001_05fd", 0),
+ MVI(0xc00105fe, "TODO_c001_05fe", 0),
+ MVI(0xc00105ff, "TODO_c001_05ff", 0),
+ MVI(0xc0010600, "TODO_c001_0600", 0),
+ MVI(0xc0010601, "TODO_c001_0601", 0),
+ MVX(0xc0010602, "TODO_c001_0602", 0, 0, 0),
+ MVI(0xc0010603, "TODO_c001_0603", 0),
+ MVI(0xc0010604, "TODO_c001_0604", 0),
+ MVI(0xc0010605, "TODO_c001_0605", 0),
+ MVI(0xc0010606, "TODO_c001_0606", 0),
+ MVX(0xc0010607, "TODO_c001_0607", 0, 0, 0),
+ MVX(0xc0010608, "TODO_c001_0608", 0, 0, 0),
+ MVX(0xc0010609, "TODO_c001_0609", 0, 0, 0),
+ MVX(0xc001060a, "TODO_c001_060a", 0, 0, 0),
+ MVX(0xc001060b, "TODO_c001_060b", 0, 0, 0),
+ MVX(0xc001060c, "TODO_c001_060c", 0, 0, 0),
+ MVX(0xc001060d, "TODO_c001_060d", 0, 0, 0),
+ MVX(0xc001060e, "TODO_c001_060e", 0, 0, 0),
+ MVI(0xc001060f, "TODO_c001_060f", 0),
+ MVI(0xc0010610, "TODO_c001_0610", 0),
+ MVI(0xc0010611, "TODO_c001_0611", 0),
+ MVI(0xc0010612, "TODO_c001_0612", 0),
+ MVI(0xc0010613, "TODO_c001_0613", 0),
+ MVX(0xc0010614, "TODO_c001_0614", 0, 0, 0),
+ MVX(0xc0010615, "TODO_c001_0615", 0, 0, 0),
+ MVI(0xc0010616, "TODO_c001_0616", 0),
+ MVI(0xc0010617, "TODO_c001_0617", 0),
+ MVI(0xc0010618, "TODO_c001_0618", 0),
+ MVI(0xc0010619, "TODO_c001_0619", 0),
+ MVI(0xc001061a, "TODO_c001_061a", 0),
+ MVI(0xc001061b, "TODO_c001_061b", 0),
+ MVI(0xc001061c, "TODO_c001_061c", 0),
+ MVI(0xc001061d, "TODO_c001_061d", 0),
+ MVI(0xc001061e, "TODO_c001_061e", 0),
+ MVI(0xc001061f, "TODO_c001_061f", 0),
+ MVI(0xc0010620, "TODO_c001_0620", 0),
+ MVI(0xc0010621, "TODO_c001_0621", 0),
+ MVI(0xc0010622, "TODO_c001_0622", 0),
+ MVI(0xc0010623, "TODO_c001_0623", 0),
+ MVI(0xc0010624, "TODO_c001_0624", 0),
+ MVI(0xc0010625, "TODO_c001_0625", 0),
+ MVI(0xc0010626, "TODO_c001_0626", 0),
+ MVI(0xc0010627, "TODO_c001_0627", 0),
+ MVI(0xc0010628, "TODO_c001_0628", 0),
+ MVI(0xc0010629, "TODO_c001_0629", 0),
+ MVI(0xc001062a, "TODO_c001_062a", 0),
+ MVI(0xc001062b, "TODO_c001_062b", 0),
+ MVI(0xc001062c, "TODO_c001_062c", 0),
+ MVI(0xc001062d, "TODO_c001_062d", 0),
+ MVI(0xc001062e, "TODO_c001_062e", 0),
+ MVI(0xc001062f, "TODO_c001_062f", 0),
+ MVI(0xc0010630, "TODO_c001_0630", 0),
+ MVI(0xc0010631, "TODO_c001_0631", 0),
+ MVI(0xc0010632, "TODO_c001_0632", 0),
+ MVI(0xc0010633, "TODO_c001_0633", 0),
+ MVI(0xc0010634, "TODO_c001_0634", 0),
+ MVI(0xc0010635, "TODO_c001_0635", 0),
+ MVI(0xc0010636, "TODO_c001_0636", 0),
+ MVI(0xc0010637, "TODO_c001_0637", 0),
+ MVI(0xc0010638, "TODO_c001_0638", 0),
+ MVI(0xc0010639, "TODO_c001_0639", 0),
+ MVI(0xc001063a, "TODO_c001_063a", 0),
+ MVI(0xc001063b, "TODO_c001_063b", 0),
+ MVI(0xc001063c, "TODO_c001_063c", 0),
+ MVI(0xc001063d, "TODO_c001_063d", 0),
+ MVI(0xc001063e, "TODO_c001_063e", 0),
+ MVI(0xc001063f, "TODO_c001_063f", 0),
+ MVI(0xc0010640, "TODO_c001_0640", 0),
+ MVI(0xc0010641, "TODO_c001_0641", 0),
+ MVI(0xc0010642, "TODO_c001_0642", 0),
+ MVI(0xc0010643, "TODO_c001_0643", 0),
+ MVI(0xc0010644, "TODO_c001_0644", 0),
+ MVI(0xc0010645, "TODO_c001_0645", 0),
+ MVI(0xc0010646, "TODO_c001_0646", 0),
+ MVI(0xc0010647, "TODO_c001_0647", 0),
+ MVI(0xc0010648, "TODO_c001_0648", 0),
+ MVI(0xc0010649, "TODO_c001_0649", 0),
+ MVI(0xc001064a, "TODO_c001_064a", 0),
+ MVI(0xc001064b, "TODO_c001_064b", 0),
+ MVI(0xc001064c, "TODO_c001_064c", 0),
+ MVI(0xc001064d, "TODO_c001_064d", 0),
+ MVI(0xc001064e, "TODO_c001_064e", 0),
+ MVI(0xc001064f, "TODO_c001_064f", 0),
+ MVI(0xc0010650, "TODO_c001_0650", 0),
+ MVI(0xc0010651, "TODO_c001_0651", 0),
+ MVI(0xc0010652, "TODO_c001_0652", 0),
+ MVI(0xc0010653, "TODO_c001_0653", 0),
+ MVI(0xc0010654, "TODO_c001_0654", 0),
+ MVI(0xc0010655, "TODO_c001_0655", 0),
+ MVI(0xc0010656, "TODO_c001_0656", 0),
+ MVI(0xc0010657, "TODO_c001_0657", 0),
+ MVI(0xc0010658, "TODO_c001_0658", 0),
+ MVI(0xc0010659, "TODO_c001_0659", 0),
+ MVI(0xc001065a, "TODO_c001_065a", 0),
+ MVI(0xc001065b, "TODO_c001_065b", 0),
+ MVI(0xc001065c, "TODO_c001_065c", 0),
+ MVI(0xc001065d, "TODO_c001_065d", 0),
+ MVI(0xc001065e, "TODO_c001_065e", 0),
+ MVI(0xc001065f, "TODO_c001_065f", 0),
+ MVI(0xc0010660, "TODO_c001_0660", 0),
+ MVI(0xc0010661, "TODO_c001_0661", 0),
+ MVI(0xc0010662, "TODO_c001_0662", 0),
+ MVI(0xc0010663, "TODO_c001_0663", 0),
+ MVI(0xc0010664, "TODO_c001_0664", 0),
+ MVI(0xc0010665, "TODO_c001_0665", 0),
+ MVI(0xc0010666, "TODO_c001_0666", 0),
+ MVI(0xc0010667, "TODO_c001_0667", 0),
+ MVI(0xc0010668, "TODO_c001_0668", 0),
+ MVI(0xc0010669, "TODO_c001_0669", 0),
+ MVI(0xc001066a, "TODO_c001_066a", 0),
+ MVI(0xc001066b, "TODO_c001_066b", 0),
+ MVI(0xc001066c, "TODO_c001_066c", 0),
+ MVI(0xc001066d, "TODO_c001_066d", 0),
+ MVI(0xc001066e, "TODO_c001_066e", 0),
+ MVI(0xc001066f, "TODO_c001_066f", 0),
+ MVI(0xc0010670, "TODO_c001_0670", 0),
+ MVI(0xc0010671, "TODO_c001_0671", 0),
+ MVI(0xc0010672, "TODO_c001_0672", 0),
+ MVI(0xc0010673, "TODO_c001_0673", 0),
+ MVI(0xc0010674, "TODO_c001_0674", 0),
+ MVI(0xc0010675, "TODO_c001_0675", 0),
+ MVI(0xc0010676, "TODO_c001_0676", 0),
+ MVI(0xc0010677, "TODO_c001_0677", 0),
+ MVI(0xc0010678, "TODO_c001_0678", 0),
+ MVI(0xc0010679, "TODO_c001_0679", 0),
+ MVI(0xc001067a, "TODO_c001_067a", 0),
+ MVI(0xc001067b, "TODO_c001_067b", 0),
+ MVI(0xc001067c, "TODO_c001_067c", 0),
+ MVI(0xc001067d, "TODO_c001_067d", 0),
+ MVI(0xc001067e, "TODO_c001_067e", 0),
+ MVI(0xc001067f, "TODO_c001_067f", 0),
+ MVI(0xc0010680, "TODO_c001_0680", 0),
+ MVI(0xc0010681, "TODO_c001_0681", 0),
+ MVI(0xc0010682, "TODO_c001_0682", 0),
+ MVI(0xc0010683, "TODO_c001_0683", 0),
+ MVI(0xc0010684, "TODO_c001_0684", 0),
+ MVI(0xc0010685, "TODO_c001_0685", 0),
+ MVI(0xc0010686, "TODO_c001_0686", 0),
+ MVI(0xc0010687, "TODO_c001_0687", 0),
+ MVI(0xc0010688, "TODO_c001_0688", 0),
+ MVI(0xc0010689, "TODO_c001_0689", 0),
+ MVI(0xc001068a, "TODO_c001_068a", 0),
+ MVI(0xc001068b, "TODO_c001_068b", 0),
+ MVI(0xc001068c, "TODO_c001_068c", 0),
+ MVI(0xc001068d, "TODO_c001_068d", 0),
+ MVI(0xc001068e, "TODO_c001_068e", 0),
+ MVI(0xc001068f, "TODO_c001_068f", 0),
+ MVI(0xc0010690, "TODO_c001_0690", 0),
+ MVI(0xc0010691, "TODO_c001_0691", 0),
+ MVI(0xc0010692, "TODO_c001_0692", 0),
+ MVI(0xc0010693, "TODO_c001_0693", 0),
+ MVI(0xc0010694, "TODO_c001_0694", 0),
+ MVI(0xc0010695, "TODO_c001_0695", 0),
+ MVI(0xc0010696, "TODO_c001_0696", 0),
+ MVI(0xc0010697, "TODO_c001_0697", 0),
+ MVI(0xc0010698, "TODO_c001_0698", 0),
+ MVI(0xc0010699, "TODO_c001_0699", 0),
+ MVI(0xc001069a, "TODO_c001_069a", 0),
+ MVI(0xc001069b, "TODO_c001_069b", 0),
+ MVI(0xc001069c, "TODO_c001_069c", 0),
+ MVI(0xc001069d, "TODO_c001_069d", 0),
+ MVI(0xc001069e, "TODO_c001_069e", 0),
+ MVI(0xc001069f, "TODO_c001_069f", 0),
+ MVI(0xc00106a0, "TODO_c001_06a0", 0),
+ MVI(0xc00106a1, "TODO_c001_06a1", 0),
+ MVI(0xc00106a2, "TODO_c001_06a2", 0),
+ MVI(0xc00106a3, "TODO_c001_06a3", 0),
+ MVI(0xc00106a4, "TODO_c001_06a4", 0),
+ MVI(0xc00106a5, "TODO_c001_06a5", 0),
+ MVI(0xc00106a6, "TODO_c001_06a6", 0),
+ MVI(0xc00106a7, "TODO_c001_06a7", 0),
+ MVI(0xc00106a8, "TODO_c001_06a8", 0),
+ MVI(0xc00106a9, "TODO_c001_06a9", 0),
+ MVI(0xc00106aa, "TODO_c001_06aa", 0),
+ MVI(0xc00106ab, "TODO_c001_06ab", 0),
+ MVI(0xc00106ac, "TODO_c001_06ac", 0),
+ MVI(0xc00106ad, "TODO_c001_06ad", 0),
+ MVI(0xc00106ae, "TODO_c001_06ae", 0),
+ MVI(0xc00106af, "TODO_c001_06af", 0),
+ MVI(0xc00106b0, "TODO_c001_06b0", 0),
+ MVI(0xc00106b1, "TODO_c001_06b1", 0),
+ MVI(0xc00106b2, "TODO_c001_06b2", 0),
+ MVI(0xc00106b3, "TODO_c001_06b3", 0),
+ MVI(0xc00106b4, "TODO_c001_06b4", 0),
+ MVI(0xc00106b5, "TODO_c001_06b5", 0),
+ MVI(0xc00106b6, "TODO_c001_06b6", 0),
+ MVI(0xc00106b7, "TODO_c001_06b7", 0),
+ MVI(0xc00106b8, "TODO_c001_06b8", 0),
+ MVI(0xc00106b9, "TODO_c001_06b9", 0),
+ MVI(0xc00106ba, "TODO_c001_06ba", 0),
+ MVI(0xc00106bb, "TODO_c001_06bb", 0),
+ MVI(0xc00106bc, "TODO_c001_06bc", 0),
+ MVI(0xc00106bd, "TODO_c001_06bd", 0),
+ MVI(0xc00106be, "TODO_c001_06be", 0),
+ MVI(0xc00106bf, "TODO_c001_06bf", 0),
+ MVI(0xc00106c0, "TODO_c001_06c0", 0),
+ MVI(0xc00106c1, "TODO_c001_06c1", 0),
+ MVI(0xc00106c2, "TODO_c001_06c2", 0),
+ MVI(0xc00106c3, "TODO_c001_06c3", 0),
+ MVI(0xc00106c4, "TODO_c001_06c4", 0),
+ MVI(0xc00106c5, "TODO_c001_06c5", 0),
+ MVI(0xc00106c6, "TODO_c001_06c6", 0),
+ MVI(0xc00106c7, "TODO_c001_06c7", 0),
+ MVI(0xc00106c8, "TODO_c001_06c8", 0),
+ MVI(0xc00106c9, "TODO_c001_06c9", 0),
+ MVI(0xc00106ca, "TODO_c001_06ca", 0),
+ MVI(0xc00106cb, "TODO_c001_06cb", 0),
+ MVI(0xc00106cc, "TODO_c001_06cc", 0),
+ MVI(0xc00106cd, "TODO_c001_06cd", 0),
+ MVI(0xc00106ce, "TODO_c001_06ce", 0),
+ MVI(0xc00106cf, "TODO_c001_06cf", 0),
+ MVI(0xc00106d0, "TODO_c001_06d0", 0),
+ MVI(0xc00106d1, "TODO_c001_06d1", 0),
+ MVI(0xc00106d2, "TODO_c001_06d2", 0),
+ MVI(0xc00106d3, "TODO_c001_06d3", 0),
+ MVI(0xc00106d4, "TODO_c001_06d4", 0),
+ MVI(0xc00106d5, "TODO_c001_06d5", 0),
+ MVI(0xc00106d6, "TODO_c001_06d6", 0),
+ MVI(0xc00106d7, "TODO_c001_06d7", 0),
+ MVI(0xc00106d8, "TODO_c001_06d8", 0),
+ MVI(0xc00106d9, "TODO_c001_06d9", 0),
+ MVI(0xc00106da, "TODO_c001_06da", 0),
+ MVI(0xc00106db, "TODO_c001_06db", 0),
+ MVI(0xc00106dc, "TODO_c001_06dc", 0),
+ MVI(0xc00106dd, "TODO_c001_06dd", 0),
+ MVI(0xc00106de, "TODO_c001_06de", 0),
+ MVI(0xc00106df, "TODO_c001_06df", 0),
+ MVI(0xc00106e0, "TODO_c001_06e0", 0),
+ MVI(0xc00106e1, "TODO_c001_06e1", 0),
+ MVI(0xc00106e2, "TODO_c001_06e2", 0),
+ MVI(0xc00106e3, "TODO_c001_06e3", 0),
+ MVI(0xc00106e4, "TODO_c001_06e4", 0),
+ MVI(0xc00106e5, "TODO_c001_06e5", 0),
+ MVI(0xc00106e6, "TODO_c001_06e6", 0),
+ MVI(0xc00106e7, "TODO_c001_06e7", 0),
+ MVI(0xc00106e8, "TODO_c001_06e8", 0),
+ MVI(0xc00106e9, "TODO_c001_06e9", 0),
+ MVI(0xc00106ea, "TODO_c001_06ea", 0),
+ MVI(0xc00106eb, "TODO_c001_06eb", 0),
+ MVI(0xc00106ec, "TODO_c001_06ec", 0),
+ MVI(0xc00106ed, "TODO_c001_06ed", 0),
+ MVI(0xc00106ee, "TODO_c001_06ee", 0),
+ MVI(0xc00106ef, "TODO_c001_06ef", 0),
+ MVI(0xc00106f0, "TODO_c001_06f0", 0),
+ MVI(0xc00106f1, "TODO_c001_06f1", 0),
+ MVI(0xc00106f2, "TODO_c001_06f2", 0),
+ MVI(0xc00106f3, "TODO_c001_06f3", 0),
+ MVI(0xc00106f4, "TODO_c001_06f4", 0),
+ MVI(0xc00106f5, "TODO_c001_06f5", 0),
+ MVI(0xc00106f6, "TODO_c001_06f6", 0),
+ MVI(0xc00106f7, "TODO_c001_06f7", 0),
+ MVI(0xc00106f8, "TODO_c001_06f8", 0),
+ MVI(0xc00106f9, "TODO_c001_06f9", 0),
+ MVI(0xc00106fa, "TODO_c001_06fa", 0),
+ MVI(0xc00106fb, "TODO_c001_06fb", 0),
+ MVI(0xc00106fc, "TODO_c001_06fc", 0),
+ MVI(0xc00106fd, "TODO_c001_06fd", 0),
+ MVI(0xc00106fe, "TODO_c001_06fe", 0),
+ MVI(0xc00106ff, "TODO_c001_06ff", 0),
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0x18000000, ~(uint64_t)UINT32_MAX, 0x4), /* value=0x18000000 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011002, "AMD_K8_CPUID_CTL_STD07", AmdK8CpuIdCtlStd07hEbax, AmdK8CpuIdCtlStd07hEbax), /* value=0x9c01a9 */
+ MFX(0xc0011003, "AMD_K8_CPUID_CTL_STD06", AmdK8CpuIdCtlStd06hEcx, AmdK8CpuIdCtlStd06hEcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x7cd83209`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x35c233ff`2fd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFI(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, UINT64_C(0xffffffff00010000), 0),
+ MVI(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0), /* value=0x0 */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, 0, 0),
+ RSN(0xc0011019, 0xc001101a, "AMD_16H_DR1_ADDR_MASKn", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001101b, "AMD_16H_DR3_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0x7c000fffc000), 0), /* value=0x2068000`00000000 */
+ MFX(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, 0x201000, 0, 0), /* value=0x201000 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xfffffffff000000), 0), /* value=0x500000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x20000`00000000 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011028, "AMD_15H_FP_CFG", AmdFam15hFpuCfg, AmdFam15hFpuCfg, 0, UINT64_C(0xffa0f0fc0004fc00), 0), /* value=0x140a00`248000d4 */
+ MFX(0xc0011029, "AMD_15H_DC_CFG", AmdFam15hDecoderCfg, AmdFam15hDecoderCfg, 0, 0x18001, 0), /* value=0x10e26002 */
+ MFN(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2), /* value=0x6800000`00028080 */
+ MVX(0xc001102b, "TODO_c001_102b", 0x1808cc17, 0, 0),
+ MVI(0xc001102c, "TODO_c001_102c", UINT64_C(0x68000000000000)), /* Villain? */
+ MVX(0xc001102d, "TODO_c001_102d", UINT64_C(0x1000000500000020), UINT64_C(0x1ffc0ffe00000), 0),
+ MVX(0xc001102e, "TODO_c001_102e", 0x1, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001102f, "TODO_c001_102f", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfeffffffff0000), 0), /* value=0x0 */
+ MFX(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr, AmdFam10hIbsFetchLinAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr, AmdFam10hIbsFetchPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xf8000000f8010000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x0 */
+ MFX(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData, AmdFam10hIbsOpData, 0, UINT64_C(0xfffffe0300000000), 0), /* value=0x0 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFX(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3, AmdFam10hIbsOpData3, 0, 0x1e00, 0), /* value=0x0 */
+ MFN(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr), /* value=0x0 */
+ MFX(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr, AmdFam10hIbsDcPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+ MFN(0xc001103b, "AMD_14H_IBS_BR_TARGET", AmdFam14hIbsBrTarget, AmdFam14hIbsBrTarget), /* value=0x0 */
+ MVI(0xc001103c, "TODO_c001_103c", 0),
+ MVX(0xc0011041, "AMD_15H_UNK_c001_1041", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0011042, "AMD_15H_UNK_c001_1042", 0),
+ MVX(0xc0011074, "TODO_c001_1074", UINT64_C(0x8000000000000000), UINT64_C(0x8fffffffffffffff), 0),
+ MVX(0xc0011075, "TODO_c001_1075", 0, UINT64_C(0xfffffffff0000000), 0),
+ MVX(0xc0011076, "TODO_c001_1076", 0x14, UINT64_C(0xffffffffffffffe0), 0),
+ MVI(0xc0011077, "TODO_c001_1077", UINT64_C(0xd400005d5150595b)),
+ MVI(0xc0011078, "TODO_c001_1078", 0),
+ MVI(0xc0011083, "TODO_c001_1083", UINT64_C(0x1bc6f1bc1bc6f1bc)),
+ MVX(0xc0011093, "TODO_c001_1093", 0xe860e0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011094, "TODO_c001_1094", 0x11fd, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011095, "TODO_c001_1095", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011096, "TODO_c001_1096", 0, UINT64_C(0xffffffff87ff0000), 0),
+ MVX(0xc0011097, "TODO_c001_1097", 0xff6, UINT64_C(0xffffffffffffc000), 0),
+ MVO(0xc00110a2, "TODO_c001_10a2", UINT32_C(0xfeb00000)),
+ MFN(0xc00110e0, "TODO_c001_10e0", WriteOnly, IgnoreWrite),
+};
+#endif /* !CPUM_DB_STANDALONE */
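+/*
+ * Note on the table above: reading the macro names as a hedged assumption
+ * (not a quote of the CPUM macro definitions), MVI is a fixed MSR value with
+ * writes ignored, MVO a read-only value, and the trailing two masks of the
+ * MVX/MFX entries look like a write-ignore mask and a write-#GP mask.  A
+ * minimal sketch of how such masks would gate a guest WRMSR:
+ *
+ *     if (uNewValue & fWrGpMask)          // reserved bits set -> fault
+ *         return VERR_CPUM_RAISE_GP_0;
+ *     uValue = uNewValue & ~fWrIgnMask;   // silently drop ignored bits
+ */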
+
+
+/**
+ * Database entry for Hygon C86 7185 32-core Processor.
+ */
+static CPUMDBENTRY const g_Entry_Hygon_C86_7185_32_core =
+{
+ /*.pszName = */ "Hygon C86 7185 32-core",
+ /*.pszFullName = */ "Hygon C86 7185 32-core Processor",
+ /*.enmVendor = */ CPUMCPUVENDOR_HYGON,
+ /*.uFamily = */ 24,
+ /*.uModel = */ 0,
+ /*.uStepping = */ 1,
+ /*.enmMicroarch = */ kCpumMicroarch_Hygon_Dhyana,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.fMxCsrMask = */ 0x0002ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Hygon_C86_7185_32_core),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Hygon_C86_7185_32_core)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Hygon_C86_7185_32_core)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Hygon_C86_7185_32_core),
+};
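+/*
+ * Note: RT_ELEMENTS() is IPRT's array-length helper, essentially
+ * sizeof(a) / sizeof((a)[0]).  Our reading of the NULL_ALONE()/ZERO_ALONE()
+ * wrappers (an assumption from the names) is that they degrade to NULL/0 in
+ * CPUM_DB_STANDALONE builds, where the CPUID and MSR tables above are
+ * compiled out.
+ */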
+
+#endif /* !VBOX_CPUDB_Hygon_C86_7185_32_core_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_80186.h b/src/VBox/VMM/VMMR3/cpus/Intel_80186.h
new file mode 100644
index 00000000..5d63cc01
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_80186.h
@@ -0,0 +1,85 @@
+/* $Id: Intel_80186.h $ */
+/** @file
+ * CPU database entry "Intel 80186".
+ * Handcrafted.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_80186_h
+#define VBOX_CPUDB_Intel_80186_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Fake CPUID leaves for Intel(R) 80186.
+ *
+ * We fake these to keep CPUM ignorant of CPUs without CPUID leaves
+ * and avoid having to seed CPUM::GuestFeatures filling with bits from the
+ * CPUMDBENTRY.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_80186[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000100, 0x00000100, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x31303820, 0x20203638, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00001414, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
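+/*
+ * Note: the leaf values above are little-endian ASCII.  Leaf 0's
+ * EBX/EDX/ECX values (0x756e6547, 0x49656e69, 0x6c65746e) spell
+ * "GenuineIntel", and leaves 0x80000002..0x80000004 carry the padded brand
+ * string "Intel(R) 80186".  A stand-alone decoder sketch (assumes a
+ * little-endian host; szVendor is a hypothetical local):
+ *
+ *     uint32_t au32[3] = { 0x756e6547, 0x49656e69, 0x6c65746e };
+ *     char     szVendor[13];
+ *     memcpy(szVendor, au32, 12);   // bytes land in string order
+ *     szVendor[12] = '\0';          // -> "GenuineIntel"
+ */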
+
+/**
+ * Database entry for Intel(R) 80186.
+ */
+static CPUMDBENTRY const g_Entry_Intel_80186 =
+{
+ /*.pszName = */ "Intel 80186",
+ /*.pszFullName = */ "Intel(R) 80186",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 2,
+ /*.uModel = */ 0,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_80186,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ CPUMDB_F_EXECUTE_ALL_IN_IEM,
+ /*.cMaxPhysAddrWidth= */ 20,
+ /*.fMxCsrMask = */ 0,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_80186),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_80186)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ 0,
+ /*.cMsrRanges = */ 0,
+ /*.paMsrRanges = */ NULL,
+};
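+/*
+ * Note: cMaxPhysAddrWidth of 20 matches the 80186's 20 address lines, i.e.
+ * a 1 MiB physical address space (1 << 20 = 0x100000 bytes).  The
+ * CPUMDB_F_EXECUTE_ALL_IN_IEM flag presumably routes every instruction of
+ * these CPUID-less models through the IEM interpreter rather than hardware
+ * virtualization.
+ */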
+
+#endif /* !VBOX_CPUDB_Intel_80186_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_80286.h b/src/VBox/VMM/VMMR3/cpus/Intel_80286.h
new file mode 100644
index 00000000..2fd635e1
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_80286.h
@@ -0,0 +1,85 @@
+/* $Id: Intel_80286.h $ */
+/** @file
+ * CPU database entry "Intel 80286".
+ * Handcrafted.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_80286_h
+#define VBOX_CPUDB_Intel_80286_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Fake CPUID leaves for Intel(R) 80286.
+ *
+ * We fake these to keep CPUM ignorant of CPUs without CPUID leaves
+ * and avoid having to seed CPUM::GuestFeatures filling with bits from the
+ * CPUMDBENTRY.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_80286[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000200, 0x00000100, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x32303820, 0x20203638, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00001818, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+/**
+ * Database entry for Intel(R) 80286.
+ */
+static CPUMDBENTRY const g_Entry_Intel_80286 =
+{
+ /*.pszName = */ "Intel 80286",
+ /*.pszFullName = */ "Intel(R) 80286",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 2,
+ /*.uModel = */ 0,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_80286,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ CPUMDB_F_EXECUTE_ALL_IN_IEM,
+ /*.cMaxPhysAddrWidth= */ 24,
+ /*.fMxCsrMask = */ 0,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_80286),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_80286)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ 0,
+ /*.cMsrRanges = */ 0,
+ /*.paMsrRanges = */ NULL,
+};
+
+#endif /* !VBOX_CPUDB_Intel_80286_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_80386.h b/src/VBox/VMM/VMMR3/cpus/Intel_80386.h
new file mode 100644
index 00000000..b6b18bcf
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_80386.h
@@ -0,0 +1,85 @@
+/* $Id: Intel_80386.h $ */
+/** @file
+ * CPU database entry "Intel 80386".
+ * Handcrafted.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_80386_h
+#define VBOX_CPUDB_Intel_80386_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Fake CPUID leaves for Intel(R) 80386.
+ *
+ * We fake these to keep CPUM ignorant of CPUs without CPUID leaves
+ * and avoid having to seed CPUM::GuestFeatures filling with bits from the
+ * CPUMDBENTRY.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_80386[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000300, 0x00000100, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x33303820, 0x20203638, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00001818, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+/**
+ * Database entry for Intel(R) 80386.
+ */
+static CPUMDBENTRY const g_Entry_Intel_80386 =
+{
+ /*.pszName = */ "Intel 80386",
+ /*.pszFullName = */ "Intel(R) 80386",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 3,
+ /*.uModel = */ 0,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_80386,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ CPUMDB_F_EXECUTE_ALL_IN_IEM,
+ /*.cMaxPhysAddrWidth= */ 24,
+ /*.fMxCsrMask = */ 0,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_80386),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_80386)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ 0,
+ /*.cMsrRanges = */ 0,
+ /*.paMsrRanges = */ NULL,
+};
+
+#endif /* !VBOX_CPUDB_Intel_80386_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_80486.h b/src/VBox/VMM/VMMR3/cpus/Intel_80486.h
new file mode 100644
index 00000000..5c342e67
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_80486.h
@@ -0,0 +1,83 @@
+/* $Id: Intel_80486.h $ */
+/** @file
+ * CPU database entry "Intel 80486".
+ * Handcrafted.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_80486_h
+#define VBOX_CPUDB_Intel_80486_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Fake CPUID leaves for Intel(R) 80486(DX2).
+ *
+ * The extended leaves are faked to make CPUM happy.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_80486[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000430, 0x00000100, 0x00000000, 0x00000111, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x34303820, 0x58443638, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x20202032, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00002020, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+/**
+ * Database entry for Intel(R) 80486.
+ */
+static CPUMDBENTRY const g_Entry_Intel_80486 =
+{
+ /*.pszName = */ "Intel 80486",
+ /*.pszFullName = */ "Intel(R) 80486DX2",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 4,
+ /*.uModel = */ 3,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_80486,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 32,
+ /*.fMxCsrMask = */ 0,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_80486),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_80486)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ 0,
+ /*.cMsrRanges = */ 0,
+ /*.paMsrRanges = */ NULL,
+};
+
+#endif /* !VBOX_CPUDB_Intel_80486_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_8086.h b/src/VBox/VMM/VMMR3/cpus/Intel_8086.h
new file mode 100644
index 00000000..6a7d0346
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_8086.h
@@ -0,0 +1,85 @@
+/* $Id: Intel_8086.h $ */
+/** @file
+ * CPU database entry "Intel 8086".
+ * Handcrafted.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_8086_h
+#define VBOX_CPUDB_Intel_8086_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Fake CPUID leaves for Intel(R) 8086.
+ *
+ * We fake these to keep CPUM ignorant of CPUs without CPUID leaves and to
+ * avoid having to seed the CPUM::GuestFeatures filling with bits from the
+ * CPUMDBENTRY.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_8086[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x38303820, 0x20202036, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00001414, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+/**
+ * Database entry for Intel(R) 8086.
+ */
+static CPUMDBENTRY const g_Entry_Intel_8086 =
+{
+ /*.pszName = */ "Intel 8086",
+ /*.pszFullName = */ "Intel(R) 8086",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 2,
+ /*.uModel = */ 0,
+ /*.uStepping = */ 0,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_8086,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ CPUMDB_F_EXECUTE_ALL_IN_IEM,
+ /*.cMaxPhysAddrWidth= */ 20,
+ /*.fMxCsrMask = */ 0,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_8086),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_8086)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ 0,
+ /*.cMsrRanges = */ 0,
+ /*.paMsrRanges = */ NULL,
+};
+
+#endif /* !VBOX_CPUDB_Intel_8086_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Atom_330_1_60GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Atom_330_1_60GHz.h
new file mode 100644
index 00000000..0dd20ff3
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Atom_330_1_60GHz.h
@@ -0,0 +1,220 @@
+/* $Id: Intel_Atom_330_1_60GHz.h $ */
+/** @file
+ * CPU database entry "Intel Atom 330 1.60GHz".
+ * Generated at 2015-11-04T12:58:59Z by VBoxCpuReport v5.0.51r103818 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Atom_330_1_60GHz_h
+#define VBOX_CPUDB_Intel_Atom_330_1_60GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Atom(TM) CPU 330 @ 1.60GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Atom_330_1_60GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000106c2, 0x01040800, 0x0040e31d, 0xbfe9fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x4fba5901, 0x0e3080c0, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x04004121, 0x0140003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x04004122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x04004143, 0x01c0003f, 0x000003ff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000010, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280203, 0x00000000, 0x00000000, 0x00002501, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x746e4920, 0x52286c65, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x74412029, 0x54286d6f, 0x4320294d, 0x20205550, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20303333, 0x20402020, 0x30362e31, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003020, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
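Unlike the handcrafted entries earlier in this patch, leaf 1 EAX here (0x000106c2) uses the extended-model field: for families 6 and 15 the effective model is the model nibble extended by EAX bits 19:16, which is how the entry below arrives at model 28. A short decode sketch (standard CPUID layout; the helper name is made up for illustration):

#include <stdint.h>
#include <stdio.h>

static void DecodeFamModStep(uint32_t uEax)
{
    uint32_t uStepping = uEax & 0xf;
    uint32_t uFamily   = (uEax >> 8) & 0xf;
    uint32_t uModel    = (uEax >> 4) & 0xf;
    if (uFamily == 0xf)
        uFamily += (uEax >> 20) & 0xff;         /* extended family */
    if (uFamily == 0xf || uFamily == 0x6)
        uModel |= ((uEax >> 16) & 0xf) << 4;    /* extended model */
    printf("family=%u model=%u stepping=%u\n", uFamily, uModel, uStepping);
}

/* DecodeFamModStep(0x000106c2) prints family=6 model=28 stepping=2,
   matching uFamily/uModel/uStepping in g_Entry_Intel_Atom_330_1_60GHz. */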
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Atom(TM) CPU 330 @ 1.60GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Atom_330_1_60GHz[] =
+{
+ MFI(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x0 */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x5a7`e94bd2c0 */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0xc00008836ac1b), 0, 0), /* value=0xc0000`8836ac1b */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffffff000006ff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000039, "C2_UNK_0000_0039", 0x1),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x1 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0),
+ RFN(0x00000040, 0x00000047, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ RFN(0x00000060, 0x00000067, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_MAX), /* value=0x20d`00000000 */
+ RSN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x000000c7, "IA32_PMC6", Ia32PmcN, Ia32PmcN, 0, UINT64_C(0xfff7bdefff7df7df), 0), /* value=0x16101c00`00000000 */
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x101, 0, 0), /* value=0x101 */
+ MVO(0x000000ce, "IA32_PLATFORM_INFO", UINT64_C(0x1b1b0c004e4e0000)),
+ MVO(0x000000cf, "C2_UNK_0000_00cf", 0x1f),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x6800f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0xbfff, UINT64_C(0xfffffffffc804000)), /* value=0x26b001 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xffffffffff800000)), /* value=0x0 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0xd00a00f8)),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x63`19743600 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x63`199424b8 */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT64_C(0xff7bdeffffc5ffff), 0), /* value=0x3384103 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVX(0x00000116, "BBL_CR_ADDR", 0x3fc0, UINT64_C(0xfffffff00000001f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", 0, UINT64_C(0xfffc0000fffc0000), 0),
+ MFX(0x00000119, "BBL_CR_CTL", IntelBblCrCtl, IntelBblCrCtl, 0x938008, 0x4080017f, ~(uint64_t)UINT32_MAX), /* value=0x938008 */
+ MFN(0x0000011a, "BBL_CR_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000011b, "P6_UNK_0000_011b", 0, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x0000011c, "C2_UNK_0000_011c", 0xd96000, 0, UINT64_C(0xfffffffff0000000)),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0x7f00011f, UINT32_C(0xff83f81f), UINT64_C(0xffffffff007c06e0)), /* value=0x7f00011f */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffffff`81573970 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x805, 0, 0), /* value=0x805 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, ~(uint64_t)UINT32_MAX),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, UINT32_C(0xfffee0c0), ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0xc1b0c1b06000c1b), 0, 0), /* value=0xc1b0c1b`06000c1b */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0xc1b, 0, 0), /* Might bite. value=0xc1b */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, 0, UINT64_C(0xffffffffffffffe1)), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x3, 0, UINT64_C(0xffffffffff0000e0)), /* value=0x3 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x884c0000), UINT32_C(0xf87f03ff), UINT64_C(0xffffffff0780fc00)), /* value=0x884c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x61b, 0, 0), /* value=0x61b */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT32_C(0xffff0000), ~(uint64_t)UINT32_MAX),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x60940488, UINT64_C(0x366131884), UINT64_C(0xfffffff89908c372)), /* value=0x60940488 */
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", UINT32_C(0x800f0421), UINT64_C(0xffffffffff80000e), 0),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffe03c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffff`a07ac16e */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`8105c4f0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffffff00000ff8)), /* value=0xe0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xe0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x6 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffffff00000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xffffffff7ffffffc)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MFX(0x00000309, "IA32_FIXED_CTR0", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffffff0000000000)), /* value=0x8c */
+ MFX(0x0000030a, "IA32_FIXED_CTR1", Ia32FixedCtrN, Ia32FixedCtrN, 0x1, 0x81201, UINT64_C(0xffffff0000000000)), /* value=0xff`ad893763 */
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, 0, UINT64_C(0xffffff0000000000)), /* value=0x8f4 */
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0xc1, 0, 0), /* value=0xc1 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0xb0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffffc)), /* value=0x7`00000003 */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xc000000700000003), UINT64_C(0x3ffffff8fffffffc)), /* value=0x0 */
+ MVX(0x000003ca, "TODO_0000_03ca", 0x10510, 0, UINT64_C(0xffffffffffe00000)),
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x0 */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVX(0x000004f8, "C2_UNK_0000_04f8", 0, 0, 0),
+ MVX(0x000004f9, "C2_UNK_0000_04f9", 0, 0, 0),
+ MVX(0x000004fa, "C2_UNK_0000_04fa", 0, 0, 0),
+ MVX(0x000004fb, "C2_UNK_0000_04fb", 0, 0, 0),
+ MVX(0x000004fc, "C2_UNK_0000_04fc", 0, 0, 0),
+ MVX(0x000004fd, "C2_UNK_0000_04fd", 0, 0, 0),
+ MVX(0x000004fe, "C2_UNK_0000_04fe", 0, 0, 0),
+ MVX(0x000004ff, "C2_UNK_0000_04ff", 0, 0, 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0xffff8800`d6ee1c00 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`815715d0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`81573ad0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x47700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7fe4`93136740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`db500000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Atom(TM) CPU 330 @ 1.60GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Atom_330_1_60GHz =
+{
+ /*.pszName = */ "Intel Atom 330 1.60GHz",
+ /*.pszFullName = */ "Intel(R) Atom(TM) CPU 330 @ 1.60GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 28,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Atom_Bonnell,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_133MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 32,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Atom_330_1_60GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Atom_330_1_60GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280203, 0x00000000, 0x00000000, 0x00002501 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Atom_330_1_60GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Atom_330_1_60GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Atom_330_1_60GHz_h */
+
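One detail worth noting in the entry above: .enmUnknownCpuId is CPUMUNKNOWNCPUID_LAST_STD_LEAF and .DefUnknownCpuId repeats the values of the highest standard leaf (0x0000000a), mirroring how real Intel hardware answers requests for out-of-range leaves. A hedged sketch of the assumed lookup fallback (simplified; the real CPUM code also deals with sub-leaves and flags):

typedef struct CPUIDREGS { uint32_t uEax, uEbx, uEcx, uEdx; } CPUIDREGS;

/* Simplified sketch: return a leaf from the table, or the "unknown" default. */
static CPUIDREGS QueryCpuIdLeaf(CPUMCPUIDLEAF const *paLeaves, size_t cLeaves,
                                CPUIDREGS const *pDefUnknown, uint32_t uLeaf)
{
    for (size_t i = 0; i < cLeaves; i++)
        if (paLeaves[i].uLeaf == uLeaf)
        {
            CPUIDREGS Res = { paLeaves[i].uEax, paLeaves[i].uEbx,
                              paLeaves[i].uEcx, paLeaves[i].uEdx };
            return Res;
        }
    return *pDefUnknown; /* e.g. leaf 0x0000000b yields the 0x0a values here */
}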
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core2_T7600_2_33GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core2_T7600_2_33GHz.h
new file mode 100644
index 00000000..ba4a07fc
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core2_T7600_2_33GHz.h
@@ -0,0 +1,205 @@
+/* $Id: Intel_Core2_T7600_2_33GHz.h $ */
+/** @file
+ * CPU database entry "Intel Core2 T7600 2.33GHz".
+ * Generated at 2017-10-12T18:17:56Z by VBoxCpuReport v5.2.0_RC1r118339 on linux.x86.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core2_T7600_2_33GHz_h
+#define VBOX_CPUDB_Intel_Core2_T7600_2_33GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM)2 CPU T7600 @ 2.33GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core2_T7600_2_33GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006f6, 0x00020800, 0x0000e3bd, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x05b0b101, 0x005657f0, 0x00000000, 0x2cb43049, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x04000121, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x04000122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x04004143, 0x03c0003f, 0x00000fff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00022220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000400, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280202, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x43203229, 0x20205550, 0x20202020, 0x54202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30303637, 0x20402020, 0x33332e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM)2 CPU T7600 @ 2.33GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core2_T7600_2_33GHz[] =
+{
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x12c5e80 */
+ MFO(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0x0 */
+ MFO(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize), /* value=0x40 */
+ MFO(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter), /* value=0x215`a3e44b5c */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x14000098548e25), 0, 0), /* value=0x140000`98548e25 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xfffffffffffff7ff)),
+ MVO(0x00000021, "C2_UNK_0000_0021", 0),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0x41880000, 0, 0), /* value=0x41880000 */
+ MVO(0x0000002f, "P6_UNK_0000_002f", 0),
+ MVO(0x00000032, "P6_UNK_0000_0032", 0),
+ MVO(0x00000033, "TEST_CTL", 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0),
+ RFN(0x00000040, 0x00000043, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, ReadOnly),
+ MVO(0x0000004a, "P6_UNK_0000_004a", 0), /* value=0x0 */
+ MVO(0x0000004b, "P6_UNK_0000_004b", 0), /* value=0x0 */
+ MVO(0x0000004c, "P6_UNK_0000_004c", 0), /* value=0x0 */
+ MVO(0x0000004d, "P6_UNK_0000_004d", 0), /* value=0x3c3a9b64`1d8552bb */
+ MVO(0x0000004e, "P6_UNK_0000_004e", 0), /* value=0x3b96f62f`156143b9 */
+ MVO(0x0000004f, "P6_UNK_0000_004f", 0), /* value=0xb8 */
+ RFN(0x00000060, 0x00000063, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, ReadOnly),
+ MVO(0x0000006c, "P6_UNK_0000_006c", 0),
+ MVO(0x0000006d, "P6_UNK_0000_006d", 0),
+ MVO(0x0000006e, "P6_UNK_0000_006e", 0),
+ MVO(0x0000006f, "P6_UNK_0000_006f", 0xadb),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId), /* value=0xc7`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000a8, "C2_EMTTM_CR_TABLES_0", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MFX(0x000000a9, "C2_EMTTM_CR_TABLES_1", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MFX(0x000000aa, "C2_EMTTM_CR_TABLES_2", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MFX(0x000000ab, "C2_EMTTM_CR_TABLES_3", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MFX(0x000000ac, "C2_EMTTM_CR_TABLES_4", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MFX(0x000000ad, "C2_EMTTM_CR_TABLES_5", IntelCore2EmttmCrTablesN, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ RFN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, ReadOnly),
+ MVO(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x1e00000042000000)),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x933, 0, 0), /* value=0x933 */
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x130e253b530613)),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x14860f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0x404000, UINT64_C(0xfffffffffc001000)), /* value=0x202a01 */
+ MFO(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo), /* value=0x8040414 */
+ MFO(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase), /* value=0x20414 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0xd0220dc8)),
+ MFO(0x000000e7, "IA32_MPERF", Ia32MPerf), /* value=0xc7`b82ef32a */
+ MFO(0x000000e8, "IA32_APERF", Ia32APerf), /* value=0x55`9818510c */
+ MFO(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig), /* value=0x80b90400 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVO(0x00000116, "BBL_CR_ADDR", 0),
+ MVO(0x00000118, "BBL_CR_DECC", 0xffebe),
+ MVO(0x0000011b, "P6_UNK_0000_011b", 0),
+ MVO(0x0000011c, "C2_UNK_0000_011c", UINT32_C(0xe00000cc)),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, ReadOnly, 0x74702109, 0, 0), /* value=0x74702109 */
+ MVO(0x0000014a, "TODO_0000_014a", 0),
+ MVO(0x0000014b, "TODO_0000_014b", 0),
+ MVO(0x0000014c, "TODO_0000_014c", 0),
+ MVO(0x0000014e, "P6_UNK_0000_014e", UINT32_C(0xe4dfe927)),
+ MVO(0x0000014f, "P6_UNK_0000_014f", 0),
+ MVO(0x00000151, "P6_UNK_0000_0151", 0x3bfcb56f),
+ MFO(0x0000015f, "C1_DTS_CAL_CTRL", IntelCore1DtsCalControl), /* value=0x230613 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x60 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0xf5a07c40 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xc15af09c */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x806, 0, 0), /* value=0x806 */
+ MFO(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus), /* value=0x0 */
+ RFN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, ReadOnly),
+ MVO(0x00000193, "C2_UNK_0000_0193", 0),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x6130e2506040613), 0, 0), /* value=0x6130e25`06040613 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, ReadOnly, 0x613, 0, UINT64_MAX), /* Might bite. value=0x613 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, ReadOnly, 0x2, 0, 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, ReadOnly, 0x3, 0, 0), /* value=0x3 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, ReadOnly, UINT32_C(0x8831000c), 0, 0), /* value=0x8831000c */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MVO(0x0000019e, "P6_UNK_0000_019e", 0xb240000),
+ MVO(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, UINT64_C(0x4066a52489), UINT64_C(0x52600099f6), UINT64_C(0xffffff0019004000)), /* value=0x40`66a52489 */
+ MVO(0x000001a1, "P6_UNK_0000_01a1", 0),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x000001aa, "P6_PIC_SENS_CFG", 0x5ebf042f),
+ MVO(0x000001bf, "C2_UNK_0000_01bf", 0x404),
+ MFO(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos), /* value=0x3 */
+ MVO(0x000001d3, "P6_UNK_0000_01d3", 0x8000),
+ MFO(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xc12c5d73 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xc10357d0 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0xc132a284 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0xc1329543 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFO(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap), /* value=0x0 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x0 */
+ MFO(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap), /* value=0xc01e488 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`0d2ce6c0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff82`0dcfd000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7c7511e0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM)2 CPU T7600 @ 2.33GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core2_T7600_2_33GHz =
+{
+ /*.pszName = */ "Intel Core2 T7600 2.33GHz",
+ /*.pszFullName = */ "Intel(R) Core(TM)2 CPU T7600 @ 2.33GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 15,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core2_Merom,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_167MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0x0000ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core2_T7600_2_33GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core2_T7600_2_33GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280202, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core2_T7600_2_33GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core2_T7600_2_33GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core2_T7600_2_33GHz_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core2_X6800_2_93GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core2_X6800_2_93GHz.h
new file mode 100644
index 00000000..99f5fe9b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core2_X6800_2_93GHz.h
@@ -0,0 +1,272 @@
+/* $Id: Intel_Core2_X6800_2_93GHz.h $ */
+/** @file
+ * CPU database entry "Intel Core2 X6800 2.93GHz".
+ * Generated at 2017-11-04T22:32:41Z by VBoxCpuReport v5.2.1r118907 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core2_X6800_2_93GHz_h
+#define VBOX_CPUDB_Intel_Core2_X6800_2_93GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM)2 CPU X6800 @ 2.93GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core2_X6800_2_93GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006f6, 0x00020800, 0x0000e3bd, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x05b0b101, 0x005657f0, 0x00000000, 0x2cb43049, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x04000121, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x04000122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x04004143, 0x03c0003f, 0x00000fff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000020, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000400, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280202, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x43203229, 0x20205550, 0x20202020, 0x58202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30303836, 0x20402020, 0x33392e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM)2 CPU X6800 @ 2.93GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core2_X6800_2_93GHz[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xfffffffffffbffff), 0), /* value=0x1398780 */
+ MFI(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* Villain? value=0x11d1`f468a982 */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT32_C(0x88040b27), 0, 0), /* value=0x88040b27 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0x600, UINT64_C(0xfffffff0000000ff)),
+ MVX(0x00000021, "C2_UNK_0000_0021", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x41880000, UINT64_C(0xffffffffdff7ffbe), 0), /* value=0x41880000 */
+ MVI(0x0000002f, "P6_UNK_0000_002f", 0),
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xffffffff01fe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff7fffffff), 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0xea),
+ RFN(0x00000040, 0x00000043, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MVX(0x0000004a, "P6_UNK_0000_004a", 0, UINT64_C(0xffffff0000000000), 0), /* value=0x0 */
+ MVX(0x0000004b, "P6_UNK_0000_004b", 0, UINT64_C(0xffffff0000000000), 0), /* value=0x0 */
+ MVX(0x0000004c, "P6_UNK_0000_004c", 0, UINT64_C(0xffffff0000000000), 0), /* value=0x0 */
+ MVX(0x0000004d, "P6_UNK_0000_004d", 0, 0, 0), /* value=0xf53ed6ff`f9f9e16e */
+ MVX(0x0000004e, "P6_UNK_0000_004e", 0, 0, 0), /* value=0xf7ffbdfb`bfbfabeb */
+ MVX(0x0000004f, "P6_UNK_0000_004f", 0, UINT64_C(0xffffffffffffff00), 0), /* value=0xff */
+ RFN(0x00000060, 0x00000063, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MVX(0x0000006c, "P6_UNK_0000_006c", 0, UINT64_C(0xffffffff00000080), 0),
+ MVX(0x0000006d, "P6_UNK_0000_006d", 0, UINT64_C(0xffffffff00000080), 0),
+ MVX(0x0000006e, "P6_UNK_0000_006e", 0, UINT64_C(0xffffffff00000080), 0),
+ MVO(0x0000006f, "P6_UNK_0000_006f", 0xadb),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, UINT32_MAX, 0), /* value=0xc6`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000a8, "C2_EMTTM_CR_TABLES_0", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x61b, UINT64_MAX, 0), /* value=0x61b */
+ MFX(0x000000a9, "C2_EMTTM_CR_TABLES_1", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x61b, UINT64_MAX, 0), /* value=0x61b */
+ MFX(0x000000aa, "C2_EMTTM_CR_TABLES_2", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x61b, UINT64_MAX, 0), /* value=0x61b */
+ MFX(0x000000ab, "C2_EMTTM_CR_TABLES_3", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x61b, UINT64_MAX, 0), /* value=0x61b */
+ MFX(0x000000ac, "C2_EMTTM_CR_TABLES_4", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x61b, UINT64_MAX, 0), /* value=0x61b */
+ MFX(0x000000ad, "C2_EMTTM_CR_TABLES_5", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, UINT32_C(0x8000061b), UINT64_MAX, 0), /* value=0x8000061b */
+ RSN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x3200000058000000)),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x800, 0, 0), /* value=0x800 */
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x1b0b277f7f071b)),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x7820f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, UINT64_C(0xffffffff0000ffff), UINT32_C(0xff000000)), /* value=0x26b204 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xff800000)), /* value=0x0 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0xd00201c8)),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0xa0`16e07631 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x7e`79c4e805 */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT64_C(0xffffffffefc5ffff), 0), /* value=0xa8000000`c17d4300 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVX(0x00000116, "BBL_CR_ADDR", 0, UINT64_C(0xffffff000000001f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", 0xffdfe, UINT64_C(0xfffffffffff00000), 0),
+ MFN(0x0000011a, "BBL_CR_TRIG", WriteOnly, IgnoreWrite),
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MVX(0x0000011c, "C2_UNK_0000_011c", UINT32_C(0xe003cf6f), UINT64_C(0xffffffff07f80000), 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, UINT32_C(0xbf702109), UINT64_C(0xfffffffffff3fe9f), 0), /* value=0xbf702109 */
+ MVX(0x0000014a, "TODO_0000_014a", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x0000014b, "TODO_0000_014b", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x0000014c, "TODO_0000_014c", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0x7ab9f777, UINT64_C(0xffffffff00000080), 0),
+ MVI(0x0000014f, "P6_UNK_0000_014f", 0xf000),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x42100400, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x0000015f, "C1_DTS_CAL_CTRL", IntelCore1DtsCalControl, IntelCore1DtsCalControl, 0, UINT64_C(0xffffffffffc0ffff), 0), /* value=0x230820 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffffff`81846c20 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x6, 0, 0), /* value=0x6 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVO(0x00000193, "C2_UNK_0000_0193", 0),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, UINT64_C(0xfffffffffffee0c0), 0), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0xb270b2786320620), 0, 0), /* value=0xb270b27`86320620 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x820, 0, 0), /* Might bite. value=0x820 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x3, UINT64_C(0xffffffff00010100), UINT32_C(0xff0000e0)), /* value=0x3 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x881c0000), UINT64_C(0xfffffffff87f017f), 0x780fc00), /* value=0x881c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x61b, 0, 0), /* value=0x61b */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0x6930000, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, UINT64_C(0x4062972489), UINT64_C(0x52603199f6), 0), /* value=0x40`62972489 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, ReadOnly, 0x1000, 0, 0), /* value=0x1000 */
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", 0x7e1f042f, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x000001bf, "C2_UNK_0000_01bf", 0x404, UINT64_C(0xffffffffffff0000), 0),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x3 */
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffe03c)), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffff`8142d5f6 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`810644e0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0xffffffff`81039669 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0xffffffff`81039020 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap, Ia32PlatformDcaCap, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x0 */
+ MFX(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap, Ia32Dca0Cap, 0, UINT64_C(0xfffffffffefe17ff), 0), /* value=0xc01e488 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xcff00000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fff00800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x1`00000006 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x1`20000006 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MFX(0x00000309, "IA32_FIXED_CTR0", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, UINT64_C(0xffffff0000000000), 0), /* value=0xc4e */
+ MFX(0x0000030a, "IA32_FIXED_CTR1", Ia32FixedCtrN, Ia32FixedCtrN, 0x1, UINT64_C(0xffffff0000000c00), 0), /* value=0xff`9dd0e550 */
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, UINT64_C(0xffffff0000000000), 0), /* value=0x205b */
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x82, 0, 0), /* value=0x82 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, UINT64_C(0xfffffffffffff000), 0), /* value=0xf0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0x7`00000003 */
+ MFI(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl), /* value=0x7`00000007 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVX(0x00000478, "TODO_0000_0478", 0, 0, 0),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0x1a040000000007), 0, 0), /* value=0x1a0400`00000007 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x1f00000016), 0, 0), /* value=0x1f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0x77b9fffe0401e172), 0, 0), /* value=0x77b9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3efff00036dff), 0, 0), /* value=0x3efff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x1fff000011ff), 0, 0), /* value=0x1fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ MVX(0x000004f8, "C2_UNK_0000_04f8", UINT64_C(0xf5d5fc5e567f6a8e), 0, 0),
+ MVX(0x000004f9, "C2_UNK_0000_04f9", UINT64_C(0xb595ed5afff3a8ff), 0, 0),
+ MVX(0x000004fa, "C2_UNK_0000_04fa", UINT64_C(0xfddfae7f5bfb7c47), 0, 0),
+ MVX(0x000004fb, "C2_UNK_0000_04fb", UINT64_C(0xf7ffbc5f93fd6fde), 0, 0),
+ MVX(0x000004fc, "C2_UNK_0000_04fc", UINT64_C(0xb7c5c95891fb71c6), 0, 0),
+ MVX(0x000004fd, "C2_UNK_0000_04fd", UINT64_C(0xb5d5cc5c95799df6), 0, 0),
+ MVX(0x000004fe, "C2_UNK_0000_04fe", UINT64_C(0xba95c85ad1fb3973), 0, 0),
+ MVX(0x000004ff, "C2_UNK_0000_04ff", UINT64_C(0xf5bdda4f9aff3943), 0, 0),
+ MVX(0x00000590, "C2_UNK_0000_0590", 0, 0, 0),
+ MVX(0x00000591, "C2_UNK_0000_0591", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFN(0x000005a0, "C2_PECI_CTL", IntelCore2PeciControl, IntelCore2PeciControl), /* Might bite. value=0x1 */
+ MVI(0x000005a1, "C2_UNK_0000_05a1", 0), /* Might bite. */
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0xffff8801`2aaeba00 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* Might bite. value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* Might bite. value=0xffffffff`81844650 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* Might bite. value=0xffffffff`81846c90 */
+ /** @todo r=bird: This seems wrong, all other CPUs \#GP if any of the high
+ * bits are set. */
+ MFN(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask), /* Might bite. value=0x47700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* Might bite. value=0x7fb5`e58d0740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* Might bite. value=0xffff8801`2fc00000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* Might bite. value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM)2 CPU X6800 @ 2.93GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core2_X6800_2_93GHz =
+{
+ /*.pszName = */ "Intel Core2 X6800 2.93GHz",
+ /*.pszFullName = */ "Intel(R) Core(TM)2 CPU X6800 @ 2.93GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 15,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core2_Merom,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_267MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0x0000ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core2_X6800_2_93GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core2_X6800_2_93GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280202, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core2_X6800_2_93GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core2_X6800_2_93GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core2_X6800_2_93GHz_h */
+
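Aside, for readers decoding these tables: in MFX rows such as the AMD64_EFER line above, the three numeric arguments after the read/write handler names appear to be the initial MSR value, a mask of written bits that are silently ignored, and a mask of written bits that raise #GP. A minimal sketch of how a guest WRMSR could be validated against such a row, under that reading of the columns — the struct and function names below are illustrative assumptions, not the real CPUM types:

    #include <stdint.h>

    /* Illustrative row shape; field names are assumptions, not the real CPUMMSRRANGE. */
    typedef struct MSRROW
    {
        uint32_t idMsr;       /* e.g. 0xc0000080 (AMD64_EFER) */
        uint64_t uValue;      /* 5th MFX argument: current/initial value */
        uint64_t fWrIgnMask;  /* 6th MFX argument: written bits silently dropped */
        uint64_t fWrGpMask;   /* 7th MFX argument: written bits that raise #GP */
    } MSRROW;

    /* Returns 0 on success, -1 when the write must raise #GP(0). */
    static int msrWriteSketch(MSRROW *pRow, uint64_t uNewValue)
    {
        if (uNewValue & pRow->fWrGpMask)               /* reserved bit touched */
            return -1;
        uNewValue &= ~pRow->fWrIgnMask;                /* drop ignored bits...  */
        uNewValue |= pRow->uValue & pRow->fWrIgnMask;  /* ...keeping old state  */
        pRow->uValue = uNewValue;
        return 0;
    }

With the EFER row above (ignore mask 0x400, #GP mask 0xfffffffffffff2fe), a write flipping LMA (bit 10) would be dropped, while a write setting any bit in the #GP mask would fault.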
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_Duo_T2600_2_16GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_Duo_T2600_2_16GHz.h
new file mode 100644
index 00000000..a372e1d9
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_Duo_T2600_2_16GHz.h
@@ -0,0 +1,235 @@
+/* $Id: Intel_Core_Duo_T2600_2_16GHz.h $ */
+/** @file
+ * CPU database entry "Intel Core Duo T2600 2.16GHz".
+ * Generated at 2017-11-02T10:39:16Z by VBoxCpuReport v5.2.0_RC1r118339 on linux.x86.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_Duo_T2600_2_16GHz_h
+#define VBOX_CPUDB_Intel_Core_Duo_T2600_2_16GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) Duo CPU T2600 @ 2.16GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_Duo_T2600_2_16GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006e8, 0x01020800, 0x0000c1a9, 0xbfe9fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x02b3b001, 0x000000f0, 0x00000000, 0x2c04307d, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x04000121, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x04000122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x04004143, 0x01c0003f, 0x00000fff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00022220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280201, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00100000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x756e6547, 0x20656e69, 0x65746e49, 0x2952286c, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x55504320, 0x20202020, 0x20202020, 0x54202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30303632, 0x20402020, 0x36312e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00002020, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
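The leaf 0x00000000 row above carries the vendor string in its EBX/EDX/ECX columns: 0x756e6547, 0x49656e69 and 0x6c65746e are the little-endian bytes of "Genu", "ineI" and "ntel". A quick self-contained check:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        /* EBX, EDX, ECX from the leaf 0x00000000 row above, in that order. */
        uint32_t au32[3] = { 0x756e6547, 0x49656e69, 0x6c65746e };
        char szVendor[13];
        memcpy(szVendor, au32, sizeof(au32)); /* assumes a little-endian host */
        szVendor[12] = '\0';
        printf("%s\n", szVendor);             /* prints: GenuineIntel */
        return 0;
    }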
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) Duo CPU T2600 @ 2.16GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_Duo_T2600_2_16GHz[] =
+{
+ MFI(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0xf`eeda5160 */
+ MFI(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* Villain? value=0x243`e2b88071 */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x140000d80486ac), 0, 0), /* value=0x140000`d80486ac */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0x600, UINT64_C(0xfffffff0000000ff)),
+ MVX(0x00000021, "C2_UNK_0000_0021", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x41880000, UINT64_C(0xfffffffffff7fffe), 0), /* value=0x41880000 */
+ MVI(0x0000002f, "P6_UNK_0000_002f", 0),
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xffffffff01fe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff7fffffff), 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0),
+ RFN(0x00000040, 0x00000047, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MVX(0x0000004a, "P6_UNK_0000_004a", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004b, "P6_UNK_0000_004b", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004c, "P6_UNK_0000_004c", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004d, "P6_UNK_0000_004d", 0, 0, 0), /* value=0x3392fbd9`ffbefffd */
+ MVX(0x0000004e, "P6_UNK_0000_004e", 0, 0, 0), /* value=0xa6b77ad3`7ffbffe7 */
+ MVX(0x0000004f, "P6_UNK_0000_004f", 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x9d`0000009d */
+ MVX(0x0000006c, "P6_UNK_0000_006c", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006d, "P6_UNK_0000_006d", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006e, "P6_UNK_0000_006e", UINT32_C(0x80000000), UINT64_C(0xffffffff00000082), 0),
+ MVO(0x0000006f, "P6_UNK_0000_006f", 0xadb),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, UINT32_MAX, 0), /* value=0x39`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000c1, "IA32_PMC0", Ia32PmcN, Ia32PmcN, 0x0, UINT64_C(0xffffffff00124101), 0), /* XXX: The range ended earlier than expected! */
+ MFX(0x000000c2, "IA32_PMC1", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x1f00000044000000)),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x133, 0, 0), /* value=0x133 */
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x2c130d003b538000)),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x14ce0f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xff000000)), /* value=0x26740c */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8040414 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xff800000)), /* value=0x20414 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", 0x51c20cc0),
+ MFX(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf, 0, UINT64_C(0xffffffffe0000000), 0), /* value=0x5e`dc779a5a */
+ MFX(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf, 0, UINT64_C(0xffffffffe0000000), 0), /* value=0x2b`c8585b9a */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT64_C(0xffffffffffc5ffff), 0), /* value=0x82b90000 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVX(0x00000116, "BBL_CR_ADDR", 0, UINT64_C(0xffffff000000001f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", UINT64_C(0x88000fef00030892), UINT64_C(0x4780000fff00000), 0),
+ MFN(0x0000011a, "BBL_CR_TRIG", WriteOnly, IgnoreWrite),
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0x7874211f, UINT64_C(0xffffffffc0f3feff), 0), /* value=0x7874211f */
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0x49a49f20, UINT64_C(0xffffffff0000008f), 0),
+ MVX(0x0000014f, "P6_UNK_0000_014f", UINT32_MAX, UINT64_C(0xffffffff00100000), 0),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x25febbf6, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x0000015f, "C1_DTS_CAL_CTRL", IntelCore1DtsCalControl, IntelCore1DtsCalControl, 0, UINT64_C(0xffffffffffc0ffff), 0), /* value=0x260613 */
+ MFN(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs), /* Villain? value=0x60 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* Villain? value=0xf5a07c40 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* Villain? value=0xc15af09c */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x6, 0, 0), /* value=0x6 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, UINT64_C(0xfffffffffffee0c0), 0), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x6130d2c060c0613), 0, 0), /* value=0x6130d2c`060c0613 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x613, 0, 0), /* Might bite. value=0x613 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x3, UINT64_C(0xffffffff00616100), UINT32_C(0xff0000e0)), /* value=0x3 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x8838000c), UINT64_C(0xfffffffff87f017f), 0x780fc00), /* value=0x8838000c */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x613, 0, 0), /* value=0x613 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0x11b0000, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, UINT64_C(0x264973488), 0x60319bf7, 0), /* value=0x2`64973488 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", 0x263f04b7, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffffa), 0), /* value=0x8000003 */
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xfffffffffffffe3c)), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xc12c5d73 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xc10357d0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, UINT64_C(0xffffffffff97dc5d), 0), /* value=0xc132a284 */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, UINT64_C(0xfffffffffffffff0), 0), /* value=0xc1329543 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xffe00005 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x7ff00000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fff00800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x80000001 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf0000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0, 0, 0), /* value=0x0 */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0x1b040000000005), 0, 0), /* value=0x1b0400`00000005 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x1f00000016), 0, 0), /* value=0x1f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0x7781fffe0401e172), 0, 0), /* value=0x7781fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3edff00036dff), 0, 0), /* value=0x3edff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x1dff000011ff), 0, 0), /* value=0x1dff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ MVX(0x000004f8, "C2_UNK_0000_04f8", UINT64_C(0x1f5e86fb9f7f6dce), 0, 0),
+ MVX(0x000004f9, "C2_UNK_0000_04f9", UINT64_C(0xafb14bb80b893244), 0, 0),
+ MVX(0x000004fa, "C2_UNK_0000_04fa", UINT64_C(0xfecd26a6e39aeefe), 0, 0),
+ MVX(0x000004fb, "C2_UNK_0000_04fb", UINT64_C(0xd5baca676b503675), 0, 0),
+ MVX(0x000004fc, "C2_UNK_0000_04fc", UINT64_C(0x2e9b76a2bdde6ed7), 0, 0),
+ MVX(0x000004fd, "C2_UNK_0000_04fd", UINT64_C(0xfdbb141e45043200), 0, 0),
+ MVX(0x000004fe, "C2_UNK_0000_04fe", UINT64_C(0x4a68f426372a837f), 0, 0),
+ MVX(0x000004ff, "C2_UNK_0000_04ff", UINT64_C(0x4104628e2e437f40), 0, 0),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x800, 0, UINT64_C(0xfffffffffffff3ff)),
+};
+#endif /* !CPUM_DB_STANDALONE */
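RFN/RSN rows compress a contiguous span of MSRs into a single entry (e.g. the 0x00000400..0x00000417 machine-check bank range above). Since the tables are laid out in ascending MSR order, a lookup can binary-search for the range covering a given index; a sketch under that assumption, with made-up helper names:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct MSRRANGE { uint32_t uFirst, uLast; /* handlers, masks, ... */ } MSRRANGE;

    /* Binary search for the range containing idMsr; NULL if unmapped. */
    static const MSRRANGE *msrLookupSketch(const MSRRANGE *paRanges, size_t cRanges,
                                           uint32_t idMsr)
    {
        size_t iLo = 0, iHi = cRanges;
        while (iLo < iHi)
        {
            size_t iMid = iLo + (iHi - iLo) / 2;
            if (idMsr < paRanges[iMid].uFirst)
                iHi = iMid;
            else if (idMsr > paRanges[iMid].uLast)
                iLo = iMid + 1;
            else
                return &paRanges[iMid];
        }
        return NULL; /* unknown MSR: behaviour then depends on fMsrMask/policy */
    }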
+
+
+/**
+ * Database entry for Intel(R) Core(TM) Duo CPU T2600 @ 2.16GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_Duo_T2600_2_16GHz =
+{
+ /*.pszName = */ "Intel Core Duo T2600 2.16GHz",
+ /*.pszFullName = */ "Genuine Intel(R) CPU T2600 @ 2.16GHz",
+// /*.pszFullName = */ "Intel(R) Core(TM) Duo CPU T2600 @ 2.16GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 14,
+ /*.uStepping = */ 8,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core_Yonah,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_167MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 32,
+ /*.fMxCsrMask = */ 0x0000ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_Duo_T2600_2_16GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_Duo_T2600_2_16GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280201, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_Duo_T2600_2_16GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_Duo_T2600_2_16GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core_Duo_T2600_2_16GHz_h */
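The uFamily/uModel/uStepping triplet in the entry above (6/14/8) is consistent with the leaf 0x00000001 EAX value 0x000006e8 in the CPUID table: EAX[11:8] holds the family, EAX[7:4] the low model nibble, EAX[19:16] the extended model, and EAX[3:0] the stepping. A small check, which also covers models above 15 such as the Core i5-3570 entry further down:

    #include <assert.h>
    #include <stdint.h>

    /* Compose a leaf 1 EAX signature for family 6 parts (extended family unused). */
    static uint32_t x86MakeSignature(unsigned uFamily, unsigned uModel, unsigned uStepping)
    {
        return (uint32_t)(uModel >> 4)  << 16   /* extended model   */
             | (uint32_t)uFamily        << 8    /* family           */
             | (uint32_t)(uModel & 0xf) << 4    /* low model nibble */
             | (uint32_t)uStepping;             /* stepping         */
    }

    int main(void)
    {
        assert(x86MakeSignature(6, 14, 8) == 0x000006e8); /* Core Duo T2600 */
        assert(x86MakeSignature(6, 58, 9) == 0x000306a9); /* Core i5-3570   */
        return 0;
    }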
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h
new file mode 100644
index 00000000..dead6370
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h
@@ -0,0 +1,349 @@
+/* $Id: Intel_Core_i5_3570.h $ */
+/** @file
+ * CPU database entry "Intel Core i5-3570".
+ * Generated at 2013-12-13T16:13:56Z by VBoxCpuReport v4.3.53r91216 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i5_3570_h
+#define VBOX_CPUDB_Intel_Core_i5_3570_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i5_3570[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000306a9, 0x04100800, 0x7fbae3ff, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b0ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00001120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000281, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300803, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000001, 0x00000001, 0x00000100, 0x00000004, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, 0x00000000, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x65746e49, 0x2952286c, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x726f4320, 0x4d542865, 0x35692029, 0x3735332d, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x50432030, 0x20402055, 0x30342e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
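The leaf 0x00000004 row above follows Intel's deterministic cache parameter encoding: ways, partitions, line size and set count are packed into EBX/ECX as value-minus-one fields. For EAX=0x1c004121, EBX=0x01c0003f, ECX=0x0000003f that works out to an 8-way, 64-byte-line, 64-set level-1 data cache, i.e. 32 KiB:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t uEax = 0x1c004121, uEbx = 0x01c0003f, uEcx = 0x0000003f;
        unsigned cWays       = (uEbx >> 22) + 1;           /* 8  */
        unsigned cPartitions = ((uEbx >> 12) & 0x3ff) + 1; /* 1  */
        unsigned cbLine      = (uEbx & 0xfff) + 1;         /* 64 */
        unsigned cSets       = uEcx + 1;                   /* 64 */
        printf("L%u %s cache: %u KiB\n",
               (unsigned)((uEax >> 5) & 0x7),              /* level: 1      */
               (uEax & 0x1f) == 1 ? "data" : "other",      /* type 1 = data */
               cWays * cPartitions * cbLine * cSets / 1024); /* 32 */
        return 0;
    }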
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i5_3570[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x4293`b0a3f54a */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x4000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00c00), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x285),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40004 */
+ MVO(0x00000036, "I7_UNK_0000_0036", UINT64_C(0x1000000000105df2)),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1900000000), 0x1, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c8, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "MSR_PLATFORM_INFO", IntelPlatformInfo), /* value=0x81010`e0012200 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x1e008403 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x10414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x3a`2c710584 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x39`f97c8410 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MVX(0x00000102, "I7_IB_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IB_UNK_0000_0103", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVX(0x00000104, "I7_IB_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl, IntelI7SandyAesNiCtl, 0, 0, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MVX(0x00000140, "I7_IB_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000142, "I7_IB_UNK_0000_0142", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffffff`8159cbe0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x0000018d, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0x190000, 0x1e00ff, UINT64_C(0xffffffffffe00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x1d2400001000), 0, 0), /* value=0x1d24`00001000 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x1000, 0, 0), /* Might bite. value=0x1000 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x1000013, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x1000013 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x884c0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x884c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x691400, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x691400 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400000 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, ReadOnly, 0x24252626, 0, 0), /* value=0x24252626 */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88490000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0x1000003, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x8 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffffff`a061f4c9 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`810473c0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0xdb000006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff800800 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0x20, UINT64_C(0xffffffffffc20000)), /* value=0x14005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xc`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`00000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xe0000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xdc000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xdb800000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1f000000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1e800000 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1e600000 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x40000001 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000288, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "I7_IB_UNK_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ MFX(0x00000309, "IA32_FIXED_CTR0", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)), /* value=0x46 */
+ MFX(0x0000030a, "IA32_FIXED_CTR1", Ia32FixedCtrN, Ia32FixedCtrN, 0x1, 0x816506, UINT64_C(0xffff000000000000)), /* value=0xffff`d65aa6fb */
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, 0, UINT64_C(0xffff000000000000)), /* value=0x264 */
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0xb0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8ffffff00)), /* value=0x7`000000ff */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe0000007000000ff), UINT64_C(0x1ffffff8ffffff00)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, 0, UINT64_C(0xffffffff1fffffe0)), /* value=0x2000000f */
+ MFX(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus, IntelI7UncPerfGlobalStatus, 0, 0xf, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x1950 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x5 */
+ MVX(0x00000397, "I7_IB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffc0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffc0230000)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x7`7827f19a */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x1`3e604592 */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x1767ff, 0, 0), /* value=0x1767ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x8ff00000000), 0, 0), /* value=0x8ff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106114141), 0, 0), /* value=0xf01`06114141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c8, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0xffff8804`07da1cc0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x18141494`80000380 */
+ MVX(0x00000602, "I7_IB_UNK_0000_0602", UINT64_C(0x1814149480000170), UINT32_C(0x80001fff), 0x7fffe000),
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x802c2c2c */
+ MVX(0x00000604, "I7_IB_UNK_0000_0604", UINT32_C(0x80686868), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x883b */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x76c`bd67b914 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x80008302`00148268 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x3451b969 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0xd0000`01e00268 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x357de52e */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x6eeef */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ MFO(0x00000648, "I7_IB_MSR_CONFIG_TDP_NOMINAL", IntelI7IvyConfigTdpNominal), /* value=0x22 */
+ MFO(0x00000649, "I7_IB_MSR_CONFIG_TDP_LEVEL1", IntelI7IvyConfigTdpLevel1), /* value=0x1e00000`00000000 */
+ MFO(0x0000064a, "I7_IB_MSR_CONFIG_TDP_LEVEL2", IntelI7IvyConfigTdpLevel2), /* value=0x1e00000`00000000 */
+ MFO(0x0000064b, "I7_IB_MSR_CONFIG_TDP_CONTROL", IntelI7IvyConfigTdpControl), /* value=0x80000000 */
+ MFX(0x0000064c, "I7_IB_MSR_TURBO_ACTIVATION_RATIO", IntelI7IvyTurboActivationRatio, IntelI7IvyTurboActivationRatio, 0, 0, UINT64_C(0xffffffff7fffff00)), /* value=0x80000000 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFX(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline, Ia32TscDeadline, 0, UINT64_C(0xb280452208b), 0), /* value=0x4293`ef1535a6 */
+ MVX(0x00000700, "I7_IB_UNK_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "I7_IB_UNK_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "I7_IB_UNK_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "I7_IB_UNK_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "I7_IB_UNK_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "I7_IB_UNK_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "I7_IB_UNK_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "I7_IB_UNK_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "I7_IB_UNK_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "I7_IB_UNK_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "I7_IB_UNK_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "I7_IB_UNK_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "I7_IB_UNK_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "I7_IB_UNK_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "I7_IB_UNK_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "I7_IB_UNK_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "I7_IB_UNK_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "I7_IB_UNK_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "I7_IB_UNK_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "I7_IB_UNK_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "I7_IB_UNK_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "I7_IB_UNK_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "I7_IB_UNK_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "I7_IB_UNK_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "I7_IB_UNK_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "I7_IB_UNK_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "I7_IB_UNK_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "I7_IB_UNK_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "I7_IB_UNK_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "I7_IB_UNK_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "I7_IB_UNK_0000_0730", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "I7_IB_UNK_0000_0731", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "I7_IB_UNK_0000_0732", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "I7_IB_UNK_0000_0733", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "I7_IB_UNK_0000_0734", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "I7_IB_UNK_0000_0735", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "I7_IB_UNK_0000_0736", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "I7_IB_UNK_0000_0737", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "I7_IB_UNK_0000_0738", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "I7_IB_UNK_0000_0739", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "I7_IB_UNK_0000_0740", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "I7_IB_UNK_0000_0741", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "I7_IB_UNK_0000_0742", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "I7_IB_UNK_0000_0743", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "I7_IB_UNK_0000_0744", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "I7_IB_UNK_0000_0745", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "I7_IB_UNK_0000_0746", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "I7_IB_UNK_0000_0747", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "I7_IB_UNK_0000_0748", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "I7_IB_UNK_0000_0749", 0, 0, UINT64_C(0xfffff00000000000)),
+ RFN(0x00000800, 0x000008ff, "IA32_X2APIC_n", Ia32X2ApicN, Ia32X2ApicN),
+ MFN(0x00000c80, "IA32_DEBUG_INTERFACE", Ia32DebugInterface, Ia32DebugInterface), /* value=0x0 */
+ MVX(0x00000c81, "I7_IB_UNK_0000_0c81", 0, 0, 0),
+ MVX(0x00000c82, "I7_IB_UNK_0000_0c82", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000c83, "I7_IB_UNK_0000_0c83", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`8159b620 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8159ce10 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x43700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x908880 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8804`1e200000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
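Each IA32_MTRR_PHYS_BASEn/MASKn pair in the dump above decodes per the SDM: bit 11 of the mask is the valid bit, the base's low byte is the memory type, and the address bits define a power-of-two range. Working through the BASE4/MASK4 values of this i5-3570 dump (0xdb800000 and 0xf`ff800800) with the entry's 36-bit physical address width gives an 8 MiB UC range:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t uBase = UINT64_C(0x00000000db800000); /* IA32_MTRR_PHYS_BASE4 */
        uint64_t uMask = UINT64_C(0x0000000fff800800); /* IA32_MTRR_PHYS_MASK4 */
        uint64_t fPhys = (UINT64_C(1) << 36) - 1;      /* cMaxPhysAddrWidth=36 */

        if (uMask & UINT64_C(0x800)) /* bit 11: range enabled */
        {
            uint64_t uStart = uBase & fPhys & ~UINT64_C(0xfff);
            uint64_t cb     = (~uMask & fPhys & ~UINT64_C(0xfff)) + 0x1000;
            printf("range %#llx..%#llx, type %u\n", /* 0xdb800000..0xdbffffff, 0 (UC) */
                   (unsigned long long)uStart,
                   (unsigned long long)(uStart + cb - 1),
                   (unsigned)(uBase & 0xff));
        }
        return 0;
    }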
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i5_3570 =
+{
+ /*.pszName = */ "Intel Core i5-3570",
+ /*.pszFullName = */ "Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 58,
+ /*.uStepping = */ 9,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i5_3570),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i5_3570)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i5_3570)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i5_3570),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core_i5_3570_h */
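The enmUnknownCpuId/DefUnknownCpuId pair above controls what the guest sees for leaves outside the table; with CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX the default mirrors the last standard leaf, which is why the i5-3570 default {0x00000007, 0x00000340, 0x00000340, 0} echoes its leaf 0x0000000d row. A deliberately simplified sketch of that fallback (not the actual CPUM logic, which also handles subleaves and leaf ranges):

    #include <stdint.h>

    typedef struct LEAF { uint32_t uLeaf, uEax, uEbx, uEcx, uEdx; } LEAF;

    /* Simplified: return the table row for uLeaf, else the entry's default row
       (pDefault->uLeaf is unused). */
    static LEAF cpuidQuerySketch(const LEAF *paLeaves, unsigned cLeaves,
                                 const LEAF *pDefault, uint32_t uLeaf)
    {
        for (unsigned i = 0; i < cLeaves; i++)
            if (paLeaves[i].uLeaf == uLeaf)
                return paLeaves[i];
        return *pDefault; /* e.g. {7, 0x340, 0x340, 0} for the i5-3570 entry */
    }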
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h
new file mode 100644
index 00000000..9a11e455
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h
@@ -0,0 +1,342 @@
+/* $Id: Intel_Core_i7_2635QM.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-2635QM".
+ * Generated at 2014-02-28T18:53:09Z by VBoxCpuReport v4.3.53r92586 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_2635QM_h
+#define VBOX_CPUDB_Intel_Core_i7_2635QM_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_2635QM[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000206a7, 0x04100800, 0x1fbae3bf, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x1c03c163, 0x02c0003f, 0x00001fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ /** @todo the 0x0000000b entry here is WRONG! */
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x6e492020, 0x286c6574, 0x43202952, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x2865726f, 0x20294d54, 0x322d3769, 0x51353336, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x5043204d, 0x20402055, 0x30302e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
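+
+/*
+ * Editor's sketch (not VBoxCpuReport output): how the uFamily/uModel/uStepping
+ * fields in the database entry below fall out of the leaf 0x00000001 EAX value
+ * above (0x000206a7), using standard x86 CPUID signature decoding.  The
+ * function name is illustrative only.
+ */
+#if 0 /* illustration only */
+static void cpumExampleDecodeSignature(void)
+{
+    uint32_t const uEax      = 0x000206a7;          /* leaf 1 EAX from the table above */
+    uint32_t const uStepping = uEax & 0xf;          /* = 7 */
+    uint32_t       uFamily   = (uEax >> 8) & 0xf;   /* = 6 */
+    uint32_t       uModel    = (uEax >> 4) & 0xf;   /* = 0xa */
+    if (uFamily == 0xf)
+        uFamily += (uEax >> 20) & 0xff;             /* extended family; not taken here */
+    if (uFamily == 0x6 || uFamily == 0xf)
+        uModel |= ((uEax >> 16) & 0xf) << 4;        /* extended model: 0x2a = 42, Sandy Bridge */
+}
+#endif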
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_2635QM[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x94d`1967512c */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x10000000000000), 0, 0), /* value=0x100000`00000000 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x5),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40008 */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_C(0xfffffffe)), /* value=0x28`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "IA32_PLATFORM_INFO", IntelPlatformInfo), /* value=0x800`60011400 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x405 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x6a`9190b14b */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x69`df4de05c */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl, IntelI7SandyAesNiCtl, 0, 0, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0xffffff80`22904080 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffff80`222f3030 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, 0xe0000, UINT64_C(0xfffffffffff00000)), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x1d4d00000e00), 0, 0), /* value=0x1d4d`00000e00 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x1d00, 0, 0), /* Might bite. value=0x1d00 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x883d0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x883d0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x640e00, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x640e00 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400001 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, ReadOnly, 0x1a1a1c1d, 0, 0), /* value=0x1a1a1c1d */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x4, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x883a0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0xc */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffff7f`a4a6e188 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`222d5ad0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MVO(0x000001e1, "I7_SB_UNK_0000_01e1", 0x2),
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x0 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0x0 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0x20, UINT64_C(0xfffffffffff20000)), /* value=0x4005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xa0000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x90000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8b800000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000288, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "I7_IB_UNK_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, 0, UINT64_C(0xffffffff1fffffe0)), /* value=0x0 */
+ MFX(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus, IntelI7UncPerfGlobalStatus, 0, 0xf, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x0 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x5 */
+ MVX(0x00000397, "I7_SB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x278ad50 */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x627ff, 0, 0), /* value=0x627ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0xff00000000), 0, 0), /* value=0xff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106114141), 0, 0), /* value=0xf01`06114141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000502, "I7_SB_UNK_0000_0502", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x18141494`8000030c */
+ MVX(0x00000602, "I7_IB_UNK_0000_0602", UINT64_C(0x1814149480000104), UINT32_C(0x80001fff), 0x7fffe000),
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80303030 */
+ MVX(0x00000604, "I7_IB_UNK_0000_0602", UINT32_C(0x80646464), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MVX(0x00000609, "I7_SB_UNK_0000_0609", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x8c02 */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x11`06f311d4 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x800001c2`00dc8168 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x55a9ec99 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x100240`01200168 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x1dcdc9a0 */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x39748b6 */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFX(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline, Ia32TscDeadline, 0, UINT64_C(0x1000000018), 0), /* value=0x94d`402e841f */
+ MVX(0x00000700, "MSR_UNC_CBO_0_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "MSR_UNC_CBO_0_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "MSR_UNC_CBO_0_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "MSR_UNC_CBO_0_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "MSR_UNC_CBO_0_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "MSR_UNC_CBO_0_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "MSR_UNC_CBO_0_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "MSR_UNC_CBO_0_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "MSR_UNC_CBO_0_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "MSR_UNC_CBO_0_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "MSR_UNC_CBO_1_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "MSR_UNC_CBO_1_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "MSR_UNC_CBO_1_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "MSR_UNC_CBO_1_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "MSR_UNC_CBO_1_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "MSR_UNC_CBO_1_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "MSR_UNC_CBO_1_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "MSR_UNC_CBO_1_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "MSR_UNC_CBO_1_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "MSR_UNC_CBO_1_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "MSR_UNC_CBO_2_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "MSR_UNC_CBO_2_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "MSR_UNC_CBO_2_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "MSR_UNC_CBO_2_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "MSR_UNC_CBO_2_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "MSR_UNC_CBO_2_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "MSR_UNC_CBO_2_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "MSR_UNC_CBO_2_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "MSR_UNC_CBO_2_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "MSR_UNC_CBO_2_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "MSR_UNC_CBO_3_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "MSR_UNC_CBO_3_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "MSR_UNC_CBO_3_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "MSR_UNC_CBO_3_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "MSR_UNC_CBO_3_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "MSR_UNC_CBO_3_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "MSR_UNC_CBO_3_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "MSR_UNC_CBO_3_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "MSR_UNC_CBO_3_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "MSR_UNC_CBO_3_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "MSR_UNC_CBO_4_PERFEVTSEL0?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "MSR_UNC_CBO_4_PERFEVTSEL1?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "MSR_UNC_CBO_4_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "MSR_UNC_CBO_4_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "MSR_UNC_CBO_4_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "MSR_UNC_CBO_4_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "MSR_UNC_CBO_4_PER_CTR0?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "MSR_UNC_CBO_4_PER_CTR1?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "MSR_UNC_CBO_4_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "MSR_UNC_CBO_4_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`222f2fd0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff81`0500f000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7b14d3f0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
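+
+/*
+ * Editor's sketch of one plausible reading of the two trailing masks in the
+ * MFX/MVX lines above, assuming the layout MFX(uMsr, szName, rdFn, wrFn,
+ * uInitValue, fWrIgnMask, fWrGpMask): bits under fWrIgnMask keep their current
+ * value on write, and changing a bit under fWrGpMask raises #GP(0).  This
+ * mirrors the table layout and is an assumption, not a verified CPUM internal.
+ */
+#if 0 /* illustration only */
+static bool cpumExampleMsrWriteFaults(uint64_t uCurValue, uint64_t uNewValue,
+                                      uint64_t fWrIgnMask, uint64_t fWrGpMask)
+{
+    /* Ignored bits are silently kept at their current value. */
+    uNewValue = (uNewValue & ~fWrIgnMask) | (uCurValue & fWrIgnMask);
+    /* E.g. flipping a reserved bit of IA32_APIC_BASE (fWrGpMask
+     * 0xfffffff0000002ff above) would fault under this reading. */
+    return ((uNewValue ^ uCurValue) & fWrGpMask) != 0;
+}
+#endif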
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_2635QM =
+{
+ /*.pszName = */ "Intel Core i7-2635QM",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 42,
+ /*.uStepping = */ 7,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_2635QM),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_2635QM)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_2635QM)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_2635QM),
+};
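+/* Editor's note: the DefUnknownCpuId quad above equals the leaf 0x0000000d/0
+ * values in the CPUID table, which is consistent with the
+ * CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX method: queries for leaves the CPU
+ * does not implement presumably echo the last standard leaf. */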
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_2635QM_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h
new file mode 100644
index 00000000..6fac59eb
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h
@@ -0,0 +1,396 @@
+/* $Id: Intel_Core_i7_3820QM.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-3820QM".
+ * Generated at 2013-12-04T12:54:32Z by VBoxCpuReport v4.3.51r91071 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_3820QM_h
+#define VBOX_CPUDB_Intel_Core_i7_3820QM_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_3820QM[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000306a9, 0x02100800, 0x7fbae3ff, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000281, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000100, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, 0x00000000, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x6e492020, 0x286c6574, 0x43202952, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x2865726f, 0x20294d54, 0x332d3769, 0x51303238, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x5043204d, 0x20402055, 0x30372e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
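+
+/*
+ * Editor's sketch (not VBoxCpuReport output): leaves 0x80000002..0x80000004
+ * above carry the 48-byte processor brand string as little-endian dwords in
+ * EAX..EDX, reproducing the .pszFullName of the database entry below.
+ * Requires <string.h>; the function name is illustrative only.
+ */
+#if 0 /* illustration only */
+static void cpumExampleBrandString(void)
+{
+    uint32_t const au32Brand[12] =   /* EAX..EDX of 0x80000002..0x80000004 */
+    {
+        0x20202020, 0x6e492020, 0x286c6574, 0x43202952,
+        0x2865726f, 0x20294d54, 0x332d3769, 0x51303238,
+        0x5043204d, 0x20402055, 0x30372e32, 0x007a4847
+    };
+    char szBrand[sizeof(au32Brand) + 1];
+    memcpy(szBrand, au32Brand, sizeof(au32Brand));
+    szBrand[sizeof(au32Brand)] = '\0';
+    /* szBrand == "    Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz" */
+}
+#endif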
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_3820QM[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFX(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter, 0, 0, 0),
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x10000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0xe),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40008 */
+ MVO(0x00000036, "I7_UNK_0000_0036", 0x6c405eec),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0xff07 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1500000000), 0x1, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000c1, "IA32_PMC0", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c2, "IA32_PMC1", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c3, "IA32_PMC2", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c4, "IA32_PMC3", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x80c10f0011b00)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1fffbf8)), /* value=0x8405 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFX(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf, 0, 0x47810, 0), /* value=0x6b`5d075e9c */
+ MFX(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf, 0, 0x1121880, 0), /* value=0x55`2bec768b */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MVX(0x00000102, "I7_IVY_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IVY_UNK_0000_0103", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVX(0x00000104, "I7_IVY_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000132, "I7_UNK_0000_0132", UINT64_MAX, 0, 0),
+ MVX(0x00000133, "I7_UNK_0000_0133", UINT64_MAX, 0, 0),
+ MVX(0x00000134, "I7_UNK_0000_0134", UINT64_MAX, 0, 0),
+ MVO(0x0000013c, "TODO_0000_013c", 0x1),
+ MVX(0x00000140, "I7_IVY_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000142, "I7_IVY_UNK_0000_0142", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0xffffff80`21af5080 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffff80`214ce720 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_C(0xffffffff00080000)),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0x180000, 0x1e00ff, UINT64_C(0xffffffffffe00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x240700002400), 0, 0), /* value=0x2407`00002400 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2500, 0, 0), /* Might bite. value=0x2500 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x10, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x10 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88340000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x88340000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x691200, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x691200 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0, 0, UINT64_C(0xffffffc000007000)), /* XXX: The range ended earlier than expected! */
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400001 */
+ MVX(0x000001ad, "TODO_0000_01ad", 0x23232425, UINT32_MAX, ~(uint64_t)UINT32_MAX),
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x4, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88300000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "TODO_0000_01c6", 0x3),
+ MVX(0x000001c8, "TODO_0000_01c8", 0, 0, UINT64_C(0xfffffffffffffe00)),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x8 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffff7f`a38c2298 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`214b24e0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MVO(0x000001f0, "TODO_0000_01f0", 0x74),
+ MVO(0x000001f2, "TODO_0000_01f2", UINT32_C(0x8b000006)),
+ MVO(0x000001f3, "TODO_0000_01f3", UINT32_C(0xff800800)),
+ MVX(0x000001fc, "TODO_0000_01fc", 0x340047, 0x20, UINT64_C(0xffffffffffc20000)),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xa0000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x90000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8b000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MVX(0x00000280, "TODO_0000_0280", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000281, "TODO_0000_0281", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000282, "TODO_0000_0282", 0, 0x40007fff, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000283, "TODO_0000_0283", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000284, "TODO_0000_0284", 0, 0x40007fff, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000285, "TODO_0000_0285", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000286, "TODO_0000_0286", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000287, "TODO_0000_0287", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000288, "TODO_0000_0288", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "TODO_0000_02e0", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "TODO_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "TODO_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "TODO_0000_0305", 0),
+ MVX(0x00000309, "TODO_0000_0309", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x0000030a, "TODO_0000_030a", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x0000030b, "TODO_0000_030b", 0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000345, "TODO_0000_0345", 0x31c3),
+ MVX(0x0000038d, "TODO_0000_038d", 0, 0, UINT64_C(0xfffffffffffff000)),
+ MVO(0x0000038e, "TODO_0000_038e", UINT64_C(0x8000000000000000)),
+ MVX(0x0000038f, "TODO_0000_038f", 0xf, 0, UINT64_C(0xfffffff8fffffff0)),
+ MVX(0x00000390, "TODO_0000_0390", 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)),
+ MVX(0x00000391, "TODO_0000_0391", 0, 0, UINT64_C(0xffffffff1fffffe0)),
+ MVX(0x00000392, "TODO_0000_0392", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000393, "TODO_0000_0393", 0, 0x3, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x00000394, "TODO_0000_0394", 0, 0, UINT64_C(0xffffffffffafffff)),
+ MVX(0x00000395, "TODO_0000_0395", 0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000396, "TODO_0000_0396", 0x5),
+ MVX(0x00000397, "TODO_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000003b0, "TODO_0000_03b0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x000003b1, "TODO_0000_03b1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x000003b2, "TODO_0000_03b2", 0, 0, UINT64_C(0xffffffffc0230000)),
+ MVX(0x000003b3, "TODO_0000_03b3", 0, 0, UINT64_C(0xffffffffc0230000)),
+ MVX(0x000003f1, "TODO_0000_03f1", 0, 0, UINT64_C(0x7ffffff0fffffff0)),
+ MVX(0x000003f6, "TODO_0000_03f6", UINT16_MAX, UINT64_C(0xffffffffffff0000), 0),
+ MVO(0x000003f8, "TODO_0000_03f8", 0),
+ MVO(0x000003f9, "TODO_0000_03f9", UINT64_C(0x27495a818)),
+ MVO(0x000003fa, "TODO_0000_03fa", UINT64_C(0x428fa6c6207)),
+ MVO(0x000003fc, "TODO_0000_03fc", 0x389bb693),
+ MVO(0x000003fd, "TODO_0000_03fd", 0x13323393),
+ MVO(0x000003fe, "TODO_0000_03fe", UINT64_C(0x48d7ffc9bd1)),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVO(0x00000480, "TODO_0000_0480", UINT64_C(0xda040000000010)),
+ MVO(0x00000481, "TODO_0000_0481", UINT64_C(0x7f00000016)),
+ MVO(0x00000482, "TODO_0000_0482", UINT64_C(0xfff9fffe0401e172)),
+ MVO(0x00000483, "TODO_0000_0483", UINT64_C(0x7fffff00036dff)),
+ MVO(0x00000484, "TODO_0000_0484", UINT64_C(0xffff000011ff)),
+ MVO(0x00000485, "TODO_0000_0485", 0x100401e5),
+ MVO(0x00000486, "TODO_0000_0486", UINT32_C(0x80000021)),
+ MVO(0x00000487, "TODO_0000_0487", UINT32_MAX),
+ MVO(0x00000488, "TODO_0000_0488", 0x2000),
+ MVO(0x00000489, "TODO_0000_0489", 0x1767ff),
+ MVO(0x0000048a, "TODO_0000_048a", 0x2a),
+ MVO(0x0000048b, "TODO_0000_048b", UINT64_C(0x8ff00000000)),
+ MVO(0x0000048c, "TODO_0000_048c", UINT64_C(0xf0106114141)),
+ MVO(0x0000048d, "TODO_0000_048d", UINT64_C(0x7f00000016)),
+ MVO(0x0000048e, "TODO_0000_048e", UINT64_C(0xfff9fffe04006172)),
+ MVO(0x0000048f, "TODO_0000_048f", UINT64_C(0x7fffff00036dfb)),
+ MVO(0x00000490, "TODO_0000_0490", UINT64_C(0xffff000011fb)),
+ MVX(0x000004c1, "TODO_0000_04c1", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c2, "TODO_0000_04c2", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c3, "TODO_0000_04c3", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c4, "TODO_0000_04c4", 0, 0, UINT64_C(0xffff000000000000)),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MVX(0x00000601, "TODO_0000_0601", UINT64_C(0x1814149480000380), UINT32_C(0x80001fff), 0x7fffe000),
+ MVX(0x00000602, "TODO_0000_0602", UINT64_C(0x1814149480000170), UINT32_C(0x80001fff), 0x7fffe000),
+ MVX(0x00000603, "TODO_0000_0603", UINT32_C(0x80303030), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVX(0x00000604, "TODO_0000_0604", UINT32_C(0x80646464), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000606, "TODO_0000_0606", 0xa1003),
+ MVX(0x0000060a, "TODO_0000_060a", 0x8894, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x0000060b, "TODO_0000_060b", 0x88a9, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x0000060c, "TODO_0000_060c", 0x88c6, 0, UINT64_C(0xffffffffffff6000)),
+ MVO(0x0000060d, "TODO_0000_060d", UINT64_C(0xd0fd23dd9)),
+ MVX(0x00000610, "TODO_0000_0610", UINT64_C(0x800083e800dd8320), UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)),
+ MVO(0x00000611, "TODO_0000_0611", 0x2ed06e3b),
+ MVO(0x00000614, "TODO_0000_0614", 0x1200168),
+ MVX(0x00000638, "TODO_0000_0638", UINT32_C(0x80000000), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000639, "TODO_0000_0639", 0x106344fd),
+ MVX(0x0000063a, "TODO_0000_063a", 0, 0, UINT64_C(0xffffffffffffffe0)),
+ MVX(0x00000640, "TODO_0000_0640", UINT32_C(0x80000000), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000641, "TODO_0000_0641", 0xb39e93),
+ MVX(0x00000642, "TODO_0000_0642", 0x10, 0, UINT64_C(0xffffffffffffffe0)),
+ MVO(0x00000648, "TODO_0000_0648", 0x1b),
+ MVO(0x00000649, "TODO_0000_0649", UINT64_C(0x120000000000000)),
+ MVO(0x0000064a, "TODO_0000_064a", UINT64_C(0x120000000000000)),
+ MVO(0x0000064b, "TODO_0000_064b", UINT32_C(0x80000000)),
+ MVX(0x0000064c, "TODO_0000_064c", UINT32_C(0x80000000), UINT32_C(0x800000ff), UINT64_C(0xffffffff7fffff00)),
+ MVX(0x00000680, "TODO_0000_0680", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000681, "TODO_0000_0681", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000682, "TODO_0000_0682", UINT64_C(0x7fffff7fa38c2289), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000683, "TODO_0000_0683", UINT64_C(0x7fffff80214b24cb), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000684, "TODO_0000_0684", UINT64_C(0x7fffff7fa38c2298), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000685, "TODO_0000_0685", UINT64_C(0x7fffff80214b24ee), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000686, "TODO_0000_0686", UINT64_C(0x7fffff7fa38c2289), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000687, "TODO_0000_0687", UINT64_C(0x7fffff80214b24cb), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000688, "TODO_0000_0688", UINT64_C(0x7fffff7fa38c2298), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000689, "TODO_0000_0689", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068a, "TODO_0000_068a", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068b, "TODO_0000_068b", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068c, "TODO_0000_068c", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068d, "TODO_0000_068d", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068e, "TODO_0000_068e", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068f, "TODO_0000_068f", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x000006c0, "TODO_0000_06c0", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c1, "TODO_0000_06c1", UINT64_C(0xffffff7fa38c227f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c2, "TODO_0000_06c2", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c3, "TODO_0000_06c3", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c4, "TODO_0000_06c4", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c5, "TODO_0000_06c5", UINT64_C(0xffffff7fa38c227f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c6, "TODO_0000_06c6", UINT64_C(0xffffff80214b24c0), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c7, "TODO_0000_06c7", UINT64_C(0xffffff7fa38c228f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c8, "TODO_0000_06c8", UINT64_C(0xffffff80214b24e0), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c9, "TODO_0000_06c9", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006ca, "TODO_0000_06ca", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cb, "TODO_0000_06cb", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cc, "TODO_0000_06cc", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cd, "TODO_0000_06cd", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006ce, "TODO_0000_06ce", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cf, "TODO_0000_06cf", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006e0, "TODO_0000_06e0", UINT64_C(0x535157ca1ca), 0x80000, 0),
+ MVX(0x00000700, "TODO_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "TODO_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "TODO_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "TODO_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "TODO_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "TODO_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "TODO_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "TODO_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "TODO_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "TODO_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "TODO_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "TODO_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "TODO_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "TODO_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "TODO_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "TODO_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "TODO_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "TODO_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "TODO_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "TODO_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "TODO_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "TODO_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "TODO_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "TODO_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "TODO_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "TODO_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "TODO_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "TODO_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "TODO_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "TODO_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "TODO_0000_0730", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "TODO_0000_0731", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "TODO_0000_0732", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "TODO_0000_0733", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "TODO_0000_0734", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "TODO_0000_0735", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "TODO_0000_0736", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "TODO_0000_0737", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "TODO_0000_0738", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "TODO_0000_0739", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "TODO_0000_0740", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "TODO_0000_0741", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "TODO_0000_0742", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "TODO_0000_0743", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "TODO_0000_0744", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "TODO_0000_0745", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "TODO_0000_0746", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "TODO_0000_0747", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "TODO_0000_0748", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "TODO_0000_0749", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c80, "TODO_0000_0c80", 0, 0, 0),
+ MVX(0x00000c81, "TODO_0000_0c81", 0, 0, 0),
+ MVX(0x00000c82, "TODO_0000_0c82", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000c83, "TODO_0000_0c83", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFX(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget, 0, 0, 0), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`214ce6c0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff81`e942f000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7ccad1e0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
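+
+/*
+ * Editor's sketch of decoding a variable-range MTRR pair from the table above,
+ * per the standard IA32_MTRR_PHYSBASEn/PHYSMASKn semantics (names illustrative).
+ * With PHYS_BASE0=0xc0000000 (memory type 0, UC) and PHYS_MASK0=0xf`c0000800
+ * (valid bit 11 set), an address is covered iff masking it with the physmask
+ * reproduces the masked base, i.e. 0xc0000000..0xffffffff (1 GiB) here.
+ */
+#if 0 /* illustration only */
+static bool cpumExampleMtrrMatches(uint64_t uPhysAddr)
+{
+    uint64_t const uBase = UINT64_C(0x00000000c0000000); /* IA32_MTRR_PHYS_BASE0 */
+    uint64_t const uMask = UINT64_C(0x0000000fc0000800); /* IA32_MTRR_PHYS_MASK0 */
+    if (!(uMask & UINT64_C(0x800)))                      /* valid bit (bit 11) */
+        return false;
+    uint64_t const fPhysMask = uMask & ~UINT64_C(0xfff);
+    return (uPhysAddr & fPhysMask) == (uBase & fPhysMask);
+}
+#endif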
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_3820QM =
+{
+ /*.pszName = */ "Intel Core i7-3820QM",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 58,
+ /*.uStepping = */ 9,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_3820QM),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_3820QM)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_3820QM)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_3820QM),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_3820QM_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h
new file mode 100644
index 00000000..d4d19e34
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h
@@ -0,0 +1,379 @@
+/* $Id: Intel_Core_i7_3960X.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-3960X".
+ * Generated at 2013-12-12T15:29:11Z by VBoxCpuReport v4.3.53r91237 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_3960X_h
+#define VBOX_CPUDB_Intel_Core_i7_3960X_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_3960X[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000206d6, 0x02200800, 0x1fbee3bf, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x3c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x3c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x3c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x3c07c163, 0x04c0003f, 0x00002fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000001, 0x00000002, 0x00000100, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000005, 0x0000000c, 0x00000201, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x2c100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x49202020, 0x6c65746e, 0x20295228, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x65726f43, 0x294d5428, 0x2d376920, 0x30363933, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x50432058, 0x20402055, 0x30332e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x0000302e, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
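The uFamily/uModel/uStepping values in the database entry further down fall straight out of leaf 0x00000001 EAX above (0x000206d6). A minimal decode sketch using the standard CPUID field layout rather than VirtualBox's own helpers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t uEax      = UINT32_C(0x000206d6);  /* leaf 1 EAX from the table */
        const uint32_t uStepping = uEax & 0xf;
        const uint32_t uFamily   = (uEax >> 8) & 0xf;     /* base family is 6, no extension */
        /* Extended model (bits 19:16) applies for family 6/15: (ext << 4) | base. */
        const uint32_t uModel    = ((uEax >> 12) & 0xf0) | ((uEax >> 4) & 0xf);
        printf("family=%u model=%u stepping=%u\n", uFamily, uModel, uStepping); /* 6 45 6 */
        return 0;
    }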
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_3960X[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x177ab4`48466b19 */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x8000000000000), 0, 0), /* value=0x80000`00000000 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffc000000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x4cb),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x6000c */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x61600000000), 0, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "MSR_PLATFORM_INFO", IntelPlatformInfo), /* value=0xc00`70012100 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x1e008400 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x2be98e4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2d84ced */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFO(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl), /* value=0x1 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc12, 0, 0), /* value=0xc12 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0x0000017f, "I7_SB_ERROR_CONTROL", IntelI7SandyErrorControl, IntelI7SandyErrorControl, 0, 0xc, UINT64_C(0xffffffffffffffe1)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0xf2100, 0xe0000, UINT64_C(0xfffffffffff00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x288300002400), 0, 0), /* value=0x2883`00002400 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2700, 0, 0), /* Might bite. value=0x2700 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88380000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x88380000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x5b0a00, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x5b0a00 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400000 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, IntelI7TurboRatioLimit, UINT64_C(0x2424242425252727), 0, 0), /* value=0x24242424`25252727 */
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88310000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0xc */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7ffff880`093814ea */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xfffff880`093a60e0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MVO(0x000001e1, "I7_SB_UNK_0000_01e1", 0x2),
+ MVX(0x000001ef, "I7_SB_UNK_0000_01ef", 0xff, 0, UINT64_MAX),
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0xad800006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff800800 */
+ MFX(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap, Ia32PlatformDcaCap, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x1 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x1 */
+ MFX(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap, Ia32Dca0Cap, 0, 0x40007ff, UINT64_C(0xfffffffffafe1800)), /* value=0x1e489 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0, UINT64_C(0xffffffff00320020)), /* value=0x2500005b */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3ffc`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x4`00000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x4`40000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xae000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`fe000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xb0000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`f0000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xc0000000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`c0000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x40000001 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000287, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x00000288, 0x0000028b, "IA32_MC8_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x8, 0x1, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x0000028c, 0x00000291, "IA32_MC12_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0xc, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000300, "I7_SB_UNK_0000_0300", UINT32_C(0x8000ff00)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)), /* value=0x0 */
+ MFX(0x0000039c, "I7_SB_MSR_PEBS_NUM_ALT", IntelI7SandyPebsNumAlt, IntelI7SandyPebsNumAlt, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x3f8f`5718a87c */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000447, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x627ff, 0, 0), /* value=0x627ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x4ff00000000), 0, 0), /* value=0x4ff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106134141), 0, 0), /* value=0xf01`06134141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000502, "I7_SB_UNK_0000_0502", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x141494`80000640 */
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80151515 */
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x0 */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x0 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x80068960`005affff */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0xc120ff02 */
+ MFO(0x00000613, "I7_SB_MSR_PKG_PERF_STATUS", IntelI7RaplPkgPerfStatus), /* value=0x0 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x1a80410 */
+ MFX(0x00000618, "I7_SB_MSR_DRAM_POWER_LIMIT", IntelI7RaplDramPowerLimit, IntelI7RaplDramPowerLimit, 0, UINT32_C(0x80feffff), UINT64_C(0xffffffff7f010000)), /* value=0x80000000 */
+ MFO(0x00000619, "I7_SB_MSR_DRAM_ENERGY_STATUS", IntelI7RaplDramEnergyStatus), /* value=0x0 */
+ MFO(0x0000061b, "I7_SB_MSR_DRAM_PERF_STATUS", IntelI7RaplDramPerfStatus), /* value=0x0 */
+ MFO(0x0000061c, "I7_SB_MSR_DRAM_POWER_INFO", IntelI7RaplDramPowerInfo), /* value=0x280258`00780118 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x448bc04 */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFO(0x0000063b, "I7_SB_MSR_PP0_PERF_STATUS", IntelI7RaplPp0PerfStatus), /* value=0x0 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFI(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline), /* value=0x0 */
+ MVX(0x00000a00, "I7_SB_UNK_0000_0a00", 0, 0, UINT64_C(0xfffffffffffffec0)),
+ MVX(0x00000a01, "I7_SB_UNK_0000_0a01", 0x178fa000, 0, UINT64_C(0xffffffff00000f80)),
+ MVX(0x00000a02, "I7_SB_UNK_0000_0a02", 0, 0, UINT64_C(0xffffffff20002000)),
+ MVX(0x00000c00, "I7_SB_UNK_0000_0c00", 0, 0, UINT64_C(0xffffffffbfffff00)),
+ MVX(0x00000c01, "I7_SB_UNK_0000_0c01", 0, 0x9229fe7, UINT64_C(0xfffffffff6dd6018)),
+ MVO(0x00000c06, "I7_SB_UNK_0000_0c06", 0x6),
+ MVX(0x00000c08, "I7_SB_UNK_0000_0c08", 0, 0, UINT64_C(0xffffffffffafffff)),
+ MVX(0x00000c09, "I7_SB_UNK_0000_0c09", 0x301a, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c10, "I7_SB_UNK_0000_0c10", 0, 0x20000, UINT64_C(0xffffffffe0210000)),
+ MVX(0x00000c11, "I7_SB_UNK_0000_0c11", 0, 0x20000, UINT64_C(0xffffffffe0210000)),
+ MVX(0x00000c14, "I7_SB_UNK_0000_0c14", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000c15, "I7_SB_UNK_0000_0c15", 0, 0x3, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x00000c16, "I7_SB_UNK_0000_0c16", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c17, "I7_SB_UNK_0000_0c17", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c24, "I7_SB_UNK_0000_0c24", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000c30, "I7_SB_UNK_0000_0c30", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c31, "I7_SB_UNK_0000_0c31", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c32, "I7_SB_UNK_0000_0c32", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c33, "I7_SB_UNK_0000_0c33", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c34, "I7_SB_UNK_0000_0c34", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000c35, "I7_SB_UNK_0000_0c35", 0, 0x7f, UINT64_C(0xffffffffffffff80)),
+ MVX(0x00000c36, "I7_SB_UNK_0000_0c36", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c37, "I7_SB_UNK_0000_0c37", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c38, "I7_SB_UNK_0000_0c38", 0x20c, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c39, "I7_SB_UNK_0000_0c39", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000d04, "I7_SB_UNK_0000_0d04", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d10, "I7_SB_UNK_0000_0d10", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d11, "I7_SB_UNK_0000_0d11", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d12, "I7_SB_UNK_0000_0d12", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d13, "I7_SB_UNK_0000_0d13", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d14, "I7_SB_UNK_0000_0d14", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d15, "I7_SB_UNK_0000_0d15", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d16, "I7_SB_UNK_0000_0d16", 0x81c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d17, "I7_SB_UNK_0000_0d17", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d18, "I7_SB_UNK_0000_0d18", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d19, "I7_SB_UNK_0000_0d19", 0x810, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d24, "I7_SB_UNK_0000_0d24", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d30, "I7_SB_UNK_0000_0d30", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d31, "I7_SB_UNK_0000_0d31", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d32, "I7_SB_UNK_0000_0d32", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d33, "I7_SB_UNK_0000_0d33", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d34, "I7_SB_UNK_0000_0d34", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d35, "I7_SB_UNK_0000_0d35", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d36, "I7_SB_UNK_0000_0d36", 0x864, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d37, "I7_SB_UNK_0000_0d37", 0x804, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d38, "I7_SB_UNK_0000_0d38", 0x822, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d39, "I7_SB_UNK_0000_0d39", 0x81c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d44, "I7_SB_UNK_0000_0d44", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d50, "I7_SB_UNK_0000_0d50", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d51, "I7_SB_UNK_0000_0d51", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d52, "I7_SB_UNK_0000_0d52", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d53, "I7_SB_UNK_0000_0d53", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d54, "I7_SB_UNK_0000_0d54", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d55, "I7_SB_UNK_0000_0d55", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d56, "I7_SB_UNK_0000_0d56", 0x848, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d57, "I7_SB_UNK_0000_0d57", 0x866, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d58, "I7_SB_UNK_0000_0d58", 0x83c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d59, "I7_SB_UNK_0000_0d59", 0x83c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d64, "I7_SB_UNK_0000_0d64", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d70, "I7_SB_UNK_0000_0d70", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d71, "I7_SB_UNK_0000_0d71", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d72, "I7_SB_UNK_0000_0d72", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d73, "I7_SB_UNK_0000_0d73", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d74, "I7_SB_UNK_0000_0d74", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d75, "I7_SB_UNK_0000_0d75", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d76, "I7_SB_UNK_0000_0d76", 0x846, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d77, "I7_SB_UNK_0000_0d77", 0x90c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d78, "I7_SB_UNK_0000_0d78", 0x846, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d79, "I7_SB_UNK_0000_0d79", 0x842, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d84, "I7_SB_UNK_0000_0d84", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d90, "I7_SB_UNK_0000_0d90", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d91, "I7_SB_UNK_0000_0d91", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d92, "I7_SB_UNK_0000_0d92", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d93, "I7_SB_UNK_0000_0d93", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d94, "I7_SB_UNK_0000_0d94", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d95, "I7_SB_UNK_0000_0d95", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d96, "I7_SB_UNK_0000_0d96", 0x8c6, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d97, "I7_SB_UNK_0000_0d97", 0x840, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d98, "I7_SB_UNK_0000_0d98", 0x81a, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d99, "I7_SB_UNK_0000_0d99", 0x910, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000da4, "I7_SB_UNK_0000_0da4", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000db0, "I7_SB_UNK_0000_0db0", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db1, "I7_SB_UNK_0000_0db1", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db2, "I7_SB_UNK_0000_0db2", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db3, "I7_SB_UNK_0000_0db3", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db4, "I7_SB_UNK_0000_0db4", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000db5, "I7_SB_UNK_0000_0db5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000db6, "I7_SB_UNK_0000_0db6", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db7, "I7_SB_UNK_0000_0db7", 0x81e, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db8, "I7_SB_UNK_0000_0db8", 0x810, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db9, "I7_SB_UNK_0000_0db9", 0x80a, 0, UINT64_C(0xfffff00000000000)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`030dac00 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`030da940 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`061e6000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
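A note on reading the table above: RSN/RFN rows compress runs of consecutive MSRs onto a single handler pair, and for MVX/MFX rows the trailing arguments are taken to be the initial value, a write-ignore mask and a write-#GP mask (per the macro definitions in CPUMR3Db.cpp; re-check there before relying on this). A rough sketch of that write discipline, with a hypothetical MSRSKETCH type purely for illustration, not VirtualBox's actual handler code:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint64_t uValue, fWrIgnMask, fWrGpMask; } MSRSKETCH;

    /* Returns false (raise #GP) when a write touches #GP-masked bits;
     * ignore-masked bits silently keep their previous value. */
    static bool msrSketchWrite(MSRSKETCH *pMsr, uint64_t uNewValue)
    {
        if (uNewValue & pMsr->fWrGpMask)
            return false;
        pMsr->uValue = (uNewValue & ~pMsr->fWrIgnMask)
                     | (pMsr->uValue & pMsr->fWrIgnMask);
        return true;
    }

The real handlers in CPUMAllMsrs.cpp are per-MSR and may, for instance, only fault when masked bits actually change, so treat this as a reading aid for the mask columns, nothing more.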
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_3960X =
+{
+ /*.pszName = */ "Intel Core i7-3960X",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 45,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 46,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_3960X),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_3960X)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_3960X)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_3960X),
+};
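Worth noting: the DefUnknownCpuId quad above is identical to the last standard leaf in the CPUID table (0x0000000d, sub-leaf 0: EAX=0x00000007, EBX=0x00000340, ECX=0x00000340, EDX=0), which is what the CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX policy suggests: queries for out-of-range leaves echo the last standard leaf, with ECX-dependent handling.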
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_3960X_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_5600U.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_5600U.h
new file mode 100644
index 00000000..1b3e60f6
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_5600U.h
@@ -0,0 +1,378 @@
+/* $Id: Intel_Core_i7_5600U.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-5600U".
+ * Generated at 2015-11-04T14:14:27Z by VBoxCpuReport v5.0.51r103906 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_5600U_h
+#define VBOX_CPUDB_Intel_Core_i7_5600U_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_5600U[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000014, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000306d4, 0x00100800, 0x7ffafbff, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76036301, 0x00f0b5ff, 0x00000000, 0x00c30000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x1c03c163, 0x03c0003f, 0x00000fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x11142120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x021c2fbb, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000001, 0x00000002, 0x00000100, 0x00000000, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000004, 0x00000004, 0x00000201, 0x00000000, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000000, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, UINT32_MAX, 0x00000000, 0x00000001, 0x00000001, 0x00000000, 0 },
+ { 0x0000000c, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000014, 0x00000000, UINT32_MAX, 0x00000000, 0x00000001, 0x00000001, 0x00000000, 0 },
+ { 0x00000014, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000121, 0x2c100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x37692029, 0x3036352d, 0x43205530, 0x40205550, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x362e3220, 0x7a484730, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003027, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
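Leaves 0x80000002..0x80000004 above hold the 48-character brand string, four ASCII bytes per register in EAX, EBX, ECX, EDX order. A small sketch that reassembles it (assumes a little-endian host, which x86 is):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const uint32_t au32Brand[12] =
        {
            0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865, /* leaf 0x80000002 */
            0x37692029, 0x3036352d, 0x43205530, 0x40205550, /* leaf 0x80000003 */
            0x362e3220, 0x7a484730, 0x00000000, 0x00000000  /* leaf 0x80000004 */
        };
        char szBrand[sizeof(au32Brand) + 1];
        memcpy(szBrand, au32Brand, sizeof(au32Brand));
        szBrand[sizeof(au32Brand)] = '\0';
        printf("%s\n", szBrand); /* Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz */
        return 0;
    }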
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_5600U[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0xff */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x1c93`50dd535c */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x18000000000000), 0, 0), /* value=0x180000`00000000 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xffffff80000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x97b),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x20004 */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003b, "P6_UNK_0000_003b", UINT64_C(0xfffffffffffffffe), 0, 0),
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_MAX), /* value=0x1f`00000000 */
+ MVX(0x00000095, "TODO_0000_0095", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "IA32_PLATFORM_INFO", IntelPlatformInfo), /* value=0x5053b`f3011a00 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffff01ffffff)), /* value=0x1e008408 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, UINT32_C(0xffff7000), ~(uint64_t)UINT32_MAX), /* value=0x8b800000 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x51814 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x23c`764b31c5 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2af`f518152c */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MVX(0x00000102, "I7_IB_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IB_UNK_0000_0103", 0, 0, UINT64_C(0xfffffffffffff000)),
+ MVX(0x00000104, "I7_IB_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000110, "TODO_0000_0110", 0x3),
+ MVX(0x0000011f, "TODO_0000_011f", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVO(0x0000013a, "TODO_0000_013a", UINT64_C(0x30000007f)),
+ MFO(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl), /* value=0x1 */
+ MVX(0x00000140, "I7_IB_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000142, "I7_IB_UNK_0000_0142", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000150, "P6_UNK_0000_0150", 0, UINT64_C(0x8000ffffffffffff), UINT64_C(0x7fff000000000000)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0x0 */
+ MVX(0x00000178, "TODO_0000_0178", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x1000c07, 0, 0), /* value=0x1000c07 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xfffffffe00080000)), /* XXX: The range ended earlier than expected! */
+ MVX(0x00000188, "IA32_PERFEVTSEL2", 0, 0, UINT64_C(0xfffffffc00080000)),
+ MVX(0x00000189, "IA32_PERFEVTSEL3", 0, 0, UINT64_C(0xfffffffe00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0x90000, 0xe0000, UINT64_C(0xffffffffffe00000)), /* value=0x90000 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x273c00002000), 0, 0), /* value=0x273c`00002000 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2000, 0, 0), /* Might bite. value=0x2000 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x10, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x10 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88150800), UINT32_C(0xf87f07fd), UINT64_C(0xffffffff0780f000)), /* value=0x88150800 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+/// @todo WARNING: IA32_MISC_ENABLE probing needs hacking on this CPU!
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MVO(0x000001a1, "P6_UNK_0000_01a1", 0x995),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x5690000, 0xffff00, UINT64_C(0xffffffffc00000ff)), /* value=0x5690000 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400000 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, IntelI7TurboRatioLimit, UINT64_C(0x1f1f1f1f1f20), UINT64_MAX, 0), /* value=0x1f1f`1f1f1f20 */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x880d0802), UINT32_C(0xf87f07fd), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffc00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff003c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0, UINT64_C(0x1fff800000000000)), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0xdc000006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff000800 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, UINT32_C(0x80000020), UINT64_C(0xffffffff3e100000)), /* value=0x4005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7e`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x2`00000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`f0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x2`10000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`f8000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x2`18000006 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x2`1c000006 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`fe000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x2`1e000006 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`ff800800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff8000000ff8)), /* value=0xe0000000 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`e0000800 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff8000000ff8)), /* value=0xde000000 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`fe000800 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xffffff8000000ff8)), /* value=0xdd000000 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`ff000800 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xffffff80000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFX(0x00000280, "IA32_MC0_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0x40000000, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000281, 0x00000283, "IA32_MC1_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x1, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000286, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x32c4, 0, 0), /* value=0x32c4 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe08000070000000f), UINT64_C(0x1f7ffff8fffffff0)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, 0, UINT64_C(0xffffffff1fffff80)), /* value=0x0 */
+ MFX(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus, IntelI7UncPerfGlobalStatus, 0, 0xf, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x0 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x3 */
+ MVX(0x00000397, "I7_SB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MVO(0x000003f0, "TODO_0000_03f0", 0),
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0xfffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x4`465710e6 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x2`3a8a1eca */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x0000041b, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000012), 0, 0), /* value=0xda0400`00000012 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x300481e5, 0, 0), /* value=0x300481e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x3767ff, 0, 0), /* value=0x3767ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x57cff00000000), 0, 0), /* value=0x57cff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106334141), 0, 0), /* value=0xf01`06334141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ MFX(0x00000491, "IA32_VMX_VMFUNC", Ia32VmxVmFunc, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x000004e0, "TODO_0000_04e0", 0x1),
+ MVO(0x000004e2, "TODO_0000_04e2", 0x5),
+ MVO(0x000004e3, "TODO_0000_04e3", 0xff0),
+ MVX(0x00000560, "TODO_0000_0560", 0, 0, UINT64_C(0xffffff800000007f)),
+ MVX(0x00000561, "TODO_0000_0561", 0x7f, UINT64_C(0x70000007f), UINT32_C(0xffffff80)),
+ MVX(0x00000570, "TODO_0000_0570", 0x2100, 0x2100, UINT64_C(0xffffffffffffd272)),
+ MVX(0x00000571, "TODO_0000_0571", 0, 0x7, UINT64_C(0xffffffffffffffc8)),
+ MVX(0x00000572, "TODO_0000_0572", 0, 0, UINT64_C(0xffff00000000001f)),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), UINT64_C(0x800000007fffe000)), /* value=0x40101414`80000100 */
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, 0, UINT64_C(0xff80000000000000)), /* value=0x360000`00333333 */
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa0e03 */
+ MVX(0x00000609, "I7_SB_UNK_0000_0609", 0x1a, 0xc0, UINT64_C(0xffffffffffffff00)),
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x8842 */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x1b`88fad668 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x804280c8`00dd8078 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x7e40b254 */
+ MFO(0x00000613, "I7_SB_MSR_PKG_PERF_STATUS", IntelI7RaplPkgPerfStatus), /* value=0xff3 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x78 */
+ MVX(0x00000615, "TODO_0000_0615", 0, 0, UINT64_C(0xffffffff00010000)),
+ MFX(0x00000618, "I7_SB_MSR_DRAM_POWER_LIMIT", IntelI7RaplDramPowerLimit, IntelI7RaplDramPowerLimit, 0, UINT64_C(0x80feffff00feffff), UINT64_C(0x7f010000ff010000)), /* value=0x805400de`00000000 */
+ MFO(0x00000619, "I7_SB_MSR_DRAM_ENERGY_STATUS", IntelI7RaplDramEnergyStatus), /* value=0x9dbe152 */
+ MFO(0x0000061b, "I7_SB_MSR_DRAM_PERF_STATUS", IntelI7RaplDramPerfStatus), /* value=0x0 */
+ MVO(0x0000061d, "TODO_0000_061d", UINT64_C(0x6e231cb3da)),
+ MVX(0x00000620, "TODO_0000_0620", 0x71d, 0, UINT64_C(0xffffffffffff8080)),
+ MVO(0x00000621, "TODO_0000_0621", 0x1d),
+ MVX(0x00000622, "TODO_0000_0622", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000623, "TODO_0000_0623", 0x1),
+ MVO(0x00000630, "TODO_0000_0630", 0),
+ MVO(0x00000631, "TODO_0000_0631", 0),
+ MVO(0x00000632, "TODO_0000_0632", 0),
+ MVX(0x00000633, "TODO_0000_0633", 0x88e4, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000634, "TODO_0000_0634", 0x8945, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000635, "TODO_0000_0635", 0x89ef, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000636, "TODO_0000_0636", 0x6a, 0, UINT64_C(0xffffffffffff0000)),
+ MVO(0x00000637, "TODO_0000_0637", UINT64_C(0x43af89cfdf)),
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, 0, UINT64_C(0xffffffff7f000000)), /* value=0x0 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x6f9c685f */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x7 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, 0, UINT64_C(0xffffffff7f000000)), /* value=0x0 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x4d471 */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0xb */
+ MFO(0x00000648, "I7_IB_MSR_CONFIG_TDP_NOMINAL", IntelI7IvyConfigTdpNominal), /* value=0x1a */
+ MFO(0x00000649, "I7_IB_MSR_CONFIG_TDP_LEVEL1", IntelI7IvyConfigTdpLevel1), /* value=0x6003c */
+ MFO(0x0000064a, "I7_IB_MSR_CONFIG_TDP_LEVEL2", IntelI7IvyConfigTdpLevel2), /* value=0x0 */
+ MFX(0x0000064b, "I7_IB_MSR_CONFIG_TDP_CONTROL", IntelI7IvyConfigTdpControl, IntelI7IvyConfigTdpControl, 0, 0, UINT64_C(0xffffffff7ffffffc)), /* value=0x80000000 */
+ MFX(0x0000064c, "I7_IB_MSR_TURBO_ACTIVATION_RATIO", IntelI7IvyTurboActivationRatio, IntelI7IvyTurboActivationRatio, 0, 0, UINT64_C(0xffffffff7fffff00)), /* value=0x80000019 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MVX(0x00000690, "TODO_0000_0690", 0x1d200000, UINT32_C(0xe6dfffff), ~(uint64_t)UINT32_MAX),
+ MVX(0x000006b0, "TODO_0000_06b0", 0x1d000000, UINT32_C(0xe2ffffff), ~(uint64_t)UINT32_MAX),
+ MVX(0x000006b1, "TODO_0000_06b1", 0xd000000, UINT32_C(0xf2ffffff), ~(uint64_t)UINT32_MAX),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFI(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline), /* value=0x0 */
+ MVX(0x00000700, "TODO_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "TODO_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "TODO_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "TODO_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "TODO_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "TODO_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "TODO_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "TODO_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "TODO_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "TODO_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "TODO_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "TODO_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "TODO_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "TODO_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "TODO_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "TODO_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "TODO_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "TODO_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "TODO_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "TODO_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "TODO_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "TODO_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "TODO_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "TODO_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "TODO_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "TODO_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "TODO_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "TODO_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "TODO_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "TODO_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MFO(0x00000c80, "IA32_DEBUG_INTERFACE", Ia32DebugInterface), /* value=0x40000000 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff802`f9b59200 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff802`f9b58f40 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x212000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffd001`83740000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x210000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
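+/*
+ * Reading note (not generated): in the table above, MFX rows are
+ * (index, name, read handler, write handler, initial value, write-ignore
+ * mask, write-#GP mask) and MVX rows are (index, name, value, write-ignore
+ * mask, write-#GP mask); MVO/MFO rows are read-only variants, and RSN/RFN
+ * expand into a whole range of like entries.  This is a rough guide inferred
+ * from the macro names, not an authoritative description of the CPUM macros.
+ */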
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_5600U =
+{
+ /*.pszName = */ "Intel Core i7-5600U",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 61,
+ /*.uStepping = */ 4,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_Broadwell,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 39,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_5600U),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_5600U)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000001, 0x00000001, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_5600U)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_5600U),
+};
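+/*
+ * Usage note (not generated): a database entry like the one above is picked
+ * by CPU profile name; assuming the current VBoxManage syntax, something
+ * like:
+ *
+ *     VBoxManage modifyvm "MyVM" --cpu-profile "Intel Core i7-5600U"
+ *
+ * should select it via .pszName, while "host" keeps the host CPU profile.
+ */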
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_5600U_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_6700K.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_6700K.h
new file mode 100644
index 00000000..ccf37738
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_6700K.h
@@ -0,0 +1,520 @@
+/* $Id: Intel_Core_i7_6700K.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-6700K".
+ * Generated at 2015-11-04T14:22:26Z by VBoxCpuReport v5.0.51r103906 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_6700K_h
+#define VBOX_CPUDB_Intel_Core_i7_6700K_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_6700K[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000016, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000506e3, 0x02100800, 0x7ffafbbf, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76036301, 0x00f0b5ff, 0x00000000, 0x00c30000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c004143, 0x00c0003f, 0x000003ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x1c03c163, 0x03c0003f, 0x00001fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00142120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x000027f7, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x029c6fbf, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300404, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000001, 0x00000002, 0x00000100, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000004, 0x00000008, 0x00000201, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x0000001f, 0x00000440, 0x00000440, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x0000000f, 0x000003c0, 0x00000100, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000040, 0x000003c0, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000004, UINT32_MAX, 0x00000040, 0x00000400, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000005, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000006, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000007, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000008, UINT32_MAX, 0x00000080, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x0000000d, 0x00000009, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000014, 0x00000000, UINT32_MAX, 0x00000001, 0x0000000f, 0x00000007, 0x00000000, 0 },
+ { 0x00000014, 0x00000001, UINT32_MAX, 0x02490002, 0x003f3fff, 0x00000000, 0x00000000, 0 },
+ { 0x00000014, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000015, 0x00000000, 0x00000000, 0x00000002, 0x0000014e, 0x00000000, 0x00000000, 0 },
+ { 0x00000016, 0x00000000, 0x00000000, 0x00000fa0, 0x00001068, 0x00000064, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000121, 0x2c100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x37692029, 0x3037362d, 0x43204b30, 0x40205550, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x302e3420, 0x7a484730, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003027, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
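+/*
+ * Reading note (not generated): the 0x8000000[2-4] rows above carry the CPUID
+ * brand string as raw little-endian EAX/EBX/ECX/EDX dwords.  A minimal
+ * decoder sketch, assuming paLeaves[] is shaped like this table and
+ * idxLeaf80000002 (hypothetical) indexes the 0x80000002 row:
+ *
+ *     char szBrand[49] = {0};
+ *     for (unsigned i = 0; i < 3; i++)
+ *         memcpy(&szBrand[i * 16], &paLeaves[idxLeaf80000002 + i].uEax, 16);
+ *     // szBrand == "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz"
+ */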
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_6700K[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xfffffffffffff000), 0), /* value=0xfff */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x12fdb`64facbdf */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x4000000000000), 0, 0), /* value=0x40000`00000000 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffff80000002ff)),
+ MVO(0x00000020, "TODO_0000_0020", UINT64_C(0xaab8e94b4b4ac1)),
+ MVO(0x00000021, "C2_UNK_0000_0021", UINT64_C(0x52d289e67f37651b)),
+ MVO(0x00000022, "TODO_0000_0022", UINT64_C(0xce7bd366cd8dc6e6)),
+ MVO(0x00000023, "TODO_0000_0023", UINT64_C(0xfd0cd1679876a91d)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x8),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40008 */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003b, "P6_UNK_0000_003b", UINT64_C(0xfff5c5f4e22b), 0, 0),
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000059, "TODO_0000_0059", 0),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000007a, "TODO_0000_007a", 0, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000080, "P4_UNK_0000_0080", 0, UINT64_C(0x7ffffffffe), UINT64_C(0xffffff8000000000)),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_MAX), /* value=0x33`00000000 */
+ MVX(0x00000095, "TODO_0000_0095", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "IA32_PLATFORM_INFO", IntelPlatformInfo), /* value=0x80838`f1012800 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffff01fffbf0)), /* value=0x1e000006 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x31814 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x693`992a0bba */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2d8`96416f36 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x1d0a, 0, 0), /* value=0x1d0a */
+ MVX(0x00000102, "I7_IB_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IB_UNK_0000_0103", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVX(0x00000104, "I7_IB_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000110, "TODO_0000_0110", 0x1),
+ MVO(0x00000118, "BBL_CR_DECC", 0),
+ MVX(0x0000011f, "TODO_0000_011f", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVO(0x00000121, "TODO_0000_0121", 0),
+ MVO(0x0000013a, "TODO_0000_013a", 0),
+ MFO(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl), /* value=0x1 */
+ MVX(0x0000013d, "TODO_0000_013d", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000140, "I7_IB_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000150, "P6_UNK_0000_0150", 0, UINT64_C(0x8000ffffffffffff), UINT64_C(0x7fff000000000000)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc0a, 0, 0), /* value=0xc0a */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xfffffffe00080000)), /* XXX: The range ended earlier than expected! */
+ MVX(0x00000188, "IA32_PERFEVTSEL2", 0, 0, UINT64_C(0xfffffffc00080000)),
+ MVX(0x00000189, "IA32_PERFEVTSEL3", 0, 0, UINT64_C(0xfffffffe00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0xf0000, 0xe0000, UINT64_C(0xffffffffffe00000)), /* value=0xf0000 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x29eb00002a00), 0, 0), /* value=0x29eb`00002a00 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2800, 0, 0), /* Might bite. value=0x2800 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88430800), UINT32_C(0xf87fa7ff), UINT64_C(0xffffffff07805000)), /* value=0x88430800 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+/// @todo WARNING: IA32_MISC_ENABLE probing needs hacking on this CPU!
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef76)), /* value=0x850089 */
+ MVO(0x000001a1, "P6_UNK_0000_01a1", 0x2858),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x641400, 0xffff00, UINT64_C(0xffffffff40000000)), /* value=0x641400 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000006000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0x800, UINT64_C(0xffffffffffbff7fe)), /* value=0x401cc0 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, IntelI7TurboRatioLimit, 0x2a2a2a2a, UINT64_MAX, 0), /* value=0x2a2a2a2a */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88370800), UINT32_C(0xf87f07ff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffc00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff003c)), /* value=0x0 */
+ MVO(0x000001da, "TODO_0000_01da", 0),
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MVX(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x88400006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xffc00800 */
+ MVO(0x000001f4, "TODO_0000_01f4", UINT32_C(0x88000006)),
+ MVO(0x000001f5, "TODO_0000_01f5", UINT64_C(0x7ffff00c00)),
+ MVO(0x000001fb, "TODO_0000_01fb", 0xe1),
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, UINT32_C(0x80000020), UINT64_C(0xffffffff20000000)), /* value=0x3c005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff8000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff8000000ff8)), /* value=0xa0000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x90000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x8c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x8a000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`fe000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x89000000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`ff000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x88800000 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff80000007ff)), /* value=0x7f`ff800800 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff80000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xffffff80000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xffffff8000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xffffff80000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000283, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x00000284, 0x00000285, "IA32_MC4_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x00000286, 0x00000289, "IA32_MC6_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x6, 0, UINT64_C(0xffffffffbfff8000)),
+ MVO(0x000002e0, "I7_SB_NO_EVICT_MODE", 0),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x000002f4, "TODO_0000_02f4", UINT32_C(0x88000000)),
+ MVO(0x000002f5, "TODO_0000_02f5", UINT64_C(0x7ffff00c00)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVX(0x00000302, "TODO_0000_0302", UINT64_C(0x1ffff020000), UINT64_C(0xfe0000fd0000), UINT64_C(0xffff00000000ffff)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x33c5, 0, 0), /* value=0x33c5 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xfc8000070000000f), UINT64_C(0x37ffff8fffffff0)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, UINT64_C(0xfc8000070000000f), UINT64_C(0x37ffff8fffffff0)), /* value=0x0 */
+ MFO(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x0 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x5 */
+ MVX(0x00000397, "I7_SB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0xfffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MVX(0x000003f7, "I7_MSR_PEBS_LD_LAT", 0x800, 0, UINT64_C(0xffffffffff8000e8)),
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x4fd`b403a690 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x8d`96b4ea78 */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000427, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000004), 0, 0), /* value=0xda0400`00000004 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x1ffffff00036dff), 0, 0), /* value=0x1ffffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x3ffff000011ff), 0, 0), /* value=0x3ffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x7004c1e7, 0, 0), /* value=0x7004c1e7 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x3727ff, 0, 0), /* value=0x3727ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2e, 0, 0), /* value=0x2e */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x1ffcff00000000), 0, 0), /* value=0x1ffcff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106334141), 0, 0), /* value=0xf01`06334141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x1ffffff00036dfb), 0, 0), /* value=0x1ffffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0x3ffff000011fb), 0, 0), /* value=0x3ffff`000011fb */
+ MFX(0x00000491, "IA32_VMX_VMFUNC", Ia32VmxVmFunc, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x000004e0, "TODO_0000_04e0", 0x5),
+ MVO(0x000004e2, "TODO_0000_04e2", 0x2),
+ MVO(0x000004e3, "TODO_0000_04e3", 0xf00),
+ MVO(0x00000500, "TODO_0000_0500", 0),
+ MVX(0x00000503, "TODO_0000_0503", 0, 0x2, UINT64_C(0xfffffffffffffffd)),
+ MVX(0x00000560, "TODO_0000_0560", 0, 0, UINT64_C(0xffffff800000007f)),
+ MVX(0x00000561, "TODO_0000_0561", 0x7f, UINT64_C(0xf0000007f), 0),
+ MVX(0x00000570, "TODO_0000_0570", 0, 0, UINT64_C(0xffffffccf887d070)),
+ MVX(0x00000571, "TODO_0000_0571", 0, UINT64_C(0xf00000007), UINT64_C(0xfffe0000ffffffc8)),
+ MVX(0x00000572, "TODO_0000_0572", 0, 0, 0xfff),
+ MVX(0x00000580, "TODO_0000_0580", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000581, "TODO_0000_0581", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000582, "TODO_0000_0582", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000583, "TODO_0000_0583", 0, 0, UINT64_C(0xffff800000000000)),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, 0, UINT64_C(0x800000007fffe000)), /* value=0x0 */
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, 0, UINT64_C(0xff80000000000000)), /* value=0x360000`00363636 */
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa0e03 */
+ MVX(0x00000607, "TODO_0000_0607", 0, 0, UINT64_C(0xffffffff60000000)),
+ MVX(0x00000608, "TODO_0000_0608", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000609, "I7_SB_UNK_0000_0609", 0, 0xc0, UINT64_C(0xffffffffffffff00)),
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x884e */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x3c6`052d9140 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, 0x8000, UINT64_C(0x7f000000ff000000)), /* value=0x42fff8`0015fff8 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x79ba094a */
+ MFO(0x00000613, "I7_SB_MSR_PKG_PERF_STATUS", IntelI7RaplPkgPerfStatus), /* value=0x1 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x2f8 */
+ MVX(0x00000615, "TODO_0000_0615", UINT32_C(0x80000000), UINT32_C(0xfffeffff), UINT64_C(0xffffffff00010000)),
+ MFX(0x00000618, "I7_SB_MSR_DRAM_POWER_LIMIT", IntelI7RaplDramPowerLimit, IntelI7RaplDramPowerLimit, 0, 0, UINT64_C(0x7f010000ff010000)), /* value=0x5400de`00000000 */
+ MFO(0x00000619, "I7_SB_MSR_DRAM_ENERGY_STATUS", IntelI7RaplDramEnergyStatus), /* value=0xf282d33 */
+ MFO(0x0000061b, "I7_SB_MSR_DRAM_PERF_STATUS", IntelI7RaplDramPerfStatus), /* value=0x0 */
+ MVO(0x0000061d, "TODO_0000_061d", UINT64_C(0x7db7e4dfa38)),
+ MVX(0x00000620, "TODO_0000_0620", 0x829, 0, UINT64_C(0xffffffffffff8080)),
+ MVO(0x00000621, "TODO_0000_0621", 0x29),
+ MVX(0x00000622, "TODO_0000_0622", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000623, "TODO_0000_0623", 0x1),
+ MVO(0x00000630, "TODO_0000_0630", 0),
+ MVO(0x00000631, "TODO_0000_0631", 0),
+ MVO(0x00000632, "TODO_0000_0632", 0),
+ MVX(0x00000633, "TODO_0000_0633", 0, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000634, "TODO_0000_0634", 0, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000635, "TODO_0000_0635", 0, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x00000636, "TODO_0000_0636", 0, 0, UINT64_C(0xffffffffffff0000)),
+ MVO(0x00000637, "TODO_0000_0637", UINT64_C(0x496ce31e72)),
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, 0, UINT64_C(0xffffffff7f000000)), /* value=0x0 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x2aa89b8a */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, 0, UINT64_C(0xffffffff7f000000)), /* value=0x0 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x0 */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ MFO(0x00000648, "I7_IB_MSR_CONFIG_TDP_NOMINAL", IntelI7IvyConfigTdpNominal), /* value=0x28 */
+ MFO(0x00000649, "I7_IB_MSR_CONFIG_TDP_LEVEL1", IntelI7IvyConfigTdpLevel1), /* value=0x0 */
+ MFO(0x0000064a, "I7_IB_MSR_CONFIG_TDP_LEVEL2", IntelI7IvyConfigTdpLevel2), /* value=0x0 */
+ MFO(0x0000064b, "I7_IB_MSR_CONFIG_TDP_CONTROL", IntelI7IvyConfigTdpControl), /* value=0x80000000 */
+ MFX(0x0000064c, "I7_IB_MSR_TURBO_ACTIVATION_RATIO", IntelI7IvyTurboActivationRatio, IntelI7IvyTurboActivationRatio, 0, 0, UINT64_C(0xffffffff7fffff00)), /* value=0x0 */
+ MVO(0x0000064d, "TODO_0000_064d", 0),
+ MVO(0x0000064e, "TODO_0000_064e", UINT64_C(0x1fdf361be5b)),
+ MVX(0x0000064f, "TODO_0000_064f", 0x4000000, UINT32_C(0xfbffffff), ~(uint64_t)UINT32_MAX),
+ MVX(0x00000652, "TODO_0000_0652", 0x2, 0, UINT64_C(0xfffffffff8000800)),
+ MVO(0x00000653, "TODO_0000_0653", 0),
+ MVO(0x00000655, "TODO_0000_0655", 0),
+ MVO(0x00000656, "TODO_0000_0656", 0),
+ MVO(0x00000658, "TODO_0000_0658", UINT64_C(0x296db63257a5)),
+ MVO(0x00000659, "TODO_0000_0659", UINT64_C(0x195cb5c8d10c)),
+ MVO(0x0000065a, "TODO_0000_065a", 0),
+ MVO(0x0000065b, "TODO_0000_065b", 0),
+ MVX(0x0000065c, "TODO_0000_065c", 0x1402f8, 0, UINT64_C(0x7ffe0000ff000000)),
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MVX(0x00000690, "TODO_0000_0690", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000691, "TODO_0000_0691", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000692, "TODO_0000_0692", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000693, "TODO_0000_0693", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000694, "TODO_0000_0694", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000695, "TODO_0000_0695", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000696, "TODO_0000_0696", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000697, "TODO_0000_0697", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000698, "TODO_0000_0698", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x00000699, "TODO_0000_0699", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069a, "TODO_0000_069a", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069b, "TODO_0000_069b", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069c, "TODO_0000_069c", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069d, "TODO_0000_069d", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069e, "TODO_0000_069e", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x0000069f, "TODO_0000_069f", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006b0, "TODO_0000_06b0", 0, UINT32_MAX, ~(uint64_t)UINT32_MAX),
+ MVX(0x000006b1, "TODO_0000_06b1", 0xc000000, UINT32_C(0xf3ffffff), ~(uint64_t)UINT32_MAX),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MVX(0x000006d0, "TODO_0000_06d0", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d1, "TODO_0000_06d1", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d2, "TODO_0000_06d2", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d3, "TODO_0000_06d3", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d4, "TODO_0000_06d4", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d5, "TODO_0000_06d5", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d6, "TODO_0000_06d6", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d7, "TODO_0000_06d7", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d8, "TODO_0000_06d8", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006d9, "TODO_0000_06d9", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006da, "TODO_0000_06da", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006db, "TODO_0000_06db", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006dc, "TODO_0000_06dc", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006dd, "TODO_0000_06dd", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006de, "TODO_0000_06de", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006df, "TODO_0000_06df", 0, 0, UINT64_C(0xffff800000000000)),
+ MFI(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline), /* value=0x0 */
+ MVX(0x00000700, "TODO_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "TODO_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "TODO_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "TODO_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "TODO_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "TODO_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "TODO_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "TODO_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "TODO_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "TODO_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "TODO_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "TODO_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "TODO_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "TODO_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "TODO_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "TODO_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "TODO_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "TODO_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "TODO_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "TODO_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "TODO_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "TODO_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "TODO_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "TODO_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "TODO_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "TODO_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "TODO_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "TODO_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "TODO_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "TODO_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "TODO_0000_0730", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "TODO_0000_0731", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "TODO_0000_0732", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "TODO_0000_0733", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "TODO_0000_0734", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "TODO_0000_0735", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "TODO_0000_0736", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "TODO_0000_0737", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "TODO_0000_0738", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "TODO_0000_0739", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "TODO_0000_0740", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "TODO_0000_0741", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "TODO_0000_0742", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "TODO_0000_0743", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "TODO_0000_0744", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "TODO_0000_0745", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "TODO_0000_0746", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "TODO_0000_0747", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "TODO_0000_0748", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "TODO_0000_0749", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000770, "TODO_0000_0770", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000771, "TODO_0000_0771", 0x109282a),
+ MVX(0x00000773, "TODO_0000_0773", 0x1, 0, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x00000774, "TODO_0000_0774", UINT64_C(0x19e7f2a2a02), 0, UINT64_C(0xfffffc0000000000)),
+ MVX(0x00000777, "TODO_0000_0777", 0, 0x5, UINT64_C(0xfffffffffffffffa)),
+ MFO(0x00000c80, "IA32_DEBUG_INTERFACE", Ia32DebugInterface), /* value=0x40000000 */
+ MVX(0x00000c8f, "TODO_0000_0c8f", 0, 0, UINT64_C(0xffffffffffffffe0)),
+ MVX(0x00000c90, "TODO_0000_0c90", UINT16_MAX, 0, UINT64_C(0xffffffff7fff0000)),
+ MVX(0x00000c91, "TODO_0000_0c91", 0xf, 0, UINT64_C(0xffffffff7fff0000)),
+ MVX(0x00000c92, "TODO_0000_0c92", 0x3ff, 0, UINT64_C(0xffffffff7fff0000)),
+ MVX(0x00000c93, "TODO_0000_0c93", 0xfff, 0, UINT64_C(0xffffffff7fff0000)),
+ MVX(0x00000d90, "TODO_0000_0d90", 0, 0, UINT64_C(0xffff800000000ffc)),
+ MVX(0x00000da0, "TODO_0000_0da0", 0, 0, UINT64_C(0xfffffffffffffeff)),
+ MVX(0x00000db0, "TODO_0000_0db0", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000db1, "TODO_0000_0db1", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MVO(0x00000db2, "TODO_0000_0db2", 0),
+ MVX(0x00000dc0, "TODO_0000_0dc0", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc1, "TODO_0000_0dc1", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc2, "TODO_0000_0dc2", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc3, "TODO_0000_0dc3", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc4, "TODO_0000_0dc4", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc5, "TODO_0000_0dc5", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc6, "TODO_0000_0dc6", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc7, "TODO_0000_0dc7", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc8, "TODO_0000_0dc8", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dc9, "TODO_0000_0dc9", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dca, "TODO_0000_0dca", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dcb, "TODO_0000_0dcb", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dcc, "TODO_0000_0dcc", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dcd, "TODO_0000_0dcd", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dce, "TODO_0000_0dce", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dcf, "TODO_0000_0dcf", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd0, "TODO_0000_0dd0", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd1, "TODO_0000_0dd1", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd2, "TODO_0000_0dd2", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd3, "TODO_0000_0dd3", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd4, "TODO_0000_0dd4", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd5, "TODO_0000_0dd5", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd6, "TODO_0000_0dd6", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd7, "TODO_0000_0dd7", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd8, "TODO_0000_0dd8", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dd9, "TODO_0000_0dd9", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dda, "TODO_0000_0dda", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000ddb, "TODO_0000_0ddb", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000ddc, "TODO_0000_0ddc", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000ddd, "TODO_0000_0ddd", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000dde, "TODO_0000_0dde", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000ddf, "TODO_0000_0ddf", 0, 0, UINT64_C(0x1fffffffffff0000)),
+ MVX(0x00000e01, "TODO_0000_0e01", 0, 0, UINT64_C(0xffffffff1fffffe0)),
+ MVX(0x00000e02, "TODO_0000_0e02", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff801`a09745c0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff801`a0974300 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x9a90000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffd000`c5800000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff7`09a8e000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x2 */
+};
+#endif /* !CPUM_DB_STANDALONE */
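+/*
+ * Reading note (not generated): for the 0x200-0x213 variable MTRR pairs in
+ * the table above, the PHYS_BASE low byte is the memory type (0 = UC,
+ * 6 = WB) and PHYS_MASK bit 11 is the valid bit.  E.g. BASE0 0xc0000000
+ * (type 0) with MASK0 0x7f`c0000800 (mask bits 38:30) marks the 1GB range at
+ * 3GB uncacheable -- most likely the PCI MMIO hole on this host.
+ */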
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_6700K =
+{
+ /*.pszName = */ "Intel Core i7-6700K",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 94,
+ /*.uStepping = */ 3,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_Skylake,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 39,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_6700K),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_6700K)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x00000fa0, 0x00001068, 0x00000064, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_6700K)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_6700K),
+};
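+/*
+ * Reading note (not generated): uFamily/uModel/uStepping above decode from
+ * the leaf 0x00000001 EAX value 0x000506e3 in the CPUID table: stepping =
+ * EAX[3:0] = 3, model = (ext-model 5 << 4) | 0xe = 94, family = EAX[11:8] =
+ * 6 (the ext-family field is only added in when family == 0xf).
+ */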
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_6700K_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h
new file mode 100644
index 00000000..50e08a62
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h
@@ -0,0 +1,287 @@
+/* $Id: Intel_Pentium_4_3_00GHz.h $ */
+/** @file
+ * CPU database entry "Intel Pentium 4 3.00GHz".
+ * Generated at 2013-12-18T06:37:54Z by VBoxCpuReport v4.3.53r91376 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Pentium_4_3_00GHz_h
+#define VBOX_CPUDB_Intel_Pentium_4_3_00GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Pentium(R) 4 CPU 3.00GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000005, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000f43, 0x00020800, 0x0000649d, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x605b5001, 0x00000000, 0x00000000, 0x007d7040, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x00004121, 0x01c0003f, 0x0000001f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x00004143, 0x01c0103f, 0x000007ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x6e492020, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x286c6574, 0x50202952, 0x69746e65, 0x52286d75, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20342029, 0x20555043, 0x30302e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
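+/*
+ * Reading note (not generated): leaf 0 EAX = 0x00000005 caps the standard
+ * CPUID range at leaf 5, which is why this table jumps straight from leaf
+ * 0x00000005 to the extended 0x80000000 range.
+ */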
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Pentium(R) 4 CPU 3.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Pentium_4_3_00GHz[] =
+{
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0xc55df88 */
+ MFO(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0xbe000300`1008081f */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x1ac`2077a134 */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x12000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0x600, UINT64_C(0xffffff00000000ff)),
+ MFX(0x0000002a, "P4_EBC_HARD_POWERON", IntelP4EbcHardPowerOn, IntelP4EbcHardPowerOn, 0, UINT64_MAX, 0), /* value=0x0 */
+ MFX(0x0000002b, "P4_EBC_SOFT_POWERON", IntelP4EbcSoftPowerOn, IntelP4EbcSoftPowerOn, 0x7e, UINT64_C(0xffffffffffffff80), 0), /* value=0x7e */
+ MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0), /* value=0xf12010f */
+ MVX(0x00000039, "C2_UNK_0000_0039", 0x1, 0x1f, ~(uint64_t)UINT32_MAX),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x00000080, "P4_UNK_0000_0080", 0, ~(uint64_t)UINT32_MAX, UINT32_MAX),
+ MFX(0x0000008b, "IA32_BIOS_SIGN_ID", Ia32BiosSignId, Ia32BiosSignId, 0, UINT32_MAX, 0), /* value=0x5`00000000 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000119, "BBL_CR_CTL", IntelBblCrCtl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x180204, 0, 0), /* value=0x180204 */
+ MFN(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus), /* value=0x0 */
+ MVX(0x00000180, "MSR_MCG_RAX", 0, 0, UINT64_MAX),
+ MVX(0x00000181, "MSR_MCG_RBX", 0, 0, UINT64_MAX),
+ MVX(0x00000182, "MSR_MCG_RCX", 0, 0, UINT64_MAX),
+ MVX(0x00000183, "MSR_MCG_RDX", 0, 0, UINT64_MAX),
+ MVX(0x00000184, "MSR_MCG_RSI", 0, 0, UINT64_MAX),
+ MVX(0x00000185, "MSR_MCG_RDI", 0, 0, UINT64_MAX),
+ MFX(0x00000186, "MSR_MCG_RBP", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000187, "MSR_MCG_RSP", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_MAX), /* value=0x0 */
+ MVX(0x00000188, "MSR_MCG_RFLAGS", 0, 0, UINT64_MAX),
+ MVX(0x00000189, "MSR_MCG_RIP", 0, 0, UINT64_MAX),
+ MVX(0x0000018a, "MSR_MCG_MISC", 0, 0, UINT64_MAX),
+ MVX(0x0000018b, "MSR_MCG_RESERVED1", 0, 0, UINT64_MAX),
+ MVX(0x0000018c, "MSR_MCG_RESERVED2", 0, 0, UINT64_MAX),
+ MVX(0x0000018d, "MSR_MCG_RESERVED3", 0, 0, UINT64_MAX),
+ MVX(0x0000018e, "MSR_MCG_RESERVED4", 0, 0, UINT64_MAX),
+ MVX(0x0000018f, "MSR_MCG_RESERVED5", 0, 0, UINT64_MAX),
+ MVX(0x00000190, "MSR_MCG_R8", 0, 0, UINT64_MAX),
+ MVX(0x00000191, "MSR_MCG_R9", 0, 0, UINT64_MAX),
+ MVX(0x00000192, "MSR_MCG_R10", 0, 0, UINT64_MAX),
+ MVX(0x00000193, "MSR_MCG_R11", 0, 0, UINT64_MAX),
+ MVX(0x00000194, "MSR_MCG_R12", 0, 0, UINT64_MAX),
+ MVX(0x00000195, "MSR_MCG_R13", 0, 0, UINT64_MAX),
+ MVX(0x00000196, "MSR_MCG_R14", 0, 0, UINT64_MAX),
+ MVX(0x00000197, "MSR_MCG_R15", 0, 0, UINT64_MAX),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, Ia32PerfStatus, UINT64_C(0xf2d00000f2d), UINT64_MAX, 0), /* value=0xf2d`00000f2d */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0xf2d, 0, 0), /* Might bite. value=0xf2d */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, UINT64_C(0xffffffffffffffe1), 0), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0, UINT64_C(0xfffffffffffffff5), 0), /* value=0x0 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0xe2d, 0, 0), /* value=0xe2d */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x0000019f, "P6_UNK_0000_019f", UINT64_C(0x32050500000101), UINT64_C(0xff000000fff0c0c0), 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x22850089, 0x20800080, UINT64_C(0xfffffffbdc10f800)), /* value=0x22850089 */
+ MVX(0x000001a1, "MSR_PLATFORM_BRV", 0, UINT64_C(0xfffffffffffcc0c0), 0),
+ MFX(0x000001a2, "P4_UNK_0000_01a2", IntelI7TemperatureTarget, ReadOnly, 0x61048, 0, 0), /* value=0x61048 */
+ MFO(0x000001d7, "MSR_LER_FROM_LIP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001d8, "MSR_LER_TO_LIP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffff80)), /* value=0x0 */
+ MFX(0x000001da, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffff0), 0), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x3f600000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x3f800000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`ff800800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVX(0x00000300, "P4_MSR_BPU_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000301, "P4_MSR_BPU_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000302, "P4_MSR_BPU_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000303, "P4_MSR_BPU_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000304, "P4_MSR_MS_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000305, "P4_MSR_MS_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000306, "P4_MSR_MS_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000307, "P4_MSR_MS_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000308, "P4_MSR_FLAME_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000309, "P4_MSR_FLAME_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030a, "P4_MSR_FLAME_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030b, "P4_MSR_FLAME_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030c, "P4_MSR_IQ_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030d, "P4_MSR_IQ_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030e, "P4_MSR_IQ_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030f, "P4_MSR_IQ_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000310, "P4_MSR_IQ_COUNTER4", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000311, "P4_MSR_IQ_COUNTER5", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000360, "P4_MSR_BPU_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000361, "P4_MSR_BPU_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000362, "P4_MSR_BPU_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000363, "P4_MSR_BPU_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000364, "P4_MSR_MS_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000365, "P4_MSR_MS_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000366, "P4_MSR_MS_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000367, "P4_MSR_MS_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000368, "P4_MSR_FLAME_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000369, "P4_MSR_FLAME_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036a, "P4_MSR_FLAME_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036b, "P4_MSR_FLAME_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036c, "P4_MSR_IQ_CCCR0", 0, UINT64_C(0xffffffff000007ff), 0),
+ MVX(0x0000036d, "P4_MSR_IQ_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036e, "P4_MSR_IQ_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036f, "P4_MSR_IQ_CCCR3", 0, UINT64_C(0xffffffff000007ff), 0),
+ MVX(0x00000370, "P4_MSR_IQ_CCCR4", 0, UINT64_C(0xffffffff000000ff), 0),
+ MVX(0x00000371, "P4_MSR_IQ_CCCR5", 0, UINT64_C(0xffffffff000000ff), 0),
+ MVX(0x000003a0, "P4_MSR_BSU_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a1, "P4_MSR_BSU_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a2, "P4_MSR_FSB_ESCR0", 0, UINT64_C(0xffffffff40000000), UINT32_C(0x80000000)),
+ MVX(0x000003a3, "P4_MSR_FSB_ESCR1", 0, UINT64_C(0xffffffff40000000), UINT32_C(0x80000000)),
+ MVX(0x000003a4, "P4_MSR_FIRM_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a5, "P4_MSR_FIRM_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a6, "P4_MSR_FLAME_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a7, "P4_MSR_FLAME_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a8, "P4_MSR_DAC_ESCR0", 0, UINT64_C(0xffffffff61fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003a9, "P4_MSR_DAC_ESCR1", 0, UINT64_C(0xffffffff61fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003aa, "P4_MSR_MOB_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ab, "P4_MSR_MOB_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ac, "P4_MSR_PMH_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ad, "P4_MSR_PMH_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ae, "P4_MSR_SAAT_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003af, "P4_MSR_SAAT_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003b0, "P4_MSR_U2L_ESCR0", 0, UINT64_C(0xffffffff71c001f0), UINT32_C(0x80000000)),
+ MVX(0x000003b1, "P4_MSR_U2L_ESCR1", 0, UINT64_C(0xffffffff71c001f0), UINT32_C(0x80000000)),
+ MVX(0x000003b2, "P4_MSR_BPU_ESCR0", 0, UINT64_C(0xffffffff61fc0000), UINT32_C(0x80000000)),
+ MVX(0x000003b3, "P4_MSR_BPU_ESCR1", 0, UINT64_C(0xffffffff61fc0000), UINT32_C(0x80000000)),
+ MVX(0x000003b4, "P4_MSR_IS_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b5, "P4_MSR_IS_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b6, "P4_MSR_ITLB_ESCR0", 0, UINT64_C(0xffffffff0ffff1e0), UINT32_C(0x80000000)),
+ MVX(0x000003b7, "P4_MSR_ITLB_ESCR1", 0, UINT64_C(0xffffffff0ffff1e0), UINT32_C(0x80000000)),
+ MVX(0x000003b8, "P4_MSR_CRU_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b9, "P4_MSR_CRU_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003ba, "P4_MSR_IQ_ESCR0", 0, UINT64_C(0xffffffff7fffffff), UINT32_C(0x80000000)),
+ MVX(0x000003bb, "P4_MSR_IQ_ESCR1", 0, UINT64_C(0xffffffff7fffffff), UINT32_C(0x80000000)),
+ MVX(0x000003bc, "P4_MSR_RAT_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003bd, "P4_MSR_RAT_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003be, "P4_MSR_SSU_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003c0, "P4_MSR_MS_ESCR0", 0, UINT64_C(0xffffffff61ff81e0), UINT32_C(0x80000000)),
+ MVX(0x000003c1, "P4_MSR_MS_ESCR1", 0, UINT64_C(0xffffffff61ff81e0), UINT32_C(0x80000000)),
+ MVX(0x000003c2, "P4_MSR_TBPU_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003c3, "P4_MSR_TBPU_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003c4, "P4_MSR_TC_ESCR0", 0, UINT64_C(0xffffffff61f801f0), UINT32_C(0x80000000)),
+ MVX(0x000003c5, "P4_MSR_TC_ESCR1", 0, UINT64_C(0xffffffff61f801f0), UINT32_C(0x80000000)),
+ MVX(0x000003c8, "P4_MSR_IX_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003c9, "P4_MSR_IX_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003ca, "P4_MSR_ALF_ESCR0", 0, UINT64_C(0xffffffff700001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cb, "P4_MSR_ALF_ESCR1", 0, UINT64_C(0xffffffff700001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cc, "P4_MSR_CRU_ESCR2", 0, UINT64_C(0xffffffff61f001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cd, "P4_MSR_CRU_ESCR3", 0, UINT64_C(0xffffffff61f001f0), UINT32_C(0x80000000)),
+ MVX(0x000003e0, "P4_MSR_CRU_ESCR4", 0, UINT64_C(0xffffffff71ff01f0), UINT32_C(0x80000000)),
+ MVX(0x000003e1, "P4_MSR_CRU_ESCR5", 0, UINT64_C(0xffffffff71ff01f0), UINT32_C(0x80000000)),
+ MVX(0x000003f0, "P4_MSR_TC_PRECISE_EVENT", 0xfc00, UINT64_C(0xfffffffffffc001f), 0),
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, UINT64_C(0xfffffffff8000000), 0), /* value=0x0 */
+ MVX(0x000003f2, "P4_MSR_PEBS_MATRIX_VERT", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0x000003f5, "P4_UNK_0000_03f5", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x000003f6, "P4_UNK_0000_03f6", 0, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0x000003f7, "P4_UNK_0000_03f7", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0x000003f8, "P4_UNK_0000_03f8", 0, UINT64_C(0xffffff000000003f), 0),
+ RFN(0x00000400, 0x0000040f, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`654efdc0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`654efb00 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xeed1e000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`009bf000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7f7`eed1c000 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Pentium(R) 4 CPU 3.00GHz.
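+ * This header is pulled in by CPUMR3Db.cpp, which aggregates all cpus/*.h
+ * entries into the CPU database.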
+ */
+static CPUMDBENTRY const g_Entry_Intel_Pentium_4_3_00GHz =
+{
+ /*.pszName = */ "Intel Pentium 4 3.00GHz",
+ /*.pszFullName = */ "Intel(R) Pentium(R) 4 CPU 3.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 3,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_NB_Prescott2M,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x00000040, 0x00000040, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Pentium_4_3_00GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Pentium_4_3_00GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Pentium_4_3_00GHz_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h
new file mode 100644
index 00000000..6a931741
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h
@@ -0,0 +1,226 @@
+/* $Id: Intel_Pentium_M_processor_2_00GHz.h $ */
+/** @file
+ * CPU database entry "Intel Pentium M processor 2.00GHz".
+ * Generated at 2013-12-09T14:18:00Z by VBoxCpuReport v4.3.51r91027 on win.x86.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz_h
+#define VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Pentium(R) M processor 2.00GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000002, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006d6, 0x00000816, 0x00000180, 0xafe9f9bf, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x02b3b001, 0x000000f0, 0x00000000, 0x2c04307d, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000004, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x65746e49, 0x2952286c, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x6e655020, 0x6d756974, 0x20295228, 0x7270204d, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x7365636f, 0x20726f73, 0x30302e32, 0x007a4847, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Pentium(R) M processor 2.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz[] =
+{
+ MFI(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x0 */
+ MFI(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0x0 */
+ MFX(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x22`4d44782e */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x140000d0248a28)),
+ MVX(0x00000018, "P6_UNK_0000_0018", 0, 0, 0),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00100), UINT64_C(0xffffffff00000600), 0xff),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x45080000, UINT64_C(0xfffffffffff7ff7e), 0), /* value=0x45080000 */
+ MVX(0x0000002f, "P6_UNK_0000_002f", 0, UINT64_C(0xfffffffffffffff5), 0),
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xfffffffffffe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff40000000), 0),
+ MVX(0x00000034, "P6_UNK_0000_0034", 0x77ff, ~(uint64_t)UINT32_MAX, UINT32_C(0xfff80000)),
+ MVO(0x00000035, "P6_UNK_0000_0035", 0x300008),
+ MVX(0x0000003b, "P6_UNK_0000_003b", 0, UINT64_C(0xafffffffe), UINT64_C(0xfffffff500000001)),
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0x4),
+ RFN(0x00000040, 0x00000047, "MSR_LASTBRANCH_n", IntelLastBranchFromToN, ReadOnly),
+ MVX(0x0000004a, "P6_UNK_0000_004a", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004b, "P6_UNK_0000_004b", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004c, "P6_UNK_0000_004c", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004d, "P6_UNK_0000_004d", 0, 0, 0), /* value=0xeb1cffbf`8918200a */
+ MVX(0x0000004e, "P6_UNK_0000_004e", 0, 0, 0), /* value=0x8204c60a`e8009512 */
+ MVX(0x0000004f, "P6_UNK_0000_004f", 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVI(0x00000050, "P6_UNK_0000_0050", 0), /* Villain? value=0x0 */
+ MVI(0x00000051, "P6_UNK_0000_0051", 0), /* Villain? value=0x0 */
+ MVI(0x00000052, "P6_UNK_0000_0052", 0), /* Villain? value=0x0 */
+ MVI(0x00000053, "P6_UNK_0000_0053", 0), /* Villain? value=0x0 */
+ MVI(0x00000054, "P6_UNK_0000_0054", 0), /* Villain? value=0x0 */
+ MVX(0x0000006c, "P6_UNK_0000_006c", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006d, "P6_UNK_0000_006d", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006e, "P6_UNK_0000_006e", 0, UINT64_C(0xffffffff00000082), 0),
+ MVO(0x0000006f, "P6_UNK_0000_006f", 0xadb),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x00000088, "BBL_CR_D0", 0, 0, 0), /* value=0xfcaeffff`d779fd3e */
+ MVX(0x00000089, "BBL_CR_D1", 0, 0, 0), /* value=0xefffbcb7`ff77fbef */
+ MVX(0x0000008a, "BBL_CR_D2", 0, 0, 0), /* value=0xdfff3f2f`fb367d9f */
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1800000000), 0, 0),
+ MVX(0x0000008c, "P6_UNK_0000_008c", 0, 0, 0), /* value=0xeffff3ff`ef39bfff */
+ MVX(0x0000008d, "P6_UNK_0000_008d", 0, 0, 0), /* value=0xf773adfb`ef3ff3fc */
+ MVX(0x0000008e, "P6_UNK_0000_008e", 0, 0, 0), /* value=0xfeb7f6ff`ebbffeff */
+ MVX(0x0000008f, "P6_UNK_0000_008f", 0, 0, 0), /* value=0xd6ffb7af`ffad9e7e */
+ MVX(0x00000090, "P6_UNK_0000_0090", 0, UINT64_C(0xfffffffffffffffa), 0), /* value=0x9ebdb4b5 */
+ MVX(0x000000ae, "P6_UNK_0000_00ae", UINT64_C(0x1000000000007efc), 0, 0),
+ MFX(0x000000c1, "IA32_PMC0", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c2, "IA32_PMC1", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x5a000000ac000000)),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0),
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x2812140000000000)),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVX(0x00000116, "BBL_CR_ADDR", UINT32_C(0xfe7efff0), UINT64_C(0xffffffff0000000f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", UINT64_C(0xc0000000c1ae9fda), UINT64_C(0xfffffff00000000), 0),
+ MFX(0x00000119, "BBL_CR_CTL", IntelBblCrCtl, IntelBblCrCtl, 0x8, UINT64_C(0xffffffffc00001ff), 0), /* value=0x8 */
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0x34272b, UINT64_C(0xfffffffffffbfc1f), 0), /* value=0x34272b */
+ MVI(0x00000131, "P6_UNK_0000_0131", 0),
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0xd31f40, UINT64_C(0xfffffffff000008f), 0),
+ MVI(0x0000014f, "P6_UNK_0000_014f", 0xd31f40),
+ MVX(0x00000150, "P6_UNK_0000_0150", 0, UINT64_C(0xffffffffdfffe07f), 0x20000000),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x3c531fc6, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00000154, "P6_UNK_0000_0154", 0),
+ MVX(0x0000015b, "P6_UNK_0000_015b", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xf78af000 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x804de6f0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x5, 0, 0), /* value=0x5 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0, UINT64_C(0xfffffffffffee0c0), 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x612142806000612), 0, 0), /* value=0x6121428`06000612 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x612, 0, 0), /* Might bite. value=0x612 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0, UINT64_C(0xfffffffffffffffd), 0), /* value=0x0 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, Ia32Therm2Ctl, 0x10612, UINT64_C(0xfffffffffffee0c0), 0), /* value=0x10612 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x111088, UINT64_C(0xffffffff001ffb77), 0), /* value=0x111088 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", 0x3, UINT64_C(0xfffffffffffffffc), 0),
+ MVX(0x000001ae, "P6_UNK_0000_01ae", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x000001af, "P6_UNK_0000_01af", 0x3ff, UINT64_C(0xfffffffffffffc00), 0),
+ MVO(0x000001c9, "TODO_0000_01c9", 0x8000000),
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffc200), 0), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xaad05fa1 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xaad06480 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x7dba1245 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x806f5d54 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x40000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x5ff80000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`fff80800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0x00001000, "P6_DEBUG_REGISTER_0", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001001, "P6_DEBUG_REGISTER_1", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001002, "P6_DEBUG_REGISTER_2", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001003, "P6_DEBUG_REGISTER_3", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001004, "P6_DEBUG_REGISTER_4", UINT32_C(0xffff0ff0), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001005, "P6_DEBUG_REGISTER_5", 0x400, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001006, "P6_DEBUG_REGISTER_6", UINT32_C(0xffff0ff0)), /* Villain? */
+ MVI(0x00001007, "P6_DEBUG_REGISTER_7", 0x400), /* Villain? */
+ MVO(0x0000103f, "P6_UNK_0000_103f", 0x4),
+ MVO(0x000010cd, "P6_UNK_0000_10cd", 0),
+ MFW(0x00002000, "P6_CR0", IntelP6CrN, IntelP6CrN, UINT64_C(0xffffffff00000010)), /* value=0x8001003b */
+ MFX(0x00002002, "P6_CR2", IntelP6CrN, IntelP6CrN, 0x2, ~(uint64_t)UINT32_MAX, 0), /* value=0xc30000 */
+ MFX(0x00002003, "P6_CR3", IntelP6CrN, IntelP6CrN, 0x3, ~(uint64_t)UINT32_MAX, 0), /* value=0x29765000 */
+ MFX(0x00002004, "P6_CR4", IntelP6CrN, IntelP6CrN, 0x4, ~(uint64_t)UINT32_MAX, 0), /* value=0x6d9 */
+ MVO(0x0000203f, "P6_UNK_0000_203f", 0x4),
+ MVO(0x000020cd, "P6_UNK_0000_20cd", 0),
+ MVO(0x0000303f, "P6_UNK_0000_303f", 0x4),
+ MVO(0x000030cd, "P6_UNK_0000_30cd", 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Pentium(R) M processor 2.00GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Pentium_M_processor_2_00GHz =
+{
+ /*.pszName = */ "Intel Pentium M processor 2.00GHz",
+ /*.pszFullName = */ "Intel(R) Pentium(R) M processor 2.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 13,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_P6_M_Dothan,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 32,
+ /*.fMxCsrMask = */ 0xffbf, ///< @todo check this
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x02b3b001, 0x000000f0, 0x00000000, 0x2c04307d },
+ /*.fMsrMask = */ UINT32_C(0x3fff),
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_N3530_2_16GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_N3530_2_16GHz.h
new file mode 100644
index 00000000..cf79e50a
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_N3530_2_16GHz.h
@@ -0,0 +1,275 @@
+/* $Id: Intel_Pentium_N3530_2_16GHz.h $ */
+/** @file
+ * CPU database entry "Intel Pentium N3530 2.16GHz".
+ * Generated at 2016-04-29T13:34:27Z by VBoxCpuReport v5.0.51r106929 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h
+#define VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Pentium(R) CPU N3530 @ 2.16GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Pentium_N3530_2_16GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000b, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00030678, 0x02100800, 0x41d8e3bf, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x61b3a001, 0x0000ffc2, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c000121, 0x0140003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c000122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c00c143, 0x03c0003f, 0x000003ff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x33000020, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000007, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x00002282, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280203, 0x00000000, 0x00000000, 0x00004503, 0 },
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000001, 0x00000001, 0x00000100, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000004, 0x00000004, 0x00000201, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000002, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000101, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x6e492020, 0x286c6574, 0x50202952, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x69746e65, 0x52286d75, 0x50432029, 0x4e202055, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30333533, 0x20402020, 0x36312e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Pentium(R) CPU N3530 @ 2.16GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Pentium_N3530_2_16GHz[] =
+{
+ MFI(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x0 */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x4c5e`43033c62 */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0xc000090341f52), 0, 0), /* value=0xc0000`90341f52 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xfffffff0000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x40080000, UINT32_C(0xfff7ffff), ~(uint64_t)UINT32_MAX), /* value=0x40080000 */
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MFO(0x00000034, "MSR_SMI_COUNT", IntelI7SmiCount), /* value=0xa */
+ MVO(0x00000039, "C2_UNK_0000_0039", 0x2),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003b, "P6_UNK_0000_003b", UINT64_C(0x4c27f41f3066), 0x800, 0),
+ RFN(0x00000040, 0x00000047, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ RFN(0x00000060, 0x00000067, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_MAX), /* value=0x809`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFI(0x000000c7, "IA32_PMC6", Ia32PmcN), /* value=0x36c9 */
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x000000ce, "IA32_PLATFORM_INFO", UINT64_C(0x60000001a00)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffffc073f0)), /* value=0x1a000f */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xffffffffff800000)), /* value=0x20000 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0x80031838)),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x1f8f8 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x9875 */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT32_C(0xefc5ffff), UINT64_C(0xffffffff10000000)), /* value=0x2380002 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0x7e2801ff, UINT32_C(0xfe83f8ff), UINT64_C(0xffffffff00400600)), /* value=0x7e2801ff */
+ MVX(0x00000120, "SILV_UNK_0000_0120", 0x44, 0x40, UINT64_C(0xffffffffffffff33)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x806, 0, 0), /* value=0x806 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, ~(uint64_t)UINT32_MAX),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, UINT32_C(0xfffec080), ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x880000001f52), 0, 0), /* value=0x8800`00001f52 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x1f52, 0, 0), /* Might bite. value=0x1f52 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe1)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0xcbb700, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0xcbb700 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88420100), UINT32_C(0xfffff555), ~(uint64_t)UINT32_MAX), /* value=0x88420100 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x623, 0, 0), /* value=0x623 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT32_MAX, ~(uint64_t)UINT32_MAX),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef76)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x690000, 0xff0000, UINT64_C(0xffffffffc000ffff)), /* value=0x690000 */
+ MFX(0x000001a6, "I7_MSR_OFFCORE_RSP_0", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffff897ffa0000)), /* XXX: The range ended earlier than expected! */
+ MFX(0x000001a7, "I7_MSR_OFFCORE_RSP_1", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc97ffa0000)), /* value=0x0 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, IntelI7TurboRatioLimit, 0, UINT64_C(0x3f3f3f3f00000000), UINT64_C(0xc0c0c0c0c0c0c0c0)), /* value=0x0 */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0x200, UINT64_C(0xfffffffffffffc00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffa03c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0x0 */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0x0 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x7a000006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff800800 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0, UINT64_C(0xfffffffffffffffd)), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xffc00005 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ffc00800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xffb80000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fff80800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x7c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x7b000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x7ae00000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x1`00000006 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffffff0000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x32c1, 0, 0), /* value=0x32c1 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffffc)), /* value=0x3 */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xc000000700000003), UINT64_C(0x3ffffff8fffffffc)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x0 */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x80000000`0000ad5b */
+ MFX(0x000003fd, "I7_MSR_CORE_C6_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX), /* value=0x5`51eddedc */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0xda040000000002), 0, 0), /* value=0xda0400`00000002 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x481e6, 0, 0), /* value=0x481e6 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x1027ff, 0, 0), /* value=0x1027ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2e, 0, 0), /* value=0x2e */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x28ef00000000), 0, 0), /* value=0x28ef`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106114141), 0, 0), /* value=0xf01`06114141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ MFX(0x00000491, "IA32_VMX_VMFUNC", Ia32VmxVmFunc, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ RSN(0x000004c1, 0x000004c2, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffffff0000000000)),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT64_C(0xc00000007fffe000), 0), /* value=0x0 */
+ MFX(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit, IntelI7SandyRaplPowerUnit, 0x505, 0, UINT64_C(0xfffffffffff0e0f0)), /* value=0x505 */
+ MFN(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency, IntelI7SandyPkgC2Residency), /* value=0x0 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0x3880fa, 0x8000, UINT64_C(0xff000000ff000000)), /* value=0x3880fa */
+ MFX(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus, ReadOnly, 0x21823a, 0, 0), /* value=0x21823a */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0x20000, 0, UINT64_C(0xffffffffff000000)), /* value=0x20000 */
+ MFX(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus, ReadOnly, 0x792fa, 0, 0), /* value=0x792fa */
+ MFO(0x00000660, "SILV_CORE_C1_RESIDENCY", IntelAtSilvCoreC1Recidency), /* value=0x22`70ff1790 */
+ MVO(0x00000661, "SILV_UNK_0000_0661", 0),
+ MVO(0x00000662, "SILV_UNK_0000_0662", 0),
+ MVO(0x00000663, "SILV_UNK_0000_0663", 0),
+ MVO(0x00000664, "SILV_UNK_0000_0664", 0),
+ MVO(0x00000665, "SILV_UNK_0000_0665", 0),
+ MVO(0x00000666, "SILV_UNK_0000_0666", 0),
+ MVO(0x00000667, "SILV_UNK_0000_0667", 0x9),
+ MVX(0x00000668, "SILV_UNK_0000_0668", 0x13130f0b, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000669, "SILV_UNK_0000_0669", 0x1010f20, 0, ~(uint64_t)UINT32_MAX),
+ MVO(0x0000066a, "SILV_UNK_0000_066a", 0x1a0602),
+ MVO(0x0000066b, "SILV_UNK_0000_066b", 0x442323),
+ MVO(0x0000066c, "SILV_UNK_0000_066c", 0x1f1f1f1f),
+ MVO(0x0000066d, "SILV_UNK_0000_066d", 0x52525252),
+ MVX(0x0000066e, "SILV_UNK_0000_066e", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x0000066f, "SILV_UNK_0000_066f", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000670, "SILV_UNK_0000_0670", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000671, "SILV_UNK_0000_0671", 0, 0, UINT64_C(0xffffffff000000c0)),
+ MVX(0x00000672, "SILV_UNK_0000_0672", 0, 0, UINT64_C(0xffffffffc0000000)),
+ MVX(0x00000673, "SILV_UNK_0000_0673", 0x205, 0, UINT64_C(0xffffffffffffc000)),
+ MVX(0x00000674, "SILV_UNK_0000_0674", 0x4050006, 0, UINT64_C(0xfffffffff8000000)),
+ MVX(0x00000675, "SILV_UNK_0000_0675", 0x27, 0x20, UINT64_C(0xffffffffffffffc0)),
+ MVX(0x00000676, "SILV_UNK_0000_0676", 0, UINT64_C(0x7f7f7f7f00000000), UINT64_C(0x8080808080808080)),
+ MVX(0x00000677, "SILV_UNK_0000_0677", 0, 0, ~(uint64_t)UINT32_MAX),
+ MFI(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline), /* value=0x0 */
+ MVX(0x00000768, "SILV_UNK_0000_0768", 0, 0, UINT64_C(0xffffffffffff0060)),
+ MVX(0x00000769, "SILV_UNK_0000_0769", 0, 0x6, UINT64_C(0xfffffffffffffff0)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff802`6e9de200 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff802`6e9ddf40 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x9b440000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffd000`20661000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff7`9b43e000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Pentium(R) CPU N3530 @ 2.16GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Pentium_N3530_2_16GHz =
+{
+ /*.pszName = */ "Intel Pentium N3530 2.16GHz",
+ /*.pszFullName = */ "Intel(R) Pentium(R) CPU N3530 @ 2.16GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 55,
+ /*.uStepping = */ 8,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Atom_Silvermont,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_267MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Pentium_N3530_2_16GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Pentium_N3530_2_16GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000001, 0x00000001, 0x00000100, 0x00000004 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Pentium_N3530_2_16GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Pentium_N3530_2_16GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h */
+
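Editor's note: the header above only defines data. For the entry to take effect, CPUMR3Db.cpp includes the header and lists its g_Entry_* symbol in the table of known CPUs. A minimal sketch of that hookup, assuming the table is named g_apCpumDbEntries and that the include path mirrors the file's location (both assumptions, shown for orientation only):

/* Sketch, not the actual CPUMR3Db.cpp contents; table name assumed. */
#include "cpus/Intel_Pentium_N3530_2_16GHz.h"

static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#ifdef VBOX_CPUDB_Intel_Pentium_N3530_2_16GHz_h
    &g_Entry_Intel_Pentium_N3530_2_16GHz,
#endif
};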
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h
new file mode 100644
index 00000000..b06b039f
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h
@@ -0,0 +1,258 @@
+/* $Id: Intel_Xeon_X5482_3_20GHz.h $ */
+/** @file
+ * CPU database entry "Intel Xeon X5482 3.20GHz".
+ * Generated at 2013-12-16T12:10:52Z by VBoxCpuReport v4.3.53r91299 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz_h
+#define VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00010676, 0x04040800, 0x000ce3bd, 0xbfebfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x05b0b101, 0x005657f0, 0x00000000, 0x2cb4304e, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x0c000121, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x0c000122, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x0c004143, 0x05c0003f, 0x00000fff, 0x00000001, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00002220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000400, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280202, 0x00000000, 0x00000000, 0x00000503, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x6f655820, 0x2952286e, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x55504320, 0x20202020, 0x20202020, 0x58202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x32383435, 0x20402020, 0x30322e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x18008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003026, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Xeon_X5482_3_20GHz[] =
+{
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x610010 */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x1358`d28c2c60 */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x18000088e40822)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffffc0000006ff)),
+ MVX(0x00000021, "C2_UNK_0000_0021", 0, 0, UINT64_C(0xffffffffffffffc0)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, UINT32_C(0xc2383400), UINT64_C(0xffffffffdff7df00), 0), /* value=0xc2383400 */
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xffffffff01fe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff7fffffff), 0),
+ MVO(0x00000039, "C2_UNK_0000_0039", 0x7),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0),
+ RFN(0x00000040, 0x00000043, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ RFN(0x00000060, 0x00000063, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x60b00000000), UINT32_MAX, 0),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000a8, "C2_EMTTM_CR_TABLES_0", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000a9, "C2_EMTTM_CR_TABLES_1", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000aa, "C2_EMTTM_CR_TABLES_2", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ab, "C2_EMTTM_CR_TABLES_3", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ac, "C2_EMTTM_CR_TABLES_4", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ad, "C2_EMTTM_CR_TABLES_5", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, ~(uint64_t)UINT32_MAX, 0), /* value=0x612 */
+ RSN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x2300000052000000)),
+ MFX(0x000000cd, "P6_MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x806, 0, 0),
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x1208227f7f0710)),
+ MVO(0x000000cf, "C2_UNK_0000_00cf", 0),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x18820f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0x404000, UINT64_C(0xfffffffffc001000)), /* value=0x202a01 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xffffffffff800000)), /* value=0x0 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0xd00208c8)),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x40`a0a41c60 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x3a`cc470b98 */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT64_C(0xffffffffefc5ffff), 0), /* value=0x4000000`877d4b01 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ MVX(0x00000116, "BBL_CR_ADDR", 0x3fc0, UINT64_C(0xffffff000000001f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", 0xa7f99, UINT64_C(0xfffffffffff00000), 0),
+ MFN(0x0000011a, "BBL_CR_TRIG", WriteOnly, IgnoreWrite),
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MVX(0x0000011c, "C2_UNK_0000_011c", UINT32_C(0xe003b94d), UINT64_C(0xffffffff07c00000), 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, UINT32_C(0xbe702111), UINT64_C(0xfffffffffef3fe9f), 0), /* value=0xbe702111 */
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0x70375245, UINT64_C(0xffffffff00000080), 0),
+ MVI(0x0000014f, "P6_UNK_0000_014f", UINT32_C(0xffffba7f)),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x6b929082, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x0000015e, "C2_UNK_0000_015e", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x0000015f, "C1_DTS_CAL_CTRL", IntelCore1DtsCalControl, IntelCore1DtsCalControl, 0, UINT64_C(0xffffffffffc0ffff), 0), /* value=0x822 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0xffffff82`0dce9190 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffff80`0d2ce720 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x806, 0, 0), /* value=0x806 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00200000)),
+ MVO(0x00000193, "C2_UNK_0000_0193", 0),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0x14822, UINT64_C(0xfffffffffffea0c0), 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x822082206300622), 0, 0), /* value=0x8220822`06300622 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x822, 0, 0), /* Might bite. value=0x822 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, 0, UINT64_C(0xffffffffffffffe1)), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x10, 0, UINT64_C(0xffffffffff0000e0)), /* value=0x10 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x883c0000), UINT32_C(0xf87f017f), UINT64_C(0xffffffff0780fc00)), /* value=0x883c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x612, 0, 0), /* value=0x612 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0x2120000, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, UINT64_C(0x4066a52489), UINT64_C(0x52600099f6), UINT64_C(0xffffff0019004000)), /* value=0x40`66a52489 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, ReadOnly, 0x1400, 0, 0), /* value=0x1400 */
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", UINT32_C(0xfe7f042f), UINT64_C(0xffffffff7faf00af), 0),
+ MVX(0x000001bf, "C2_UNK_0000_01bf", 0x404, UINT64_C(0xffffffffffff0000), 0),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffa03c)), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffff7f`8f47ca6b */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`0d2b24c0 */
+ MFN(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp), /* value=0xffffff80`0d2ba20f */
+ MFN(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp), /* value=0xffffff80`0d2ba200 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap, Ia32PlatformDcaCap, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x1 */
+ MFX(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap, Ia32Dca0Cap, 0, UINT64_C(0xfffffffffefe17ff), 0), /* value=0xc01e489 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x80000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x3f`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x7fc00000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x3f`ffc00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x6 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x30`00000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffffff0000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x10c2, 0, 0), /* value=0x10c2 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff444)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0xffffffff`ffffffff */
+ MFO(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl), /* value=0xffffffff`ffffffff */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFN(0x00000478, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0x5a08000000000d), 0, 0), /* value=0x5a0800`0000000d */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x3f00000016), 0, 0), /* value=0x3f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xf7f9fffe0401e172), 0, 0), /* value=0xf7f9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3ffff00036dff), 0, 0), /* value=0x3ffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x3fff000011ff), 0, 0), /* value=0x3fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x4100000000), 0, 0), /* value=0x41`00000000 */
+ MVX(0x000004f8, "C2_UNK_0000_04f8", 0, 0, 0),
+ MVX(0x000004f9, "C2_UNK_0000_04f9", 0, 0, 0),
+ MVX(0x000004fa, "C2_UNK_0000_04fa", 0, 0, 0),
+ MVX(0x000004fb, "C2_UNK_0000_04fb", 0, 0, 0),
+ MVX(0x000004fc, "C2_UNK_0000_04fc", 0, 0, 0),
+ MVX(0x000004fd, "C2_UNK_0000_04fd", 0, 0, 0),
+ MVX(0x000004fe, "C2_UNK_0000_04fe", 0, 0, 0),
+ MVX(0x000004ff, "C2_UNK_0000_04ff", 0, 0, 0),
+ MVX(0x00000590, "C2_UNK_0000_0590", 0, 0, 0),
+ MVX(0x00000591, "C2_UNK_0000_0591", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x000005a0, "C2_PECI_CTL", IntelCore2PeciControl, IntelCore2PeciControl, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x1 */
+ MVI(0x000005a1, "C2_UNK_0000_05a1", 0x1),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`0d2ce6c0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff82`0dcfd000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7c7511e0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Xeon_X5482_3_20GHz =
+{
+ /*.pszName = */ "Intel Xeon X5482 3.20GHz",
+ /*.pszFullName = */ "Intel(R) Xeon(R) CPU X5482 @ 3.20GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 23,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core2_Penryn,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_400MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 38,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280202, 0x00000000, 0x00000000, 0x00000503 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Xeon_X5482_3_20GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Xeon_X5482_3_20GHz),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz_h */
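Editor's note: in the CPUID tables, leaf 0x00000000 carries the vendor signature; the initializers after the sub-leaf mask are EAX, EBX, ECX, EDX, and the canonical 12-character string is assembled in EBX, EDX, ECX order. A small self-contained check that reproduces "GenuineIntel" from the Xeon table above and "AuthenticAMD" from the Opteron table later in this patch (assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reassemble the 12-byte CPUID vendor string from leaf 0.
 * Register order is EBX, EDX, ECX; assumes a little-endian host. */
static void printVendor(uint32_t uEbx, uint32_t uEdx, uint32_t uEcx)
{
    char szVendor[13];
    memcpy(&szVendor[0], &uEbx, sizeof(uEbx));
    memcpy(&szVendor[4], &uEdx, sizeof(uEdx));
    memcpy(&szVendor[8], &uEcx, sizeof(uEcx));
    szVendor[12] = '\0';
    printf("%s\n", szVendor);
}

int main(void)
{
    printVendor(0x756e6547, 0x49656e69, 0x6c65746e); /* Xeon leaf 0: "GenuineIntel" */
    printVendor(0x68747541, 0x69746e65, 0x444d4163); /* Opteron leaf 0: "AuthenticAMD" */
    return 0;
}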
+
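Editor's note on the macro arguments used throughout these MSR tables: in MFX()/MVX() entries the trailing two values are write masks, one for bits a write silently leaves unchanged and one for bits whose modification is fenced off with #GP(0). The real handler lives elsewhere in CPUM; the following is only a sketch of that reading of the masks, not VirtualBox code:

/* Sketch only: one plausible reading of the fWrIgnMask/fWrGpMask pair. */
#include <stdbool.h>
#include <stdint.h>

typedef struct MSRSKETCH
{
    uint64_t uValue;     /* Current value (initialized from the table). */
    uint64_t fWrIgnMask; /* Bits a write silently leaves unchanged. */
    uint64_t fWrGpMask;  /* Bits assumed to raise #GP(0) when toggled. */
} MSRSKETCH;

/* Returns true on success, false when the write should raise #GP(0). */
static bool msrSketchWrite(MSRSKETCH *pMsr, uint64_t uNew)
{
    if ((uNew ^ pMsr->uValue) & pMsr->fWrGpMask)
        return false; /* Guest tried to flip a protected bit. */
    pMsr->uValue = (pMsr->uValue & pMsr->fWrIgnMask) | (uNew & ~pMsr->fWrIgnMask);
    return true;
}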
diff --git a/src/VBox/VMM/VMMR3/cpus/Makefile.kup b/src/VBox/VMM/VMMR3/cpus/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Makefile.kup
diff --git a/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h b/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h
new file mode 100644
index 00000000..2f2df06d
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h
@@ -0,0 +1,280 @@
+/* $Id: Quad_Core_AMD_Opteron_2384.h $ */
+/** @file
+ * CPU database entry "Quad-Core AMD Opteron 2384".
+ * Generated at 2013-12-09T21:56:56Z by VBoxCpuReport v4.3.51r91133 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384_h
+#define VBOX_CPUDB_Quad_Core_AMD_Opteron_2384_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Quad-Core AMD Opteron(tm) Processor 2384.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000005, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00100f42, 0x06040800, 0x00802009, 0x178bfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001b, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00100f42, 0x00000d4f, 0x000037ff, 0xefd3fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x80000002, 0x00000000, 0x00000000, 0x64617551, 0x726f432d, 0x4d412065, 0x704f2044, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x6f726574, 0x6d74286e, 0x72502029, 0x7365636f, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20726f73, 0x34383332, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff30ff10, 0xff30ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x20800000, 0x42004200, 0x02008140, 0x0030b140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000001f9, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00002003, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x0000000f, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf0300000, 0x60100000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x0000001f, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Quad-Core AMD Opteron(tm) Processor 2384.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Quad_Core_AMD_Opteron_2384[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0xbe`410ca9b6 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x1000086),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x106, 0, 0), /* value=0x106 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffc0), 0), /* value=0x3f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xfffff800`0245dd94 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xfffff800`0245e910 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x753d3416 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x753ea130 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`f8000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x4d01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`0245dd00 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`0245da80 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x14700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffffa60`01b8a000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0xc0000408, 0xc000040a, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040b, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760600, UINT64_C(0xffffffffff80f8ff), 0), /* value=0x760600 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x1000030, UINT64_C(0xffffffff00000020), 0), /* value=0x1000030 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x1`b8210000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xc8000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x2`38000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x400000`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x327f0004, UINT64_C(0xffffffffc0008838), 0), /* value=0x327f0004 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffffc00088c0), 0), /* value=0x30000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x1830),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xffffffffffffff00), 0), /* value=0x80 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xfffffffffffff000), 0), /* value=0x200 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, UINT64_C(0xffffffffc0000000), 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x200242e */
+ MVX(0xc0010057, "AMD_10H_UNK_c001_0057", 0, 0, 0),
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x30, 0, 0), /* value=0x30 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x1, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x1 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001e13000300b), 0, 0), /* value=0x800001e1`3000300b */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001c840004004), 0, 0), /* value=0x800001c8`40004004 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b64000404e), 0, 0), /* value=0x800001b6`4000404e */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019d40004040), 0, 0), /* value=0x8000019d`40004040 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40014004, UINT64_C(0xffffffff01b80000), 0), /* value=0x40014004 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x38b600c340014004), UINT64_MAX, 0), /* value=0x38b600c3`40014004 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98e00 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0x1 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SMM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x1, 0, 0), /* value=0x1 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x802009`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x37ff`efd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0), /* value=0x0 */
+ MVI(0xc0011012, "AMD_K7_UNK_c001_1012", UINT32_MAX),
+ MVI(0xc0011013, "AMD_K7_UNK_c001_1013", UINT64_MAX),
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x1830), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x40000000000008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0xfffe012000000000), 0), /* value=0x10`00001000 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xffc0000000000000), 0), /* value=0x1c94`49000000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x10200020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MASK?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MVX(0xc0011028, "AMD_10H_UNK_c001_1028", 0, UINT64_C(0xfffffffffffffff8), 0),
+ MVX(0xc0011029, "AMD_10H_UNK_c001_1029", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2, 0, UINT64_C(0xfff00000c0000000), 0), /* value=0x40040`050000c0 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfcffff00000000), 0), /* value=0x0 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0xffffff1f`6ffffec0 */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0xffffbecf`eff1fec0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xfffffffffff00000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0xffffcf06`409f2d93 */
+ MFI(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData), /* value=0x3b`7701fe63 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFI(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3), /* value=0x0 */
+ MFX(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr, 0, UINT64_C(0x7fffffffffff), 0), /* value=0x0 */
+ MFI(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Quad-Core AMD Opteron(tm) Processor 2384.
+ */
+static CPUMDBENTRY const g_Entry_Quad_Core_AMD_Opteron_2384 =
+{
+ /*.pszName = */ "Quad-Core AMD Opteron 2384",
+ /*.pszFullName = */ "Quad-Core AMD Opteron(tm) Processor 2384",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 16,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K10,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.fMxCsrMask = */ 0x2ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Quad_Core_AMD_Opteron_2384)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Quad_Core_AMD_Opteron_2384),
+};
+
+#endif /* !VBOX_CPUDB_Quad_Core_AMD_Opteron_2384_h */
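Editor's note: many rows above compress a run of registers into one record, e.g. RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", ...). The per-register 'n' is not stored per MSR; it is recovered from the offset into the range plus the base given in the entry. A sketch of that lookup, where the field names and the base-index convention are assumptions inferred from the macro arguments:

/* Sketch: mapping an MSR number in a ranged entry back to its index 'n'. */
#include <stdint.h>

typedef struct MSRRANGESKETCH
{
    uint32_t uFirst; /* e.g. 0xc0010000 */
    uint32_t uLast;  /* e.g. 0xc0010003 */
    uint32_t uBase;  /* the 0x0 argument in the RSN() line above (assumption) */
} MSRRANGESKETCH;

static uint32_t msrRangeIndex(MSRRANGESKETCH const *pRange, uint32_t uMsr)
{
    /* AMD_K8_PERF_CTL_n: n = base + offset into the range. */
    return pRange->uBase + (uMsr - pRange->uFirst);
}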
+
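Editor's note: the .uFamily/.uModel/.uStepping fields in the entry above can be cross-checked against EAX of leaf 0x00000001 in the matching CPUID table: 0x00100f42 decodes to family 16, model 4, stepping 2, exactly as recorded. The standard decoding, self-contained:

#include <stdint.h>
#include <stdio.h>

/* Standard family/model/stepping decoding of CPUID.01H:EAX. */
static void decodeFms(uint32_t uEax)
{
    uint32_t const uStepping   = uEax & 0xf;
    uint32_t const uBaseModel  = (uEax >> 4) & 0xf;
    uint32_t const uBaseFamily = (uEax >> 8) & 0xf;
    uint32_t const uExtModel   = (uEax >> 16) & 0xf;
    uint32_t const uExtFamily  = (uEax >> 20) & 0xff;
    uint32_t const uFamily     = uBaseFamily == 0xf ? uBaseFamily + uExtFamily : uBaseFamily;
    uint32_t const uModel      = (uBaseFamily == 0xf || uBaseFamily == 0x6)
                               ? (uExtModel << 4) | uBaseModel : uBaseModel;
    printf("family=%u model=%u stepping=%u\n", uFamily, uModel, uStepping);
}

int main(void)
{
    decodeFms(0x00100f42); /* Opteron 2384 above: family=16 model=4 stepping=2 */
    decodeFms(0x00010676); /* Xeon X5482 earlier: family=6 model=23 stepping=6 */
    return 0;
}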
diff --git a/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h b/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h
new file mode 100644
index 00000000..72e74e92
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h
@@ -0,0 +1,414 @@
+/* $Id: VIA_QuadCore_L4700_1_2_GHz.h $ */
+/** @file
+ * CPU database entry "VIA QuadCore L4700 1.2+ GHz".
+ * Generated at 2013-12-20T14:40:07Z by VBoxCpuReport v4.3.53r91411 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz_h
+#define VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x746e6543, 0x736c7561, 0x48727561, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006fd, 0x06080800, 0x008863a9, 0xbfc9fbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x02b3b001, 0x00000000, 0x00000000, 0x2c04307d, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c000021, 0x03c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00022220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x06280202, 0x00000000, 0x00000000, 0x00000503, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x49562020, 0x75512041, 0x6f436461, 0x4c206572, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30303734, 0x31204020, 0x202b322e, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x08800880, 0x40100140, 0x40100140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000000, 0x00000000, 0x00000000, 0xc0000004, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000001, 0x00000000, 0x00000000, 0x000006fd, 0x00000000, 0x00000000, 0x1ec03dcc, 0 },
+ { 0xc0000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000004, 0x00000000, 0x00000000, 0x000fffb7, 0x08000955, 0x08530954, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz[] =
+{
+ RVI(0x00000000, 0x00000005, "ZERO_0000_0000_THRU_0000_0005", 0),
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x40 */
+ RVI(0x00000007, 0x0000000f, "ZERO_0000_0007_THRU_0000_000f", 0),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x965`912e15ac */
+ RVI(0x00000011, 0x0000001a, "ZERO_0000_0011_THRU_0000_001a", 0),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0x600, UINT64_C(0xfffffff0000000ff)),
+ RVI(0x0000001c, 0x00000029, "ZERO_0000_001c_THRU_0000_0029", 0),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x2580000, UINT64_MAX, 0), /* value=0x2580000 */
+ RVI(0x0000002b, 0x00000039, "ZERO_0000_002b_THRU_0000_0039", 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ RVI(0x0000003b, 0x00000078, "ZERO_0000_003b_THRU_0000_0078", 0),
+ RVI(0x0000007a, 0x0000008a, "ZERO_0000_007a_THRU_0000_008a", 0),
+ MFN(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId), /* value=0xc`00000000 */
+ RVI(0x0000008c, 0x0000009a, "ZERO_0000_008c_THRU_0000_009a", 0),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RVI(0x0000009c, 0x000000c0, "ZERO_0000_009c_THRU_0000_00c0", 0),
+ RSN(0x000000c1, 0x000000c3, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, UINT64_C(0xffffff0000000000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x000000c4, 0x000000cc, "ZERO_0000_00c4_THRU_0000_00cc", 0),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0),
+ RVI(0x000000ce, 0x000000e1, "ZERO_0000_00ce_THRU_0000_00e1", 0),
+ MFI(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl), /* value=0x6a204 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RVI(0x000000e5, 0x000000e6, "ZERO_0000_00e5_THRU_0000_00e6", 0),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x2f4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2f2 */
+ RVI(0x000000e9, 0x000000fd, "ZERO_0000_00e9_THRU_0000_00fd", 0),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ RVI(0x000000ff, 0x0000011d, "ZERO_0000_00ff_THRU_0000_011d", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0, UINT64_MAX, 0), /* value=0x0 */
+ RVI(0x0000011f, 0x00000173, "ZERO_0000_011f_THRU_0000_0173", 0),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffffff`8166bfa0 */
+ RVI(0x00000177, 0x00000178, "ZERO_0000_0177_THRU_0000_0178", 0),
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ RVI(0x0000017b, 0x00000185, "ZERO_0000_017b_THRU_0000_0185", 0),
+ RSN(0x00000186, 0x00000188, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, UINT64_C(0xfffffffff8280000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x00000189, 0x00000197, "ZERO_0000_0189_THRU_0000_0197", 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, Ia32PerfStatus, UINT64_C(0x853095408000955), UINT64_MAX, 0), /* value=0x8530954`08000955 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x954, 0, 0), /* Might bite. value=0x954 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xffffffffff0000e0), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0x8320000, UINT64_MAX, 0), /* value=0x8320000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x853, 0, 0), /* value=0x853 */
+ RVI(0x0000019e, 0x0000019f, "ZERO_0000_019e_THRU_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x173c89, UINT64_C(0xffffffb87939c176), 0), /* value=0x173c89 */
+ RVI(0x000001a1, 0x000001d8, "ZERO_0000_01a1_THRU_0000_01d8", 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffe03c)), /* value=0x1 */
+ RVI(0x000001da, 0x000001f1, "ZERO_0000_01da_THRU_0000_01f1", 0),
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x0 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0x0 */
+ RVI(0x000001f4, 0x000001ff, "ZERO_0000_01f4_THRU_0000_01ff", 0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x70000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xd0000001 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ RVI(0x00000210, 0x0000024f, "ZERO_0000_0210_THRU_0000_024f", 0),
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ RVI(0x00000251, 0x00000257, "ZERO_0000_0251_THRU_0000_0257", 0),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ RVI(0x0000025a, 0x00000267, "ZERO_0000_025a_THRU_0000_0267", 0),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ RVI(0x00000270, 0x00000276, "ZERO_0000_0270_THRU_0000_0276", 0),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RVI(0x00000278, 0x000002fe, "ZERO_0000_0278_THRU_0000_02fe", 0),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RVI(0x00000300, 0x00000308, "ZERO_0000_0300_THRU_0000_0308", 0),
+ RSN(0x00000309, 0x0000030a, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, UINT64_C(0xffffff0000000000), 0),
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, UINT64_C(0xfffff8020a068061), 0), /* value=0x2d4 */
+ RVI(0x0000030c, 0x0000038c, "ZERO_0000_030c_THRU_0000_038c", 0),
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, UINT64_C(0xfffffffffffff444), 0), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0xffffffff`ffffffff */
+ RVI(0x00000390, 0x0000047f, "ZERO_0000_0390_THRU_0000_047f", 0),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0x1a040000000007), 0, 0), /* value=0x1a0400`00000007 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x3f00000016), 0, 0), /* value=0x3f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0x77f9fffe0401e172), 0, 0), /* value=0x77f9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3efff00036dff), 0, 0), /* value=0x3efff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x1fff000011ff), 0, 0), /* value=0x1fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ RVI(0x0000048b, 0x000005ff, "ZERO_0000_048b_THRU_0000_05ff", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ RVI(0x00000601, 0x00001106, "ZERO_0000_0601_THRU_0000_1106", 0),
+ MVI(0x00001107, "VIA_UNK_0000_1107", 0x2),
+ RVI(0x00001108, 0x0000110e, "ZERO_0000_1108_THRU_0000_110e", 0),
+ MVI(0x0000110f, "VIA_UNK_0000_110f", 0x2),
+ RVI(0x00001110, 0x00001152, "ZERO_0000_1110_THRU_0000_1152", 0),
+ MVO(0x00001153, "VIA_UNK_0000_1153", 0),
+ RVI(0x00001154, 0x000011ff, "ZERO_0000_1154_THRU_0000_11ff", 0),
+ MVX(0x00001200, "VIA_UNK_0000_1200", UINT64_C(0x8863a9bfc9fbff), 0x40000, 0),
+ MVX(0x00001201, "VIA_UNK_0000_1201", UINT64_C(0x120100800), UINT64_C(0xfffffff000000000), 0),
+ MVX(0x00001202, "VIA_UNK_0000_1202", 0x3dcc, UINT64_C(0xffffffffffffc233), 0),
+ MVX(0x00001203, "VIA_UNK_0000_1203", 0x18, 0, 0),
+ MVX(0x00001204, "VIA_UNK_0000_1204", UINT64_C(0x6fd00000424), 0, 0),
+ MVX(0x00001205, "VIA_UNK_0000_1205", UINT64_C(0x9890000000001), 0, 0),
+ MVX(0x00001206, "VIA_ALT_VENDOR_EBX", 0, 0, 0),
+ MVX(0x00001207, "VIA_ALT_VENDOR_ECDX", 0, 0, 0),
+ MVX(0x00001208, "VIA_UNK_0000_1208", 0, 0, 0),
+ MVX(0x00001209, "VIA_UNK_0000_1209", 0, 0, 0),
+ MVX(0x0000120a, "VIA_UNK_0000_120a", 0, 0, 0),
+ MVX(0x0000120b, "VIA_UNK_0000_120b", 0, 0, 0),
+ MVX(0x0000120c, "VIA_UNK_0000_120c", 0, 0, 0),
+ MVX(0x0000120d, "VIA_UNK_0000_120d", 0, 0, 0),
+ MVI(0x0000120e, "VIA_UNK_0000_120e", UINT64_C(0x820007b100002080)), /* Villain? */
+ MVX(0x0000120f, "VIA_UNK_0000_120f", UINT64_C(0x200000001a000000), 0x18000000, 0),
+ MVI(0x00001210, "ZERO_0000_1210", 0),
+ MVX(0x00001211, "VIA_UNK_0000_1211", 0, 0, 0),
+ MVX(0x00001212, "VIA_UNK_0000_1212", 0, 0, 0),
+ MVX(0x00001213, "VIA_UNK_0000_1213", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVO(0x00001214, "VIA_UNK_0000_1214", UINT64_C(0x5dd89e10ffffffff)),
+ RVI(0x00001215, 0x0000121f, "ZERO_0000_1215_THRU_0000_121f", 0),
+ MVO(0x00001220, "VIA_UNK_0000_1220", 0),
+ MVO(0x00001221, "VIA_UNK_0000_1221", 0x4dd2e713),
+ RVI(0x00001222, 0x0000122f, "ZERO_0000_1222_THRU_0000_122f", 0),
+ MVX(0x00001230, "VIA_UNK_0000_1230", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001231, "VIA_UNK_0000_1231", UINT64_C(0x7f9110bdc740), 0x200, 0),
+ MVO(0x00001232, "VIA_UNK_0000_1232", UINT64_C(0x2603448430479888)),
+ MVI(0x00001233, "VIA_UNK_0000_1233", UINT64_C(0xb39acda158793c27)), /* Villain? */
+ MVX(0x00001234, "VIA_UNK_0000_1234", 0, 0, 0),
+ MVX(0x00001235, "VIA_UNK_0000_1235", 0, 0, 0),
+ MVX(0x00001236, "VIA_UNK_0000_1236", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001237, "VIA_UNK_0000_1237", UINT32_C(0xffc00026), UINT64_C(0xffffffff06000001), 0),
+ MVO(0x00001238, "VIA_UNK_0000_1238", 0x2),
+ MVI(0x00001239, "VIA_UNK_0000_1239", 0), /* Villain? */
+ RVI(0x0000123a, 0x0000123f, "ZERO_0000_123a_THRU_0000_123f", 0),
+ MVO(0x00001240, "VIA_UNK_0000_1240", 0),
+ MVO(0x00001241, "VIA_UNK_0000_1241", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x00001242, "ZERO_0000_1242", 0),
+ MVX(0x00001243, "VIA_UNK_0000_1243", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001244, "ZERO_0000_1244", 0),
+ MVX(0x00001245, "VIA_UNK_0000_1245", UINT64_C(0x3020400000000064), UINT64_C(0xf000000000000000), 0),
+ MVX(0x00001246, "VIA_UNK_0000_1246", UINT64_C(0x10000000000), 0, 0),
+ MVX(0x00001247, "VIA_UNK_0000_1247", 0, 0, 0),
+ MVX(0x00001248, "VIA_UNK_0000_1248", 0, 0, 0),
+ MVI(0x00001249, "VIA_UNK_0000_1249", 0), /* Villain? */
+ MVI(0x0000124a, "VIA_UNK_0000_124a", 0), /* Villain? */
+ RVI(0x0000124b, 0x00001300, "ZERO_0000_124b_THRU_0000_1300", 0),
+ MVX(0x00001301, "VIA_UNK_0000_1301", 0, 0, 0),
+ MVX(0x00001302, "VIA_UNK_0000_1302", 0, 0, 0),
+ MVX(0x00001303, "VIA_UNK_0000_1303", 0, 0, 0),
+ MVX(0x00001304, "VIA_UNK_0000_1304", 0, 0, 0),
+ MVX(0x00001305, "VIA_UNK_0000_1305", 0, 0, 0),
+ MVX(0x00001306, "VIA_UNK_0000_1306", 0, 0, 0),
+ MVX(0x00001307, "VIA_UNK_0000_1307", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00001308, "VIA_UNK_0000_1308", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001309, "VIA_UNK_0000_1309", 0, ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x0000130a, 0x0000130c, "ZERO_0000_130a_THRU_0000_130c", 0),
+ MVX(0x0000130d, "VIA_UNK_0000_130d", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x0000130e, "VIA_UNK_0000_130e", UINT64_MAX, 0, 0),
+ RVI(0x0000130f, 0x00001311, "ZERO_0000_130f_THRU_0000_1311", 0),
+ MVX(0x00001312, "VIA_UNK_0000_1312", 0, 0, 0),
+ RVI(0x00001313, 0x00001314, "ZERO_0000_1313_THRU_0000_1314", 0),
+ MVX(0x00001315, "VIA_UNK_0000_1315", 0, 0, 0),
+ MVI(0x00001316, "ZERO_0000_1316", 0),
+ MVX(0x00001317, "VIA_UNK_0000_1317", 0, 0, 0),
+ MVX(0x00001318, "VIA_UNK_0000_1318", 0, 0, 0),
+ MVI(0x00001319, "ZERO_0000_1319", 0),
+ MVX(0x0000131a, "VIA_UNK_0000_131a", 0, 0, 0),
+ MVX(0x0000131b, "VIA_UNK_0000_131b", 0x3c20954, 0, 0),
+ RVI(0x0000131c, 0x00001401, "ZERO_0000_131c_THRU_0000_1401", 0),
+ MVO(0x00001402, "VIA_UNK_0000_1402", 0x148c48),
+ MVX(0x00001403, "VIA_UNK_0000_1403", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001404, "VIA_UNK_0000_1404", 0), /* Villain? */
+ MVI(0x00001405, "VIA_UNK_0000_1405", UINT32_C(0x80fffffc)), /* Villain? */
+ MVX(0x00001406, "VIA_UNK_0000_1406", UINT32_C(0xc842c800), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001407, "VIA_UNK_0000_1407", UINT32_C(0x880400c0), ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x00001408, 0x0000140f, "ZERO_0000_1408_THRU_0000_140f", 0),
+ MVX(0x00001410, "VIA_UNK_0000_1410", 0xfa0, UINT64_C(0xfffffffffff00000), 0),
+ MVX(0x00001411, "VIA_UNK_0000_1411", 0xa5a, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x00001412, "VIA_UNK_0000_1412", 0x4090),
+ MVI(0x00001413, "VIA_UNK_0000_1413", 0), /* Villain? */
+ MVX(0x00001414, "VIA_UNK_0000_1414", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001415, "VIA_UNK_0000_1415", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001416, "VIA_UNK_0000_1416", 0x6e, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001417, "VIA_UNK_0000_1417", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001418, "VIA_UNK_0000_1418", 0xa, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001419, "VIA_UNK_0000_1419", 0x14, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141a, "VIA_UNK_0000_141a", 0x28, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141b, "VIA_UNK_0000_141b", 0x3c, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141c, "VIA_UNK_0000_141c", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141d, "VIA_UNK_0000_141d", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141e, "VIA_UNK_0000_141e", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141f, "VIA_UNK_0000_141f", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001420, "VIA_UNK_0000_1420", 0x3, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001421, "VIA_UNK_0000_1421", 0x1f8, UINT64_C(0xfffffffffffc0000), 0),
+ MVX(0x00001422, "VIA_UNK_0000_1422", 0x1f4, UINT64_C(0xfffffffffffc0000), 0),
+ MVI(0x00001423, "VIA_UNK_0000_1423", 0xfffb7),
+ MVI(0x00001424, "VIA_UNK_0000_1424", 0x5b6),
+ MVI(0x00001425, "VIA_UNK_0000_1425", 0x65508),
+ MVI(0x00001426, "VIA_UNK_0000_1426", 0x843b),
+ MVX(0x00001427, "VIA_UNK_0000_1427", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001428, "VIA_UNK_0000_1428", 0x1ffffff, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001429, "VIA_UNK_0000_1429", 0, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x0000142a, "VIA_UNK_0000_142a", 0x1c85d),
+ MVO(0x0000142b, "VIA_UNK_0000_142b", 0xf7e),
+ MVI(0x0000142c, "VIA_UNK_0000_142c", 0x20080), /* Villain? */
+ MVI(0x0000142d, "ZERO_0000_142d", 0),
+ MVI(0x0000142e, "VIA_UNK_0000_142e", 0x8000000), /* Villain? */
+ MVX(0x0000142f, "VIA_UNK_0000_142f", UINT64_C(0xffe57bea2ff3fdff), 0, 0),
+ RVI(0x00001430, 0x00001433, "ZERO_0000_1430_THRU_0000_1433", 0),
+ MVX(0x00001434, "VIA_UNK_0000_1434", 0x853f0e0, UINT64_C(0xffffffff7e7b0000), 0),
+ MVI(0x00001435, "VIA_UNK_0000_1435", 0x8000838), /* Villain? */
+ MVI(0x00001436, "VIA_UNK_0000_1436", 0x200004f), /* Villain? */
+ MVX(0x00001437, "VIA_UNK_0000_1437", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001438, "VIA_UNK_0000_1438", 0x7004801c), /* Villain? */
+ MVI(0x00001439, "ZERO_0000_1439", 0),
+ MVX(0x0000143a, "VIA_UNK_0000_143a", 0x20000, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x0000143b, "ZERO_0000_143b", 0),
+ MVX(0x0000143c, "VIA_UNK_0000_143c", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000143d, "VIA_UNK_0000_143d", 0, UINT64_C(0xfffffffffffffe00), 0),
+ RVI(0x0000143e, 0x0000143f, "ZERO_0000_143e_THRU_0000_143f", 0),
+ MVX(0x00001440, "VIA_UNK_0000_1440", UINT32_C(0x80e00954), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001441, "VIA_UNK_0000_1441", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ MVX(0x00001442, "VIA_UNK_0000_1442", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ RVI(0x00001443, 0x00001448, "ZERO_0000_1443_THRU_0000_1448", 0),
+ MVI(0x00001449, "VIA_UNK_0000_1449", UINT64_C(0xfffff7e247)),
+ RVI(0x0000144a, 0x0000144f, "ZERO_0000_144a_THRU_0000_144f", 0),
+ MVX(0x00001450, "VIA_UNK_0000_1450", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0x00001451, "VIA_UNK_0000_1451", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001452, "VIA_UNK_0000_1452", 0, UINT64_C(0xffffffffff000000), 0),
+ MVI(0x00001453, "VIA_UNK_0000_1453", 0x3fffffff),
+ RVI(0x00001454, 0x0000145f, "ZERO_0000_1454_THRU_0000_145f", 0),
+ MVX(0x00001460, "VIA_UNK_0000_1460", 0, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0x00001461, "VIA_UNK_0000_1461", 0x7b, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0x00001462, "VIA_UNK_0000_1462", 0x76, UINT64_C(0xffffffffffffff00), 0),
+ MVI(0x00001463, "VIA_UNK_0000_1463", 0x4a),
+ MVI(0x00001464, "ZERO_0000_1464", 0),
+ MVI(0x00001465, "VIA_UNK_0000_1465", 0xc6),
+ MVI(0x00001466, "VIA_UNK_0000_1466", UINT64_C(0x800000053)),
+ RVI(0x00001467, 0x0000146f, "ZERO_0000_1467_THRU_0000_146f", 0),
+ MVX(0x00001470, "VIA_UNK_0000_1470", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVI(0x00001471, "VIA_UNK_0000_1471", 0x2a000000),
+ RVI(0x00001472, 0x0000147f, "ZERO_0000_1472_THRU_0000_147f", 0),
+ MVI(0x00001480, "VIA_UNK_0000_1480", 0x3907),
+ MVI(0x00001481, "VIA_UNK_0000_1481", 0x12c0),
+ MVI(0x00001482, "VIA_UNK_0000_1482", 0x320),
+ MVI(0x00001483, "VIA_UNK_0000_1483", 0x3),
+ MVI(0x00001484, "VIA_UNK_0000_1484", 0x1647),
+ MVI(0x00001485, "VIA_UNK_0000_1485", 0x3b7),
+ MVI(0x00001486, "VIA_UNK_0000_1486", 0x443),
+ RVI(0x00001487, 0x0000148f, "ZERO_0000_1487_THRU_0000_148f", 0),
+ MVX(0x00001490, "VIA_UNK_0000_1490", 0xf5, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001491, "VIA_UNK_0000_1491", 0x200, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001492, "VIA_UNK_0000_1492", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001493, "VIA_UNK_0000_1493", 0x4, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001494, "VIA_UNK_0000_1494", 0x100, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001495, "VIA_UNK_0000_1495", 0x100, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001496, "VIA_UNK_0000_1496", 0x8, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001497, "VIA_UNK_0000_1497", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001498, "VIA_UNK_0000_1498", 0xffffff, UINT64_C(0xfffffffffffffe3c), 0),
+ MVI(0x00001499, "VIA_UNK_0000_1499", 0x2c5),
+ MVI(0x0000149a, "VIA_UNK_0000_149a", 0x1c1),
+ MVI(0x0000149b, "VIA_UNK_0000_149b", 0x2c5a),
+ MVI(0x0000149c, "VIA_UNK_0000_149c", 0x1c8f),
+ RVI(0x0000149d, 0x0000149e, "ZERO_0000_149d_THRU_0000_149e", 0),
+ MVI(0x0000149f, "VIA_UNK_0000_149f", 0x1c9),
+ RVI(0x000014a0, 0x00001522, "ZERO_0000_14a0_THRU_0000_1522", 0),
+ MFN(0x00001523, "VIA_UNK_0000_1523", WriteOnly, IgnoreWrite),
+ RVI(0x00001524, 0x00003179, "ZERO_0000_1524_THRU_0000_3179", 0),
+ MVO(0x0000317a, "VIA_UNK_0000_317a", UINT64_C(0x139f29749595b8)),
+ MVO(0x0000317b, "VIA_UNK_0000_317b", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x0000317c, "ZERO_0000_317c", 0),
+ MFN(0x0000317d, "VIA_UNK_0000_317d", WriteOnly, IgnoreWrite),
+ MFN(0x0000317e, "VIA_UNK_0000_317e", WriteOnly, IgnoreWrite),
+ MVI(0x0000317f, "VIA_UNK_0000_317f", 0), /* Villain? */
+ RVI(0x00003180, 0x00003fff, "ZERO_0000_3180_THRU_0000_3fff", 0),
+ RVI(0x40000000, 0x40003fff, "ZERO_4000_0000_THRU_4000_3fff", 0),
+ RVI(0x80000000, 0x80000197, "ZERO_8000_0000_THRU_8000_0197", 0),
+ RVI(0x80000199, 0x80003fff, "ZERO_8000_0199_THRU_8000_3fff", 0),
+ RVI(0xc0000000, 0xc000007f, "ZERO_c000_0000_THRU_c000_007f", 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xffffffffffffd2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`81669af0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8166c1d0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ RVI(0xc0000085, 0xc00000ff, "ZERO_c000_0085_THRU_c000_00ff", 0),
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7f91`10bdc740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`6fd80000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ RVI(0xc0000104, 0xc0003fff, "ZERO_c000_0104_THRU_c000_3fff", 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
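The MFX/MVX/RVI lines above compile into a sorted, non-overlapping array of MSR ranges keyed by the first one or two index columns. A minimal sketch of how such a table can be searched when the guest executes RDMSR/WRMSR, using a simplified stand-in struct (the real CPUMMSRRANGE also carries read/write handler enums, an initial value, and the two write masks):

#include <stdint.h>
#include <stddef.h>

typedef struct MSRRANGESKETCH
{
    uint32_t    uFirst;    /* first MSR index covered (column 1 above)  */
    uint32_t    uLast;     /* last MSR index covered, inclusive; assumed
                              equal to uFirst for single-MSR entries    */
    const char *pszName;   /* "IA32_APIC_BASE", "ZERO_...", ...         */
} MSRRANGESKETCH;

/* Binary search; works because the generated table is sorted by uFirst
   and the ranges never overlap. */
static const MSRRANGESKETCH *msrLookup(const MSRRANGESKETCH *paRanges,
                                       size_t cRanges, uint32_t idMsr)
{
    size_t iLo = 0, iHi = cRanges;
    while (iLo < iHi)
    {
        size_t const iMid = iLo + (iHi - iLo) / 2;
        if (idMsr < paRanges[iMid].uFirst)
            iHi = iMid;
        else if (idMsr > paRanges[iMid].uLast)
            iLo = iMid + 1;
        else
            return &paRanges[iMid];
    }
    return NULL; /* not covered by any range, e.g. 0xc0000103 above */
}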
+
+
+/**
+ * Database entry for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMDBENTRY const g_Entry_VIA_QuadCore_L4700_1_2_GHz =
+{
+ /*.pszName = */ "VIA QuadCore L4700 1.2+ GHz",
+ /*.pszFullName = */ "VIA QuadCore L4700 @ 1.2+ GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_VIA,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 15,
+ /*.uStepping = */ 13,
+ /*.enmMicroarch = */ kCpumMicroarch_VIA_Isaiah,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_267MHZ, /*??*/
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.fMxCsrMask = */ 0xffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz),
+};
+
+#endif /* !VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz_h */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h b/src/VBox/VMM/VMMR3/cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h
new file mode 100644
index 00000000..05b7d71e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h
@@ -0,0 +1,427 @@
+/* $Id: ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h $ */
+/** @file
+ * CPU database entry "ZHAOXIN KaiXian KX-U5581 1.8GHz"
+ * Generated at 2019-01-15T08:37:25Z by VBoxCpuReport v5.2.22r126460 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+#ifndef VBOX_CPUDB_ZHAOXIN_KaiXian_KX_U5581_1_8GHz_h
+#define VBOX_CPUDB_ZHAOXIN_KaiXian_KX_U5581_1_8GHz_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for ZHAOXIN KaiXian KX-U5581@1.8GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_ZHAOXIN_KaiXian_KX_U5581_1_8GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x68532020, 0x20206961, 0x68676e61, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000107b5, 0x07080800, 0x7eda63eb, 0xbfcbfbff, 0 | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC },
+ { 0x00000002, 0x00000000, 0x00000000, 0x635af001, 0x00000000, 0x00000000, 0x000000ff, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c000121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c000122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c00c143, 0x03c0003f, 0x00000fff, 0x00000003, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00022220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, UINT32_MAX, 0x00000000, 0x000c258b, 0x00000000, 0x24000000, 0 },
+ { 0x00000007, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300402, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000000, 0x00000001, 0x00000100, 0x00000007, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000003, 0x00000008, 0x00000201, 0x00000007, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000007, 0 | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES | CPUMCPUIDLEAF_F_CONTAINS_APIC_ID },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000121, 0x2c100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x4f41485a, 0x204e4958, 0x5869614b, 0x206e6169, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x552d584b, 0x31383535, 0x382e3140, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x04200420, 0x06600660, 0x20080140, 0x20080140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003028, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000000, 0x00000000, 0x00000000, 0xc0000004, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000001, 0x00000000, 0x00000000, 0x000107b5, 0x00000000, 0x00000000, 0x1ec33dfc, 0 },
+ { 0xc0000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000004, 0x00000000, 0x00000000, 0x00000025, 0x18002463, 0x18502461, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
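Leaves 0x80000002..0x80000004 above pack the processor brand string as little-endian ASCII across EAX..EDX (leaf 0x00000000 similarly spells the vendor "  Shanghai  " across EBX/EDX/ECX, matching the CPUMCPUVENDOR_SHANGHAI entry below). A small worked decode using the exact register values from the table; on a little-endian host the memcpy reproduces the bytes in order:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* EAX..EDX of leaves 0x80000002..0x80000004, copied from the table. */
    static const uint32_t s_au32Brand[12] =
    {
        0x20202020, 0x20202020, 0x20202020, 0x20202020, /* 16 pad spaces      */
        0x4f41485a, 0x204e4958, 0x5869614b, 0x206e6169, /* "ZHAOXIN KaiXian " */
        0x552d584b, 0x31383535, 0x382e3140, 0x007a4847, /* "KX-U5581@1.8GHz"  */
    };
    char szBrand[sizeof(s_au32Brand) + 1];
    memcpy(szBrand, s_au32Brand, sizeof(s_au32Brand));
    szBrand[sizeof(s_au32Brand)] = '\0'; /* last dword already ends in 0x00 */
    printf("%s\n", szBrand); /* prints the brand string, 16 leading pad spaces */
    return 0;
}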
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for ZHAOXIN KaiXian KX-U5581@1.8GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_ZHAOXIN_KaiXian_KX_U5581_1_8GHz[] =
+{
+ RVI(0x00000000, 0x00000005, "ZERO_0000_0000_THRU_0000_0005", 0),
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x40 */
+ RVI(0x00000007, 0x0000000f, "ZERO_0000_0007_THRU_0000_000f", 0),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x965`912e15ac */
+ RVI(0x00000011, 0x0000001a, "ZERO_0000_0011_THRU_0000_001a", 0),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0x600, UINT64_C(0xfffffff0000000ff)),
+ RVI(0x0000001c, 0x00000029, "ZERO_0000_001c_THRU_0000_0029", 0),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x2580000, UINT64_MAX, 0), /* value=0x2580000 */
+ RVI(0x0000002b, 0x00000039, "ZERO_0000_002b_THRU_0000_0039", 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ RVI(0x0000003b, 0x00000078, "ZERO_0000_003b_THRU_0000_0078", 0),
+ RVI(0x0000007a, 0x0000008a, "ZERO_0000_007a_THRU_0000_008a", 0),
+ MFN(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId), /* value=0xc`00000000 */
+ RVI(0x0000008c, 0x0000009a, "ZERO_0000_008c_THRU_0000_009a", 0),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RVI(0x0000009c, 0x000000c0, "ZERO_0000_009c_THRU_0000_00c0", 0),
+ RSN(0x000000c1, 0x000000c3, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, UINT64_C(0xffffff0000000000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x000000c4, 0x000000cc, "ZERO_0000_00c4_THRU_0000_00cc", 0),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0),
+ RVI(0x000000ce, 0x000000e1, "ZERO_0000_00ce_THRU_0000_00e1", 0),
+ MFI(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl), /* value=0x6a204 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RVI(0x000000e5, 0x000000e6, "ZERO_0000_00e5_THRU_0000_00e6", 0),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x2f4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2f2 */
+ RVI(0x000000e9, 0x000000fd, "ZERO_0000_00e9_THRU_0000_00fd", 0),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ RVI(0x000000ff, 0x0000011d, "ZERO_0000_00ff_THRU_0000_011d", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0, UINT64_MAX, 0), /* value=0x0 */
+ RVI(0x0000011f, 0x00000173, "ZERO_0000_011f_THRU_0000_0173", 0),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFN(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp), /* value=0x0 */
+ MFN(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip), /* value=0xffffffff`8166bfa0 */
+ RVI(0x00000177, 0x00000178, "ZERO_0000_0177_THRU_0000_0178", 0),
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ RVI(0x0000017b, 0x00000185, "ZERO_0000_017b_THRU_0000_0185", 0),
+ RSN(0x00000186, 0x00000188, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, UINT64_C(0xfffffffff8280000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x00000189, 0x00000197, "ZERO_0000_0189_THRU_0000_0197", 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, Ia32PerfStatus, UINT64_C(0x853095408000955), UINT64_MAX, 0), /* value=0x8530954`08000955 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x954, 0, 0), /* Might bite. value=0x954 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xffffffffff0000e0), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0x8320000, UINT64_MAX, 0), /* value=0x8320000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x853, 0, 0), /* value=0x853 */
+ RVI(0x0000019e, 0x0000019f, "ZERO_0000_019e_THRU_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x173c89, UINT64_C(0xffffffb87939c176), 0), /* value=0x173c89 */
+ RVI(0x000001a1, 0x000001d8, "ZERO_0000_01a1_THRU_0000_01d8", 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffe03c)), /* value=0x1 */
+ RVI(0x000001da, 0x000001f1, "ZERO_0000_01da_THRU_0000_01f1", 0),
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x0 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0x0 */
+ RVI(0x000001f4, 0x000001ff, "ZERO_0000_01f4_THRU_0000_01ff", 0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x70000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xd0000001 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ RVI(0x00000210, 0x0000024f, "ZERO_0000_0210_THRU_0000_024f", 0),
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ RVI(0x00000251, 0x00000257, "ZERO_0000_0251_THRU_0000_0257", 0),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ RVI(0x0000025a, 0x00000267, "ZERO_0000_025a_THRU_0000_0267", 0),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ RVI(0x00000270, 0x00000276, "ZERO_0000_0270_THRU_0000_0276", 0),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RVI(0x00000278, 0x000002fe, "ZERO_0000_0278_THRU_0000_02fe", 0),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RVI(0x00000300, 0x00000308, "ZERO_0000_0300_THRU_0000_0308", 0),
+ RSN(0x00000309, 0x0000030a, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, UINT64_C(0xffffff0000000000), 0),
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, UINT64_C(0xfffff8020a068061), 0), /* value=0x2d4 */
+ RVI(0x0000030c, 0x0000038c, "ZERO_0000_030c_THRU_0000_038c", 0),
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, UINT64_C(0xfffffffffffff444), 0), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0xffffffff`ffffffff */
+ RVI(0x00000390, 0x0000047f, "ZERO_0000_0390_THRU_0000_047f", 0),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBasic, ReadOnly, UINT64_C(0x1a040000000007), 0, 0), /* value=0x1a0400`00000007 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x3f00000016), 0, 0), /* value=0x3f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0x77f9fffe0401e172), 0, 0), /* value=0x77f9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3efff00036dff), 0, 0), /* value=0x3efff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x1fff000011ff), 0, 0), /* value=0x1fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ RVI(0x0000048b, 0x000005ff, "ZERO_0000_048b_THRU_0000_05ff", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ RVI(0x00000601, 0x00001106, "ZERO_0000_0601_THRU_0000_1106", 0),
+ MVI(0x00001107, "VIA_UNK_0000_1107", 0x2),
+ RVI(0x00001108, 0x0000110e, "ZERO_0000_1108_THRU_0000_110e", 0),
+ MVI(0x0000110f, "VIA_UNK_0000_110f", 0x2),
+ RVI(0x00001110, 0x00001152, "ZERO_0000_1110_THRU_0000_1152", 0),
+ MVO(0x00001153, "VIA_UNK_0000_1153", 0),
+ RVI(0x00001154, 0x000011ff, "ZERO_0000_1154_THRU_0000_11ff", 0),
+ MVX(0x00001200, "VIA_UNK_0000_1200", UINT64_C(0x8863a9bfc9fbff), 0x40000, 0),
+ MVX(0x00001201, "VIA_UNK_0000_1201", UINT64_C(0x120100800), UINT64_C(0xfffffff000000000), 0),
+ MVX(0x00001202, "VIA_UNK_0000_1202", 0x3dcc, UINT64_C(0xffffffffffffc233), 0),
+ MVX(0x00001203, "VIA_UNK_0000_1203", 0x18, 0, 0),
+ MVX(0x00001204, "VIA_UNK_0000_1204", UINT64_C(0x6fd00000424), 0, 0),
+ MVX(0x00001205, "VIA_UNK_0000_1205", UINT64_C(0x9890000000001), 0, 0),
+ MVX(0x00001206, "VIA_ALT_VENDOR_EBX", 0, 0, 0),
+ MVX(0x00001207, "VIA_ALT_VENDOR_ECDX", 0, 0, 0),
+ MVX(0x00001208, "VIA_UNK_0000_1208", 0, 0, 0),
+ MVX(0x00001209, "VIA_UNK_0000_1209", 0, 0, 0),
+ MVX(0x0000120a, "VIA_UNK_0000_120a", 0, 0, 0),
+ MVX(0x0000120b, "VIA_UNK_0000_120b", 0, 0, 0),
+ MVX(0x0000120c, "VIA_UNK_0000_120c", 0, 0, 0),
+ MVX(0x0000120d, "VIA_UNK_0000_120d", 0, 0, 0),
+ MVI(0x0000120e, "VIA_UNK_0000_120e", UINT64_C(0x820007b100002080)), /* Villain? */
+ MVX(0x0000120f, "VIA_UNK_0000_120f", UINT64_C(0x200000001a000000), 0x18000000, 0),
+ MVI(0x00001210, "ZERO_0000_1210", 0),
+ MVX(0x00001211, "VIA_UNK_0000_1211", 0, 0, 0),
+ MVX(0x00001212, "VIA_UNK_0000_1212", 0, 0, 0),
+ MVX(0x00001213, "VIA_UNK_0000_1213", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVO(0x00001214, "VIA_UNK_0000_1214", UINT64_C(0x5dd89e10ffffffff)),
+ RVI(0x00001215, 0x0000121f, "ZERO_0000_1215_THRU_0000_121f", 0),
+ MVO(0x00001220, "VIA_UNK_0000_1220", 0),
+ MVO(0x00001221, "VIA_UNK_0000_1221", 0x4dd2e713),
+ RVI(0x00001222, 0x0000122f, "ZERO_0000_1222_THRU_0000_122f", 0),
+ MVX(0x00001230, "VIA_UNK_0000_1230", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001231, "VIA_UNK_0000_1231", UINT64_C(0x7f9110bdc740), 0x200, 0),
+ MVO(0x00001232, "VIA_UNK_0000_1232", UINT64_C(0x2603448430479888)),
+ MVI(0x00001233, "VIA_UNK_0000_1233", UINT64_C(0xb39acda158793c27)), /* Villain? */
+ MVX(0x00001234, "VIA_UNK_0000_1234", 0, 0, 0),
+ MVX(0x00001235, "VIA_UNK_0000_1235", 0, 0, 0),
+ MVX(0x00001236, "VIA_UNK_0000_1236", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001237, "VIA_UNK_0000_1237", UINT32_C(0xffc00026), UINT64_C(0xffffffff06000001), 0),
+ MVO(0x00001238, "VIA_UNK_0000_1238", 0x2),
+ MVI(0x00001239, "VIA_UNK_0000_1239", 0), /* Villain? */
+ RVI(0x0000123a, 0x0000123f, "ZERO_0000_123a_THRU_0000_123f", 0),
+ MVO(0x00001240, "VIA_UNK_0000_1240", 0),
+ MVO(0x00001241, "VIA_UNK_0000_1241", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x00001242, "ZERO_0000_1242", 0),
+ MVX(0x00001243, "VIA_UNK_0000_1243", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001244, "ZERO_0000_1244", 0),
+ MVX(0x00001245, "VIA_UNK_0000_1245", UINT64_C(0x3020400000000064), UINT64_C(0xf000000000000000), 0),
+ MVX(0x00001246, "VIA_UNK_0000_1246", UINT64_C(0x10000000000), 0, 0),
+ MVX(0x00001247, "VIA_UNK_0000_1247", 0, 0, 0),
+ MVX(0x00001248, "VIA_UNK_0000_1248", 0, 0, 0),
+ MVI(0x00001249, "VIA_UNK_0000_1249", 0), /* Villain? */
+ MVI(0x0000124a, "VIA_UNK_0000_124a", 0), /* Villain? */
+ RVI(0x0000124b, 0x00001300, "ZERO_0000_124b_THRU_0000_1300", 0),
+ MVX(0x00001301, "VIA_UNK_0000_1301", 0, 0, 0),
+ MVX(0x00001302, "VIA_UNK_0000_1302", 0, 0, 0),
+ MVX(0x00001303, "VIA_UNK_0000_1303", 0, 0, 0),
+ MVX(0x00001304, "VIA_UNK_0000_1304", 0, 0, 0),
+ MVX(0x00001305, "VIA_UNK_0000_1305", 0, 0, 0),
+ MVX(0x00001306, "VIA_UNK_0000_1306", 0, 0, 0),
+ MVX(0x00001307, "VIA_UNK_0000_1307", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00001308, "VIA_UNK_0000_1308", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001309, "VIA_UNK_0000_1309", 0, ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x0000130a, 0x0000130c, "ZERO_0000_130a_THRU_0000_130c", 0),
+ MVX(0x0000130d, "VIA_UNK_0000_130d", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x0000130e, "VIA_UNK_0000_130e", UINT64_MAX, 0, 0),
+ RVI(0x0000130f, 0x00001311, "ZERO_0000_130f_THRU_0000_1311", 0),
+ MVX(0x00001312, "VIA_UNK_0000_1312", 0, 0, 0),
+ RVI(0x00001313, 0x00001314, "ZERO_0000_1313_THRU_0000_1314", 0),
+ MVX(0x00001315, "VIA_UNK_0000_1315", 0, 0, 0),
+ MVI(0x00001316, "ZERO_0000_1316", 0),
+ MVX(0x00001317, "VIA_UNK_0000_1317", 0, 0, 0),
+ MVX(0x00001318, "VIA_UNK_0000_1318", 0, 0, 0),
+ MVI(0x00001319, "ZERO_0000_1319", 0),
+ MVX(0x0000131a, "VIA_UNK_0000_131a", 0, 0, 0),
+ MVX(0x0000131b, "VIA_UNK_0000_131b", 0x3c20954, 0, 0),
+ RVI(0x0000131c, 0x00001401, "ZERO_0000_131c_THRU_0000_1401", 0),
+ MVO(0x00001402, "VIA_UNK_0000_1402", 0x148c48),
+ MVX(0x00001403, "VIA_UNK_0000_1403", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001404, "VIA_UNK_0000_1404", 0), /* Villain? */
+ MVI(0x00001405, "VIA_UNK_0000_1405", UINT32_C(0x80fffffc)), /* Villain? */
+ MVX(0x00001406, "VIA_UNK_0000_1406", UINT32_C(0xc842c800), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001407, "VIA_UNK_0000_1407", UINT32_C(0x880400c0), ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x00001408, 0x0000140f, "ZERO_0000_1408_THRU_0000_140f", 0),
+ MVX(0x00001410, "VIA_UNK_0000_1410", 0xfa0, UINT64_C(0xfffffffffff00000), 0),
+ MVX(0x00001411, "VIA_UNK_0000_1411", 0xa5a, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x00001412, "VIA_UNK_0000_1412", 0x4090),
+ MVI(0x00001413, "VIA_UNK_0000_1413", 0), /* Villain? */
+ MVX(0x00001414, "VIA_UNK_0000_1414", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001415, "VIA_UNK_0000_1415", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001416, "VIA_UNK_0000_1416", 0x6e, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001417, "VIA_UNK_0000_1417", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001418, "VIA_UNK_0000_1418", 0xa, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001419, "VIA_UNK_0000_1419", 0x14, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141a, "VIA_UNK_0000_141a", 0x28, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141b, "VIA_UNK_0000_141b", 0x3c, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141c, "VIA_UNK_0000_141c", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141d, "VIA_UNK_0000_141d", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141e, "VIA_UNK_0000_141e", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141f, "VIA_UNK_0000_141f", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001420, "VIA_UNK_0000_1420", 0x3, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001421, "VIA_UNK_0000_1421", 0x1f8, UINT64_C(0xfffffffffffc0000), 0),
+ MVX(0x00001422, "VIA_UNK_0000_1422", 0x1f4, UINT64_C(0xfffffffffffc0000), 0),
+ MVI(0x00001423, "VIA_UNK_0000_1423", 0xfffb7),
+ MVI(0x00001424, "VIA_UNK_0000_1424", 0x5b6),
+ MVI(0x00001425, "VIA_UNK_0000_1425", 0x65508),
+ MVI(0x00001426, "VIA_UNK_0000_1426", 0x843b),
+ MVX(0x00001427, "VIA_UNK_0000_1427", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001428, "VIA_UNK_0000_1428", 0x1ffffff, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001429, "VIA_UNK_0000_1429", 0, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x0000142a, "VIA_UNK_0000_142a", 0x1c85d),
+ MVO(0x0000142b, "VIA_UNK_0000_142b", 0xf7e),
+ MVI(0x0000142c, "VIA_UNK_0000_142c", 0x20080), /* Villain? */
+ MVI(0x0000142d, "ZERO_0000_142d", 0),
+ MVI(0x0000142e, "VIA_UNK_0000_142e", 0x8000000), /* Villain? */
+ MVX(0x0000142f, "VIA_UNK_0000_142f", UINT64_C(0xffe57bea2ff3fdff), 0, 0),
+ RVI(0x00001430, 0x00001433, "ZERO_0000_1430_THRU_0000_1433", 0),
+ MVX(0x00001434, "VIA_UNK_0000_1434", 0x853f0e0, UINT64_C(0xffffffff7e7b0000), 0),
+ MVI(0x00001435, "VIA_UNK_0000_1435", 0x8000838), /* Villain? */
+ MVI(0x00001436, "VIA_UNK_0000_1436", 0x200004f), /* Villain? */
+ MVX(0x00001437, "VIA_UNK_0000_1437", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001438, "VIA_UNK_0000_1438", 0x7004801c), /* Villain? */
+ MVI(0x00001439, "ZERO_0000_1439", 0),
+ MVX(0x0000143a, "VIA_UNK_0000_143a", 0x20000, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x0000143b, "ZERO_0000_143b", 0),
+ MVX(0x0000143c, "VIA_UNK_0000_143c", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000143d, "VIA_UNK_0000_143d", 0, UINT64_C(0xfffffffffffffe00), 0),
+ RVI(0x0000143e, 0x0000143f, "ZERO_0000_143e_THRU_0000_143f", 0),
+ MVX(0x00001440, "VIA_UNK_0000_1440", UINT32_C(0x80e00954), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001441, "VIA_UNK_0000_1441", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ MVX(0x00001442, "VIA_UNK_0000_1442", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ RVI(0x00001443, 0x00001448, "ZERO_0000_1443_THRU_0000_1448", 0),
+ MVI(0x00001449, "VIA_UNK_0000_1449", UINT64_C(0xfffff7e247)),
+ RVI(0x0000144a, 0x0000144f, "ZERO_0000_144a_THRU_0000_144f", 0),
+ MVX(0x00001450, "VIA_UNK_0000_1450", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0x00001451, "VIA_UNK_0000_1451", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001452, "VIA_UNK_0000_1452", 0, UINT64_C(0xffffffffff000000), 0),
+ MVI(0x00001453, "VIA_UNK_0000_1453", 0x3fffffff),
+ RVI(0x00001454, 0x0000145f, "ZERO_0000_1454_THRU_0000_145f", 0),
+ MVX(0x00001460, "VIA_UNK_0000_1460", 0, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0x00001461, "VIA_UNK_0000_1461", 0x7b, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0x00001462, "VIA_UNK_0000_1462", 0x76, UINT64_C(0xffffffffffffff00), 0),
+ MVI(0x00001463, "VIA_UNK_0000_1463", 0x4a),
+ MVI(0x00001464, "ZERO_0000_1464", 0),
+ MVI(0x00001465, "VIA_UNK_0000_1465", 0xc6),
+ MVI(0x00001466, "VIA_UNK_0000_1466", UINT64_C(0x800000053)),
+ RVI(0x00001467, 0x0000146f, "ZERO_0000_1467_THRU_0000_146f", 0),
+ MVX(0x00001470, "VIA_UNK_0000_1470", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVI(0x00001471, "VIA_UNK_0000_1471", 0x2a000000),
+ RVI(0x00001472, 0x0000147f, "ZERO_0000_1472_THRU_0000_147f", 0),
+ MVI(0x00001480, "VIA_UNK_0000_1480", 0x3907),
+ MVI(0x00001481, "VIA_UNK_0000_1481", 0x12c0),
+ MVI(0x00001482, "VIA_UNK_0000_1482", 0x320),
+ MVI(0x00001483, "VIA_UNK_0000_1483", 0x3),
+ MVI(0x00001484, "VIA_UNK_0000_1484", 0x1647),
+ MVI(0x00001485, "VIA_UNK_0000_1485", 0x3b7),
+ MVI(0x00001486, "VIA_UNK_0000_1486", 0x443),
+ RVI(0x00001487, 0x0000148f, "ZERO_0000_1487_THRU_0000_148f", 0),
+ MVX(0x00001490, "VIA_UNK_0000_1490", 0xf5, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001491, "VIA_UNK_0000_1491", 0x200, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001492, "VIA_UNK_0000_1492", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001493, "VIA_UNK_0000_1493", 0x4, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001494, "VIA_UNK_0000_1494", 0x100, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001495, "VIA_UNK_0000_1495", 0x100, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001496, "VIA_UNK_0000_1496", 0x8, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001497, "VIA_UNK_0000_1497", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001498, "VIA_UNK_0000_1498", 0xffffff, UINT64_C(0xfffffffffffffe3c), 0),
+ MVI(0x00001499, "VIA_UNK_0000_1499", 0x2c5),
+ MVI(0x0000149a, "VIA_UNK_0000_149a", 0x1c1),
+ MVI(0x0000149b, "VIA_UNK_0000_149b", 0x2c5a),
+ MVI(0x0000149c, "VIA_UNK_0000_149c", 0x1c8f),
+ RVI(0x0000149d, 0x0000149e, "ZERO_0000_149d_THRU_0000_149e", 0),
+ MVI(0x0000149f, "VIA_UNK_0000_149f", 0x1c9),
+ RVI(0x000014a0, 0x00001522, "ZERO_0000_14a0_THRU_0000_1522", 0),
+ MFN(0x00001523, "VIA_UNK_0000_1523", WriteOnly, IgnoreWrite),
+ RVI(0x00001524, 0x00003179, "ZERO_0000_1524_THRU_0000_3179", 0),
+ MVO(0x0000317a, "VIA_UNK_0000_317a", UINT64_C(0x139f29749595b8)),
+ MVO(0x0000317b, "VIA_UNK_0000_317b", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x0000317c, "ZERO_0000_317c", 0),
+ MFN(0x0000317d, "VIA_UNK_0000_317d", WriteOnly, IgnoreWrite),
+ MFN(0x0000317e, "VIA_UNK_0000_317e", WriteOnly, IgnoreWrite),
+ MVI(0x0000317f, "VIA_UNK_0000_317f", 0), /* Villain? */
+ RVI(0x00003180, 0x00003fff, "ZERO_0000_3180_THRU_0000_3fff", 0),
+ RVI(0x40000000, 0x40003fff, "ZERO_4000_0000_THRU_4000_3fff", 0),
+ RVI(0x80000000, 0x80000197, "ZERO_8000_0000_THRU_8000_0197", 0),
+ RVI(0x80000199, 0x80003fff, "ZERO_8000_0199_THRU_8000_3fff", 0),
+ RVI(0xc0000000, 0xc000007f, "ZERO_c000_0000_THRU_c000_007f", 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xffffffffffffd2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`81669af0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8166c1d0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ RVI(0xc0000085, 0xc00000ff, "ZERO_c000_0085_THRU_c000_00ff", 0),
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7f91`10bdc740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`6fd80000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ RVI(0xc0000104, 0xc0003fff, "ZERO_c000_0104_THRU_c000_3fff", 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
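In the MFX/MVX entries above, the two trailing constants read consistently as a write-ignore mask and a write-#GP mask: for AMD64_EFER (0xc0000080) the mask UINT64_C(0xffffffffffffd2fe) leaves exactly bits 0, 8, 10, 11 and 13 (SCE, LME, LMA, NXE, LMSLE) free to change, with LMA additionally covered by the 0x400 ignore mask. A sketch of a WRMSR check under that inferred reading; the struct and field names here are illustrative, not VirtualBox's actual handler:

#include <stdint.h>
#include <stdbool.h>

typedef struct MSRWRITEPOLICY
{
    uint64_t uValue;     /* current value (third MFX column)        */
    uint64_t fWrIgnMask; /* bits that keep their old value on write */
    uint64_t fWrGpMask;  /* bits the guest must not change          */
} MSRWRITEPOLICY;

/* Returns true on success, false when the write should raise #GP(0). */
static bool msrSketchWrite(MSRWRITEPOLICY *pPolicy, uint64_t uNewValue)
{
    if ((uNewValue ^ pPolicy->uValue) & pPolicy->fWrGpMask)
        return false;                       /* protected bit changed */
    pPolicy->uValue = (pPolicy->uValue &  pPolicy->fWrIgnMask)
                    | (uNewValue       & ~pPolicy->fWrIgnMask);
    return true;
}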
+
+
+/**
+ * Database entry for ZHAOXIN KaiXian KX-U5581@1.8GHz.
+ */
+static CPUMDBENTRY const g_Entry_ZHAOXIN_KaiXian_KX_U5581_1_8GHz =
+{
+ /*.pszName = */ "ZHAOXIN KaiXian KX-U5581 1.8GHz",
+ /*.pszFullName = */ "ZHAOXIN KaiXian KX-U5581@1.8GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_SHANGHAI,
+ /*.uFamily = */ 7,
+ /*.uModel = */ 11,
+ /*.uStepping = */ 5,
+ /*.enmMicroarch = */ kCpumMicroarch_Shanghai_Wudaokou,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 40,
+ /*.fMxCsrMask = */ 0x0000ffff,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_ZHAOXIN_KaiXian_KX_U5581_1_8GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_ZHAOXIN_KaiXian_KX_U5581_1_8GHz)),
+ /*.enmUnknownCpuId = */ CPUMUNKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX /** @todo */,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_ZHAOXIN_KaiXian_KX_U5581_1_8GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_ZHAOXIN_KaiXian_KX_U5581_1_8GHz),
+};
+
+#endif /* !VBOX_CPUDB_ZHAOXIN_KaiXian_KX_U5581_1_8GHz_h */
+