Diffstat (limited to 'src/VBox/VMM/include')
-rw-r--r--  src/VBox/VMM/include/APICInternal.h | 1174
-rw-r--r--  src/VBox/VMM/include/CFGMInternal.h | 144
-rw-r--r--  src/VBox/VMM/include/CPUMInternal.h | 537
-rw-r--r--  src/VBox/VMM/include/CPUMInternal.mac | 710
-rw-r--r--  src/VBox/VMM/include/DBGFInline.h | 135
-rw-r--r--  src/VBox/VMM/include/DBGFInternal.h | 1522
-rw-r--r--  src/VBox/VMM/include/EMHandleRCTmpl.h | 279
-rw-r--r--  src/VBox/VMM/include/EMInternal.h | 339
-rw-r--r--  src/VBox/VMM/include/GCMInternal.h | 66
-rw-r--r--  src/VBox/VMM/include/GIMHvInternal.h | 1380
-rw-r--r--  src/VBox/VMM/include/GIMInternal.h | 131
-rw-r--r--  src/VBox/VMM/include/GIMKvmInternal.h | 282
-rw-r--r--  src/VBox/VMM/include/GIMMinimalInternal.h | 48
-rw-r--r--  src/VBox/VMM/include/HMInternal.h | 1322
-rw-r--r--  src/VBox/VMM/include/HMInternal.mac | 278
-rw-r--r--  src/VBox/VMM/include/HMVMXCommon.h | 435
-rw-r--r--  src/VBox/VMM/include/IEMInline.h | 2880
-rw-r--r--  src/VBox/VMM/include/IEMInternal.h | 4320
-rw-r--r--  src/VBox/VMM/include/IEMMc.h | 1598
-rw-r--r--  src/VBox/VMM/include/IEMOpHlp.h | 601
-rw-r--r--  src/VBox/VMM/include/IOMInline.h | 270
-rw-r--r--  src/VBox/VMM/include/IOMInternal.h | 629
-rw-r--r--  src/VBox/VMM/include/MMInternal.h | 207
-rw-r--r--  src/VBox/VMM/include/NEMInternal.h | 669
-rw-r--r--  src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h | 576
-rw-r--r--  src/VBox/VMM/include/PDMAsyncCompletionInternal.h | 291
-rw-r--r--  src/VBox/VMM/include/PDMBlkCacheInternal.h | 344
-rw-r--r--  src/VBox/VMM/include/PDMInline.h | 52
-rw-r--r--  src/VBox/VMM/include/PDMInternal.h | 1906
-rw-r--r--  src/VBox/VMM/include/PGMGstDefs.h | 254
-rw-r--r--  src/VBox/VMM/include/PGMInline.h | 1210
-rw-r--r--  src/VBox/VMM/include/PGMInternal.h | 3884
-rw-r--r--  src/VBox/VMM/include/PGMSlatDefs.h | 141
-rw-r--r--  src/VBox/VMM/include/SELMInternal.h | 72
-rw-r--r--  src/VBox/VMM/include/SSMInternal.h | 341
-rw-r--r--  src/VBox/VMM/include/STAMInternal.h | 187
-rw-r--r--  src/VBox/VMM/include/SVMInternal.h | 89
-rw-r--r--  src/VBox/VMM/include/TMInline.h | 289
-rw-r--r--  src/VBox/VMM/include/TMInternal.h | 886
-rw-r--r--  src/VBox/VMM/include/TRPMInternal.h | 102
-rw-r--r--  src/VBox/VMM/include/VMInternal.h | 495
-rw-r--r--  src/VBox/VMM/include/VMMInternal.h | 754
-rw-r--r--  src/VBox/VMM/include/VMMInternal.mac | 119
-rw-r--r--  src/VBox/VMM/include/VMMTracing.h | 136
-rw-r--r--  src/VBox/VMM/include/VMXInternal.h | 335
45 files changed, 32419 insertions(+), 0 deletions(-)
diff --git a/src/VBox/VMM/include/APICInternal.h b/src/VBox/VMM/include/APICInternal.h
new file mode 100644
index 00000000..eb13009b
--- /dev/null
+++ b/src/VBox/VMM/include/APICInternal.h
@@ -0,0 +1,1174 @@
+/* $Id: APICInternal.h $ */
+/** @file
+ * APIC - Advanced Programmable Interrupt Controller.
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_APICInternal_h
+#define VMM_INCLUDED_SRC_include_APICInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/apic.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/pdmdev.h>
+
+/** @defgroup grp_apic_int Internal
+ * @ingroup grp_apic
+ * @internal
+ * @{
+ */
+
+/** The APIC hardware version we are emulating. */
+#define XAPIC_HARDWARE_VERSION XAPIC_HARDWARE_VERSION_P4
+
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+#define XAPIC_SVR_VALID XAPIC_SVR_VALID_P4
+#define XAPIC_ID_BROADCAST_MASK XAPIC_ID_BROADCAST_MASK_P4
+#else
+# error "Implement Pentium and P6 family APIC architectures"
+#endif
+
+#define VMCPU_TO_XAPICPAGE(a_pVCpu) ((PXAPICPAGE)(CTX_SUFF((a_pVCpu)->apic.s.pvApicPage)))
+#define VMCPU_TO_CXAPICPAGE(a_pVCpu) ((PCXAPICPAGE)(CTX_SUFF((a_pVCpu)->apic.s.pvApicPage)))
+
+#define VMCPU_TO_X2APICPAGE(a_pVCpu) ((PX2APICPAGE)(CTX_SUFF((a_pVCpu)->apic.s.pvApicPage)))
+#define VMCPU_TO_CX2APICPAGE(a_pVCpu) ((PCX2APICPAGE)(CTX_SUFF((a_pVCpu)->apic.s.pvApicPage)))
+
+#define VMCPU_TO_APICCPU(a_pVCpu) (&(a_pVCpu)->apic.s)
+#define VM_TO_APIC(a_pVM) (&(a_pVM)->apic.s)
+#define VM_TO_APICDEV(a_pVM) CTX_SUFF(VM_TO_APIC(a_pVM)->pApicDev)
+#ifdef IN_RING3
+# define VMCPU_TO_DEVINS(a_pVCpu) ((a_pVCpu)->pVMR3->apic.s.pDevInsR3)
+#elif defined(IN_RING0)
+# define VMCPU_TO_DEVINS(a_pVCpu) ((a_pVCpu)->pGVM->apicr0.s.pDevInsR0)
+#endif
+
+#define APICCPU_TO_XAPICPAGE(a_ApicCpu) ((PXAPICPAGE)(CTX_SUFF((a_ApicCpu)->pvApicPage)))
+#define APICCPU_TO_CXAPICPAGE(a_ApicCpu) ((PCXAPICPAGE)(CTX_SUFF((a_ApicCpu)->pvApicPage)))
+
+/** Vector offset in an APIC 256-bit sparse register. */
+#define XAPIC_REG256_VECTOR_OFF(a_Vector) (((a_Vector) & UINT32_C(0xe0)) >> 1)
+/** Bit position at offset in an APIC 256-bit sparse register. */
+#define XAPIC_REG256_VECTOR_BIT(a_Vector) ((a_Vector) & UINT32_C(0x1f))
+
+/** Maximum valid offset for a register (16-byte aligned, 4 byte wide access). */
+#define XAPIC_OFF_MAX_VALID (sizeof(XAPICPAGE) - 4 * sizeof(uint32_t))
+
+/** Whether the APIC is in X2APIC mode or not. */
+#define XAPIC_IN_X2APIC_MODE(a_pVCpu) ( ( ((a_pVCpu)->apic.s.uApicBaseMsr) \
+ & (MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD)) \
+ == (MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD) )
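Illustrative aside: the mode test above requires both IA32_APIC_BASE enable bits. A minimal sketch, with a hypothetical helper name, of the same check applied directly to an MSR value:

DECLINLINE(bool) apicSketchIsX2ApicBaseMsr(uint64_t uApicBaseMsr)
{
    /* x2APIC mode requires both the global enable bit (EN) and the extension bit (EXTD). */
    uint64_t const fMask = MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
    return (uApicBaseMsr & fMask) == fMask;
}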
+
+/**
+ * The xAPIC sparse 256-bit register.
+ */
+typedef union XAPIC256BITREG
+{
+ /** The sparse-bitmap view. */
+ struct
+ {
+ uint32_t u32Reg;
+ uint32_t uReserved0[3];
+ } u[8];
+ /** The 32-bit view. */
+ uint32_t au32[32];
+} XAPIC256BITREG;
+/** Pointer to an xAPIC sparse bitmap register. */
+typedef XAPIC256BITREG *PXAPIC256BITREG;
+/** Pointer to a const xAPIC sparse bitmap register. */
+typedef XAPIC256BITREG const *PCXAPIC256BITREG;
+AssertCompileSize(XAPIC256BITREG, 128);
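As a hedged illustration of how XAPIC_REG256_VECTOR_OFF/BIT (defined above) index this sparse layout, a hypothetical helper (not part of the header) that tests whether a vector is set in an ISR/TMR/IRR-style register:

DECLINLINE(bool) apicSketchTestVectorInReg(PCXAPIC256BITREG pReg, uint8_t uVector)
{
    uint16_t const offReg = XAPIC_REG256_VECTOR_OFF(uVector);  /* 16-byte stride per group of 32 vectors. */
    uint32_t const iBit   = XAPIC_REG256_VECTOR_BIT(uVector);  /* Bit inside the 32-bit fragment at that offset. */
    return RT_BOOL(pReg->au32[offReg >> 2] & RT_BIT_32(iBit)); /* au32 view: 4 bytes per index. */
}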
+
+/**
+ * The xAPIC memory layout as per Intel/AMD specs.
+ */
+typedef struct XAPICPAGE
+{
+ /* 0x00 - Reserved. */
+ uint32_t uReserved0[8];
+ /* 0x20 - APIC ID. */
+ struct
+ {
+ uint8_t u8Reserved0[3];
+ uint8_t u8ApicId;
+ uint32_t u32Reserved0[3];
+ } id;
+ /* 0x30 - APIC version register. */
+ union
+ {
+ struct
+ {
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+ uint8_t u8Version;
+#else
+# error "Implement Pentium and P6 family APIC architectures"
+#endif
+ uint8_t uReserved0;
+ uint8_t u8MaxLvtEntry;
+ uint8_t fEoiBroadcastSupression : 1;
+ uint8_t u7Reserved1 : 7;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Version;
+ uint32_t u32Reserved0[3];
+ } all;
+ } version;
+ /* 0x40 - Reserved. */
+ uint32_t uReserved1[16];
+ /* 0x80 - Task Priority Register (TPR). */
+ struct
+ {
+ uint8_t u8Tpr;
+ uint8_t u8Reserved0[3];
+ uint32_t u32Reserved0[3];
+ } tpr;
+ /* 0x90 - Arbitration Priority Register (APR). */
+ struct
+ {
+ uint8_t u8Apr;
+ uint8_t u8Reserved0[3];
+ uint32_t u32Reserved0[3];
+ } apr;
+ /* 0xA0 - Processor Priority Register (PPR). */
+ struct
+ {
+ uint8_t u8Ppr;
+ uint8_t u8Reserved0[3];
+ uint32_t u32Reserved0[3];
+ } ppr;
+ /* 0xB0 - End Of Interrupt Register (EOI). */
+ struct
+ {
+ uint32_t u32Eoi;
+ uint32_t u32Reserved0[3];
+ } eoi;
+ /* 0xC0 - Remote Read Register (RRD). */
+ struct
+ {
+ uint32_t u32Rrd;
+ uint32_t u32Reserved0[3];
+ } rrd;
+ /* 0xD0 - Logical Destination Register (LDR). */
+ union
+ {
+ struct
+ {
+ uint8_t u8Reserved0[3];
+ uint8_t u8LogicalApicId;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Ldr;
+ uint32_t u32Reserved0[3];
+ } all;
+ } ldr;
+ /* 0xE0 - Destination Format Register (DFR). */
+ union
+ {
+ struct
+ {
+ uint32_t u28ReservedMb1 : 28; /* MB1 */
+ uint32_t u4Model : 4;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Dfr;
+ uint32_t u32Reserved0[3];
+ } all;
+ } dfr;
+ /* 0xF0 - Spurious-Interrupt Vector Register (SVR). */
+ union
+ {
+ struct
+ {
+ uint32_t u8SpuriousVector : 8;
+ uint32_t fApicSoftwareEnable : 1;
+ uint32_t u3Reserved0 : 3;
+ uint32_t fSupressEoiBroadcast : 1;
+ uint32_t u19Reserved1 : 19;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Svr;
+ uint32_t u32Reserved0[3];
+ } all;
+ } svr;
+ /* 0x100 - In-service Register (ISR). */
+ XAPIC256BITREG isr;
+ /* 0x180 - Trigger Mode Register (TMR). */
+ XAPIC256BITREG tmr;
+ /* 0x200 - Interrupt Request Register (IRR). */
+ XAPIC256BITREG irr;
+ /* 0x280 - Error Status Register (ESR). */
+ union
+ {
+ struct
+ {
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+ uint32_t u4Reserved0 : 4;
+#else
+# error "Implement Pentium and P6 family APIC architectures"
+#endif
+ uint32_t fRedirectableIpi : 1;
+ uint32_t fSendIllegalVector : 1;
+ uint32_t fRcvdIllegalVector : 1;
+ uint32_t fIllegalRegAddr : 1;
+ uint32_t u24Reserved1 : 24;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Errors;
+ uint32_t u32Reserved0[3];
+ } all;
+ } esr;
+ /* 0x290 - Reserved. */
+ uint32_t uReserved2[28];
+ /* 0x300 - Interrupt Command Register (ICR) - Low. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1DestMode : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t fReserved0 : 1;
+ uint32_t u1Level : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u2Reserved1 : 2;
+ uint32_t u2DestShorthand : 2;
+ uint32_t u12Reserved2 : 12;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32IcrLo;
+ uint32_t u32Reserved0[3];
+ } all;
+ } icr_lo;
+ /* 0x310 - Interrupt Command Register (ICR) - High. */
+ union
+ {
+ struct
+ {
+ uint32_t u24Reserved0 : 24;
+ uint32_t u8Dest : 8;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32IcrHi;
+ uint32_t u32Reserved0[3];
+ } all;
+ } icr_hi;
+ /* 0x320 - Local Vector Table (LVT) Timer Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u4Reserved0 : 4;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u2TimerMode : 2;
+ uint32_t u13Reserved2 : 13;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtTimer;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_timer;
+ /* 0x330 - Local Vector Table (LVT) Thermal Sensor Register. */
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtThermal;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_thermal;
+#else
+# error "Implement Pentium and P6 family APIC architectures"
+#endif
+ /* 0x340 - Local Vector Table (LVT) Performance Monitor Counter (PMC) Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtPerf;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_perf;
+ /* 0x350 - Local Vector Table (LVT) LINT0 Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u1IntrPolarity : 1;
+ uint32_t u1RemoteIrr : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtLint0;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_lint0;
+ /* 0x360 - Local Vector Table (LVT) LINT1 Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u1IntrPolarity : 1;
+ uint32_t u1RemoteIrr : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtLint1;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_lint1;
+ /* 0x370 - Local Vector Table (LVT) Error Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u4Reserved0 : 4;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtError;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_error;
+ /* 0x380 - Timer Initial Counter Register. */
+ struct
+ {
+ uint32_t u32InitialCount;
+ uint32_t u32Reserved0[3];
+ } timer_icr;
+ /* 0x390 - Timer Current Counter Register. */
+ struct
+ {
+ uint32_t u32CurrentCount;
+ uint32_t u32Reserved0[3];
+ } timer_ccr;
+ /* 0x3A0 - Reserved. */
+ uint32_t u32Reserved3[16];
+ /* 0x3E0 - Timer Divide Configuration Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u2DivideValue0 : 2;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DivideValue1 : 1;
+ uint32_t u28Reserved1 : 28;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32DivideValue;
+ uint32_t u32Reserved0[3];
+ } all;
+ } timer_dcr;
+ /* 0x3F0 - Reserved. */
+ uint8_t u8Reserved0[3088];
+} XAPICPAGE;
+/** Pointer to a XAPICPAGE struct. */
+typedef XAPICPAGE *PXAPICPAGE;
+/** Pointer to a const XAPICPAGE struct. */
+typedef const XAPICPAGE *PCXAPICPAGE;
+AssertCompileSize(XAPICPAGE, 4096);
+AssertCompileMemberOffset(XAPICPAGE, id, XAPIC_OFF_ID);
+AssertCompileMemberOffset(XAPICPAGE, version, XAPIC_OFF_VERSION);
+AssertCompileMemberOffset(XAPICPAGE, tpr, XAPIC_OFF_TPR);
+AssertCompileMemberOffset(XAPICPAGE, apr, XAPIC_OFF_APR);
+AssertCompileMemberOffset(XAPICPAGE, ppr, XAPIC_OFF_PPR);
+AssertCompileMemberOffset(XAPICPAGE, eoi, XAPIC_OFF_EOI);
+AssertCompileMemberOffset(XAPICPAGE, rrd, XAPIC_OFF_RRD);
+AssertCompileMemberOffset(XAPICPAGE, ldr, XAPIC_OFF_LDR);
+AssertCompileMemberOffset(XAPICPAGE, dfr, XAPIC_OFF_DFR);
+AssertCompileMemberOffset(XAPICPAGE, svr, XAPIC_OFF_SVR);
+AssertCompileMemberOffset(XAPICPAGE, isr, XAPIC_OFF_ISR0);
+AssertCompileMemberOffset(XAPICPAGE, tmr, XAPIC_OFF_TMR0);
+AssertCompileMemberOffset(XAPICPAGE, irr, XAPIC_OFF_IRR0);
+AssertCompileMemberOffset(XAPICPAGE, esr, XAPIC_OFF_ESR);
+AssertCompileMemberOffset(XAPICPAGE, icr_lo, XAPIC_OFF_ICR_LO);
+AssertCompileMemberOffset(XAPICPAGE, icr_hi, XAPIC_OFF_ICR_HI);
+AssertCompileMemberOffset(XAPICPAGE, lvt_timer, XAPIC_OFF_LVT_TIMER);
+AssertCompileMemberOffset(XAPICPAGE, lvt_thermal, XAPIC_OFF_LVT_THERMAL);
+AssertCompileMemberOffset(XAPICPAGE, lvt_perf, XAPIC_OFF_LVT_PERF);
+AssertCompileMemberOffset(XAPICPAGE, lvt_lint0, XAPIC_OFF_LVT_LINT0);
+AssertCompileMemberOffset(XAPICPAGE, lvt_lint1, XAPIC_OFF_LVT_LINT1);
+AssertCompileMemberOffset(XAPICPAGE, lvt_error, XAPIC_OFF_LVT_ERROR);
+AssertCompileMemberOffset(XAPICPAGE, timer_icr, XAPIC_OFF_TIMER_ICR);
+AssertCompileMemberOffset(XAPICPAGE, timer_ccr, XAPIC_OFF_TIMER_CCR);
+AssertCompileMemberOffset(XAPICPAGE, timer_dcr, XAPIC_OFF_TIMER_DCR);
+
+/**
+ * The x2APIC memory layout as per Intel/AMD specs.
+ */
+typedef struct X2APICPAGE
+{
+ /* 0x00 - Reserved. */
+ uint32_t uReserved0[8];
+ /* 0x20 - APIC ID. */
+ struct
+ {
+ uint32_t u32ApicId;
+ uint32_t u32Reserved0[3];
+ } id;
+ /* 0x30 - APIC version register. */
+ union
+ {
+ struct
+ {
+ uint8_t u8Version;
+ uint8_t u8Reserved0;
+ uint8_t u8MaxLvtEntry;
+ uint8_t fEoiBroadcastSupression : 1;
+ uint8_t u7Reserved1 : 7;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Version;
+ uint32_t u32Reserved2[3];
+ } all;
+ } version;
+ /* 0x40 - Reserved. */
+ uint32_t uReserved1[16];
+ /* 0x80 - Task Priority Register (TPR). */
+ struct
+ {
+ uint8_t u8Tpr;
+ uint8_t u8Reserved0[3];
+ uint32_t u32Reserved0[3];
+ } tpr;
+ /* 0x90 - Reserved. */
+ uint32_t uReserved2[4];
+ /* 0xA0 - Processor Priority Register (PPR). */
+ struct
+ {
+ uint8_t u8Ppr;
+ uint8_t u8Reserved0[3];
+ uint32_t u32Reserved0[3];
+ } ppr;
+ /* 0xB0 - End Of Interrupt Register (EOI). */
+ struct
+ {
+ uint32_t u32Eoi;
+ uint32_t u32Reserved0[3];
+ } eoi;
+ /* 0xC0 - Remote Read Register (RRD). */
+ struct
+ {
+ uint32_t u32Rrd;
+ uint32_t u32Reserved0[3];
+ } rrd;
+ /* 0xD0 - Logical Destination Register (LDR). */
+ struct
+ {
+ uint32_t u32LogicalApicId;
+ uint32_t u32Reserved1[3];
+ } ldr;
+ /* 0xE0 - Reserved. */
+ uint32_t uReserved3[4];
+ /* 0xF0 - Spurious-Interrupt Vector Register (SVR). */
+ union
+ {
+ struct
+ {
+ uint32_t u8SpuriousVector : 8;
+ uint32_t fApicSoftwareEnable : 1;
+ uint32_t u3Reserved0 : 3;
+ uint32_t fSupressEoiBroadcast : 1;
+ uint32_t u19Reserved1 : 19;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Svr;
+ uint32_t uReserved0[3];
+ } all;
+ } svr;
+ /* 0x100 - In-service Register (ISR). */
+ XAPIC256BITREG isr;
+ /* 0x180 - Trigger Mode Register (TMR). */
+ XAPIC256BITREG tmr;
+ /* 0x200 - Interrupt Request Register (IRR). */
+ XAPIC256BITREG irr;
+ /* 0x280 - Error Status Register (ESR). */
+ union
+ {
+ struct
+ {
+#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
+ uint32_t u4Reserved0 : 4;
+#else
+# error "Implement Pentium and P6 family APIC architectures"
+#endif
+ uint32_t fRedirectableIpi : 1;
+ uint32_t fSendIllegalVector : 1;
+ uint32_t fRcvdIllegalVector : 1;
+ uint32_t fIllegalRegAddr : 1;
+ uint32_t u24Reserved1 : 24;
+ uint32_t uReserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32Errors;
+ uint32_t u32Reserved0[3];
+ } all;
+ } esr;
+ /* 0x290 - Reserved. */
+ uint32_t uReserved4[28];
+ /* 0x300 - Interrupt Command Register (ICR) - Low. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1DestMode : 1;
+ uint32_t u2Reserved0 : 2;
+ uint32_t u1Level : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u2Reserved1 : 2;
+ uint32_t u2DestShorthand : 2;
+ uint32_t u12Reserved2 : 12;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32IcrLo;
+ uint32_t u32Reserved3[3];
+ } all;
+ } icr_lo;
+ /* 0x310 - Interrupt Command Register (ICR) - High. */
+ struct
+ {
+ uint32_t u32IcrHi;
+ uint32_t uReserved1[3];
+ } icr_hi;
+ /* 0x320 - Local Vector Table (LVT) Timer Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u4Reserved0 : 4;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u2TimerMode : 2;
+ uint32_t u13Reserved2 : 13;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtTimer;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_timer;
+ /* 0x330 - Local Vector Table (LVT) Thermal Sensor Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtThermal;
+ uint32_t uReserved0[3];
+ } all;
+ } lvt_thermal;
+ /* 0x340 - Local Vector Table (LVT) Performance Monitor Counter (PMC) Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtPerf;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_perf;
+ /* 0x350 - Local Vector Table (LVT) LINT0 Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u1IntrPolarity : 1;
+ uint32_t u1RemoteIrr : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtLint0;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_lint0;
+ /* 0x360 - Local Vector Table (LVT) LINT1 Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u3DeliveryMode : 3;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u1IntrPolarity : 1;
+ uint32_t u1RemoteIrr : 1;
+ uint32_t u1TriggerMode : 1;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtLint1;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_lint1;
+ /* 0x370 - Local Vector Table (LVT) Error Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u4Reserved0 : 4;
+ uint32_t u1DeliveryStatus : 1;
+ uint32_t u3Reserved1 : 3;
+ uint32_t u1Mask : 1;
+ uint32_t u15Reserved2 : 15;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32LvtError;
+ uint32_t u32Reserved0[3];
+ } all;
+ } lvt_error;
+ /* 0x380 - Timer Initial Counter Register. */
+ struct
+ {
+ uint32_t u32InitialCount;
+ uint32_t u32Reserved0[3];
+ } timer_icr;
+ /* 0x390 - Timer Current Counter Register. */
+ struct
+ {
+ uint32_t u32CurrentCount;
+ uint32_t u32Reserved0[3];
+ } timer_ccr;
+ /* 0x3A0 - Reserved. */
+ uint32_t uReserved5[16];
+ /* 0x3E0 - Timer Divide Configuration Register. */
+ union
+ {
+ struct
+ {
+ uint32_t u2DivideValue0 : 2;
+ uint32_t u1Reserved0 : 1;
+ uint32_t u1DivideValue1 : 1;
+ uint32_t u28Reserved1 : 28;
+ uint32_t u32Reserved0[3];
+ } u;
+ struct
+ {
+ uint32_t u32DivideValue;
+ uint32_t u32Reserved0[3];
+ } all;
+ } timer_dcr;
+ /* 0x3F0 - Self IPI Register. */
+ struct
+ {
+ uint32_t u8Vector : 8;
+ uint32_t u24Reserved0 : 24;
+ uint32_t u32Reserved0[3];
+ } self_ipi;
+ /* 0x400 - Reserved. */
+ uint8_t u8Reserved0[3072];
+} X2APICPAGE;
+/** Pointer to a X2APICPAGE struct. */
+typedef X2APICPAGE *PX2APICPAGE;
+/** Pointer to a const X2APICPAGE struct. */
+typedef const X2APICPAGE *PCX2APICPAGE;
+AssertCompileSize(X2APICPAGE, 4096);
+AssertCompileSize(X2APICPAGE, sizeof(XAPICPAGE));
+AssertCompileMemberOffset(X2APICPAGE, id, XAPIC_OFF_ID);
+AssertCompileMemberOffset(X2APICPAGE, version, XAPIC_OFF_VERSION);
+AssertCompileMemberOffset(X2APICPAGE, tpr, XAPIC_OFF_TPR);
+AssertCompileMemberOffset(X2APICPAGE, ppr, XAPIC_OFF_PPR);
+AssertCompileMemberOffset(X2APICPAGE, eoi, XAPIC_OFF_EOI);
+AssertCompileMemberOffset(X2APICPAGE, rrd, XAPIC_OFF_RRD);
+AssertCompileMemberOffset(X2APICPAGE, ldr, XAPIC_OFF_LDR);
+AssertCompileMemberOffset(X2APICPAGE, svr, XAPIC_OFF_SVR);
+AssertCompileMemberOffset(X2APICPAGE, isr, XAPIC_OFF_ISR0);
+AssertCompileMemberOffset(X2APICPAGE, tmr, XAPIC_OFF_TMR0);
+AssertCompileMemberOffset(X2APICPAGE, irr, XAPIC_OFF_IRR0);
+AssertCompileMemberOffset(X2APICPAGE, esr, XAPIC_OFF_ESR);
+AssertCompileMemberOffset(X2APICPAGE, icr_lo, XAPIC_OFF_ICR_LO);
+AssertCompileMemberOffset(X2APICPAGE, icr_hi, XAPIC_OFF_ICR_HI);
+AssertCompileMemberOffset(X2APICPAGE, lvt_timer, XAPIC_OFF_LVT_TIMER);
+AssertCompileMemberOffset(X2APICPAGE, lvt_thermal, XAPIC_OFF_LVT_THERMAL);
+AssertCompileMemberOffset(X2APICPAGE, lvt_perf, XAPIC_OFF_LVT_PERF);
+AssertCompileMemberOffset(X2APICPAGE, lvt_lint0, XAPIC_OFF_LVT_LINT0);
+AssertCompileMemberOffset(X2APICPAGE, lvt_lint1, XAPIC_OFF_LVT_LINT1);
+AssertCompileMemberOffset(X2APICPAGE, lvt_error, XAPIC_OFF_LVT_ERROR);
+AssertCompileMemberOffset(X2APICPAGE, timer_icr, XAPIC_OFF_TIMER_ICR);
+AssertCompileMemberOffset(X2APICPAGE, timer_ccr, XAPIC_OFF_TIMER_CCR);
+AssertCompileMemberOffset(X2APICPAGE, timer_dcr, XAPIC_OFF_TIMER_DCR);
+AssertCompileMemberOffset(X2APICPAGE, self_ipi, X2APIC_OFF_SELF_IPI);
+
+/**
+ * APIC MSR access error.
+ * @note The values must match the array indices in apicMsrAccessError().
+ */
+typedef enum APICMSRACCESS
+{
+ /** MSR read while not in x2APIC. */
+ APICMSRACCESS_INVALID_READ_MODE = 0,
+ /** MSR write while not in x2APIC. */
+ APICMSRACCESS_INVALID_WRITE_MODE,
+ /** MSR read for a reserved/unknown/invalid MSR. */
+ APICMSRACCESS_READ_RSVD_OR_UNKNOWN,
+ /** MSR write for a reserved/unknown/invalid MSR. */
+ APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN,
+ /** MSR read for a write-only MSR. */
+ APICMSRACCESS_READ_WRITE_ONLY,
+ /** MSR write for a read-only MSR. */
+ APICMSRACCESS_WRITE_READ_ONLY,
+ /** MSR read to reserved bits. */
+ APICMSRACCESS_READ_RSVD_BITS,
+ /** MSR write to reserved bits. */
+ APICMSRACCESS_WRITE_RSVD_BITS,
+ /** MSR write with invalid value. */
+ APICMSRACCESS_WRITE_INVALID,
+ /** MSR write disallowed due to incompatible config. */
+ APICMSRACCESS_WRITE_DISALLOWED_CONFIG,
+ /** MSR read disallowed due to incompatible config. */
+ APICMSRACCESS_READ_DISALLOWED_CONFIG,
+ /** Count of enum members (don't use). */
+ APICMSRACCESS_COUNT
+} APICMSRACCESS;
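A minimal sketch of the array-index pattern the @note above refers to; the table contents and name are assumptions, the real table lives in apicMsrAccessError():

static const char * const g_apszSketchMsrAccessKind[] =
{
    "read while not in x2APIC mode",  "write while not in x2APIC mode",
    "read of reserved/unknown MSR",   "write to reserved/unknown MSR",
    "read of write-only MSR",         "write to read-only MSR",
    "read of reserved bits",          "write to reserved bits",
    "write of invalid value",         "write disallowed by config",
    "read disallowed by config",
};
AssertCompile(RT_ELEMENTS(g_apszSketchMsrAccessKind) == APICMSRACCESS_COUNT); /* One entry per enum value. */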
+
+
+/** @def APIC_CACHE_LINE_SIZE
+ * Padding (in bytes) for aligning data in different cache lines. Present
+ * generation x86 CPUs use 64-byte cache lines[1]. However, Intel NetBurst
+ * architecture supposedly uses 128-byte cache lines[2]. Since 128 is a
+ * multiple of 64, we use the larger one here.
+ *
+ * [1] - Intel spec "Table 11-1. Characteristics of the Caches, TLBs, Store
+ * Buffer, and Write Combining Buffer in Intel 64 and IA-32 Processors"
+ * [2] - Intel spec. 8.10.6.7 "Place Locks and Semaphores in Aligned, 128-Byte
+ * Blocks of Memory".
+ */
+#define APIC_CACHE_LINE_SIZE 128
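To make the padding rule concrete, a minimal sketch (the struct name is hypothetical) of how a hot per-CPU field is padded out to a full cache line; APICPIB below follows the same pattern:

typedef struct APICSKETCHPADDED
{
    uint64_t volatile u64HotCounter;                                      /* Frequently written field. */
    uint8_t           abPadding[APIC_CACHE_LINE_SIZE - sizeof(uint64_t)]; /* Keep neighbours in other cache lines. */
} APICSKETCHPADDED;
AssertCompileSize(APICSKETCHPADDED, APIC_CACHE_LINE_SIZE);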
+
+/**
+ * APIC Pending-Interrupt Bitmap (PIB).
+ */
+typedef struct APICPIB
+{
+ uint64_t volatile au64VectorBitmap[4];
+ uint32_t volatile fOutstandingNotification;
+ uint8_t au8Reserved[APIC_CACHE_LINE_SIZE - sizeof(uint32_t) - (sizeof(uint64_t) * 4)];
+} APICPIB;
+AssertCompileMemberOffset(APICPIB, fOutstandingNotification, 256 / 8);
+AssertCompileSize(APICPIB, APIC_CACHE_LINE_SIZE);
+/** Pointer to a pending-interrupt bitmap. */
+typedef APICPIB *PAPICPIB;
+/** Pointer to a const pending-interrupt bitmap. */
+typedef const APICPIB *PCAPICPIB;
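A hedged sketch of how the PIB is consumed (the helper is hypothetical and assumes iprt/asm.h; the real logic lives in apicPostInterrupt()): posting a vector means setting its bit and raising the notification flag.

DECLINLINE(void) apicSketchPibPostVector(PAPICPIB pPib, uint8_t uVector)
{
    ASMAtomicBitSet(&pPib->au64VectorBitmap[0], uVector);   /* Mark the vector as pending. */
    ASMAtomicWriteU32(&pPib->fOutstandingNotification, 1);  /* Tell the consumer there is work to do. */
}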
+
+/**
+ * APIC PDM instance data (per-VM).
+ */
+typedef struct APICDEV
+{
+ /** The MMIO handle. */
+ IOMMMIOHANDLE hMmio;
+} APICDEV;
+/** Pointer to an APIC device. */
+typedef APICDEV *PAPICDEV;
+/** Pointer to a const APIC device. */
+typedef APICDEV const *PCAPICDEV;
+
+
+/**
+ * The APIC GVM instance data.
+ */
+typedef struct APICR0PERVM
+{
+ /** The ring-0 device instance. */
+ PPDMDEVINSR0 pDevInsR0;
+} APICR0PERVM;
+
+
+/**
+ * APIC VM Instance data.
+ */
+typedef struct APIC
+{
+ /** The ring-3 device instance. */
+ PPDMDEVINSR3 pDevInsR3;
+
+ /** @name The APIC pending-interrupt bitmap (PIB).
+ * @{ */
+ /** The host-context physical address of the PIB. */
+ RTHCPHYS HCPhysApicPib;
+ /** The ring-0 memory object of the PIB. */
+ RTR0MEMOBJ hMemObjApicPibR0;
+ /** The ring-3 mapping of the memory object of the PIB. */
+ RTR0MEMOBJ hMapObjApicPibR0;
+ /** The APIC PIB virtual address - R0 ptr. */
+ R0PTRTYPE(void *) pvApicPibR0;
+ /** The APIC PIB virtual address - R3 ptr. */
+ R3PTRTYPE(void *) pvApicPibR3;
+ /** The size of the page in bytes. */
+ uint32_t cbApicPib;
+ /** @} */
+
+ /** @name Other miscellaneous data.
+ * @{ */
+ /** Whether full APIC register virtualization is enabled. */
+ bool fVirtApicRegsEnabled;
+ /** Whether posted-interrupt processing is enabled. */
+ bool fPostedIntrsEnabled;
+ /** Whether TSC-deadline timer mode is supported for the guest. */
+ bool fSupportsTscDeadline;
+ /** Whether this VM has an IO-APIC. */
+ bool fIoApicPresent;
+ /** Whether R0 is enabled or not (applies to MSR handling as well). */
+ bool fR0Enabled;
+ /** Whether RC is enabled or not (applies to MSR handling as well). */
+ bool fRCEnabled;
+ /** Whether Hyper-V x2APIC compatibility mode is enabled. */
+ bool fHyperVCompatMode;
+ /** Enable horrible macOS workaround where the ID register has the value
+ * shifted up 24 bits to be compatible with buggy code in
+ * i386_init.c/vstart(). Only applied if we're in typical macOS 64-bit
+ * kernel load area and macOS kernel selector value (8), as we must not ever
+ * apply this to the EFI code. */
+ bool fMacOSWorkaround;
+ /** The max supported APIC mode from CFGM. */
+ PDMAPICMODE enmMaxMode;
+ /** @} */
+} APIC;
+/** Pointer to APIC VM instance data. */
+typedef APIC *PAPIC;
+/** Pointer to const APIC VM instance data. */
+typedef APIC const *PCAPIC;
+AssertCompileMemberAlignment(APIC, cbApicPib, 8);
+AssertCompileSizeAlignment(APIC, 8);
+
+/**
+ * APIC VMCPU Instance data.
+ */
+typedef struct APICCPU
+{
+ /** @name The APIC page.
+ * @{ */
+ /** The host-context physical address of the page. */
+ RTHCPHYS HCPhysApicPage;
+ /** The ring-0 memory object of the page. */
+ RTR0MEMOBJ hMemObjApicPageR0;
+ /** The ring-3 mapping of the memory object of the page. */
+ RTR0MEMOBJ hMapObjApicPageR0;
+ /** The APIC page virtual address - R0 ptr. */
+ R0PTRTYPE(void *) pvApicPageR0;
+ /** The APIC page virtual address - R3 ptr. */
+ R3PTRTYPE(void *) pvApicPageR3;
+ /** The size of the page in bytes. */
+ uint32_t cbApicPage;
+ /** @} */
+
+ /** @name Auxiliary state.
+ * @{ */
+ /** The error status register's internal state. */
+ uint32_t uEsrInternal;
+ /** The APIC base MSR.*/
+ uint64_t volatile uApicBaseMsr;
+ /** @} */
+
+ /** @name The pending-interrupt bitmaps (PIB).
+ * @{ */
+ /** The host-context physical address of the page. */
+ RTHCPHYS HCPhysApicPib;
+ /** The APIC PIB virtual address - R0 ptr. */
+ R0PTRTYPE(void *) pvApicPibR0;
+ /** The APIC PIB virtual address - R3 ptr. */
+ R3PTRTYPE(void *) pvApicPibR3;
+ /** The APIC PIB for level-sensitive interrupts. */
+ APICPIB ApicPibLevel;
+ /** @} */
+
+ /** @name Other miscellaneous data.
+ * @{ */
+ /** Whether the LINT0 interrupt line is active. */
+ bool volatile fActiveLint0;
+ /** Whether the LINT1 interrupt line is active. */
+ bool volatile fActiveLint1;
+ /** Alignment padding. */
+ uint8_t auAlignment2[6];
+ /** The source tags corresponding to each interrupt vector (debugging). */
+ uint32_t auSrcTags[256];
+ /** @} */
+
+ /** @name The APIC timer.
+ * @{ */
+ /** The timer. */
+ TMTIMERHANDLE hTimer;
+ /** The time stamp when the timer was initialized.
+ * @note Access protected by the timer critsect. */
+ uint64_t u64TimerInitial;
+ /** Cached timer initial count used for the last frequency hint to TM. */
+ uint32_t uHintedTimerInitialCount;
+ /** Cached timer shift used for the last frequency hint to TM. */
+ uint32_t uHintedTimerShift;
+ /** The timer description. */
+ char szTimerDesc[16];
+ /** @} */
+
+ /** @name Log Max counters
+ * @{ */
+ uint32_t cLogMaxAccessError;
+ uint32_t cLogMaxSetApicBaseAddr;
+ uint32_t cLogMaxGetApicBaseAddr;
+ uint32_t uAlignment4;
+ /** @} */
+
+ /** @name APIC statistics.
+ * @{ */
+#ifdef VBOX_WITH_STATISTICS
+ /** Number of MMIO reads in RZ. */
+ STAMCOUNTER StatMmioReadRZ;
+ /** Number of MMIO reads in R3. */
+ STAMCOUNTER StatMmioReadR3;
+
+ /** Number of MMIO writes in RZ. */
+ STAMCOUNTER StatMmioWriteRZ;
+ /** Number of MMIO writes in R3. */
+ STAMCOUNTER StatMmioWriteR3;
+
+ /** Number of MSR reads in RZ. */
+ STAMCOUNTER StatMsrReadRZ;
+ /** Number of MSR reads in R3. */
+ STAMCOUNTER StatMsrReadR3;
+
+ /** Number of MSR writes in RZ. */
+ STAMCOUNTER StatMsrWriteRZ;
+ /** Number of MSR writes in R3. */
+ STAMCOUNTER StatMsrWriteR3;
+
+ /** Profiling of APICUpdatePendingInterrupts(). */
+ STAMPROFILE StatUpdatePendingIntrs;
+ /** Profiling of apicPostInterrupt(). */
+ STAMPROFILE StatPostIntr;
+ /** Number of times an interrupt is already pending in
+ * apicPostInterrupt(). */
+ STAMCOUNTER StatPostIntrAlreadyPending;
+ /** Number of times the timer callback is invoked. */
+ STAMCOUNTER StatTimerCallback;
+ /** Number of times the TPR is written. */
+ STAMCOUNTER StatTprWrite;
+ /** Number of times the TPR is read. */
+ STAMCOUNTER StatTprRead;
+ /** Number of times the EOI is written. */
+ STAMCOUNTER StatEoiWrite;
+ /** Number of times TPR masks an interrupt in apicGetInterrupt(). */
+ STAMCOUNTER StatMaskedByTpr;
+ /** Number of times PPR masks an interrupt in apicGetInterrupt(). */
+ STAMCOUNTER StatMaskedByPpr;
+ /** Number of times the timer ICR is written. */
+ STAMCOUNTER StatTimerIcrWrite;
+ /** Number of times the ICR Lo (send IPI) is written. */
+ STAMCOUNTER StatIcrLoWrite;
+ /** Number of times the ICR Hi is written. */
+ STAMCOUNTER StatIcrHiWrite;
+ /** Number of times the full ICR (x2APIC send IPI) is written. */
+ STAMCOUNTER StatIcrFullWrite;
+ /** Number of times the DCR is written. */
+ STAMCOUNTER StatDcrWrite;
+ /** Number of times the DFR is written. */
+ STAMCOUNTER StatDfrWrite;
+ /** Number of times the LDR is written. */
+ STAMCOUNTER StatLdrWrite;
+ /** Number of times the APIC-ID MSR is read. */
+ STAMCOUNTER StatIdMsrRead;
+ /** Number of times the LVT timer is written. */
+ STAMCOUNTER StatLvtTimerWrite;
+#endif
+ /** Number of apicPostInterrupt() calls. */
+ STAMCOUNTER StatPostIntrCnt;
+ /** Number of interrupts broken down by vector. */
+ STAMCOUNTER aStatVectors[256];
+ /** @} */
+} APICCPU;
+/** Pointer to APIC VMCPU instance data. */
+typedef APICCPU *PAPICCPU;
+/** Pointer to a const APIC VMCPU instance data. */
+typedef APICCPU const *PCAPICCPU;
+AssertCompileMemberAlignment(APICCPU, uApicBaseMsr, 8);
+
+/**
+ * APIC operating modes as returned by apicGetMode().
+ *
+ * The values match hardware states.
+ * See Intel spec. 10.12.1 "Detecting and Enabling x2APIC Mode".
+ */
+typedef enum APICMODE
+{
+ APICMODE_DISABLED = 0,
+ APICMODE_INVALID,
+ APICMODE_XAPIC,
+ APICMODE_X2APIC
+} APICMODE;
+
+/**
+ * Gets the timer shift value.
+ *
+ * @returns The timer shift value.
+ * @param pXApicPage The xAPIC page.
+ */
+DECLINLINE(uint8_t) apicGetTimerShift(PCXAPICPAGE pXApicPage)
+{
+ /* See Intel spec. 10.5.4 "APIC Timer". */
+ uint32_t uShift = pXApicPage->timer_dcr.u.u2DivideValue0 | (pXApicPage->timer_dcr.u.u1DivideValue1 << 2);
+ return (uShift + 1) & 7;
+}
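For context, a hypothetical usage sketch: together with the initial count, the shift yields the timer period in source-clock ticks, which is what the frequency hint to TM is derived from (see apicHintTimerFreq below).

DECLINLINE(uint64_t) apicSketchTimerPeriodTicks(PCXAPICPAGE pXApicPage)
{
    uint8_t const  uShift        = apicGetTimerShift(pXApicPage);
    uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
    return (uint64_t)uInitialCount << uShift; /* The divider is 2^shift. */
}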
+
+
+const char *apicGetModeName(APICMODE enmMode);
+const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat);
+const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode);
+const char *apicGetDestModeName(XAPICDESTMODE enmDestMode);
+const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode);
+const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand);
+const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode);
+void apicHintTimerFreq(PPDMDEVINS pDevIns, PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift);
+APICMODE apicGetMode(uint64_t uApicBaseMsr);
+
+DECLCALLBACK(VBOXSTRICTRC) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb);
+DECLCALLBACK(VBOXSTRICTRC) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb);
+
+bool apicPostInterrupt(PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode, uint32_t uSrcTag);
+void apicStartTimer(PVMCPUCC pVCpu, uint32_t uInitialCount);
+void apicClearInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType);
+void apicInitIpi(PVMCPUCC pVCpu);
+void apicResetCpu(PVMCPUCC pVCpu, bool fResetApicBaseMsr);
+
+DECLCALLBACK(int) apicR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg);
+DECLCALLBACK(int) apicR3Destruct(PPDMDEVINS pDevIns);
+DECLCALLBACK(void) apicR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta);
+DECLCALLBACK(void) apicR3Reset(PPDMDEVINS pDevIns);
+DECLCALLBACK(int) apicR3InitComplete(PPDMDEVINS pDevIns);
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_APICInternal_h */
+
diff --git a/src/VBox/VMM/include/CFGMInternal.h b/src/VBox/VMM/include/CFGMInternal.h
new file mode 100644
index 00000000..d873a75a
--- /dev/null
+++ b/src/VBox/VMM/include/CFGMInternal.h
@@ -0,0 +1,144 @@
+/* $Id: CFGMInternal.h $ */
+/** @file
+ * CFGM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_CFGMInternal_h
+#define VMM_INCLUDED_SRC_include_CFGMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+
+
+/** @defgroup grp_cfgm_int Internals.
+ * @ingroup grp_cfgm
+ * @{
+ */
+
+
+/**
+ * Configuration manager property value.
+ */
+typedef union CFGMVALUE
+{
+ /** Integer value. */
+ struct CFGMVALUE_INTEGER
+ {
+ /** The integer represented as 64-bit unsigned. */
+ uint64_t u64;
+ } Integer;
+
+ /** String or password value. (UTF-8 of course) */
+ struct CFGMVALUE_STRING
+ {
+ /** Length of string. (In bytes, including the terminator.) */
+ size_t cb;
+ /** Pointer to the string. */
+ char *psz;
+ } String;
+
+ /** Byte string value. */
+ struct CFGMVALUE_BYTES
+ {
+ /** Length of byte string. (in bytes) */
+ size_t cb;
+ /** Pointer to the byte string. */
+ uint8_t *pau8;
+ } Bytes;
+} CFGMVALUE;
+/** Pointer to configuration manager property value. */
+typedef CFGMVALUE *PCFGMVALUE;
+
+
+/**
+ * Configuration manager property leaf.
+ */
+typedef struct CFGMLEAF
+{
+ /** Pointer to the next leaf. */
+ PCFGMLEAF pNext;
+ /** Pointer to the previous leaf. */
+ PCFGMLEAF pPrev;
+
+ /** Property type. */
+ CFGMVALUETYPE enmType;
+ /** Property value. */
+ CFGMVALUE Value;
+
+ /** Name length. (exclusive) */
+ size_t cchName;
+ /** Name. */
+ char szName[1];
+} CFGMLEAF;
+
+
+/**
+ * Configuration manager tree node.
+ */
+typedef struct CFGMNODE
+{
+ /** Pointer to the next node (on this level). */
+ PCFGMNODE pNext;
+ /** Pointer to the previous node (on this level). */
+ PCFGMNODE pPrev;
+ /** Pointer to the parent node. */
+ PCFGMNODE pParent;
+ /** Pointer to first child node. */
+ PCFGMNODE pFirstChild;
+ /** Pointer to first property leaf. */
+ PCFGMLEAF pFirstLeaf;
+
+ /** Pointer to the VM owning this node. */
+ PVM pVM;
+
+ /** The root of a 'restricted' subtree, i.e. the parent is
+ * invisible to non-trusted users.
+ */
+ bool fRestrictedRoot;
+
+ /** Name length. (exclusive) */
+ size_t cchName;
+ /** Name. */
+ char szName[1];
+} CFGMNODE;
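Given the sibling and child pointers above, a purely illustrative sketch (no such helper exists in CFGM) of walking the value leaves hanging directly off a node:

DECLINLINE(unsigned) cfgmSketchCountLeaves(PCFGMNODE pNode)
{
    unsigned cLeaves = 0;
    for (PCFGMLEAF pLeaf = pNode->pFirstLeaf; pLeaf; pLeaf = pLeaf->pNext)
        cLeaves++;
    return cLeaves;
}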
+
+
+
+/**
+ * CFGM VM Instance data.
+ * Changes to this must be checked against the padding of the cfgm union in VM!
+ */
+typedef struct CFGM
+{
+ /** Pointer to root node. */
+ R3PTRTYPE(PCFGMNODE) pRoot;
+} CFGM;
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_CFGMInternal_h */
diff --git a/src/VBox/VMM/include/CPUMInternal.h b/src/VBox/VMM/include/CPUMInternal.h
new file mode 100644
index 00000000..f0e9e1f9
--- /dev/null
+++ b/src/VBox/VMM/include/CPUMInternal.h
@@ -0,0 +1,537 @@
+/* $Id: CPUMInternal.h $ */
+/** @file
+ * CPUM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_CPUMInternal_h
+#define VMM_INCLUDED_SRC_include_CPUMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#ifndef VBOX_FOR_DTRACE_LIB
+# include <VBox/cdefs.h>
+# include <VBox/types.h>
+# include <VBox/vmm/stam.h>
+# include <iprt/x86.h>
+# include <VBox/vmm/pgm.h>
+#else
+# pragma D depends_on library x86.d
+# pragma D depends_on library cpumctx.d
+# pragma D depends_on library cpum.d
+
+/* Some fudging. */
+typedef uint64_t STAMCOUNTER;
+#endif
+
+
+
+
+/** @defgroup grp_cpum_int Internals
+ * @ingroup grp_cpum
+ * @internal
+ * @{
+ */
+
+/** Use flags (CPUM::fUseFlags).
+ * (Don't forget to sync this with CPUMInternal.mac !)
+ * @note Was part of saved state (6.1 and earlier).
+ * @{ */
+/** Indicates that we've saved the host FPU, SSE, whatever state and that it
+ * needs to be restored. */
+#define CPUM_USED_FPU_HOST RT_BIT(0)
+/** Indicates that we've loaded the guest FPU, SSE, whatever state and that it
+ * needs to be saved.
+ * @note Mirrored in CPUMCTX::fUsedFpuGuest for the HM switcher code. */
+#define CPUM_USED_FPU_GUEST RT_BIT(10)
+/** Used the guest FPU, SSE or such stuff since last we were in REM.
+ * REM syncing is clearing this, lazy FPU is setting it. */
+#define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
+/** The XMM state was manually restored. (AMD only) */
+#define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
+
+/** Host OS is using SYSENTER and we must NULL the CS. */
+#define CPUM_USE_SYSENTER RT_BIT(3)
+/** Host OS is using SYSENTER and we must NULL the CS. */
+#define CPUM_USE_SYSCALL RT_BIT(4)
+
+/** Debug registers are used by host and that DR7 and DR6 must be saved and
+ * disabled when switching to raw-mode. */
+#define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
+/** Records that we've saved the host DRx registers.
+ * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
+ * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
+#define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
+/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
+ * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
+#define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
+/** Used in ring-0 to indicate that we have loaded the hypervisor debug
+ * registers. */
+#define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
+/** Used in ring-0 to indicate that we have loaded the guest debug
+ * registers (DR0-3 and maybe DR6) for direct use by the guest.
+ * DR7 (and AMD-V DR6) are handled via the VMCB. */
+#define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
+
+/** Host CPU requires fxsave/fxrstor leaky bit handling. */
+#define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
+/** Set if the VM supports long-mode. */
+#define CPUM_USE_SUPPORTS_LONGMODE RT_BIT(20)
+/** @} */
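A minimal sketch (hypothetical helper) of how these bits are typically tested against CPUMCPU::fUseFlags:

DECLINLINE(bool) cpumSketchNeedsGuestFpuSave(uint32_t fUseFlags)
{
    /* CPUM_USED_FPU_GUEST is set when guest FPU/SSE/AVX state is loaded and must be saved on exit. */
    return RT_BOOL(fUseFlags & CPUM_USED_FPU_GUEST);
}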
+
+
+/** @name CPUM Saved State Version.
+ * @{ */
+/** The current saved state version. */
+#define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3
+/** The saved state version with more virtual VMCS fields (HLAT prefix size,
+ * PCONFIG-exiting bitmap, HLAT ptr, VM-exit ctls2) and a CPUMCTX field (VM-exit
+ * ctls2 MSR). */
+#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3 22
+/** The saved state version with PAE PDPEs added. */
+#define CPUM_SAVED_STATE_VERSION_PAE_PDPES 21
+/** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
+#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2 20
+/** The saved state version including VMX hardware virtualization state. */
+#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX 19
+/** The saved state version including SVM hardware virtualization state. */
+#define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 18
+/** The saved state version including XSAVE state. */
+#define CPUM_SAVED_STATE_VERSION_XSAVE 17
+/** The saved state version with good CPUID leaf count. */
+#define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16
+/** The saved state version where the CPUID explode code forgot to update the
+ * leaf count on restore, resulting in garbage when restoring and then re-saving old states. */
+#define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15
+/** The saved state version before the CPUIDs changes. */
+#define CPUM_SAVED_STATE_VERSION_PUT_STRUCT 14
+/** The saved state version before using SSMR3PutStruct. */
+#define CPUM_SAVED_STATE_VERSION_MEM 13
+/** The saved state version before introducing the MSR size field. */
+#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE 12
+/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
+ * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
+#define CPUM_SAVED_STATE_VERSION_VER3_2 11
+/** The saved state version of 3.0 and 3.1 trunk before the teleportation
+ * changes. */
+#define CPUM_SAVED_STATE_VERSION_VER3_0 10
+/** The saved state version for the 2.1 trunk before the MSR changes. */
+#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9
+/** The saved state version of 2.0, used for backwards compatibility. */
+#define CPUM_SAVED_STATE_VERSION_VER2_0 8
+/** The saved state version of 1.6, used for backwards compatibility. */
+#define CPUM_SAVED_STATE_VERSION_VER1_6 6
+/** @} */
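As an illustrative sketch (the helper is hypothetical), a load-time gate would simply bracket the incoming unit version between the oldest and the current version:

DECLINLINE(bool) cpumSketchIsKnownSavedStateVersion(uint32_t uVersion)
{
    return uVersion >= CPUM_SAVED_STATE_VERSION_VER1_6
        && uVersion <= CPUM_SAVED_STATE_VERSION;
}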
+
+
+/** @name XSAVE limits.
+ * @{ */
+/** Max size we accept for the XSAVE area.
+ * @see CPUMCTX::abXSave */
+#define CPUM_MAX_XSAVE_AREA_SIZE (0x4000 - 0x300)
+/** Min size we accept for the XSAVE area. */
+#define CPUM_MIN_XSAVE_AREA_SIZE 0x240
+/** @} */
+
+
+/**
+ * CPU info
+ */
+typedef struct CPUMINFO
+{
+ /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
+ uint32_t cMsrRanges;
+ /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
+ * instruction. Older hardware has been observed to ignore higher bits. */
+ uint32_t fMsrMask;
+
+ /** MXCSR mask. */
+ uint32_t fMxCsrMask;
+
+ /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
+ uint32_t cCpuIdLeaves;
+ /** The index of the first extended CPUID leaf in the array.
+ * Set to cCpuIdLeaves if none present. */
+ uint32_t iFirstExtCpuIdLeaf;
+ /** How to handle unknown CPUID leaves. */
+ CPUMUNKNOWNCPUID enmUnknownCpuIdMethod;
+ /** For use with CPUMUNKNOWNCPUID_DEFAULTS (DB & VM),
+ * CPUMUNKNOWNCPUID_LAST_STD_LEAF (VM) and CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX (VM). */
+ CPUMCPUID DefCpuId;
+
+ /** Scalable bus frequency used for reporting other frequencies. */
+ uint64_t uScalableBusFreq;
+
+ /** Pointer to the MSR ranges (for compatibility with old hyper heap code). */
+ R3PTRTYPE(PCPUMMSRRANGE) paMsrRangesR3;
+ /** Pointer to the CPUID leaves (for compatibility with old hyper heap code). */
+ R3PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR3;
+
+ /** CPUID leaves. */
+ CPUMCPUIDLEAF aCpuIdLeaves[256];
+ /** MSR ranges.
+ * @todo This is insane, so might want to move this into a separate
+ * allocation. The insanity is mainly for more recent AMD CPUs. */
+ CPUMMSRRANGE aMsrRanges[8192];
+} CPUMINFO;
+/** Pointer to a CPU info structure. */
+typedef CPUMINFO *PCPUMINFO;
+/** Pointer to a const CPU info structure. */
+typedef CPUMINFO const *CPCPUMINFO;
+
+
+/**
+ * The saved host CPU state.
+ */
+typedef struct CPUMHOSTCTX
+{
+ /** The extended state (FPU/SSE/AVX/AVX-2/XXXX). Must be aligned on 64 bytes. */
+ union /* no tag */
+ {
+ X86XSAVEAREA XState;
+ /** Byte view for simple indexing and space allocation.
+ * @note Must match or exceed the size of CPUMCTX::abXState. */
+ uint8_t abXState[0x4000 - 0x300];
+ } CPUM_UNION_NM(u);
+
+ /** General purpose registers and the flags register.
+ * @{ */
+ /*uint64_t rax; - scratch*/
+ uint64_t rbx;
+ /*uint64_t rcx; - scratch*/
+ /*uint64_t rdx; - scratch*/
+ uint64_t rdi;
+ uint64_t rsi;
+ uint64_t rbp;
+ uint64_t rsp;
+ /*uint64_t r8; - scratch*/
+ /*uint64_t r9; - scratch*/
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ /*uint64_t rip; - scratch*/
+ uint64_t rflags;
+ /** @} */
+
+ /** Selector registers
+ * @{ */
+ RTSEL ss;
+ RTSEL ssPadding;
+ RTSEL gs;
+ RTSEL gsPadding;
+ RTSEL fs;
+ RTSEL fsPadding;
+ RTSEL es;
+ RTSEL esPadding;
+ RTSEL ds;
+ RTSEL dsPadding;
+ RTSEL cs;
+ RTSEL csPadding;
+ /** @} */
+
+ /** Control registers.
+ * @{ */
+ /** The CR0 FPU state in HM mode. */
+ uint64_t cr0;
+ /*uint64_t cr2; - scratch*/
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t cr8;
+ /** @} */
+
+ /** Debug registers.
+ * @{ */
+ uint64_t dr0;
+ uint64_t dr1;
+ uint64_t dr2;
+ uint64_t dr3;
+ uint64_t dr6;
+ uint64_t dr7;
+ /** @} */
+
+ /** Global Descriptor Table register. */
+ X86XDTR64 gdtr;
+ uint16_t gdtrPadding;
+ /** Interrupt Descriptor Table register. */
+ X86XDTR64 idtr;
+ uint16_t idtrPadding;
+ /** The local descriptor table register (LDTR). */
+ RTSEL ldtr;
+ RTSEL ldtrPadding;
+ /** The task register. */
+ RTSEL tr;
+ RTSEL trPadding;
+
+ /** MSRs
+ * @{ */
+ CPUMSYSENTER SysEnter;
+ uint64_t FSbase;
+ uint64_t GSbase;
+ uint64_t efer;
+ /** @} */
+
+ /** The XCR0 register. */
+ uint64_t xcr0;
+ /** The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
+ * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
+ uint64_t fXStateMask;
+
+ /* padding to get 64byte aligned size */
+ uint8_t auPadding[24];
+#if HC_ARCH_BITS != 64
+# error HC_ARCH_BITS not defined or unsupported
+#endif
+} CPUMHOSTCTX;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
+#endif
+/** Pointer to the saved host CPU state. */
+typedef CPUMHOSTCTX *PCPUMHOSTCTX;
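To illustrate how fXStateMask is consumed (hypothetical helper; XSAVE/XRSTOR take the component mask in EDX:EAX):

DECLINLINE(void) cpumSketchSplitXStateMask(uint64_t fXStateMask, uint32_t *puEax, uint32_t *puEdx)
{
    *puEax = (uint32_t)fXStateMask;          /* Low 32 bits go in EAX. */
    *puEdx = (uint32_t)(fXStateMask >> 32);  /* High 32 bits go in EDX. */
}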
+
+
+/**
+ * The hypervisor context CPU state (just DRx left now).
+ */
+typedef struct CPUMHYPERCTX
+{
+ /** Debug registers.
+ * @remarks DR4 and DR5 should not be used since they are aliases for
+ * DR6 and DR7 respectively on both AMD and Intel CPUs.
+ * @remarks DR8-15 are currently not supported by AMD or Intel, so
+ * neither do we.
+ */
+ uint64_t dr[8];
+ /** @todo eliminate the rest. */
+ uint64_t cr3;
+ uint64_t au64Padding[7];
+} CPUMHYPERCTX;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSizeAlignment(CPUMHYPERCTX, 64);
+#endif
+/** Pointer to the hypervisor context CPU state. */
+typedef CPUMHYPERCTX *PCPUMHYPERCTX;
+
+
+/**
+ * CPUM Data (part of VM)
+ */
+typedef struct CPUM
+{
+ /** Use flags.
+ * These flags indicate which CPU features the host uses.
+ */
+ uint32_t fHostUseFlags;
+
+ /** CR4 mask
+ * @todo obsolete? */
+ struct
+ {
+ uint32_t AndMask; /**< @todo Move these to the per-CPU structure and fix the switchers. Saves a register! */
+ uint32_t OrMask;
+ } CR4;
+
+ /** The (more) portable CPUID level. */
+ uint8_t u8PortableCpuIdLevel;
+ /** Indicates that a state restore is pending.
+ * This is used to verify load order dependencies (PGM). */
+ bool fPendingRestore;
+ uint8_t abPadding0[2];
+
+ /** Mask of XSAVE/XRSTOR state components we can expose to the guest. */
+ uint64_t fXStateGuestMask;
+ /** XSAVE/XRSTOR host mask. Only state components in this mask can be exposed
+ * to the guest. This is 0 if no XSAVE/XRSTOR bits can be exposed. */
+ uint64_t fXStateHostMask;
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+ /** The host MXCSR mask (determined at init). */
+ uint32_t fHostMxCsrMask;
+#else
+ uint32_t u32UnusedOnNonX86;
+#endif
+ uint8_t abPadding1[4];
+
+ /** Random value we store in the reserved RFLAGS bits we don't use ourselves so
+ * we can detect corruption. */
+ uint64_t fReservedRFlagsCookie;
+
+ /** Align to 64-byte boundary. */
+ uint8_t abPadding2[16];
+
+ /** Host CPU feature information.
+ * Externally visible via the VM structure, aligned on a 64-byte boundary. */
+ CPUMFEATURES HostFeatures;
+ /** Guest CPU feature information.
+ * Externally visible via the VM structure, aligned with HostFeatures. */
+ CPUMFEATURES GuestFeatures;
+ /** Guest CPU info. */
+ CPUMINFO GuestInfo;
+
+ /** The standard set of CpuId leaves. */
+ CPUMCPUID aGuestCpuIdPatmStd[6];
+ /** The extended set of CpuId leaves. */
+ CPUMCPUID aGuestCpuIdPatmExt[10];
+ /** The centaur set of CpuId leaves. */
+ CPUMCPUID aGuestCpuIdPatmCentaur[4];
+
+ /** @name MSR statistics.
+ * @{ */
+ STAMCOUNTER cMsrWrites;
+ STAMCOUNTER cMsrWritesToIgnoredBits;
+ STAMCOUNTER cMsrWritesRaiseGp;
+ STAMCOUNTER cMsrWritesUnknown;
+ STAMCOUNTER cMsrReads;
+ STAMCOUNTER cMsrReadsRaiseGp;
+ STAMCOUNTER cMsrReadsUnknown;
+ /** @} */
+} CPUM;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileMemberOffset(CPUM, HostFeatures, 64);
+AssertCompileMemberOffset(CPUM, GuestFeatures, 112);
+#endif
+/** Pointer to the CPUM instance data residing in the shared VM structure. */
+typedef CPUM *PCPUM;
+
+/**
+ * CPUM Data (part of VMCPU)
+ */
+typedef struct CPUMCPU
+{
+ /** Guest context.
+ * Aligned on a 64-byte boundary. */
+ CPUMCTX Guest;
+ /** Guest context - misc MSRs
+ * Aligned on a 64-byte boundary. */
+ CPUMCTXMSRS GuestMsrs;
+
+ /** Nested VMX: VMX-preemption timer. */
+ TMTIMERHANDLE hNestedVmxPreemptTimer;
+
+ /** Use flags.
+ * These flags indicate both what is to be used and what has been used. */
+ uint32_t fUseFlags;
+
+ /** Changed flags.
+ * These flags indicate to REM (and others) which important guest
+ * registers have changed since the last time the flags were cleared.
+ * See the CPUM_CHANGED_* defines for what we keep track of.
+ *
+ * @todo Obsolete, but will probably be refactored so keep it for reference. */
+ uint32_t fChanged;
+
+ /** Temporary storage for the return code of the function called in the
+ * 32-64 switcher. */
+ uint32_t u32RetCode;
+
+ /** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC
+ * (?) bits are visible or not. (The APIC is responsible for setting this
+ * when loading state, so we won't save it.) */
+ bool fCpuIdApicFeatureVisible;
+
+ /** Align the next member on a 64-byte boundary. */
+ uint8_t abPadding2[64 - 8 - 4*3 - 1];
+
+ /** Saved host context. Only valid while inside RC or HM contexts.
+ * Must be aligned on a 64-byte boundary. */
+ CPUMHOSTCTX Host;
+ /** Old hypervisor context, only used for combined DRx values now.
+ * Must be aligned on a 64-byte boundary. */
+ CPUMHYPERCTX Hyper;
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ uint8_t aMagic[56];
+ uint64_t uMagic;
+#endif
+} CPUMCPU;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileMemberAlignment(CPUMCPU, Host, 64);
+#endif
+/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
+typedef CPUMCPU *PCPUMCPU;
+
+#ifndef VBOX_FOR_DTRACE_LIB
+RT_C_DECLS_BEGIN
+
+PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf);
+PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
+PCPUMCPUIDLEAF cpumCpuIdGetLeafInt(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
+PCPUMCPUIDLEAF cpumCpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves);
+# ifdef VBOX_STRICT
+void cpumCpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves);
+# endif
+int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs,
+ PCPUMFEATURES pFeatures);
+
+# ifdef IN_RING3
+int cpumR3DbgInit(PVM pVM);
+int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
+void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCVMXMSRS pHostVmxMsrs,
+ PVMXMSRS pGuestVmxMsrs);
+void cpumR3CpuIdRing3InitDone(PVM pVM);
+void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
+int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
+int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
+DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+
+int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
+int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
+int cpumR3MsrReconcileWithCpuId(PVM pVM);
+int cpumR3MsrApplyFudge(PVM pVM);
+int cpumR3MsrRegStats(PVM pVM);
+int cpumR3MsrStrictInitChecks(void);
+PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
+# endif
+
+# ifdef IN_RC
+DECLASM(int) cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
+# endif
+
+# ifdef IN_RING0
+DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
+DECLASM(void) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
+# if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+DECLASM(void) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
+# endif
+# endif
+
+# if defined(IN_RC) || defined(IN_RING0)
+DECLASM(int) cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
+DECLASM(void) cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
+DECLASM(void) cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
+DECLASM(void) cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM);
+# endif
+
+RT_C_DECLS_END
+#endif /* !VBOX_FOR_DTRACE_LIB */
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_h */
+
diff --git a/src/VBox/VMM/include/CPUMInternal.mac b/src/VBox/VMM/include/CPUMInternal.mac
new file mode 100644
index 00000000..8e6286b1
--- /dev/null
+++ b/src/VBox/VMM/include/CPUMInternal.mac
@@ -0,0 +1,710 @@
+; $Id: CPUMInternal.mac $
+;; @file
+; CPUM - Internal header file (asm).
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+%include "VBox/asmdefs.mac"
+%include "VBox/vmm/cpum.mac"
+
+;; Check sanity.
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+ %ifndef IN_RING0
+ %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
+ %endif
+%endif
+
+;; For numeric expressions
+%ifdef RT_ARCH_AMD64
+ %define CPUM_IS_AMD64 1
+%else
+ %define CPUM_IS_AMD64 0
+%endif
+
+
+;;
+; CPU info
+struc CPUMINFO
+ .cMsrRanges resd 1 ; uint32_t
+ .fMsrMask resd 1 ; uint32_t
+ .fMxCsrMask resd 1 ; uint32_t
+ .cCpuIdLeaves resd 1 ; uint32_t
+ .iFirstExtCpuIdLeaf resd 1 ; uint32_t
+ .enmUnknownCpuIdMethod resd 1 ; CPUMUNKNOWNCPUID
+ .DefCpuId resb CPUMCPUID_size ; CPUMCPUID
+ .uScalableBusFreq resq 1 ; uint64_t
+ .paMsrRangesR3 RTR3PTR_RES 1 ; R3PTRTYPE(PCPUMMSRRANGE)
+ .paCpuIdLeavesR3 RTR3PTR_RES 1 ; R3PTRTYPE(PCPUMCPUIDLEAF)
+ .aCpuIdLeaves resb 256*32
+ .aMsrRanges resb 8192*128
+endstruc
+
+
+%define CPUM_USED_FPU_HOST RT_BIT(0)
+%define CPUM_USED_FPU_GUEST RT_BIT(10)
+%define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
+%define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
+%define CPUM_USE_SYSENTER RT_BIT(3)
+%define CPUM_USE_SYSCALL RT_BIT(4)
+%define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
+%define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
+%define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
+%define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
+%define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
+%define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
+%define CPUM_USE_SUPPORTS_LONGMODE RT_BIT(20)
+
+
+struc CPUM
+ ;...
+ .fHostUseFlags resd 1
+
+ ; CR4 masks
+ .CR4.AndMask resd 1
+ .CR4.OrMask resd 1
+ .u8PortableCpuIdLevel resb 1
+ .fPendingRestore resb 1
+
+ alignb 8
+ .fXStateGuestMask resq 1
+ .fXStateHostMask resq 1
+
+ alignb 64
+ .HostFeatures resb 48
+ .GuestFeatures resb 48
+ .GuestInfo resb CPUMINFO_size
+
+    ; Patch manager saved state compatibility CPUID leaf arrays
+ .aGuestCpuIdPatmStd resb 16*6
+ .aGuestCpuIdPatmExt resb 16*10
+ .aGuestCpuIdPatmCentaur resb 16*4
+
+ alignb 8
+ .cMsrWrites resq 1
+ .cMsrWritesToIgnoredBits resq 1
+ .cMsrWritesRaiseGp resq 1
+ .cMsrWritesUnknown resq 1
+ .cMsrReads resq 1
+ .cMsrReadsRaiseGp resq 1
+ .cMsrReadsUnknown resq 1
+endstruc
+
+struc CPUMCPU
+ ;
+ ; Guest context state
+ ;
+ .Guest resq 0
+ .Guest.eax resq 1
+ .Guest.ecx resq 1
+ .Guest.edx resq 1
+ .Guest.ebx resq 1
+ .Guest.esp resq 1
+ .Guest.ebp resq 1
+ .Guest.esi resq 1
+ .Guest.edi resq 1
+ .Guest.r8 resq 1
+ .Guest.r9 resq 1
+ .Guest.r10 resq 1
+ .Guest.r11 resq 1
+ .Guest.r12 resq 1
+ .Guest.r13 resq 1
+ .Guest.r14 resq 1
+ .Guest.r15 resq 1
+ .Guest.es.Sel resw 1
+ .Guest.es.PaddingSel resw 1
+ .Guest.es.ValidSel resw 1
+ .Guest.es.fFlags resw 1
+ .Guest.es.u64Base resq 1
+ .Guest.es.u32Limit resd 1
+ .Guest.es.Attr resd 1
+ .Guest.cs.Sel resw 1
+ .Guest.cs.PaddingSel resw 1
+ .Guest.cs.ValidSel resw 1
+ .Guest.cs.fFlags resw 1
+ .Guest.cs.u64Base resq 1
+ .Guest.cs.u32Limit resd 1
+ .Guest.cs.Attr resd 1
+ .Guest.ss.Sel resw 1
+ .Guest.ss.PaddingSel resw 1
+ .Guest.ss.ValidSel resw 1
+ .Guest.ss.fFlags resw 1
+ .Guest.ss.u64Base resq 1
+ .Guest.ss.u32Limit resd 1
+ .Guest.ss.Attr resd 1
+ .Guest.ds.Sel resw 1
+ .Guest.ds.PaddingSel resw 1
+ .Guest.ds.ValidSel resw 1
+ .Guest.ds.fFlags resw 1
+ .Guest.ds.u64Base resq 1
+ .Guest.ds.u32Limit resd 1
+ .Guest.ds.Attr resd 1
+ .Guest.fs.Sel resw 1
+ .Guest.fs.PaddingSel resw 1
+ .Guest.fs.ValidSel resw 1
+ .Guest.fs.fFlags resw 1
+ .Guest.fs.u64Base resq 1
+ .Guest.fs.u32Limit resd 1
+ .Guest.fs.Attr resd 1
+ .Guest.gs.Sel resw 1
+ .Guest.gs.PaddingSel resw 1
+ .Guest.gs.ValidSel resw 1
+ .Guest.gs.fFlags resw 1
+ .Guest.gs.u64Base resq 1
+ .Guest.gs.u32Limit resd 1
+ .Guest.gs.Attr resd 1
+ .Guest.ldtr.Sel resw 1
+ .Guest.ldtr.PaddingSel resw 1
+ .Guest.ldtr.ValidSel resw 1
+ .Guest.ldtr.fFlags resw 1
+ .Guest.ldtr.u64Base resq 1
+ .Guest.ldtr.u32Limit resd 1
+ .Guest.ldtr.Attr resd 1
+ .Guest.tr.Sel resw 1
+ .Guest.tr.PaddingSel resw 1
+ .Guest.tr.ValidSel resw 1
+ .Guest.tr.fFlags resw 1
+ .Guest.tr.u64Base resq 1
+ .Guest.tr.u32Limit resd 1
+ .Guest.tr.Attr resd 1
+ alignb 8
+ .Guest.eip resq 1
+ .Guest.eflags resq 1
+ .Guest.fExtrn resq 1
+ .Guest.uRipInhibitInt resq 1
+ .Guest.cr0 resq 1
+ .Guest.cr2 resq 1
+ .Guest.cr3 resq 1
+ .Guest.cr4 resq 1
+ .Guest.dr resq 8
+ .Guest.gdtrPadding resw 3
+ .Guest.gdtr resw 0
+ .Guest.gdtr.cbGdt resw 1
+ .Guest.gdtr.pGdt resq 1
+ .Guest.idtrPadding resw 3
+ .Guest.idtr resw 0
+ .Guest.idtr.cbIdt resw 1
+ .Guest.idtr.pIdt resq 1
+ .Guest.SysEnter.cs resb 8
+ .Guest.SysEnter.eip resb 8
+ .Guest.SysEnter.esp resb 8
+ .Guest.msrEFER resb 8
+ .Guest.msrSTAR resb 8
+ .Guest.msrPAT resb 8
+ .Guest.msrLSTAR resb 8
+ .Guest.msrCSTAR resb 8
+ .Guest.msrSFMASK resb 8
+ .Guest.msrKERNELGSBASE resb 8
+
+ alignb 32
+ .Guest.aPaePdpes resq 4
+
+ alignb 8
+ .Guest.aXcr resq 2
+ .Guest.fXStateMask resq 1
+ .Guest.fUsedFpuGuest resb 1
+ alignb 8
+ .Guest.aoffXState resw 64
+ alignb 256
+ .Guest.abXState resb 0x4000-0x300
+ .Guest.XState EQU .Guest.abXState
+
+;;
+ alignb 4096
+ .Guest.hwvirt resb 0
+ .Guest.hwvirt.svm resb 0
+ .Guest.hwvirt.vmx resb 0
+
+ .Guest.hwvirt.svm.Vmcb EQU .Guest.hwvirt.svm
+ .Guest.hwvirt.svm.abMsrBitmap EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
+ .Guest.hwvirt.svm.abIoBitmap EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
+ .Guest.hwvirt.svm.uMsrHSavePa EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000) ; resq 1
+ .Guest.hwvirt.svm.GCPhysVmcb EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8) ; resq 1
+ alignb 8
+ .Guest.hwvirt.svm.HostState EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8) ; resb 184
+ .Guest.hwvirt.svm.uPrevPauseTick EQU (.Guest.hwvirt.svm.HostState + 184) ; resq 1
+ .Guest.hwvirt.svm.cPauseFilter EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8) ; resw 1
+ .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2) ; resw 1
+ .Guest.hwvirt.svm.fInterceptEvents EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1
+
+ .Guest.hwvirt.vmx.Vmcs resb 0x1000
+ .Guest.hwvirt.vmx.ShadowVmcs resb 0x1000
+ .Guest.hwvirt.vmx.abVmreadBitmap resb 0x1000
+ .Guest.hwvirt.vmx.abVmwriteBitmap resb 0x1000
+ .Guest.hwvirt.vmx.aEntryMsrLoadArea resb 0x2000
+ .Guest.hwvirt.vmx.aExitMsrStoreArea resb 0x2000
+ .Guest.hwvirt.vmx.aExitMsrLoadArea resb 0x2000
+ .Guest.hwvirt.vmx.abMsrBitmap resb 0x1000
+ .Guest.hwvirt.vmx.abIoBitmap resb 0x1000+0x1000
+ alignb 8
+ .Guest.hwvirt.vmx.GCPhysVmxon resq 1
+ .Guest.hwvirt.vmx.GCPhysVmcs resq 1
+ .Guest.hwvirt.vmx.GCPhysShadowVmcs resq 1
+ .Guest.hwvirt.vmx.enmDiag resd 1
+ .Guest.hwvirt.vmx.enmAbort resd 1
+ .Guest.hwvirt.vmx.uDiagAux resq 1
+ .Guest.hwvirt.vmx.uAbortAux resd 1
+ .Guest.hwvirt.vmx.fInVmxRootMode resb 1
+ .Guest.hwvirt.vmx.fInVmxNonRootMode resb 1
+ .Guest.hwvirt.vmx.fInterceptEvents resb 1
+ .Guest.hwvirt.vmx.fNmiUnblockingIret resb 1
+ .Guest.hwvirt.vmx.uFirstPauseLoopTick resq 1
+ .Guest.hwvirt.vmx.uPrevPauseTick resq 1
+ .Guest.hwvirt.vmx.uEntryTick resq 1
+ .Guest.hwvirt.vmx.offVirtApicWrite resw 1
+ .Guest.hwvirt.vmx.fVirtNmiBlocking resb 1
+ alignb 8
+ .Guest.hwvirt.vmx.Msrs resb 224
+
+ alignb 8
+ .Guest.hwvirt.enmHwvirt resd 1
+ .Guest.hwvirt.fGif resb 1
+ alignb 4
+ .Guest.hwvirt.fSavedInhibit resd 1
+ alignb 64
+
+ .GuestMsrs resq 0
+ .GuestMsrs.au64 resq 64
+
+ ;
+ ; Other stuff.
+ ;
+ .hNestedVmxPreemptTimer resq 1
+
+ .fUseFlags resd 1
+ .fChanged resd 1
+ .u32RetCode resd 1
+ .fCpuIdApicFeatureVisible resb 1
+
+ ;
+ ; Host context state
+ ;
+ alignb 64
+ .Host resb 0
+ .Host.abXState resb 0x4000-0x300
+ .Host.XState EQU .Host.abXState
+ ;.Host.rax resq 1 - scratch
+ .Host.rbx resq 1
+ ;.Host.rcx resq 1 - scratch
+ ;.Host.rdx resq 1 - scratch
+ .Host.rdi resq 1
+ .Host.rsi resq 1
+ .Host.rbp resq 1
+ .Host.rsp resq 1
+ ;.Host.r8 resq 1 - scratch
+ ;.Host.r9 resq 1 - scratch
+ .Host.r10 resq 1
+ .Host.r11 resq 1
+ .Host.r12 resq 1
+ .Host.r13 resq 1
+ .Host.r14 resq 1
+ .Host.r15 resq 1
+ ;.Host.rip resd 1 - scratch
+ .Host.rflags resq 1
+ .Host.ss resw 1
+ .Host.ssPadding resw 1
+ .Host.gs resw 1
+ .Host.gsPadding resw 1
+ .Host.fs resw 1
+ .Host.fsPadding resw 1
+ .Host.es resw 1
+ .Host.esPadding resw 1
+ .Host.ds resw 1
+ .Host.dsPadding resw 1
+ .Host.cs resw 1
+ .Host.csPadding resw 1
+
+ .Host.cr0Fpu:
+ .Host.cr0 resq 1
+ ;.Host.cr2 resq 1 - scratch
+ .Host.cr3 resq 1
+ .Host.cr4 resq 1
+ .Host.cr8 resq 1
+
+ .Host.dr0 resq 1
+ .Host.dr1 resq 1
+ .Host.dr2 resq 1
+ .Host.dr3 resq 1
+ .Host.dr6 resq 1
+ .Host.dr7 resq 1
+
+ .Host.gdtr resb 10 ; GDT limit + linear address
+ .Host.gdtrPadding resw 1
+ .Host.idtr resb 10 ; IDT limit + linear address
+ .Host.idtrPadding resw 1
+ .Host.ldtr resw 1
+ .Host.ldtrPadding resw 1
+ .Host.tr resw 1
+ .Host.trPadding resw 1
+
+ .Host.SysEnter.cs resq 1
+ .Host.SysEnter.eip resq 1
+ .Host.SysEnter.esp resq 1
+ .Host.FSbase resq 1
+ .Host.GSbase resq 1
+ .Host.efer resq 1
+ alignb 8
+ .Host.xcr0 resq 1
+ .Host.fXStateMask resq 1
+
+ ;
+ ; Hypervisor Context.
+ ;
+ alignb 64
+ .Hyper resq 0
+ .Hyper.dr resq 8
+ .Hyper.cr3 resq 1
+ alignb 64
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ .aMagic resb 56
+ .uMagic resq 1
+%endif
+endstruc
+
+
+
+%if 0 ; Currently not used anywhere.
+;;
+; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
+;
+; Cleans the FPU state, if necessary, before restoring the FPU.
+;
+; This macro ASSUMES CR0.TS is not set!
+;
+; @param xDX Pointer to CPUMCPU.
+; @uses xAX, EFLAGS
+;
+; Changes here should also be reflected in CPUMRCA.asm's copy!
+;
+%macro CLEANFPU 0
+ test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
+ jz .nothing_to_clean
+
+ xor eax, eax
+ fnstsw ax ; FSW -> AX.
+ test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
+ ; while clearing & loading the FPU bits in 'clean_fpu' below.
+ jz .clean_fpu
+ fnclex
+
+.clean_fpu:
+        ffree   st7                         ; Clear FPU stack register 7's tag entry to prevent overflow
+                                            ; if a wraparound occurs for the upcoming push (load).
+ fild dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
+.nothing_to_clean:
+%endmacro
+%endif ; Unused.
+
+
+;;
+; Makes sure we don't trap (#NM) accessing the FPU.
+;
+; In ring-0 this is a bit of work since we may have to try to convince the host
+; kernel to do the work for us; we must also report any CR0 changes back to
+; HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
+;
+; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
+; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
+; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
+;
+; In raw-mode we will always have to clear TS and it will be recalculated
+; elsewhere and thus needs no saving.
+;
+; @param %1 Register to return the return status code in.
+; @param %2 Temporary scratch register.
+; @param %3 Ring-0 only, register pointing to the CPUMCPU structure
+; of the EMT we're on.
+; @uses EFLAGS, CR0, %1, %2
+;
+%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
+ ;
+    ; ring-0 - slightly more complicated than the old raw-mode.
+ ;
+ xor %1, %1 ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
+ mov [%3 + CPUMCPU.Host.cr0Fpu], %1
+
+ mov %2, cr0
+    test    %2, X86_CR0_TS | X86_CR0_EM        ; Make sure it's safe to access the FPU state.
+ jz %%no_cr0_change
+
+ %ifdef VMM_R0_TOUCH_FPU
+ ; Touch the state and check that the kernel updated CR0 for us.
+ movdqa xmm0, xmm0
+ mov %2, cr0
+ test %2, X86_CR0_TS | X86_CR0_EM
+ jz %%cr0_changed
+ %endif
+
+    ; Save CR0 and clear the flags ourselves.
+ mov [%3 + CPUMCPU.Host.cr0Fpu], %2
+ and %2, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, %2
+
+%%cr0_changed:
+ mov %1, VINF_CPUM_HOST_CR0_MODIFIED
+%%no_cr0_change:
+%endmacro
+
+
+;;
+; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
+;
+; @param %1 The original state to restore (or zero).
+;
+%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
+ test %1, X86_CR0_TS | X86_CR0_EM
+ jz %%skip_cr0_restore
+ mov cr0, %1
+%%skip_cr0_restore:
+%endmacro
+
+
+;;
+; Saves the host state.
+;
+; @uses rax, rdx
+; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
+; @param pXState Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_SAVE_HOST 0
+ ;
+ ; Load a couple of registers we'll use later in all branches.
+ ;
+ lea pXState, [pCpumCpu + CPUMCPU.Host.XState]
+ mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
+
+ ;
+ ; XSAVE or FXSAVE?
+ ;
+ or eax, eax
+ jz %%host_fxsave
+
+ ; XSAVE
+ mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
+ %ifdef RT_ARCH_AMD64
+ o64 xsave [pXState]
+ %else
+ xsave [pXState]
+ %endif
+ jmp %%host_done
+
+ ; FXSAVE
+%%host_fxsave:
+ %ifdef RT_ARCH_AMD64
+ o64 fxsave [pXState] ; Use explicit REX prefix. See @bugref{6398}.
+ %else
+ fxsave [pXState]
+ %endif
+
+%%host_done:
+%endmacro ; CPUMR0_SAVE_HOST
+
+
+;;
+; Loads the host state.
+;
+; @uses rax, rdx
+; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
+; @param pXState Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_LOAD_HOST 0
+ ;
+ ; Load a couple of registers we'll use later in all branches.
+ ;
+ lea pXState, [pCpumCpu + CPUMCPU.Host.XState]
+ mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
+
+ ;
+ ; XRSTOR or FXRSTOR?
+ ;
+ or eax, eax
+ jz %%host_fxrstor
+
+ ; XRSTOR
+ mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
+ %ifdef RT_ARCH_AMD64
+ o64 xrstor [pXState]
+ %else
+ xrstor [pXState]
+ %endif
+ jmp %%host_done
+
+ ; FXRSTOR
+%%host_fxrstor:
+ %ifdef RT_ARCH_AMD64
+ o64 fxrstor [pXState] ; Use explicit REX prefix. See @bugref{6398}.
+ %else
+ fxrstor [pXState]
+ %endif
+
+%%host_done:
+%endmacro ; CPUMR0_LOAD_HOST
+
+
+
+;; Macro for XSAVE/FXSAVE of the guest FPU state which figures out whether to
+; save the 32-bit or the 64-bit FPU state.
+;
+; @param %1 Pointer to CPUMCPU.
+; @param %2 Pointer to XState.
+; @param %3 Force AMD64
+; @param %4 The instruction to use (xsave or fxsave)
+; @uses xAX, xDX, EFLAGS, 20h of stack.
+;
+%macro SAVE_32_OR_64_FPU 4
+%if CPUM_IS_AMD64 || %3
+ ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
+ test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
+ jnz short %%save_long_mode_guest
+%endif
+ %4 [pXState]
+%if CPUM_IS_AMD64 || %3
+ jmp %%save_done_32bit_cs_ds
+
+%%save_long_mode_guest:
+ o64 %4 [pXState]
+
+ xor edx, edx
+ cmp dword [pXState + X86FXSTATE.FPUCS], 0
+ jne short %%save_done
+
+ sub rsp, 20h ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
+ fnstenv [rsp]
+ movzx eax, word [rsp + 10h]
+ mov [pXState + X86FXSTATE.FPUCS], eax
+ movzx eax, word [rsp + 18h]
+ add rsp, 20h
+ mov [pXState + X86FXSTATE.FPUDS], eax
+%endif
+%%save_done_32bit_cs_ds:
+ mov edx, X86_FXSTATE_RSVD_32BIT_MAGIC
+%%save_done:
+ mov dword [pXState + X86_OFF_FXSTATE_RSVD], edx
+%endmacro ; SAVE_32_OR_64_FPU
+
+
+;;
+; Save the guest state.
+;
+; @uses rax, rdx
+; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
+; @param pXState Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_SAVE_GUEST 0
+ ;
+ ; Load a couple of registers we'll use later in all branches.
+ ;
+ %ifdef IN_RING0
+ lea pXState, [pCpumCpu + CPUMCPU.Guest.XState]
+ %else
+ %error "Unsupported context!"
+ %endif
+ mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
+
+ ;
+ ; XSAVE or FXSAVE?
+ ;
+ or eax, eax
+ jz %%guest_fxsave
+
+ ; XSAVE
+ mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+ %ifdef VBOX_WITH_KERNEL_USING_XMM
+ and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
+ %endif
+ SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
+ jmp %%guest_done
+
+ ; FXSAVE
+%%guest_fxsave:
+ SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave
+
+%%guest_done:
+%endmacro ; CPUMR0_SAVE_GUEST
+
+
+;;
+; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
+;
+; @param %1 Pointer to CPUMCPU.
+; @param %2 Pointer to XState.
+; @param %3 Force AMD64.
+; @param %4 The instruction to use (xrstor or fxrstor).
+; @uses xAX, xDX, EFLAGS
+;
+%macro RESTORE_32_OR_64_FPU 4
+%if CPUM_IS_AMD64 || %3
+ ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
+ test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
+ jz %%restore_32bit_fpu
+ cmp dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
+ jne short %%restore_64bit_fpu
+%%restore_32bit_fpu:
+%endif
+ %4 [pXState]
+%if CPUM_IS_AMD64 || %3
+ ; TODO: Restore XMM8-XMM15!
+ jmp short %%restore_fpu_done
+%%restore_64bit_fpu:
+ o64 %4 [pXState]
+%%restore_fpu_done:
+%endif
+%endmacro ; RESTORE_32_OR_64_FPU
+
+
+;;
+; Loads the guest state.
+;
+; @uses rax, rdx
+; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
+; @param pXState Define for the register containing the extended state pointer.
+;
+%macro CPUMR0_LOAD_GUEST 0
+ ;
+ ; Load a couple of registers we'll use later in all branches.
+ ;
+ lea pXState, [pCpumCpu + CPUMCPU.Guest.XState]
+ mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
+
+ ;
+ ; XRSTOR or FXRSTOR?
+ ;
+ or eax, eax
+ jz %%guest_fxrstor
+
+ ; XRSTOR
+ mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+ %ifdef VBOX_WITH_KERNEL_USING_XMM
+ and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
+ %endif
+ RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
+ jmp %%guest_done
+
+ ; FXRSTOR
+%%guest_fxrstor:
+ RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor
+
+%%guest_done:
+%endmacro ; CPUMR0_LOAD_GUEST
+
diff --git a/src/VBox/VMM/include/DBGFInline.h b/src/VBox/VMM/include/DBGFInline.h
new file mode 100644
index 00000000..0737c681
--- /dev/null
+++ b/src/VBox/VMM/include/DBGFInline.h
@@ -0,0 +1,135 @@
+/* $Id: DBGFInline.h $ */
+/** @file
+ * DBGF - Internal header file containing the inlined functions.
+ */
+
+/*
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_DBGFInline_h
+#define VMM_INCLUDED_SRC_include_DBGFInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+/**
+ * Initializes the given L2 table entry with the given values.
+ *
+ * @param   pL2Entry            The L2 entry to initialize.
+ * @param hBp The breakpoint handle.
+ * @param GCPtr The GC pointer used as the key (only the upper 6 bytes are used).
+ * @param idxL2Left The left L2 table index.
+ * @param idxL2Right The right L2 table index.
+ * @param iDepth The depth of the node in the tree.
+ */
+DECLINLINE(void) dbgfBpL2TblEntryInit(PDBGFBPL2ENTRY pL2Entry, DBGFBP hBp, RTGCPTR GCPtr,
+ uint32_t idxL2Left, uint32_t idxL2Right, uint8_t iDepth)
+{
+ uint64_t u64GCPtrKeyAndBpHnd1 = ((uint64_t)hBp & DBGF_BP_L2_ENTRY_BP_1ST_MASK) << DBGF_BP_L2_ENTRY_BP_1ST_SHIFT
+ | DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
+ uint64_t u64LeftRightIdxDepthBpHnd2 = (((uint64_t)hBp & DBGF_BP_L2_ENTRY_BP_2ND_MASK) >> 16) << DBGF_BP_L2_ENTRY_BP_2ND_SHIFT
+ | ((uint64_t)iDepth << DBGF_BP_L2_ENTRY_DEPTH_SHIFT)
+ | ((uint64_t)idxL2Right << DBGF_BP_L2_ENTRY_RIGHT_IDX_SHIFT)
+ | ((uint64_t)idxL2Left << DBGF_BP_L2_ENTRY_LEFT_IDX_SHIFT);
+
+ ASMAtomicWriteU64(&pL2Entry->u64GCPtrKeyAndBpHnd1, u64GCPtrKeyAndBpHnd1);
+ ASMAtomicWriteU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2, u64LeftRightIdxDepthBpHnd2);
+}
+
+
+/**
+ * Updates the given L2 table entry with the new pointers.
+ *
+ * @param pL2Entry The L2 entry to update.
+ * @param idxL2Left The new left L2 table index.
+ * @param idxL2Right The new right L2 table index.
+ * @param iDepth The new depth of the tree.
+ */
+DECLINLINE(void) dbgfBpL2TblEntryUpdate(PDBGFBPL2ENTRY pL2Entry, uint32_t idxL2Left, uint32_t idxL2Right,
+ uint8_t iDepth)
+{
+ uint64_t u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2) & DBGF_BP_L2_ENTRY_BP_2ND_L2_ENTRY_MASK;
+ u64LeftRightIdxDepthBpHnd2 |= ((uint64_t)iDepth << DBGF_BP_L2_ENTRY_DEPTH_SHIFT)
+ | ((uint64_t)idxL2Right << DBGF_BP_L2_ENTRY_RIGHT_IDX_SHIFT)
+ | ((uint64_t)idxL2Left << DBGF_BP_L2_ENTRY_LEFT_IDX_SHIFT);
+
+ ASMAtomicWriteU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2, u64LeftRightIdxDepthBpHnd2);
+}
+
+
+/**
+ * Updates the given L2 table entry with the left pointer.
+ *
+ * @param pL2Entry The L2 entry to update.
+ * @param idxL2Left The new left L2 table index.
+ * @param iDepth The new depth of the tree.
+ */
+DECLINLINE(void) dbgfBpL2TblEntryUpdateLeft(PDBGFBPL2ENTRY pL2Entry, uint32_t idxL2Left, uint8_t iDepth)
+{
+ uint64_t u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2) & ( DBGF_BP_L2_ENTRY_BP_2ND_L2_ENTRY_MASK
+ | DBGF_BP_L2_ENTRY_RIGHT_IDX_MASK);
+
+ u64LeftRightIdxDepthBpHnd2 |= ((uint64_t)iDepth << DBGF_BP_L2_ENTRY_DEPTH_SHIFT)
+ | ((uint64_t)idxL2Left << DBGF_BP_L2_ENTRY_LEFT_IDX_SHIFT);
+
+ ASMAtomicWriteU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2, u64LeftRightIdxDepthBpHnd2);
+}
+
+
+/**
+ * Updates the given L2 table entry with the right pointer.
+ *
+ * @param pL2Entry The L2 entry to update.
+ * @param idxL2Right The new right L2 table index.
+ * @param iDepth The new depth of the tree.
+ */
+DECLINLINE(void) dbgfBpL2TblEntryUpdateRight(PDBGFBPL2ENTRY pL2Entry, uint32_t idxL2Right, uint8_t iDepth)
+{
+ uint64_t u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2) & ( DBGF_BP_L2_ENTRY_BP_2ND_L2_ENTRY_MASK
+ | DBGF_BP_L2_ENTRY_LEFT_IDX_MASK);
+
+ u64LeftRightIdxDepthBpHnd2 |= ((uint64_t)iDepth << DBGF_BP_L2_ENTRY_DEPTH_SHIFT)
+ | ((uint64_t)idxL2Right << DBGF_BP_L2_ENTRY_RIGHT_IDX_SHIFT);
+
+ ASMAtomicWriteU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2, u64LeftRightIdxDepthBpHnd2);
+}
+
+#ifdef IN_RING3
+/**
+ * Returns the internal breakpoint owner state for the given handle.
+ *
+ * @returns Pointer to the internal breakpoint owner state or NULL if the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ * @param hBpOwner The breakpoint owner handle to resolve.
+ */
+DECLINLINE(PDBGFBPOWNERINT) dbgfR3BpOwnerGetByHnd(PUVM pUVM, DBGFBPOWNER hBpOwner)
+{
+ AssertReturn(hBpOwner < DBGF_BP_OWNER_COUNT_MAX, NULL);
+ AssertPtrReturn(pUVM->dbgf.s.pbmBpOwnersAllocR3, NULL);
+
+ AssertReturn(ASMBitTest(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner), NULL);
+ return &pUVM->dbgf.s.paBpOwnersR3[hBpOwner];
+}
+#endif
+
+#endif /* !VMM_INCLUDED_SRC_include_DBGFInline_h */
diff --git a/src/VBox/VMM/include/DBGFInternal.h b/src/VBox/VMM/include/DBGFInternal.h
new file mode 100644
index 00000000..195efa28
--- /dev/null
+++ b/src/VBox/VMM/include/DBGFInternal.h
@@ -0,0 +1,1522 @@
+/* $Id: DBGFInternal.h $ */
+/** @file
+ * DBGF - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_DBGFInternal_h
+#define VMM_INCLUDED_SRC_include_DBGFInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#ifdef IN_RING3
+# include <VBox/dis.h>
+#endif
+#include <VBox/types.h>
+#include <iprt/semaphore.h>
+#include <iprt/critsect.h>
+#include <iprt/string.h>
+#include <iprt/avl.h>
+#include <iprt/dbg.h>
+#include <iprt/tracelog.h>
+#include <VBox/vmm/dbgf.h>
+
+
+
+/** @defgroup grp_dbgf_int Internals
+ * @ingroup grp_dbgf
+ * @internal
+ * @{
+ */
+
+/** The maximum tracer instance (total) size, ring-0/raw-mode capable tracers. */
+#define DBGF_MAX_TRACER_INSTANCE_SIZE _512M
+/** The maximum tracers instance (total) size, ring-3 only tracers. */
+#define DBGF_MAX_TRACER_INSTANCE_SIZE_R3 _1G
+/** Event ringbuffer header size. */
+#define DBGF_TRACER_EVT_HDR_SZ (32)
+/** Event ringbuffer payload size. */
+#define DBGF_TRACER_EVT_PAYLOAD_SZ (32)
+/** Event ringbuffer entry size. */
+#define DBGF_TRACER_EVT_SZ (DBGF_TRACER_EVT_HDR_SZ + DBGF_TRACER_EVT_PAYLOAD_SZ)
+
+
+/** @name Global breakpoint table handling defines.
+ * @{ */
+/** Maximum number of breakpoint owners supported (power of two). */
+#define DBGF_BP_OWNER_COUNT_MAX _32K
+/** Maximum number of breakpoints supported (power of two). */
+#define DBGF_BP_COUNT_MAX _1M
+/** Size of a single breakpoint structure in bytes. */
+#define DBGF_BP_ENTRY_SZ 64
+/** Number of breakpoints handled in one chunk (power of two). */
+#define DBGF_BP_COUNT_PER_CHUNK _64K
+/** Number of chunks required to support all breakpoints. */
+#define DBGF_BP_CHUNK_COUNT (DBGF_BP_COUNT_MAX / DBGF_BP_COUNT_PER_CHUNK)
+/** Maximum number of instruction bytes when executing breakpointed instructions. */
+#define DBGF_BP_INSN_MAX 16
+/** @} */
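+
+/* Illustrative only (a sketch, not part of the DBGF interface): spelling out the
+ * arithmetic implied by the limits above.  Splitting DBGF_BP_COUNT_MAX (1M) entries
+ * into chunks of DBGF_BP_COUNT_PER_CHUNK (64K) gives 16 chunks, and a fully populated
+ * chunk occupies 64K * DBGF_BP_ENTRY_SZ = 4 MiB of shared breakpoint state. */
+#if 0 /* compile-time sanity sketch, illustration only */
+AssertCompile(DBGF_BP_CHUNK_COUNT == 16);
+AssertCompile(DBGF_BP_COUNT_PER_CHUNK * DBGF_BP_ENTRY_SZ == _4M);
+#endif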
+
+/** @name L2 lookup table limit defines.
+ * @{ */
+/** Maximum number of entries in the L2 lookup table. */
+#define DBGF_BP_L2_TBL_ENTRY_COUNT_MAX _512K
+/** Number of L2 entries handled in one chunk. */
+#define DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK _64K
+/** Number of chunks required to support all L2 lookup table entries. */
+#define DBGF_BP_L2_TBL_CHUNK_COUNT (DBGF_BP_L2_TBL_ENTRY_COUNT_MAX / DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK)
+/** @} */
+
+
+/*******************************************************************************
+* Structures and Typedefs *
+*******************************************************************************/
+
+/**
+ * Event entry types.
+ */
+typedef enum DBGFTRACEREVT
+{
+ /** Invalid type. */
+ DBGFTRACEREVT_INVALID = 0,
+ /** Register event source event. */
+ DBGFTRACEREVT_SRC_REGISTER,
+ /** Deregister event source event. */
+ DBGFTRACEREVT_SRC_DEREGISTER,
+ /** MMIO region create event. */
+ DBGFTRACEREVT_MMIO_REGION_CREATE,
+ /** MMIO map region event. */
+ DBGFTRACEREVT_MMIO_MAP,
+ /** MMIO unmap region event. */
+ DBGFTRACEREVT_MMIO_UNMAP,
+ /** MMIO read event. */
+ DBGFTRACEREVT_MMIO_READ,
+ /** MMIO write event. */
+ DBGFTRACEREVT_MMIO_WRITE,
+ /** MMIO fill event. */
+ DBGFTRACEREVT_MMIO_FILL,
+ /** I/O port region create event. */
+ DBGFTRACEREVT_IOPORT_REGION_CREATE,
+ /** I/O port map event. */
+ DBGFTRACEREVT_IOPORT_MAP,
+ /** I/O port unmap event. */
+ DBGFTRACEREVT_IOPORT_UNMAP,
+ /** I/O port read event. */
+ DBGFTRACEREVT_IOPORT_READ,
+ /** I/O port read string event. */
+ DBGFTRACEREVT_IOPORT_READ_STR,
+ /** I/O port write event. */
+ DBGFTRACEREVT_IOPORT_WRITE,
+ /** I/O port write string event. */
+ DBGFTRACEREVT_IOPORT_WRITE_STR,
+ /** IRQ event. */
+ DBGFTRACEREVT_IRQ,
+ /** I/O APIC MSI event. */
+ DBGFTRACEREVT_IOAPIC_MSI,
+ /** Read from guest physical memory. */
+ DBGFTRACEREVT_GCPHYS_READ,
+ /** Write to guest physical memory. */
+ DBGFTRACEREVT_GCPHYS_WRITE,
+ /** 32bit hack. */
+ DBGFTRACEREVT_32BIT_HACK
+} DBGFTRACEREVT;
+/** Pointer to a trace event entry type. */
+typedef DBGFTRACEREVT *PDBGFTRACEREVT;
+
+
+/**
+ * MMIO region create event.
+ */
+typedef struct DBGFTRACEREVTMMIOCREATE
+{
+ /** Unique region handle for the event source. */
+ uint64_t hMmioRegion;
+ /** Size of the region in bytes. */
+ RTGCPHYS cbRegion;
+ /** IOM flags passed to the region. */
+ uint32_t fIomFlags;
+ /** The PCI region for a PCI device. */
+ uint32_t iPciRegion;
+ /** Padding to 32byte. */
+ uint64_t u64Pad0;
+} DBGFTRACEREVTMMIOCREATE;
+/** Pointer to a MMIO region create event. */
+typedef DBGFTRACEREVTMMIOCREATE *PDBGFTRACEREVTMMIOCREATE;
+/** Pointer to a const MMIO region create event. */
+typedef const DBGFTRACEREVTMMIOCREATE *PCDBGFTRACEREVTMMIOCREATE;
+
+AssertCompileSize(DBGFTRACEREVTMMIOCREATE, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * MMIO region map event.
+ */
+typedef struct DBGFTRACEREVTMMIOMAP
+{
+ /** Unique region handle for the event source. */
+ uint64_t hMmioRegion;
+ /** The base guest physical address of the MMIO region. */
+ RTGCPHYS GCPhysMmioBase;
+ /** Padding to 32byte. */
+ uint64_t au64Pad0[2];
+} DBGFTRACEREVTMMIOMAP;
+/** Pointer to a MMIO map event. */
+typedef DBGFTRACEREVTMMIOMAP *PDBGFTRACEREVTMMIOMAP;
+/** Pointer to a const MMIO map event. */
+typedef const DBGFTRACEREVTMMIOMAP *PCDBGFTRACEREVTMMIOMAP;
+
+AssertCompileSize(DBGFTRACEREVTMMIOMAP, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * MMIO region unmap event.
+ */
+typedef struct DBGFTRACEREVTMMIOUNMAP
+{
+ /** Unique region handle for the event source. */
+ uint64_t hMmioRegion;
+ /** Padding to 32byte. */
+ uint64_t au64Pad0[3];
+} DBGFTRACEREVTMMIOUNMAP;
+/** Pointer to a MMIO region unmap event. */
+typedef DBGFTRACEREVTMMIOUNMAP *PDBGFTRACEREVTMMIOUNMAP;
+/** Pointer to a const MMIO region unmap event. */
+typedef const DBGFTRACEREVTMMIOUNMAP *PCDBGFTRACEREVTMMIOUNMAP;
+
+AssertCompileSize(DBGFTRACEREVTMMIOUNMAP, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * MMIO event.
+ */
+typedef struct DBGFTRACEREVTMMIO
+{
+ /** Unique region handle for the event source. */
+ uint64_t hMmioRegion;
+ /** Offset into the region the access happened. */
+ RTGCPHYS offMmio;
+    /** Number of bytes transferred (the direction is in the event header). */
+ uint64_t cbXfer;
+    /** The value transferred. */
+ uint64_t u64Val;
+} DBGFTRACEREVTMMIO;
+/** Pointer to a MMIO event. */
+typedef DBGFTRACEREVTMMIO *PDBGFTRACEREVTMMIO;
+/** Pointer to a const MMIO event. */
+typedef const DBGFTRACEREVTMMIO *PCDBGFTRACEREVTMMIO;
+
+AssertCompileSize(DBGFTRACEREVTMMIO, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * MMIO fill event.
+ */
+typedef struct DBGFTRACEREVTMMIOFILL
+{
+ /** Unique region handle for the event source. */
+ uint64_t hMmioRegion;
+ /** Offset into the region the access happened. */
+ RTGCPHYS offMmio;
+ /** Item size in bytes. */
+ uint32_t cbItem;
+ /** Amount of items being filled. */
+ uint32_t cItems;
+ /** The fill value. */
+ uint32_t u32Item;
+ /** Padding to 32bytes. */
+ uint32_t u32Pad0;
+} DBGFTRACEREVTMMIOFILL;
+/** Pointer to a MMIO fill event. */
+typedef DBGFTRACEREVTMMIOFILL *PDBGFTRACEREVTMMIOFILL;
+/** Pointer to a const MMIO fill event. */
+typedef const DBGFTRACEREVTMMIOFILL *PCDBGFTRACEREVTMMIOFILL;
+
+AssertCompileSize(DBGFTRACEREVTMMIOFILL, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O port region create event.
+ */
+typedef struct DBGFTRACEREVTIOPORTCREATE
+{
+ /** Unique I/O port region handle for the event source. */
+ uint64_t hIoPorts;
+ /** Number of ports. */
+ RTIOPORT cPorts;
+ /** Padding. */
+ uint16_t u16Pad0;
+ /** IOM flags passed to the region. */
+ uint32_t fIomFlags;
+ /** The PCI region for a PCI device. */
+ uint32_t iPciRegion;
+ /** Padding to 32byte. */
+ uint32_t u32Pad0[3];
+} DBGFTRACEREVTIOPORTCREATE;
+/** Pointer to an I/O port region create event. */
+typedef DBGFTRACEREVTIOPORTCREATE *PDBGFTRACEREVTIOPORTCREATE;
+/** Pointer to a const I/O port region create event. */
+typedef const DBGFTRACEREVTIOPORTCREATE *PCDBGFTRACEREVTIOPORTCREATE;
+
+AssertCompileSize(DBGFTRACEREVTIOPORTCREATE, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O port region map event.
+ */
+typedef struct DBGFTRACEREVTIOPORTMAP
+{
+ /** Unique I/O port region handle for the event source. */
+ uint64_t hIoPorts;
+ /** The base I/O port for the region. */
+ RTIOPORT IoPortBase;
+ /** Padding to 32byte. */
+ uint16_t au16Pad0[11];
+} DBGFTRACEREVTIOPORTMAP;
+/** Pointer to an I/O port region map event. */
+typedef DBGFTRACEREVTIOPORTMAP *PDBGFTRACEREVTIOPORTMAP;
+/** Pointer to a const I/O port region map event. */
+typedef const DBGFTRACEREVTIOPORTMAP *PCDBGFTRACEREVTIOPORTMAP;
+
+AssertCompileSize(DBGFTRACEREVTIOPORTMAP, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O port region unmap event.
+ */
+typedef struct DBGFTRACEREVTIOPORTUNMAP
+{
+ /** Unique region handle for the event source. */
+ uint64_t hIoPorts;
+ /** Padding to 32byte. */
+ uint64_t au64Pad0[3];
+} DBGFTRACEREVTIOPORTUNMAP;
+/** Pointer to an I/O port region unmap event. */
+typedef DBGFTRACEREVTIOPORTUNMAP *PDBGFTRACEREVTIOPORTUNMAP;
+/** Pointer to a const I/O port region unmap event. */
+typedef const DBGFTRACEREVTIOPORTUNMAP *PCDBGFTRACEREVTIOPORTUNMAP;
+
+AssertCompileSize(DBGFTRACEREVTIOPORTUNMAP, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O port event.
+ */
+typedef struct DBGFTRACEREVTIOPORT
+{
+ /** Unique region handle for the event source. */
+ uint64_t hIoPorts;
+ /** Offset into the I/O port region. */
+ RTIOPORT offPort;
+ /** 8 byte alignment. */
+ uint8_t abPad0[6];
+    /** Number of bytes transferred (the direction is in the event header). */
+ uint64_t cbXfer;
+    /** The value transferred. */
+ uint32_t u32Val;
+ /** Padding to 32bytes. */
+ uint8_t abPad1[4];
+} DBGFTRACEREVTIOPORT;
+/** Pointer to an I/O port event. */
+typedef DBGFTRACEREVTIOPORT *PDBGFTRACEREVTIOPORT;
+/** Pointer to a const I/O port event. */
+typedef const DBGFTRACEREVTIOPORT *PCDBGFTRACEREVTIOPORT;
+
+AssertCompileSize(DBGFTRACEREVTIOPORT, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O port string event.
+ */
+typedef struct DBGFTRACEREVTIOPORTSTR
+{
+ /** Unique region handle for the event source. */
+ uint64_t hIoPorts;
+ /** Item size in bytes. */
+ uint32_t cbItem;
+ /** Number of transfers requested - for writes this gives the amount of valid data following. */
+ uint32_t cTransfersReq;
+ /** Number of transfers done - for reads this gives the amount of valid data following. */
+ uint32_t cTransfersRet;
+ /** Offset into the I/O port region. */
+ RTIOPORT offPort;
+    /** Data being transferred. */
+ uint8_t abData[10];
+} DBGFTRACEREVTIOPORTSTR;
+/** Pointer to an I/O port string event. */
+typedef DBGFTRACEREVTIOPORTSTR *PDBGFTRACEREVTIOPORTSTR;
+/** Pointer to a const I/O port string event. */
+typedef const DBGFTRACEREVTIOPORTSTR *PCDBGFTRACEREVTIOPORTSTR;
+
+AssertCompileSize(DBGFTRACEREVTIOPORTSTR, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * IRQ event.
+ */
+typedef struct DBGFTRACEREVTIRQ
+{
+ /** The IRQ line. */
+ int32_t iIrq;
+ /** IRQ level flags. */
+ int32_t fIrqLvl;
+ /** Padding to 32bytes. */
+ uint32_t au32Pad0[6];
+} DBGFTRACEREVTIRQ;
+/** Pointer to an IRQ event. */
+typedef DBGFTRACEREVTIRQ *PDBGFTRACEREVTIRQ;
+/** Pointer to a const IRQ event. */
+typedef const DBGFTRACEREVTIRQ *PCDBGFTRACEREVTIRQ;
+
+AssertCompileSize(DBGFTRACEREVTIRQ, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * I/O APIC MSI event.
+ */
+typedef struct DBGFTRACEREVTIOAPICMSI
+{
+ /** The guest physical address being written. */
+ RTGCPHYS GCPhys;
+ /** The value being written. */
+ uint32_t u32Val;
+ /** Padding to 32bytes. */
+ uint32_t au32Pad0[5];
+} DBGFTRACEREVTIOAPICMSI;
+/** Pointer to an I/O APIC MSI event. */
+typedef DBGFTRACEREVTIOAPICMSI *PDBGFTRACEREVTIOAPICMSI;
+/** Pointer to a const I/O APIC MSI event. */
+typedef const DBGFTRACEREVTIOAPICMSI *PCDBGFTRACEREVTIOAPICMSI;
+
+AssertCompileSize(DBGFTRACEREVTIOAPICMSI, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * Guest physical memory transfer.
+ */
+typedef struct DBGFTRACEREVTGCPHYS
+{
+ /** Guest physical address of the access. */
+ RTGCPHYS GCPhys;
+    /** Number of bytes transferred (the direction is in the event header).
+ * If the number is small enough to fit into the remaining space of the entry
+ * it is stored here, otherwise it will be stored in the next entry (and following
+ * entries). */
+ uint64_t cbXfer;
+    /** Guest data being transferred. */
+ uint8_t abData[16];
+} DBGFTRACEREVTGCPHYS;
+/** Pointer to a guest physical memory transfer event. */
+typedef DBGFTRACEREVTGCPHYS *PDBGFTRACEREVTGCPHYS;
+/** Pointer to a const guest physical memory transfer event. */
+typedef const DBGFTRACEREVTGCPHYS *PCDBGFTRACEREVTGCPHYS;
+
+AssertCompileSize(DBGFTRACEREVTGCPHYS, DBGF_TRACER_EVT_PAYLOAD_SZ);
+
+
+/**
+ * A trace event header in the shared ring buffer.
+ */
+typedef struct DBGFTRACEREVTHDR
+{
+ /** Event ID. */
+ volatile uint64_t idEvt;
+ /** The previous event ID this one links to,
+ * DBGF_TRACER_EVT_HDR_ID_INVALID if it links to no other event. */
+ uint64_t idEvtPrev;
+ /** Event source. */
+ DBGFTRACEREVTSRC hEvtSrc;
+ /** The event entry type. */
+ DBGFTRACEREVT enmEvt;
+ /** Flags for this event. */
+ uint32_t fFlags;
+} DBGFTRACEREVTHDR;
+/** Pointer to a trace event header. */
+typedef DBGFTRACEREVTHDR *PDBGFTRACEREVTHDR;
+/** Pointer to a const trace event header. */
+typedef const DBGFTRACEREVTHDR *PCDBGFTRACEREVTHDR;
+
+AssertCompileSize(DBGFTRACEREVTHDR, DBGF_TRACER_EVT_HDR_SZ);
+
+/** Invalid event ID.  This is always set by the flush thread after processing an entry,
+ * so the producers know when they are about to overwrite entries in the ring buffer
+ * which have not been processed yet. */
+#define DBGF_TRACER_EVT_HDR_ID_INVALID UINT64_C(0xffffffffffffffff)
+
+/** The event came from R0. */
+#define DBGF_TRACER_EVT_HDR_F_R0 RT_BIT(0)
+
+/** Default event header tracer flags. */
+#ifdef IN_RING0
+# define DBGF_TRACER_EVT_HDR_F_DEFAULT DBGF_TRACER_EVT_HDR_F_R0
+#else
+# define DBGF_TRACER_EVT_HDR_F_DEFAULT (0)
+#endif
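+
+/* A hedged sketch (hypothetical helper, not the actual DBGF tracer code) of how the
+ * invalid event ID is meant to be used by producers: the slot for a new event follows
+ * from the monotonically increasing event ID, and a slot whose header still carries a
+ * valid ID has not been flushed yet and must not be overwritten. */
+#if 0 /* illustration only */
+DECLINLINE(PDBGFTRACEREVTHDR) dbgfTracerEvtTryClaimExample(uint8_t *pbRingBuf, size_t cbRingBuf, uint64_t idEvt)
+{
+    uint64_t const    cEvtSlots = cbRingBuf / DBGF_TRACER_EVT_SZ;       /* Fixed size slots (header + payload). */
+    PDBGFTRACEREVTHDR pEvtHdr   = (PDBGFTRACEREVTHDR)(pbRingBuf + (idEvt % cEvtSlots) * DBGF_TRACER_EVT_SZ);
+    if (pEvtHdr->idEvt == DBGF_TRACER_EVT_HDR_ID_INVALID)               /* Flushed already, free to reuse. */
+        return pEvtHdr;
+    return NULL;                                                        /* Would overwrite an unprocessed entry. */
+}
+#endif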
+
+
+/**
+ * Tracer instance data, shared structure.
+ */
+typedef struct DBGFTRACERSHARED
+{
+ /** The global event ID counter, monotonically increasing.
+ * Accessed by all threads causing a trace event. */
+ volatile uint64_t idEvt;
+ /** The SUP event semaphore for poking the flush thread. */
+ SUPSEMEVENT hSupSemEvtFlush;
+ /** Ring buffer size. */
+ size_t cbRingBuf;
+ /** Flag whether there are events in the ring buffer to get processed. */
+ volatile bool fEvtsWaiting;
+ /** Flag whether the flush thread is actively running or was kicked. */
+ volatile bool fFlushThrdActive;
+ /** Padding to a 64byte alignment. */
+ uint8_t abAlignment0[32];
+} DBGFTRACERSHARED;
+/** Pointer to the shared tracer instance data. */
+typedef DBGFTRACERSHARED *PDBGFTRACERSHARED;
+
+AssertCompileSizeAlignment(DBGFTRACERSHARED, 64);
+
+
+/**
+ * Guest memory read/write data aggregation.
+ */
+typedef struct DBGFTRACERGCPHYSRWAGG
+{
+ /** The event ID which started the aggregation (used for the group ID when writing out the event). */
+ uint64_t idEvtStart;
+ /** The previous event ID used to link all the chunks together. */
+ uint64_t idEvtPrev;
+    /** Number of bytes being transferred. */
+ size_t cbXfer;
+ /** Amount of data left to aggregate before it can be written. */
+ size_t cbLeft;
+ /** Amount of bytes allocated. */
+ size_t cbBufMax;
+ /** Offset into the buffer to write next. */
+ size_t offBuf;
+ /** Pointer to the allocated buffer. */
+ uint8_t *pbBuf;
+} DBGFTRACERGCPHYSRWAGG;
+/** Pointer to a guest memory read/write data aggregation structure. */
+typedef DBGFTRACERGCPHYSRWAGG *PDBGFTRACERGCPHYSRWAGG;
+
+
+/**
+ * Tracer instance data, ring-3
+ */
+typedef struct DBGFTRACERINSR3
+{
+ /** Pointer to the next instance.
+ * (Head is pointed to by PDM::pTracerInstances.) */
+ R3PTRTYPE(struct DBGFTRACERINSR3 *) pNextR3;
+ /** R3 pointer to the VM this instance was created for. */
+ PVMR3 pVMR3;
+ /** Tracer instance number. */
+ uint32_t idTracer;
+ /** Flag whether the tracer has the R0 part enabled. */
+ bool fR0Enabled;
+ /** Flag whether the tracer flush thread should shut down. */
+ volatile bool fShutdown;
+ /** Padding. */
+ bool afPad0[6];
+ /** Next event source ID to return for a source registration. */
+ volatile DBGFTRACEREVTSRC hEvtSrcNext;
+ /** Pointer to the shared tracer instance data. */
+ R3PTRTYPE(PDBGFTRACERSHARED) pSharedR3;
+ /** The I/O thread writing the log from the shared event ringbuffer. */
+ RTTHREAD hThrdFlush;
+ /** Pointer to the start of the ring buffer. */
+ R3PTRTYPE(uint8_t *) pbRingBufR3;
+ /** The last processed event ID. */
+ uint64_t idEvtLast;
+ /** The trace log writer handle. */
+ RTTRACELOGWR hTraceLog;
+ /** Guest memory data aggregation structures to track
+ * currently pending guest memory reads/writes. */
+ DBGFTRACERGCPHYSRWAGG aGstMemRwData[10];
+} DBGFTRACERINSR3;
+/** Pointer to a tracer instance - Ring-3 Ptr. */
+typedef R3PTRTYPE(DBGFTRACERINSR3 *) PDBGFTRACERINSR3;
+
+
+/**
+ * Private tracer instance data, ring-0
+ */
+typedef struct DBGFTRACERINSR0
+{
+ /** Pointer to the VM this instance was created for. */
+ R0PTRTYPE(PGVM) pGVM;
+ /** The tracer instance memory. */
+ RTR0MEMOBJ hMemObj;
+ /** The ring-3 mapping object. */
+ RTR0MEMOBJ hMapObj;
+ /** Pointer to the shared tracer instance data. */
+ R0PTRTYPE(PDBGFTRACERSHARED) pSharedR0;
+ /** Size of the ring buffer in bytes, kept here so R3 can not manipulate the ring buffer
+ * size afterwards to trick R0 into doing something harmful. */
+ size_t cbRingBuf;
+ /** Pointer to the start of the ring buffer. */
+ R0PTRTYPE(uint8_t *) pbRingBufR0;
+} DBGFTRACERINSR0;
+/** Pointer to a VM - Ring-0 Ptr. */
+typedef R0PTRTYPE(DBGFTRACERINSR0 *) PDBGFTRACERINSR0;
+
+
+/**
+ * Private device instance data, raw-mode
+ */
+typedef struct DBGFTRACERINSRC
+{
+ /** Pointer to the VM this instance was created for. */
+ RGPTRTYPE(PVM) pVMRC;
+} DBGFTRACERINSRC;
+
+
+#ifdef IN_RING3
+DECLHIDDEN(int) dbgfTracerR3EvtPostSingle(PVMCC pVM, PDBGFTRACERINSCC pThisCC, DBGFTRACEREVTSRC hEvtSrc,
+ DBGFTRACEREVT enmTraceEvt, const void *pvEvtDesc, size_t cbEvtDesc,
+ uint64_t *pidEvt);
+#endif
+
+/** VMM Debugger Command. */
+typedef enum DBGFCMD
+{
+ /** No command.
+ * This is assigned to the field by the emulation thread after
+ * a command has been completed. */
+ DBGFCMD_NO_COMMAND = 0,
+ /** Halt the VM. */
+ DBGFCMD_HALT,
+ /** Resume execution. */
+ DBGFCMD_GO,
+ /** Single step execution - stepping into calls. */
+ DBGFCMD_SINGLE_STEP
+} DBGFCMD;
+
+/**
+ * VMM Debugger Command data.
+ */
+typedef union DBGFCMDDATA
+{
+ uint32_t uDummy;
+} DBGFCMDDATA;
+/** Pointer to DBGF Command Data. */
+typedef DBGFCMDDATA *PDBGFCMDDATA;
+
+/**
+ * Info type.
+ */
+typedef enum DBGFINFOTYPE
+{
+ /** Invalid. */
+ DBGFINFOTYPE_INVALID = 0,
+ /** Device owner. */
+ DBGFINFOTYPE_DEV,
+ /** Driver owner. */
+ DBGFINFOTYPE_DRV,
+ /** Internal owner. */
+ DBGFINFOTYPE_INT,
+ /** External owner. */
+ DBGFINFOTYPE_EXT,
+    /** Device owner, argv. */
+    DBGFINFOTYPE_DEV_ARGV,
+    /** Driver owner, argv. */
+    DBGFINFOTYPE_DRV_ARGV,
+    /** USB device owner, argv. */
+    DBGFINFOTYPE_USB_ARGV,
+ /** Internal owner, argv. */
+ DBGFINFOTYPE_INT_ARGV,
+    /** External owner, argv. */
+ DBGFINFOTYPE_EXT_ARGV
+} DBGFINFOTYPE;
+
+
+/** Pointer to info structure. */
+typedef struct DBGFINFO *PDBGFINFO;
+
+#ifdef IN_RING3
+/**
+ * Info structure.
+ */
+typedef struct DBGFINFO
+{
+ /** The flags. */
+ uint32_t fFlags;
+ /** Owner type. */
+ DBGFINFOTYPE enmType;
+ /** Per type data. */
+ union
+ {
+ /** DBGFINFOTYPE_DEV */
+ struct
+ {
+ /** Device info handler function. */
+ PFNDBGFHANDLERDEV pfnHandler;
+ /** The device instance. */
+ PPDMDEVINS pDevIns;
+ } Dev;
+
+ /** DBGFINFOTYPE_DRV */
+ struct
+ {
+ /** Driver info handler function. */
+ PFNDBGFHANDLERDRV pfnHandler;
+ /** The driver instance. */
+ PPDMDRVINS pDrvIns;
+ } Drv;
+
+ /** DBGFINFOTYPE_INT */
+ struct
+ {
+ /** Internal info handler function. */
+ PFNDBGFHANDLERINT pfnHandler;
+ } Int;
+
+ /** DBGFINFOTYPE_EXT */
+ struct
+ {
+ /** External info handler function. */
+ PFNDBGFHANDLEREXT pfnHandler;
+ /** The user argument. */
+ void *pvUser;
+ } Ext;
+
+ /** DBGFINFOTYPE_DEV_ARGV */
+ struct
+ {
+ /** Device info handler function. */
+ PFNDBGFINFOARGVDEV pfnHandler;
+ /** The device instance. */
+ PPDMDEVINS pDevIns;
+ } DevArgv;
+
+ /** DBGFINFOTYPE_DRV_ARGV */
+ struct
+ {
+ /** Driver info handler function. */
+ PFNDBGFINFOARGVDRV pfnHandler;
+ /** The driver instance. */
+ PPDMDRVINS pDrvIns;
+ } DrvArgv;
+
+ /** DBGFINFOTYPE_USB_ARGV */
+ struct
+ {
+        /** USB device info handler function. */
+        PFNDBGFINFOARGVUSB  pfnHandler;
+        /** The USB device instance. */
+        PPDMUSBINS          pUsbIns;
+ } UsbArgv;
+
+ /** DBGFINFOTYPE_INT_ARGV */
+ struct
+ {
+ /** Internal info handler function. */
+ PFNDBGFINFOARGVINT pfnHandler;
+ } IntArgv;
+
+ /** DBGFINFOTYPE_EXT_ARGV */
+ struct
+ {
+ /** External info handler function. */
+ PFNDBGFINFOARGVEXT pfnHandler;
+ /** The user argument. */
+ void *pvUser;
+ } ExtArgv;
+ } u;
+
+ /** Pointer to the description. */
+ const char *pszDesc;
+ /** Pointer to the next info structure. */
+ PDBGFINFO pNext;
+ /** The identifier name length. */
+ size_t cchName;
+ /** The identifier name. (Extends 'beyond' the struct as usual.) */
+ char szName[1];
+} DBGFINFO;
+#endif /* IN_RING3 */
+
+
+#ifdef IN_RING3
+/**
+ * Guest OS digger instance.
+ */
+typedef struct DBGFOS
+{
+ /** Pointer to the registration record. */
+ PCDBGFOSREG pReg;
+ /** Pointer to the next OS we've registered. */
+ struct DBGFOS *pNext;
+ /** List of EMT interface wrappers. */
+ struct DBGFOSEMTWRAPPER *pWrapperHead;
+ /** The instance data (variable size). */
+ uint8_t abData[16];
+} DBGFOS;
+#endif
+/** Pointer to guest OS digger instance. */
+typedef struct DBGFOS *PDBGFOS;
+/** Pointer to const guest OS digger instance. */
+typedef struct DBGFOS const *PCDBGFOS;
+
+
+/** An invalid breakpoint chunk ID. */
+#define DBGF_BP_CHUNK_ID_INVALID UINT32_MAX
+/** Generates a unique breakpoint handle from the given chunk ID and entry inside the chunk. */
+#define DBGF_BP_HND_CREATE(a_idChunk, a_idEntry)         RT_MAKE_U32(a_idEntry, a_idChunk)
+/** Returns the chunk ID from the given breakpoint handle. */
+#define DBGF_BP_HND_GET_CHUNK_ID(a_hBp) ((uint32_t)RT_HI_U16(a_hBp))
+/** Returns the entry index inside a chunk from the given breakpoint handle. */
+#define DBGF_BP_HND_GET_ENTRY(a_hBp) ((uint32_t)RT_LO_U16(a_hBp))
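+
+/* A minimal usage sketch (illustration only) of the handle helpers above: the chunk ID
+ * ends up in the upper 16 bits of the handle and the per-chunk entry index in the lower
+ * 16 bits, so the two getters undo DBGF_BP_HND_CREATE exactly. */
+#if 0 /* illustration only */
+static void dbgfBpHndEncodingExample(void)
+{
+    DBGFBP const hBp = DBGF_BP_HND_CREATE(2 /*idChunk*/, 0x0123 /*idEntry*/);
+    Assert(DBGF_BP_HND_GET_CHUNK_ID(hBp) == 2);
+    Assert(DBGF_BP_HND_GET_ENTRY(hBp)    == 0x0123);
+}
+#endif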
+
+
+/** @name DBGF int3 L1 lookup table entry types.
+ * @{ */
+/** No breakpoint handle assigned for this entry - special value which can be used
+ * for comparison with the whole entry. */
+#define DBGF_BP_INT3_L1_ENTRY_TYPE_NULL UINT32_C(0)
+/** Direct breakpoint handle. */
+#define DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND 1
+/** Index into the L2 tree denoting the root of a search tree. */
+#define DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX 2
+/** @} */
+
+
+/** Returns the entry type for the given L1 lookup table entry. */
+#define DBGF_BP_INT3_L1_ENTRY_GET_TYPE(a_u32Entry) ((a_u32Entry) >> 28)
+/** Returns a DBGF breakpoint handle from the given L1 lookup table entry,
+ * type needs to be DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND. */
+#define DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(a_u32Entry) ((DBGFBP)((a_u32Entry) & UINT32_C(0x0fffffff)))
+/** Returns a L2 index from the given L1 lookup table entry,
+ * type needs to be DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX. */
+#define DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(a_u32Entry) ((a_u32Entry) & UINT32_C(0x0fffffff))
+/** Creates a L1 entry value from the given type and data. */
+#define DBGF_BP_INT3_L1_ENTRY_CREATE(a_Type, a_u32Data) ((((uint32_t)(a_Type)) << 28) | ((a_u32Data) & UINT32_C(0x0fffffff)))
+/** Creates a breakpoint handle type L1 lookup entry. */
+#define DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(a_hBp) DBGF_BP_INT3_L1_ENTRY_CREATE(DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, a_hBp)
+/** Creates a L2 index type L1 lookup entry. */
+#define DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(a_idxL2) DBGF_BP_INT3_L1_ENTRY_CREATE(DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, a_idxL2)
+
+/** Extracts the lowest bits from the given GC pointer used as an index into the L1 lookup table. */
+#define DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(a_GCPtr) ((uint16_t)((a_GCPtr) & UINT16_C(0xffff)))
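+
+/* A small sketch (illustration only, assuming a valid 28-bit breakpoint handle) of the
+ * L1 entry encoding above: the entry type lives in the top four bits, the payload (a
+ * breakpoint handle or an L2 root index) in the lower 28 bits, and the low 16 bits of
+ * the guest address select the L1 slot. */
+#if 0 /* illustration only */
+static void dbgfBpL1EntryEncodingExample(RTGCPTR GCPtrBp, DBGFBP hBp)
+{
+    uint16_t const idxL1    = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrBp);
+    uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
+    Assert(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry)   == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND);
+    Assert(DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry) == hBp);
+    NOREF(idxL1);
+}
+#endif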
+
+/**
+ * The internal breakpoint owner state, shared part.
+ */
+typedef struct DBGFBPOWNERINT
+{
+    /** Reference counter indicating how many breakpoints use this owner currently. */
+ volatile uint32_t cRefs;
+ /** Padding. */
+ uint32_t u32Pad0;
+ /** Callback to call when a breakpoint has hit, Ring-3 Ptr. */
+ R3PTRTYPE(PFNDBGFBPHIT) pfnBpHitR3;
+ /** Callback to call when a I/O breakpoint has hit, Ring-3 Ptr. */
+ R3PTRTYPE(PFNDBGFBPIOHIT) pfnBpIoHitR3;
+ /** Padding. */
+ uint64_t u64Pad1;
+} DBGFBPOWNERINT;
+AssertCompileSize(DBGFBPOWNERINT, 32);
+/** Pointer to an internal breakpoint owner state, shared part. */
+typedef DBGFBPOWNERINT *PDBGFBPOWNERINT;
+/** Pointer to a constant internal breakpoint owner state, shared part. */
+typedef const DBGFBPOWNERINT *PCDBGFBPOWNERINT;
+
+
+/**
+ * The internal breakpoint owner state, Ring-0 part.
+ */
+typedef struct DBGFBPOWNERINTR0
+{
+    /** Reference counter indicating how many breakpoints use this owner currently. */
+ volatile uint32_t cRefs;
+ /** Padding. */
+ uint32_t u32Pad0;
+ /** Callback to call when a breakpoint has hit, Ring-0 Ptr. */
+ R0PTRTYPE(PFNDBGFBPHIT) pfnBpHitR0;
+ /** Callback to call when a I/O breakpoint has hit, Ring-0 Ptr. */
+ R0PTRTYPE(PFNDBGFBPIOHIT) pfnBpIoHitR0;
+ /** Padding. */
+ uint64_t u64Pad1;
+} DBGFBPOWNERINTR0;
+AssertCompileSize(DBGFBPOWNERINTR0, 32);
+/** Pointer to an internal breakpoint owner state, shared part. */
+typedef DBGFBPOWNERINTR0 *PDBGFBPOWNERINTR0;
+/** Pointer to a constant internal breakpoint owner state, shared part. */
+typedef const DBGFBPOWNERINTR0 *PCDBGFBPOWNERINTR0;
+
+
+/**
+ * The internal breakpoint state, shared part.
+ */
+typedef struct DBGFBPINT
+{
+ /** The publicly visible part. */
+ DBGFBPPUB Pub;
+ /** The opaque user argument for the owner callback, Ring-3 Ptr. */
+ R3PTRTYPE(void *) pvUserR3;
+} DBGFBPINT;
+AssertCompileSize(DBGFBPINT, DBGF_BP_ENTRY_SZ);
+/** Pointer to an internal breakpoint state. */
+typedef DBGFBPINT *PDBGFBPINT;
+/** Pointer to an const internal breakpoint state. */
+typedef const DBGFBPINT *PCDBGFBPINT;
+
+
+/**
+ * The internal breakpoint state, R0 part.
+ */
+typedef struct DBGFBPINTR0
+{
+ /** The owner handle. */
+ DBGFBPOWNER hOwner;
+ /** Flag whether the breakpoint is in use. */
+ bool fInUse;
+ /** Padding to 8 byte alignment. */
+ bool afPad[3];
+ /** Opaque user data for the owner callback, Ring-0 Ptr. */
+ R0PTRTYPE(void *) pvUserR0;
+} DBGFBPINTR0;
+AssertCompileMemberAlignment(DBGFBPINTR0, pvUserR0, 8);
+AssertCompileSize(DBGFBPINTR0, 16);
+/** Pointer to an internal breakpoint state - Ring-0 Ptr. */
+typedef R0PTRTYPE(DBGFBPINTR0 *) PDBGFBPINTR0;
+
+
+/**
+ * Hardware breakpoint state.
+ */
+typedef struct DBGFBPHW
+{
+ /** The flat GC address of the breakpoint. */
+ RTGCUINTPTR GCPtr;
+ /** The breakpoint handle if active, NIL_DBGFBP if not in use. */
+ volatile DBGFBP hBp;
+ /** The access type (one of the X86_DR7_RW_* value). */
+ uint8_t fType;
+ /** The access size. */
+ uint8_t cb;
+ /** Flag whether the breakpoint is currently enabled. */
+ volatile bool fEnabled;
+ /** Padding. */
+ uint8_t bPad;
+} DBGFBPHW;
+AssertCompileSize(DBGFBPHW, 16);
+/** Pointer to a hardware breakpoint state. */
+typedef DBGFBPHW *PDBGFBPHW;
+/** Pointer to a const hardware breakpoint state. */
+typedef const DBGFBPHW *PCDBGFBPHW;
+
+
+/**
+ * A breakpoint table chunk, ring-3 state.
+ */
+typedef struct DBGFBPCHUNKR3
+{
+ /** Pointer to the R3 base of the chunk. */
+ R3PTRTYPE(PDBGFBPINT) pBpBaseR3;
+ /** Bitmap of free/occupied breakpoint entries. */
+ R3PTRTYPE(volatile void *) pbmAlloc;
+ /** Number of free breakpoints in the chunk. */
+ volatile uint32_t cBpsFree;
+ /** The chunk index this tracking structure refers to. */
+ uint32_t idChunk;
+} DBGFBPCHUNKR3;
+/** Pointer to a breakpoint table chunk - Ring-3 Ptr. */
+typedef DBGFBPCHUNKR3 *PDBGFBPCHUNKR3;
+/** Pointer to a const breakpoint table chunk - Ring-3 Ptr. */
+typedef const DBGFBPCHUNKR3 *PCDBGFBPCHUNKR3;
+
+
+/**
+ * Breakpoint table chunk, ring-0 state.
+ */
+typedef struct DBGFBPCHUNKR0
+{
+    /** The chunk's memory. */
+ RTR0MEMOBJ hMemObj;
+ /** The ring-3 mapping object. */
+ RTR0MEMOBJ hMapObj;
+ /** Pointer to the breakpoint entries base. */
+ R0PTRTYPE(PDBGFBPINT) paBpBaseSharedR0;
+ /** Pointer to the Ring-0 only part of the breakpoints. */
+ PDBGFBPINTR0 paBpBaseR0Only;
+} DBGFBPCHUNKR0;
+/** Pointer to a breakpoint table chunk - Ring-0 Ptr. */
+typedef R0PTRTYPE(DBGFBPCHUNKR0 *) PDBGFBPCHUNKR0;
+
+
+/**
+ * L2 lookup table entry.
+ *
+ * @remark The order of the members matters to be able to atomically update
+ * the AVL left/right pointers and depth with a single 64bit atomic write.
+ * @verbatim
+ * 7 6 5 4 3 2 1 0
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | hBp[15:0] | GCPtrKey[63:16] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | hBp[27:16] | iDepth | idxRight[21:0] | idxLeft[21:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * \_8 bits_/
+ * @endverbatim
+ */
+typedef struct DBGFBPL2ENTRY
+{
+ /** The upper 6 bytes of the breakpoint address and the low 16 bits of the breakpoint handle. */
+ volatile uint64_t u64GCPtrKeyAndBpHnd1;
+ /** Left/right lower index, tree depth and remaining 12 bits of the breakpoint handle. */
+ volatile uint64_t u64LeftRightIdxDepthBpHnd2;
+} DBGFBPL2ENTRY;
+AssertCompileSize(DBGFBPL2ENTRY, 16);
+/** Pointer to a L2 lookup table entry. */
+typedef DBGFBPL2ENTRY *PDBGFBPL2ENTRY;
+/** Pointer to a const L2 lookup table entry. */
+typedef const DBGFBPL2ENTRY *PCDBGFBPL2ENTRY;
+
+/** Extracts the part from the given GC pointer used as the key in the L2 binary search tree. */
+#define DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(a_GCPtr) ((uint64_t)((a_GCPtr) >> 16))
+
+/** An invalid L2 lookup table chunk ID. */
+#define DBGF_BP_L2_IDX_CHUNK_ID_INVALID                  UINT32_MAX
+/** Generates a unique L2 table index from the given chunk ID and entry inside the chunk. */
+#define DBGF_BP_L2_IDX_CREATE(a_idChunk, a_idEntry)      RT_MAKE_U32(a_idEntry, a_idChunk)
+/** Returns the chunk ID from the given L2 table index. */
+#define DBGF_BP_L2_IDX_GET_CHUNK_ID(a_idxL2)             ((uint32_t)RT_HI_U16(a_idxL2))
+/** Returns the entry index inside a chunk from the given L2 table index. */
+#define DBGF_BP_L2_IDX_GET_ENTRY(a_idxL2)                ((uint32_t)RT_LO_U16(a_idxL2))
+
+/** Number of bits for the left/right index pointers. */
+#define DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_BITS 22
+/** Special index value marking the end of a tree. */
+#define DBGF_BP_L2_ENTRY_IDX_END UINT32_C(0x3fffff)
+/** Number of bits to shift the breakpoint handle in the first part. */
+#define DBGF_BP_L2_ENTRY_BP_1ST_SHIFT 48
+/** Mask for the first part of the breakpoint handle. */
+#define DBGF_BP_L2_ENTRY_BP_1ST_MASK UINT32_C(0x0000ffff)
+/** Number of bits to shift the breakpoint handle in the second part. */
+#define DBGF_BP_L2_ENTRY_BP_2ND_SHIFT 52
+/** Mask for the second part of the breakpoint handle. */
+#define DBGF_BP_L2_ENTRY_BP_2ND_MASK UINT32_C(0x0fff0000)
+/** Mask for the second part of the breakpoint handle stored in the L2 entry. */
+#define DBGF_BP_L2_ENTRY_BP_2ND_L2_ENTRY_MASK UINT64_C(0xfff0000000000000)
+/** Number of bits to shift the depth in the second part. */
+#define DBGF_BP_L2_ENTRY_DEPTH_SHIFT 44
+/** Mask for the depth. */
+#define DBGF_BP_L2_ENTRY_DEPTH_MASK UINT8_MAX
+/** Number of bits to shift the right L2 index in the second part. */
+#define DBGF_BP_L2_ENTRY_RIGHT_IDX_SHIFT 22
+/** Number of bits to shift the left L2 index in the second part. */
+#define DBGF_BP_L2_ENTRY_LEFT_IDX_SHIFT 0
+/** Index mask. */
+#define DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK (RT_BIT_32(DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_BITS) - 1)
+/** Left index mask. */
+#define DBGF_BP_L2_ENTRY_LEFT_IDX_MASK (DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK << DBGF_BP_L2_ENTRY_LEFT_IDX_SHIFT)
+/** Right index mask. */
+#define DBGF_BP_L2_ENTRY_RIGHT_IDX_MASK (DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK << DBGF_BP_L2_ENTRY_RIGHT_IDX_SHIFT)
+/** Returns the upper 6 bytes of the GC pointer from the given breakpoint entry. */
+#define DBGF_BP_L2_ENTRY_GET_GCPTR(a_u64GCPtrKeyAndBpHnd1) ((a_u64GCPtrKeyAndBpHnd1) & UINT64_C(0x0000ffffffffffff))
+/** Returns the breakpoint handle from both L2 entry members. */
+#define DBGF_BP_L2_ENTRY_GET_BP_HND(a_u64GCPtrKeyAndBpHnd1, a_u64LeftRightIdxDepthBpHnd2) \
+ ((DBGFBP)(((a_u64GCPtrKeyAndBpHnd1) >> DBGF_BP_L2_ENTRY_BP_1ST_SHIFT) | (((a_u64LeftRightIdxDepthBpHnd2) >> DBGF_BP_L2_ENTRY_BP_2ND_SHIFT) << 16)))
+/** Extracts the depth from the second 64-bit L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_DEPTH(a_u64LeftRightIdxDepthBpHnd2) ((uint8_t)(((a_u64LeftRightIdxDepthBpHnd2) >> DBGF_BP_L2_ENTRY_DEPTH_SHIFT) & DBGF_BP_L2_ENTRY_DEPTH_MASK))
+/** Extracts the lower right index value from the L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(a_u64LeftRightIdxDepthBpHnd2) \
+ ((uint32_t)(((a_u64LeftRightIdxDepthBpHnd2) >> 22) & DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK))
+/** Extracts the lower left index value from the L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_IDX_LEFT(a_u64LeftRightIdxDepthBpHnd2) \
+ ((uint32_t)((a_u64LeftRightIdxDepthBpHnd2) & DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK))
+
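+/*
+ * Illustrative sketch (not part of the build): unpacking a DBGFBPL2ENTRY with
+ * the getters above.  pL2Entry is a hypothetical PDBGFBPL2ENTRY pointing at a
+ * populated entry.
+ *
+ *      uint64_t const u64Word1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
+ *      uint64_t const u64Word2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
+ *      uint64_t const uKey     = DBGF_BP_L2_ENTRY_GET_GCPTR(u64Word1);     // == DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtrBp)
+ *      DBGFBP   const hBp      = DBGF_BP_L2_ENTRY_GET_BP_HND(u64Word1, u64Word2);
+ *      uint8_t  const iDepth   = DBGF_BP_L2_ENTRY_GET_DEPTH(u64Word2);
+ *      uint32_t const idxLeft  = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(u64Word2);  // DBGF_BP_L2_ENTRY_IDX_END if none
+ *      uint32_t const idxRight = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(u64Word2);
+ */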
+
+/**
+ * A breakpoint L2 lookup table chunk, ring-3 state.
+ */
+typedef struct DBGFBPL2TBLCHUNKR3
+{
+ /** Pointer to the R3 base of the chunk. */
+ R3PTRTYPE(PDBGFBPL2ENTRY) pL2BaseR3;
+ /** Bitmap of free/occupied L2 table entries. */
+ R3PTRTYPE(volatile void *) pbmAlloc;
+ /** Number of free entries in the chunk. */
+ volatile uint32_t cFree;
+ /** The chunk index this tracking structure refers to. */
+ uint32_t idChunk;
+} DBGFBPL2TBLCHUNKR3;
+/** Pointer to a breakpoint L2 lookup table chunk - Ring-3 Ptr. */
+typedef DBGFBPL2TBLCHUNKR3 *PDBGFBPL2TBLCHUNKR3;
+/** Pointer to a const breakpoint L2 lookup table chunk - Ring-3 Ptr. */
+typedef const DBGFBPL2TBLCHUNKR3 *PCDBGFBPL2TBLCHUNKR3;
+
+
+/**
+ * Breakpoint L2 lookup table chunk, ring-0 state.
+ */
+typedef struct DBGFBPL2TBLCHUNKR0
+{
+ /** The chunk's memory. */
+ RTR0MEMOBJ hMemObj;
+ /** The ring-3 mapping object. */
+ RTR0MEMOBJ hMapObj;
+ /** Pointer to the breakpoint entries base. */
+ R0PTRTYPE(PDBGFBPL2ENTRY) paBpL2TblBaseSharedR0;
+} DBGFBPL2TBLCHUNKR0;
+/** Pointer to a breakpoint L2 lookup table chunk - Ring-0 Ptr. */
+typedef R0PTRTYPE(DBGFBPL2TBLCHUNKR0 *) PDBGFBPL2TBLCHUNKR0;
+
+
+
+/**
+ * DBGF Data (part of VM)
+ */
+typedef struct DBGF
+{
+ /** Bitmap of enabled hardware interrupt breakpoints. */
+ uint32_t bmHardIntBreakpoints[256 / 32];
+ /** Bitmap of enabled software interrupt breakpoints. */
+ uint32_t bmSoftIntBreakpoints[256 / 32];
+ /** Bitmap of selected events.
+ * This includes non-selectable events too for simplicity; we maintain the
+ * state for some of these, as it may come in handy. */
+ uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
+
+ /** Enabled hardware interrupt breakpoints. */
+ uint32_t cHardIntBreakpoints;
+ /** Enabled software interrupt breakpoints. */
+ uint32_t cSoftIntBreakpoints;
+
+ /** The number of selected events. */
+ uint32_t cSelectedEvents;
+
+ /** The number of enabled hardware breakpoints. */
+ uint8_t cEnabledHwBreakpoints;
+ /** The number of enabled hardware I/O breakpoints. */
+ uint8_t cEnabledHwIoBreakpoints;
+ uint8_t au8Alignment1[2]; /**< Alignment padding. */
+ /** The number of enabled INT3 breakpoints. */
+ uint32_t volatile cEnabledInt3Breakpoints;
+
+ /** Debugger Attached flag.
+ * Set if a debugger is attached, otherwise it is clear.
+ */
+ bool volatile fAttached;
+
+ /** Stepping filtering. */
+ struct
+ {
+ /** The CPU doing the stepping.
+ * Set to NIL_VMCPUID when filtering is inactive */
+ VMCPUID idCpu;
+ /** The specified flags. */
+ uint32_t fFlags;
+ /** The effective PC address to stop at, if given. */
+ RTGCPTR AddrPc;
+ /** The lowest effective stack address to stop at.
+ * Together with cbStackPop, this forms a range of effective stack pointer
+ * addresses that we stop for. */
+ RTGCPTR AddrStackPop;
+ /** The size of the stack stop area starting at AddrStackPop. */
+ RTGCPTR cbStackPop;
+ /** Maximum number of steps. */
+ uint32_t cMaxSteps;
+
+ /** Number of steps made thus far. */
+ uint32_t cSteps;
+ /** Current call counting balance for step-over handling. */
+ uint32_t uCallDepth;
+
+ uint32_t u32Padding; /**< Alignment padding. */
+
+ } SteppingFilter;
+
+ uint32_t au32Alignment2[2]; /**< Alignment padding. */
+
+ /** @name Breakpoint handling related state.
+ * @{ */
+ /** Array of hardware breakpoints (0..3).
+ * This is shared among all the CPUs because life is much simpler that way. */
+ DBGFBPHW aHwBreakpoints[4];
+ /** @} */
+
+ /**
+ * Bug check data.
+ * @note This will not be reset on reset.
+ */
+ struct
+ {
+ /** The ID of the CPU reporting it. */
+ VMCPUID idCpu;
+ /** The event associated with the bug check (gives source).
+ * This is set to DBGFEVENT_END if there is no BSOD data here. */
+ DBGFEVENTTYPE enmEvent;
+ /** The total reset count at the time (VMGetResetCount). */
+ uint32_t uResetNo;
+ /** Explicit padding. */
+ uint32_t uPadding;
+ /** When it was reported (TMVirtualGet). */
+ uint64_t uTimestamp;
+ /** The bug check number.
+ * @note This is really just 32-bit wide, see KeBugCheckEx. */
+ uint64_t uBugCheck;
+ /** The bug check parameters. */
+ uint64_t auParameters[4];
+ } BugCheck;
+} DBGF;
+AssertCompileMemberAlignment(DBGF, aHwBreakpoints, 8);
+AssertCompileMemberAlignment(DBGF, bmHardIntBreakpoints, 8);
+/** Pointer to DBGF Data. */
+typedef DBGF *PDBGF;
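+
+/*
+ * Illustrative sketch (not part of the build): testing whether an event is
+ * selected in DBGF::bmSelectedEvents, assuming pVM->dbgf.s is the DBGF
+ * instance data and enmEvent is a DBGFEVENTTYPE value.
+ *
+ *      bool const fSelected = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
+ */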
+
+
+/**
+ * Event state (for DBGFCPU::aEvents).
+ */
+typedef enum DBGFEVENTSTATE
+{
+ /** Invalid event stack entry. */
+ DBGFEVENTSTATE_INVALID = 0,
+ /** The current event stack entry. */
+ DBGFEVENTSTATE_CURRENT,
+ /** Event that should be ignored but hasn't yet actually been ignored. */
+ DBGFEVENTSTATE_IGNORE,
+ /** Event that has been ignored but may be restored to IGNORE should another
+ * debug event fire before the instruction is completed. */
+ DBGFEVENTSTATE_RESTORABLE,
+ /** End of valid events. */
+ DBGFEVENTSTATE_END,
+ /** Make sure we've got a 32-bit type. */
+ DBGFEVENTSTATE_32BIT_HACK = 0x7fffffff
+} DBGFEVENTSTATE;
+
+
+/** Converts a DBGFCPU pointer into a VM pointer. */
+#define DBGFCPU_2_VM(pDbgfCpu) ((PVM)((uint8_t *)(pDbgfCpu) + (pDbgfCpu)->offVM))
+
+/**
+ * The per CPU data for DBGF.
+ */
+typedef struct DBGFCPU
+{
+ /** The offset into the VM structure.
+ * @see DBGFCPU_2_VM(). */
+ uint32_t offVM;
+
+ /** Flag whether to invoke any owner handlers in ring-3 before dropping into the debugger. */
+ bool fBpInvokeOwnerCallback;
+ /** Set if we're singlestepping in raw mode.
+ * This is checked and cleared in the \#DB handler. */
+ bool fSingleSteppingRaw;
+ /** Flag whether an I/O breakpoint is pending. */
+ bool fBpIoActive;
+ /** Flag whether the I/O breakpoint hit before or after the access. */
+ bool fBpIoBefore;
+ /** Current active breakpoint handle.
+ * This is NIL_DBGFBP if not active. It is set when an execution engine
+ * encounters a breakpoint and returns VINF_EM_DBG_BREAKPOINT.
+ *
+ * @todo drop this in favor of aEvents! */
+ DBGFBP hBpActive;
+ /** The access mask for a pending I/O breakpoint. */
+ uint32_t fBpIoAccess;
+ /** The address of the access. */
+ uint64_t uBpIoAddress;
+ /** The value of the access. */
+ uint64_t uBpIoValue;
+
+ /** The number of events on the stack (aEvents).
+ * The pending event is the last one (aEvents[cEvents - 1]), but only when
+ * enmState is DBGFEVENTSTATE_CURRENT. */
+ uint32_t cEvents;
+ /** Events - current, ignoring and ignored.
+ *
+ * We maintain a stack of events in order to try to avoid ending up in an
+ * infinite loop when resuming after an event fired. There are cases where we
+ * may end up generating additional events before the instruction can be
+ * executed successfully, for instance an XCHG on MMIO with separate read and
+ * write breakpoints, or a MOVSB instruction working on breakpointed MMIO as
+ * both source and destination.
+ *
+ * So, when resuming after dropping into the debugger for an event, we convert
+ * the DBGFEVENTSTATE_CURRENT event into a DBGFEVENTSTATE_IGNORE event, leaving
+ * cEvents unchanged. If the event is reported again, we will ignore it and
+ * tell the reporter to continue executing. The event then changes to the
+ * DBGFEVENTSTATE_RESTORABLE state.
+ *
+ * Currently, the event reporter has to figure out that it is a nested event and
+ * tell DBGF to restore DBGFEVENTSTATE_RESTORABLE events (and keep
+ * DBGFEVENTSTATE_IGNORE, should they happen out of order for some weird
+ * reason).
+ */
+ struct
+ {
+ /** The event details. */
+ DBGFEVENT Event;
+ /** The RIP at which this happened (for validating ignoring). */
+ uint64_t rip;
+ /** The event state. */
+ DBGFEVENTSTATE enmState;
+ /** Alignment padding. */
+ uint32_t u32Alignment;
+ } aEvents[3];
+} DBGFCPU;
+AssertCompileMemberAlignment(DBGFCPU, aEvents, 8);
+AssertCompileMemberSizeAlignment(DBGFCPU, aEvents[0], 8);
+/** Pointer to DBGFCPU data. */
+typedef DBGFCPU *PDBGFCPU;
+
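+/*
+ * Minimal sketch (not the actual DBGF code) of the DBGFCPU::aEvents state
+ * transitions described above, assuming pDbgfCpu points at a DBGFCPU with at
+ * least one event on the stack and uRipNow is the current guest RIP:
+ *
+ *      uint32_t const iTop = pDbgfCpu->cEvents - 1;
+ *
+ *      // Resuming after the debugger handled the current event: demote it so
+ *      // an immediate re-report of the same event gets ignored.
+ *      if (pDbgfCpu->aEvents[iTop].enmState == DBGFEVENTSTATE_CURRENT)
+ *          pDbgfCpu->aEvents[iTop].enmState = DBGFEVENTSTATE_IGNORE;
+ *
+ *      // The same event fires again at the same RIP: skip it once and leave
+ *      // it restorable in case another debug event fires before the
+ *      // instruction completes.
+ *      if (   pDbgfCpu->aEvents[iTop].enmState == DBGFEVENTSTATE_IGNORE
+ *          && pDbgfCpu->aEvents[iTop].rip == uRipNow)
+ *          pDbgfCpu->aEvents[iTop].enmState = DBGFEVENTSTATE_RESTORABLE;
+ */
+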
+struct DBGFOSEMTWRAPPER;
+
+/**
+ * DBGF data kept in the ring-0 GVM.
+ */
+typedef struct DBGFR0PERVM
+{
+ /** Pointer to the tracer instance if enabled. */
+ R0PTRTYPE(struct DBGFTRACERINSR0 *) pTracerR0;
+
+ /** @name Breakpoint handling related state, Ring-0 only part.
+ * @{ */
+ /** The breakpoint owner table memory object. */
+ RTR0MEMOBJ hMemObjBpOwners;
+ /** The breakpoint owner table mapping object. */
+ RTR0MEMOBJ hMapObjBpOwners;
+ /** Base pointer to the breakpoint owners table. */
+ R0PTRTYPE(PDBGFBPOWNERINTR0) paBpOwnersR0;
+
+ /** Global breakpoint table chunk array. */
+ DBGFBPCHUNKR0 aBpChunks[DBGF_BP_CHUNK_COUNT];
+ /** Breakpoint L2 lookup table chunk array. */
+ DBGFBPL2TBLCHUNKR0 aBpL2TblChunks[DBGF_BP_L2_TBL_CHUNK_COUNT];
+ /** The L1 lookup tables memory object. */
+ RTR0MEMOBJ hMemObjBpLocL1;
+ /** The L1 lookup tables mapping object. */
+ RTR0MEMOBJ hMapObjBpLocL1;
+ /** The I/O port breakpoint lookup tables memory object. */
+ RTR0MEMOBJ hMemObjBpLocPortIo;
+ /** The I/O port breakpoint lookup tables mapping object. */
+ RTR0MEMOBJ hMapObjBpLocPortIo;
+ /** Base pointer to the L1 locator table. */
+ R0PTRTYPE(volatile uint32_t *) paBpLocL1R0;
+ /** Base pointer to the I/O port breakpoint locator table. */
+ R0PTRTYPE(volatile uint32_t *) paBpLocPortIoR0;
+ /** Flag whether the breakpoint manager was initialized (on demand). */
+ bool fInit;
+ /** @} */
+} DBGFR0PERVM;
+
+/**
+ * The DBGF data kept in the UVM.
+ */
+typedef struct DBGFUSERPERVM
+{
+ /** The address space database lock. */
+ RTSEMRW hAsDbLock;
+ /** The address space handle database. (Protected by hAsDbLock.) */
+ R3PTRTYPE(AVLPVTREE) AsHandleTree;
+ /** The address space process id database. (Protected by hAsDbLock.) */
+ R3PTRTYPE(AVLU32TREE) AsPidTree;
+ /** The address space name database. (Protected by hAsDbLock.) */
+ R3PTRTYPE(RTSTRSPACE) AsNameSpace;
+ /** Special address space aliases. (Protected by hAsDbLock.) */
+ RTDBGAS volatile ahAsAliases[DBGF_AS_COUNT];
+ /** For lazily populating the aliased address spaces. */
+ bool volatile afAsAliasPopuplated[DBGF_AS_COUNT];
+ /** Alignment padding. */
+ bool afAlignment1[2];
+ /** Debug configuration. */
+ R3PTRTYPE(RTDBGCFG) hDbgCfg;
+
+ /** The register database lock. */
+ RTSEMRW hRegDbLock;
+ /** String space for looking up registers. (Protected by hRegDbLock.) */
+ R3PTRTYPE(RTSTRSPACE) RegSpace;
+ /** String space holding the register sets. (Protected by hRegDbLock.) */
+ R3PTRTYPE(RTSTRSPACE) RegSetSpace;
+ /** The number of registers (aliases, sub-fields and the special CPU
+ * register aliases (e.g. AH) are not counted). */
+ uint32_t cRegs;
+ /** For early initialization by . */
+ bool volatile fRegDbInitialized;
+ /** Alignment padding. */
+ bool afAlignment2[3];
+
+ /** Critical section protecting the Guest OS Digger data, the info handlers
+ * and the plugins. These share one critical section to give the best
+ * possible protection against plugin unload races. */
+ RTCRITSECTRW CritSect;
+ /** Head of the LIFO of loaded DBGF plugins. */
+ R3PTRTYPE(struct DBGFPLUGIN *) pPlugInHead;
+ /** The current Guest OS digger. */
+ R3PTRTYPE(PDBGFOS) pCurOS;
+ /** The head of the Guest OS digger instances. */
+ R3PTRTYPE(PDBGFOS) pOSHead;
+ /** List of registered info handlers. */
+ R3PTRTYPE(PDBGFINFO) pInfoFirst;
+
+ /** The configured tracer. */
+ PDBGFTRACERINSR3 pTracerR3;
+
+ /** @name VM -> Debugger event communication.
+ * @{ */
+ /** The event semaphore the debugger waits on for new events to arrive. */
+ RTSEMEVENT hEvtWait;
+ /** Multi event semaphore the vCPUs wait on in case the debug event ringbuffer is
+ * full and requires growing (done from the thread waiting for events). */
+ RTSEMEVENTMULTI hEvtRingBufFull;
+ /** Fast mutex protecting the event ring from concurrent write accesses by multiple vCPUs. */
+ RTSEMFASTMUTEX hMtxDbgEvtWr;
+ /** Ringbuffer of events, dynamically allocated based on the number of available vCPUs
+ * (+ some safety entries). */
+ PDBGFEVENT paDbgEvts;
+ /** Number of entries in the event ring buffer. */
+ uint32_t cDbgEvtMax;
+ /** Next free entry to write to (vCPU thread). */
+ volatile uint32_t idxDbgEvtWrite;
+ /** Next event entry to read from (debugger thread). */
+ volatile uint32_t idxDbgEvtRead;
+ /** @} */
+
+ /** @name Breakpoint handling related state.
+ * @{ */
+ /** Base pointer to the breakpoint owners table. */
+ R3PTRTYPE(PDBGFBPOWNERINT) paBpOwnersR3;
+ /** Pointer to the bitmap denoting occupied owner entries. */
+ R3PTRTYPE(volatile void *) pbmBpOwnersAllocR3;
+
+ /** Global breakpoint table chunk array. */
+ DBGFBPCHUNKR3 aBpChunks[DBGF_BP_CHUNK_COUNT];
+ /** Breakpoint L2 lookup table chunk array. */
+ DBGFBPL2TBLCHUNKR3 aBpL2TblChunks[DBGF_BP_L2_TBL_CHUNK_COUNT];
+ /** Base pointer to the L1 locator table. */
+ R3PTRTYPE(volatile uint32_t *) paBpLocL1R3;
+ /** Base pointer to the Port I/O breakpoint locator table. */
+ R3PTRTYPE(volatile uint32_t *) paBpLocPortIoR3;
+ /** Fast mutex protecting the L2 table from concurrent write accesses (EMTs
+ * can still do read accesses without holding it while traversing the trees). */
+ RTSEMFASTMUTEX hMtxBpL2Wr;
+ /** Number of armed port I/O breakpoints. */
+ volatile uint32_t cPortIoBps;
+ /** @} */
+
+ /** The type database lock. */
+ RTSEMRW hTypeDbLock;
+ /** String space for looking up types. (Protected by hTypeDbLock.) */
+ R3PTRTYPE(RTSTRSPACE) TypeSpace;
+ /** For early initialization by . */
+ bool volatile fTypeDbInitialized;
+ /** Alignment padding. */
+ bool afAlignment3[3];
+
+} DBGFUSERPERVM;
+typedef DBGFUSERPERVM *PDBGFUSERPERVM;
+typedef DBGFUSERPERVM const *PCDBGFUSERPERVM;
+
+/**
+ * The per-CPU DBGF data kept in the UVM.
+ */
+typedef struct DBGFUSERPERVMCPU
+{
+ /** The guest register set for this CPU. Can be NULL. */
+ R3PTRTYPE(struct DBGFREGSET *) pGuestRegSet;
+ /** The hypervisor register set for this CPU. Can be NULL. */
+ R3PTRTYPE(struct DBGFREGSET *) pHyperRegSet;
+
+ /** @name Debugger -> vCPU command communication.
+ * @{ */
+ /** Flag whether this vCPU is currently stopped waiting in the debugger. */
+ bool volatile fStopped;
+ /** The Command to the vCPU.
+ * Operated in an atomic fashion since the vCPU will poll on this.
+ * This means that the command data must be written before this member
+ * is set. The VMM will reset this member to the no-command state
+ * when it has processed it.
+ */
+ DBGFCMD volatile enmDbgfCmd;
+ /** The Command data.
+ * Not all commands take data. */
+ DBGFCMDDATA DbgfCmdData;
+ /** @} */
+
+} DBGFUSERPERVMCPU;
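+
+/*
+ * Minimal sketch (not the actual DBGF code) of the ordering described for
+ * DBGFUSERPERVMCPU::enmDbgfCmd above: the command data is written first, then
+ * the command member is set atomically so the polling vCPU sees both.
+ * pUVCpuDbgf, CmdData and enmCmd are hypothetical.
+ *
+ *      pUVCpuDbgf->DbgfCmdData = CmdData;      // payload first
+ *      ASMAtomicWriteU32((uint32_t volatile *)&pUVCpuDbgf->enmDbgfCmd, (uint32_t)enmCmd);
+ */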
+
+
+#ifdef IN_RING3
+int dbgfR3AsInit(PUVM pUVM);
+void dbgfR3AsTerm(PUVM pUVM);
+void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta);
+DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM);
+DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM);
+int dbgfR3InfoInit(PUVM pUVM);
+int dbgfR3InfoTerm(PUVM pUVM);
+int dbgfR3OSInit(PUVM pUVM);
+void dbgfR3OSTermPart1(PUVM pUVM);
+void dbgfR3OSTermPart2(PUVM pUVM);
+int dbgfR3OSStackUnwindAssist(PUVM pUVM, VMCPUID idCpu, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState,
+ PCCPUMCTX pInitialCtx, RTDBGAS hAs, uint64_t *puScratch);
+int dbgfR3RegInit(PUVM pUVM);
+void dbgfR3RegTerm(PUVM pUVM);
+int dbgfR3TraceInit(PVM pVM);
+void dbgfR3TraceRelocate(PVM pVM);
+void dbgfR3TraceTerm(PVM pVM);
+DECLHIDDEN(int) dbgfR3TypeInit(PUVM pUVM);
+DECLHIDDEN(void) dbgfR3TypeTerm(PUVM pUVM);
+int dbgfR3PlugInInit(PUVM pUVM);
+void dbgfR3PlugInTerm(PUVM pUVM);
+int dbgfR3BugCheckInit(PVM pVM);
+DECLHIDDEN(int) dbgfR3TracerInit(PVM pVM);
+DECLHIDDEN(void) dbgfR3TracerTerm(PVM pVM);
+
+/**
+ * DBGF disassembler state (substate of DISSTATE).
+ */
+typedef struct DBGFDISSTATE
+{
+ /** Pointer to the current instruction. */
+ PCDISOPCODE pCurInstr;
+ /** Size of the instruction in bytes. */
+ uint32_t cbInstr;
+ /** Parameters. */
+ DISOPPARAM Param1;
+ DISOPPARAM Param2;
+ DISOPPARAM Param3;
+ DISOPPARAM Param4;
+} DBGFDISSTATE;
+/** Pointer to a DBGF disassembler state. */
+typedef DBGFDISSTATE *PDBGFDISSTATE;
+
+DECLHIDDEN(int) dbgfR3DisasInstrStateEx(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddr, uint32_t fFlags,
+ char *pszOutput, uint32_t cbOutput, PDBGFDISSTATE pDisState);
+
+#endif /* IN_RING3 */
+
+#ifdef IN_RING0
+DECLHIDDEN(void) dbgfR0TracerDestroy(PGVM pGVM, PDBGFTRACERINSR0 pTracer);
+DECLHIDDEN(void) dbgfR0BpInit(PGVM pGVM);
+DECLHIDDEN(void) dbgfR0BpDestroy(PGVM pGVM);
+#endif /* IN_RING0 */
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_DBGFInternal_h */
diff --git a/src/VBox/VMM/include/EMHandleRCTmpl.h b/src/VBox/VMM/include/EMHandleRCTmpl.h
new file mode 100644
index 00000000..db123db3
--- /dev/null
+++ b/src/VBox/VMM/include/EMHandleRCTmpl.h
@@ -0,0 +1,279 @@
+/* $Id: EMHandleRCTmpl.h $ */
+/** @file
+ * EM - emR3[Raw|Hm|Nem]HandleRC template.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_EMHandleRCTmpl_h
+#define VMM_INCLUDED_SRC_include_EMHandleRCTmpl_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#if defined(EMHANDLERC_WITH_PATM) + defined(EMHANDLERC_WITH_HM) + defined(EMHANDLERC_WITH_NEM) != 1
+# error "Exactly one of these must be defined: EMHANDLERC_WITH_PATM, EMHANDLERC_WITH_HM, EMHANDLERC_WITH_NEM"
+#endif
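+
+/*
+ * Illustrative sketch (the actual including files are not shown here): a
+ * source file instantiates this template by defining exactly one of the mode
+ * macros before including the header, which then emits the matching handler.
+ *
+ *      #define EMHANDLERC_WITH_NEM
+ *      #include "EMHandleRCTmpl.h"     // provides emR3NemHandleRC()
+ */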
+
+
+/**
+ * Process a subset of the raw-mode, HM and NEM return codes.
+ *
+ * Since we have to share this with raw-mode single stepping, this inline
+ * function has been created to avoid code duplication.
+ *
+ * @returns VINF_SUCCESS if it's ok to continue raw mode.
+ * @returns VBox status code to return to the EM main loop.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc The return code.
+ */
+#if defined(EMHANDLERC_WITH_HM) || defined(DOXYGEN_RUNNING)
+int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
+#elif defined(EMHANDLERC_WITH_NEM)
+int emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
+#endif
+{
+ switch (rc)
+ {
+ /*
+ * Common & simple ones.
+ */
+ case VINF_SUCCESS:
+ break;
+ case VINF_EM_RESCHEDULE_RAW:
+ case VINF_EM_RESCHEDULE_HM:
+ case VINF_EM_RAW_INTERRUPT:
+ case VINF_EM_RAW_TO_R3:
+ case VINF_EM_RAW_TIMER_PENDING:
+ case VINF_EM_PENDING_REQUEST:
+ rc = VINF_SUCCESS;
+ break;
+
+#ifndef EMHANDLERC_WITH_NEM
+ /*
+ * Conflict or out of page tables.
+ *
+ * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
+ * do here is to execute the pending forced actions.
+ */
+ case VINF_PGM_SYNC_CR3:
+ AssertMsg(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
+ ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
+ rc = VINF_SUCCESS;
+ break;
+
+ /*
+ * PGM pool flush pending (guest SMP only).
+ */
+ /** @todo jumping back and forth between ring 0 and 3 can burn a lot of cycles
+ * if the EMT thread that's supposed to handle the flush is currently not active
+ * (e.g. waiting to be scheduled) -> fix this properly!
+ *
+ * bird: Since the clearing is global and done via a rendezvous any CPU can do
+ * it. They would have to choose who to call VMMR3EmtRendezvous and send
+ * the rest to VMMR3EmtRendezvousFF ... Hmm ... that's not going to work
+ * all that well since the latter will race the setup done by the
+ * first. Guess that means we need some new magic in that area for
+ * handling this case. :/
+ */
+ case VINF_PGM_POOL_FLUSH_PENDING:
+ rc = VINF_SUCCESS;
+ break;
+#endif /* !EMHANDLERC_WITH_NEM */
+
+ /*
+ * I/O Port access - emulate the instruction.
+ */
+ case VINF_IOM_R3_IOPORT_READ:
+ case VINF_IOM_R3_IOPORT_WRITE:
+ case VINF_EM_RESUME_R3_HISTORY_EXEC: /* Resume EMHistoryExec after VMCPU_FF_IOM. */
+ rc = emR3ExecuteIOInstruction(pVM, pVCpu);
+ break;
+
+ /*
+ * Execute pending I/O Port access.
+ */
+ case VINF_EM_PENDING_R3_IOPORT_WRITE:
+ rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortWrite(pVM, pVCpu));
+ break;
+ case VINF_EM_PENDING_R3_IOPORT_READ:
+ rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortRead(pVM, pVCpu));
+ break;
+
+ /*
+ * Memory mapped I/O access - emulate the instruction.
+ */
+ case VINF_IOM_R3_MMIO_READ:
+ case VINF_IOM_R3_MMIO_WRITE:
+ case VINF_IOM_R3_MMIO_READ_WRITE:
+ rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
+ break;
+
+ /*
+ * Machine specific register access - emulate the instruction.
+ */
+ case VINF_CPUM_R3_MSR_READ:
+ case VINF_CPUM_R3_MSR_WRITE:
+ rc = emR3ExecuteInstruction(pVM, pVCpu, "MSR");
+ break;
+
+ /*
+ * GIM hypercall.
+ */
+ case VINF_GIM_R3_HYPERCALL:
+ rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
+ break;
+
+#ifdef EMHANDLERC_WITH_HM
+ case VINF_EM_HM_PATCH_TPR_INSTR:
+ rc = HMR3PatchTprInstr(pVM, pVCpu);
+ break;
+#endif
+
+ case VINF_EM_RAW_GUEST_TRAP:
+ case VINF_EM_RAW_EMULATE_INSTR:
+ AssertMsg(!TRPMHasTrap(pVCpu), ("trap=%#x\n", TRPMGetTrapNo(pVCpu))); /* We're directly executing instructions below without respecting any pending traps! */
+ rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");
+ break;
+
+ case VINF_EM_RAW_INJECT_TRPM_EVENT:
+ CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+ rc = VBOXSTRICTRC_VAL(IEMInjectTrpmEvent(pVCpu));
+ /* The following condition should be removed when IEM_IMPLEMENTS_TASKSWITCH becomes true. */
+ if (rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
+ rc = emR3ExecuteInstruction(pVM, pVCpu, "EVENT: ");
+ break;
+
+ case VINF_EM_EMULATE_SPLIT_LOCK:
+ rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
+ break;
+
+
+ /*
+ * Up a level.
+ */
+ case VINF_EM_TERMINATE:
+ case VINF_EM_OFF:
+ case VINF_EM_RESET:
+ case VINF_EM_SUSPEND:
+ case VINF_EM_HALT:
+ case VINF_EM_RESUME:
+ case VINF_EM_NO_MEMORY:
+ case VINF_EM_RESCHEDULE:
+ case VINF_EM_RESCHEDULE_REM:
+ case VINF_EM_WAIT_SIPI:
+ break;
+
+ /*
+ * Up a level and invoke the debugger.
+ */
+ case VINF_EM_DBG_STEPPED:
+ case VINF_EM_DBG_BREAKPOINT:
+ case VINF_EM_DBG_STEP:
+ case VINF_EM_DBG_HYPER_BREAKPOINT:
+ case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_DBG_HYPER_ASSERTION:
+ case VINF_EM_DBG_STOP:
+ case VINF_EM_DBG_EVENT:
+ break;
+
+ /*
+ * Up a level, dump and debug.
+ */
+ case VERR_TRPM_DONT_PANIC:
+ case VERR_TRPM_PANIC:
+ case VERR_VMM_RING0_ASSERTION:
+ case VINF_EM_TRIPLE_FAULT:
+ case VERR_VMM_HYPER_CR3_MISMATCH:
+ case VERR_VMM_RING3_CALL_DISABLED:
+ case VERR_IEM_INSTR_NOT_IMPLEMENTED:
+ case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
+ case VERR_EM_GUEST_CPU_HANG:
+ break;
+
+#ifdef EMHANDLERC_WITH_HM
+ /*
+ * Up a level, after HM has done some release logging.
+ */
+ case VERR_VMX_INVALID_VMCS_FIELD:
+ case VERR_VMX_INVALID_VMCS_PTR:
+ case VERR_VMX_INVALID_VMXON_PTR:
+ case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
+ case VERR_VMX_UNEXPECTED_EXCEPTION:
+ case VERR_VMX_UNEXPECTED_EXIT:
+ case VERR_VMX_INVALID_GUEST_STATE:
+ case VERR_VMX_UNABLE_TO_START_VM:
+ case VERR_SVM_UNKNOWN_EXIT:
+ case VERR_SVM_UNEXPECTED_EXIT:
+ case VERR_SVM_UNEXPECTED_PATCH_TYPE:
+ case VERR_SVM_UNEXPECTED_XCPT_EXIT:
+ HMR3CheckError(pVM, rc);
+ break;
+
+ /* Up a level; fatal */
+ case VERR_VMX_IN_VMX_ROOT_MODE:
+ case VERR_SVM_IN_USE:
+ case VERR_SVM_UNABLE_TO_START_VM:
+ break;
+#endif
+
+#ifdef EMHANDLERC_WITH_NEM
+ /* Fatal stuff, up a level. */
+ case VERR_NEM_IPE_0:
+ case VERR_NEM_IPE_1:
+ case VERR_NEM_IPE_2:
+ case VERR_NEM_IPE_3:
+ case VERR_NEM_IPE_4:
+ case VERR_NEM_IPE_5:
+ case VERR_NEM_IPE_6:
+ case VERR_NEM_IPE_7:
+ case VERR_NEM_IPE_8:
+ case VERR_NEM_IPE_9:
+ break;
+#endif
+
+ /*
+ * These two should be handled via the force flag already, but just in
+ * case they end up here deal with it.
+ */
+ case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
+ case VINF_IOM_R3_MMIO_COMMIT_WRITE:
+ AssertFailed();
+ rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
+ break;
+
+ /*
+ * Anything which is not known to us means an internal error
+ * and the termination of the VM!
+ */
+ default:
+ AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
+ break;
+ }
+ return rc;
+}
+
+#endif /* !VMM_INCLUDED_SRC_include_EMHandleRCTmpl_h */
+
diff --git a/src/VBox/VMM/include/EMInternal.h b/src/VBox/VMM/include/EMInternal.h
new file mode 100644
index 00000000..ff3bafc6
--- /dev/null
+++ b/src/VBox/VMM/include/EMInternal.h
@@ -0,0 +1,339 @@
+/* $Id: EMInternal.h $ */
+/** @file
+ * EM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_EMInternal_h
+#define VMM_INCLUDED_SRC_include_EMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/dis.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <iprt/avl.h>
+#include <iprt/setjmp-without-sigmask.h>
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_em_int Internal
+ * @ingroup grp_em
+ * @internal
+ * @{
+ */
+
+/** The saved state version. */
+#define EM_SAVED_STATE_VERSION 5
+#define EM_SAVED_STATE_VERSION_PRE_IEM 4
+#define EM_SAVED_STATE_VERSION_PRE_MWAIT 3
+#define EM_SAVED_STATE_VERSION_PRE_SMP 2
+
+
+/** @name MWait state flags.
+ * @{
+ */
+/** MWait activated. */
+#define EMMWAIT_FLAG_ACTIVE RT_BIT(0)
+/** MWait will continue when an interrupt is pending even when IF=0. */
+#define EMMWAIT_FLAG_BREAKIRQIF0 RT_BIT(1)
+/** Monitor instruction was executed previously. */
+#define EMMWAIT_FLAG_MONITOR_ACTIVE RT_BIT(2)
+/** @} */
+
+/** EM time slice in ms; used for capping execution time. */
+#define EM_TIME_SLICE 100
+
+/**
+ * Cli node structure
+ */
+typedef struct CLISTAT
+{
+ /** The key is the cli address. */
+ AVLGCPTRNODECORE Core;
+#if HC_ARCH_BITS == 32 && !defined(RT_OS_WINDOWS)
+ /** Padding. */
+ uint32_t u32Padding;
+#endif
+ /** Occurrences. */
+ STAMCOUNTER Counter;
+} CLISTAT, *PCLISTAT;
+#ifdef IN_RING3
+AssertCompileMemberAlignment(CLISTAT, Counter, 8);
+#endif
+
+
+/**
+ * Exit history entry.
+ *
+ * @remarks We could perhaps trim this down a little bit by assuming uFlatPC
+ * only needs 48 bits (currently true but will change) and stuffing
+ * the flags+type into the 16 bits thus made available. The
+ * timestamp could likewise be shortened to accommodate the index, or
+ * we might skip the index entirely. However, since we will have to
+ * deal with 56-bit wide PC addresses before long, there's no point.
+ *
+ * On the upside, there are unused bits in both uFlagsAndType and the
+ * idxSlot fields if needed for anything.
+ */
+typedef struct EMEXITENTRY
+{
+ /** The flat PC (CS:EIP/RIP) address of the exit.
+ * UINT64_MAX if not available. */
+ uint64_t uFlatPC;
+ /** The flags and type, see EMEXIT_MAKE_FLAGS_AND_TYPE. */
+ uint32_t uFlagsAndType;
+ /** The index into the exit slot hash table.
+ * UINT32_MAX if too many collisions and not entered into it. */
+ uint32_t idxSlot;
+ /** The TSC timestamp of the exit.
+ * This is 0 if not timestamped. */
+ uint64_t uTimestamp;
+} EMEXITENTRY;
+/** Pointer to an exit history entry. */
+typedef EMEXITENTRY *PEMEXITENTRY;
+/** Pointer to a const exit history entry. */
+typedef EMEXITENTRY const *PCEMEXITENTRY;
+
+
+/**
+ * EM VM Instance data.
+ */
+typedef struct EM
+{
+ /** Whether IEM executes everything. */
+ bool fIemExecutesAll;
+ /** Whether a triple fault triggers a guru. */
+ bool fGuruOnTripleFault;
+ /** Alignment padding. */
+ bool afPadding[2];
+
+ /** Id of the VCPU that last executed code in the recompiler. */
+ VMCPUID idLastRemCpu;
+} EM;
+/** Pointer to EM VM instance data. */
+typedef EM *PEM;
+
+
+/**
+ * EM VMCPU Instance data.
+ */
+typedef struct EMCPU
+{
+ /** Execution Manager State. */
+ EMSTATE volatile enmState;
+
+ /** The state prior to the suspending of the VM. */
+ EMSTATE enmPrevState;
+
+ /** Set if the hypercall instructions VMMCALL (AMD) & VMCALL (Intel) are enabled.
+ * GIM sets this and the execution manager queries it. Not saved, as GIM
+ * takes care of that bit too. */
+ bool fHypercallEnabled;
+
+ /** Explicit padding. */
+ uint8_t abPadding0[3];
+
+ /** The number of instructions we've executed in IEM since switching to the
+ * EMSTATE_IEM_THEN_REM state. */
+ uint32_t cIemThenRemInstructions;
+
+ /** Start of the current time slice in ms. */
+ uint64_t u64TimeSliceStart;
+ /** Start of the current time slice in thread execution time (ms). */
+ uint64_t u64TimeSliceStartExec;
+ /** Current time slice value. */
+ uint64_t u64TimeSliceExec;
+
+ /** Pending ring-3 I/O port access (VINF_EM_PENDING_R3_IOPORT_READ / VINF_EM_PENDING_R3_IOPORT_WRITE). */
+ struct
+ {
+ RTIOPORT uPort; /**< The I/O port number.*/
+ uint8_t cbValue; /**< The value size in bytes. Zero when not pending. */
+ uint8_t cbInstr; /**< The instruction length. */
+ uint32_t uValue; /**< The value to write. */
+ } PendingIoPortAccess;
+
+ /** MWait halt state. */
+ struct
+ {
+ uint32_t fWait; /**< Type of mwait; see EMMWAIT_FLAG_*. */
+ uint32_t u32Padding;
+ RTGCPTR uMWaitRAX; /**< MWAIT hints. */
+ RTGCPTR uMWaitRCX; /**< MWAIT extensions. */
+ RTGCPTR uMonitorRAX; /**< Monitored address. */
+ RTGCPTR uMonitorRCX; /**< Monitor extension. */
+ RTGCPTR uMonitorRDX; /**< Monitor hint. */
+ } MWait;
+
+#if 0
+ /** Make sure the jmp_buf is at a 32-byte boundary. */
+ uint64_t au64Padding1[4];
+#endif
+ union
+ {
+ /** Padding used in the other rings.
+ * This must be larger than jmp_buf on any supported platform. */
+ char achPaddingFatalLongJump[256];
+#ifdef IN_RING3
+ /** Long jump buffer for fatal VM errors.
+ * It will jump to before the outer EM loop is entered. */
+ jmp_buf FatalLongJump;
+#endif
+ } u;
+
+ /** For saving stack space, the disassembler state is allocated here instead of
+ * on the stack. */
+ DISCPUSTATE DisState;
+
+ /** @name Execution profiling.
+ * @{ */
+ STAMPROFILE StatForcedActions;
+ STAMPROFILE StatHalted;
+ STAMPROFILEADV StatCapped;
+ STAMPROFILEADV StatHMEntry;
+ STAMPROFILE StatHMExec;
+ STAMPROFILE StatIEMEmu;
+ STAMPROFILE StatIEMThenREM;
+ STAMPROFILEADV StatNEMEntry;
+ STAMPROFILE StatNEMExec;
+ STAMPROFILE StatREMEmu;
+ STAMPROFILE StatREMExec;
+ STAMPROFILE StatREMSync;
+ STAMPROFILEADV StatREMTotal;
+ STAMPROFILE StatRAWExec;
+ STAMPROFILEADV StatRAWEntry;
+ STAMPROFILEADV StatRAWTail;
+ STAMPROFILEADV StatRAWTotal;
+ STAMPROFILEADV StatTotal;
+ /** @} */
+
+ /** R3: Profiling of emR3RawExecuteIOInstruction. */
+ STAMPROFILE StatIOEmu;
+ STAMCOUNTER StatIoRestarted;
+ STAMCOUNTER StatIoIem;
+ /** R3: Profiling of emR3RawPrivileged. */
+ STAMPROFILE StatPrivEmu;
+ /** R3: Number of times emR3HmExecute is called. */
+ STAMCOUNTER StatHMExecuteCalled;
+ /** R3: Number of times emR3NEMExecute is called. */
+ STAMCOUNTER StatNEMExecuteCalled;
+
+ /** Align the next member on a 32-byte boundary. */
+ uint64_t au64Padding2[1+2];
+
+ /** Exit history table (6KB). */
+ EMEXITENTRY aExitHistory[256];
+ /** Where to store the next exit history entry.
+ * Since aExitHistory is 256 items long, we'll just increment this and
+ * mask it when using it. That helps readers detect whether we've
+ * wrapped around or not. */
+ uint64_t iNextExit;
+
+ /** Index into aExitRecords set by EMHistoryExec when returning to ring-3.
+ * This is UINT16_MAX if not armed. */
+ uint16_t volatile idxContinueExitRec;
+ /** Whether exit optimizations are enabled or not (in general). */
+ bool fExitOptimizationEnabled : 1;
+ /** Whether exit optimizations are enabled for ring-0 (in general). */
+ bool fExitOptimizationEnabledR0 : 1;
+ /** Whether exit optimizations are enabled for ring-0 when preemption is disabled. */
+ bool fExitOptimizationEnabledR0PreemptDisabled : 1;
+ /** Explicit padding. */
+ bool fPadding2;
+ /** Max number of instructions to execute. */
+ uint16_t cHistoryExecMaxInstructions;
+ /** Min number of instructions to execute while probing. */
+ uint16_t cHistoryProbeMinInstructions;
+ /** Max number of instructions to execute without an exit before giving up probe. */
+ uint16_t cHistoryProbeMaxInstructionsWithoutExit;
+ uint16_t uPadding3;
+ /** Number of exit records in use. */
+ uint32_t cExitRecordUsed;
+ /** Profiling the EMHistoryExec when executing (not probing). */
+ STAMPROFILE StatHistoryExec;
+ /** Number of saved exits. */
+ STAMCOUNTER StatHistoryExecSavedExits;
+ /** Number of instructions executed by EMHistoryExec. */
+ STAMCOUNTER StatHistoryExecInstructions;
+ uint64_t uPadding4;
+ /** Number of instructions executed by EMHistoryExec when probing. */
+ STAMCOUNTER StatHistoryProbeInstructions;
+ /** Number of times probing resulted in EMEXITACTION_NORMAL_PROBED. */
+ STAMCOUNTER StatHistoryProbedNormal;
+ /** Number of times probing resulted in EMEXITACTION_EXEC_WITH_MAX. */
+ STAMCOUNTER StatHistoryProbedExecWithMax;
+ /** Number of times probing resulted in ring-3 continuation. */
+ STAMCOUNTER StatHistoryProbedToRing3;
+ /** Profiling the EMHistoryExec when probing.*/
+ STAMPROFILE StatHistoryProbe;
+ /** Hit statistics for each lookup step. */
+ STAMCOUNTER aStatHistoryRecHits[16];
+ /** Type change statistics for each lookup step. */
+ STAMCOUNTER aStatHistoryRecTypeChanged[16];
+ /** Replacement statistics for each lookup step. */
+ STAMCOUNTER aStatHistoryRecReplaced[16];
+ /** New record statistics for each lookup step. */
+ STAMCOUNTER aStatHistoryRecNew[16];
+
+ /** Exit records (32KB). (Aligned on a 32-byte boundary.) */
+ EMEXITREC aExitRecords[1024];
+} EMCPU;
+/** Pointer to EM VMCPU instance data. */
+typedef EMCPU *PEMCPU;
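+
+/*
+ * Illustrative sketch (not the actual EM code): appending to the 256-entry
+ * exit history ring by masking EMCPU::iNextExit as described above.  pEmCpu,
+ * uFlatPc and uFlagsAndType are hypothetical.
+ *
+ *      PEMEXITENTRY pEntry = &pEmCpu->aExitHistory[pEmCpu->iNextExit & 0xff];
+ *      pEntry->uFlatPC       = uFlatPc;
+ *      pEntry->uFlagsAndType = uFlagsAndType;
+ *      pEntry->idxSlot       = UINT32_MAX;
+ *      pEntry->uTimestamp    = ASMReadTSC();
+ *      pEmCpu->iNextExit++;
+ */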
+
+/** @} */
+
+int emR3InitDbg(PVM pVM);
+
+int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+VBOXSTRICTRC emR3NemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+
+EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu);
+int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
+VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
+
+int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
+int emR3RawStep(PVM pVM, PVMCPU pVCpu);
+
+VBOXSTRICTRC emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags);
+
+int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations);
+
+bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu);
+
+VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu);
+VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu);
+VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu);
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_EMInternal_h */
+
diff --git a/src/VBox/VMM/include/GCMInternal.h b/src/VBox/VMM/include/GCMInternal.h
new file mode 100644
index 00000000..b1e6886d
--- /dev/null
+++ b/src/VBox/VMM/include/GCMInternal.h
@@ -0,0 +1,66 @@
+/** @file
+ * GCM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2022-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_GCMInternal_h
+#define VMM_INCLUDED_SRC_include_GCMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/gcm.h>
+#include <VBox/vmm/pgm.h>
+
+RT_C_DECLS_BEGIN
+
+/** @defgroup grp_gcm_int Internal
+ * @ingroup grp_gcm
+ * @internal
+ * @{
+ */
+
+/** The saved state version. */
+#define GCM_SAVED_STATE_VERSION 1
+
+/**
+ * GCM VM Instance data.
+ */
+typedef struct GCM
+{
+ /** The set of fixers that are active for this VM. */
+ int32_t enmFixerIds;
+ /** The interface implementation version. */
+ uint32_t u32Version;
+
+} GCM;
+/** Pointer to GCM VM instance data. */
+typedef GCM *PGCM;
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_GCMInternal_h */
+
diff --git a/src/VBox/VMM/include/GIMHvInternal.h b/src/VBox/VMM/include/GIMHvInternal.h
new file mode 100644
index 00000000..960dc36c
--- /dev/null
+++ b/src/VBox/VMM/include/GIMHvInternal.h
@@ -0,0 +1,1380 @@
+/* $Id: GIMHvInternal.h $ */
+/** @file
+ * GIM - Hyper-V, Internal header file.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_GIMHvInternal_h
+#define VMM_INCLUDED_SRC_include_GIMHvInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpum.h>
+
+#include <iprt/net.h>
+
+/** @name Hyper-V base feature identification.
+ * Features based on current partition privileges (per-VM).
+ * @{
+ */
+/** Virtual processor runtime MSR available. */
+#define GIM_HV_BASE_FEAT_VP_RUNTIME_MSR RT_BIT(0)
+/** Partition reference counter MSR available. */
+#define GIM_HV_BASE_FEAT_PART_TIME_REF_COUNT_MSR RT_BIT(1)
+/** Basic Synthetic Interrupt Controller MSRs available. */
+#define GIM_HV_BASE_FEAT_BASIC_SYNIC_MSRS RT_BIT(2)
+/** Synthetic Timer MSRs available. */
+#define GIM_HV_BASE_FEAT_STIMER_MSRS RT_BIT(3)
+/** APIC access MSRs (EOI, ICR, TPR) available. */
+#define GIM_HV_BASE_FEAT_APIC_ACCESS_MSRS RT_BIT(4)
+/** Hypercall MSRs available. */
+#define GIM_HV_BASE_FEAT_HYPERCALL_MSRS RT_BIT(5)
+/** Access to VCPU index MSR available. */
+#define GIM_HV_BASE_FEAT_VP_ID_MSR RT_BIT(6)
+/** Virtual system reset MSR available. */
+#define GIM_HV_BASE_FEAT_VIRT_SYS_RESET_MSR RT_BIT(7)
+/** Statistic pages MSRs available. */
+#define GIM_HV_BASE_FEAT_STAT_PAGES_MSR RT_BIT(8)
+/** Partition reference TSC MSR available. */
+#define GIM_HV_BASE_FEAT_PART_REF_TSC_MSR RT_BIT(9)
+/** Virtual guest idle state MSR available. */
+#define GIM_HV_BASE_FEAT_GUEST_IDLE_STATE_MSR RT_BIT(10)
+/** Timer frequency MSRs (TSC and APIC) available. */
+#define GIM_HV_BASE_FEAT_TIMER_FREQ_MSRS RT_BIT(11)
+/** Debug MSRs available. */
+#define GIM_HV_BASE_FEAT_DEBUG_MSRS RT_BIT(12)
+/** @} */
+
+/** @name Hyper-V partition-creation feature identification.
+ * Indicates flags specified during partition creation.
+ * @{
+ */
+/** Create partitions. */
+#define GIM_HV_PART_FLAGS_CREATE_PART RT_BIT(0)
+/** Access partition Id. */
+#define GIM_HV_PART_FLAGS_ACCESS_PART_ID RT_BIT(1)
+/** Access memory pool. */
+#define GIM_HV_PART_FLAGS_ACCESS_MEMORY_POOL RT_BIT(2)
+/** Adjust message buffers. */
+#define GIM_HV_PART_FLAGS_ADJUST_MSG_BUFFERS RT_BIT(3)
+/** Post messages. */
+#define GIM_HV_PART_FLAGS_POST_MSGS RT_BIT(4)
+/** Signal events. */
+#define GIM_HV_PART_FLAGS_SIGNAL_EVENTS RT_BIT(5)
+/** Create port. */
+#define GIM_HV_PART_FLAGS_CREATE_PORT RT_BIT(6)
+/** Connect port. */
+#define GIM_HV_PART_FLAGS_CONNECT_PORT RT_BIT(7)
+/** Access statistics. */
+#define GIM_HV_PART_FLAGS_ACCESS_STATS RT_BIT(8)
+/** Debugging.*/
+#define GIM_HV_PART_FLAGS_DEBUGGING RT_BIT(11)
+/** CPU management. */
+#define GIM_HV_PART_FLAGS_CPU_MGMT RT_BIT(12)
+/** CPU profiler. */
+#define GIM_HV_PART_FLAGS_CPU_PROFILER RT_BIT(13)
+/** Enable expanded stack walking. */
+#define GIM_HV_PART_FLAGS_EXPANDED_STACK_WALK RT_BIT(14)
+/** Access VSM. */
+#define GIM_HV_PART_FLAGS_ACCESS_VSM RT_BIT(16)
+/** Access VP registers. */
+#define GIM_HV_PART_FLAGS_ACCESS_VP_REGS RT_BIT(17)
+/** Enable extended hypercalls. */
+#define GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS RT_BIT(20)
+/** Start virtual processor. */
+#define GIM_HV_PART_FLAGS_START_VP RT_BIT(21)
+/** @} */
+
+/** @name Hyper-V power management feature identification.
+ * @{
+ */
+/** Maximum CPU power state C0. */
+#define GIM_HV_PM_MAX_CPU_POWER_STATE_C0 RT_BIT(0)
+/** Maximum CPU power state C1. */
+#define GIM_HV_PM_MAX_CPU_POWER_STATE_C1 RT_BIT(1)
+/** Maximum CPU power state C2. */
+#define GIM_HV_PM_MAX_CPU_POWER_STATE_C2 RT_BIT(2)
+/** Maximum CPU power state C3. */
+#define GIM_HV_PM_MAX_CPU_POWER_STATE_C3 RT_BIT(3)
+/** HPET is required to enter C3 power state. */
+#define GIM_HV_PM_HPET_REQD_FOR_C3 RT_BIT(4)
+/** @} */
+
+/** @name Hyper-V miscellaneous feature identification.
+ * Miscellaneous features available for the current partition.
+ * @{
+ */
+/** MWAIT instruction available. */
+#define GIM_HV_MISC_FEAT_MWAIT RT_BIT(0)
+/** Guest debugging support available. */
+#define GIM_HV_MISC_FEAT_GUEST_DEBUGGING RT_BIT(1)
+/** Performance monitor support is available. */
+#define GIM_HV_MISC_FEAT_PERF_MON RT_BIT(2)
+/** Support for physical CPU dynamic partitioning events. */
+#define GIM_HV_MISC_FEAT_PCPU_DYN_PART_EVENT RT_BIT(3)
+/** Support for passing hypercall input parameter block via XMM registers. */
+#define GIM_HV_MISC_FEAT_XMM_HYPERCALL_INPUT RT_BIT(4)
+/** Support for virtual guest idle state. */
+#define GIM_HV_MISC_FEAT_GUEST_IDLE_STATE RT_BIT(5)
+/** Support for hypervisor sleep state. */
+#define GIM_HV_MISC_FEAT_HYPERVISOR_SLEEP_STATE RT_BIT(6)
+/** Support for querying NUMA distances. */
+#define GIM_HV_MISC_FEAT_QUERY_NUMA_DISTANCE RT_BIT(7)
+/** Support for determining timer frequencies. */
+#define GIM_HV_MISC_FEAT_TIMER_FREQ RT_BIT(8)
+/** Support for injecting synthetic machine checks. */
+#define GIM_HV_MISC_FEAT_INJECT_SYNMC_XCPT RT_BIT(9)
+/** Support for guest crash MSRs. */
+#define GIM_HV_MISC_FEAT_GUEST_CRASH_MSRS RT_BIT(10)
+/** Support for debug MSRs. */
+#define GIM_HV_MISC_FEAT_DEBUG_MSRS RT_BIT(11)
+/** Npiep1 Available */ /** @todo What the heck is this? */
+#define GIM_HV_MISC_FEAT_NPIEP1 RT_BIT(12)
+/** Disable hypervisor available. */
+#define GIM_HV_MISC_FEAT_DISABLE_HYPERVISOR RT_BIT(13)
+/** Extended GVA ranges for FlushVirtualAddressList available. */
+#define GIM_HV_MISC_FEAT_EXT_GVA_RANGE_FOR_FLUSH_VA_LIST RT_BIT(14)
+/** Support for returning hypercall output via XMM registers. */
+#define GIM_HV_MISC_FEAT_HYPERCALL_OUTPUT_XMM RT_BIT(15)
+/** Synthetic interrupt source polling mode available. */
+#define GIM_HV_MISC_FEAT_SINT_POLLING_MODE RT_BIT(17)
+/** Hypercall MSR lock available. */
+#define GIM_HV_MISC_FEAT_HYPERCALL_MSR_LOCK RT_BIT(18)
+/** Use direct synthetic MSRs. */
+#define GIM_HV_MISC_FEAT_USE_DIRECT_SYNTH_MSRS RT_BIT(19)
+/** @} */
+
+/** @name Hyper-V implementation recommendations.
+ * Recommendations from the hypervisor for the guest for optimal performance.
+ * @{
+ */
+/** Use hypercall for address space switches rather than MOV CR3. */
+#define GIM_HV_HINT_HYPERCALL_FOR_PROCESS_SWITCH RT_BIT(0)
+/** Use hypercall for local TLB flushes rather than INVLPG/MOV CR3. */
+#define GIM_HV_HINT_HYPERCALL_FOR_TLB_FLUSH RT_BIT(1)
+/** Use hypercall for inter-CPU TLB flushes rather than IPIs. */
+#define GIM_HV_HINT_HYPERCALL_FOR_TLB_SHOOTDOWN RT_BIT(2)
+/** Use MSRs for APIC access (EOI, ICR, TPR) rather than MMIO. */
+#define GIM_HV_HINT_MSR_FOR_APIC_ACCESS RT_BIT(3)
+/** Use hypervisor provided MSR for a system reset. */
+#define GIM_HV_HINT_MSR_FOR_SYS_RESET RT_BIT(4)
+/** Relax timer-related checks (watchdogs/deadman timeouts) that rely on
+ * timely delivery of external interrupts. */
+#define GIM_HV_HINT_RELAX_TIME_CHECKS RT_BIT(5)
+/** Recommend using DMA remapping. */
+#define GIM_HV_HINT_DMA_REMAPPING RT_BIT(6)
+/** Recommend using interrupt remapping. */
+#define GIM_HV_HINT_INTERRUPT_REMAPPING RT_BIT(7)
+/** Recommend using X2APIC MSRs rather than MMIO. */
+#define GIM_HV_HINT_X2APIC_MSRS RT_BIT(8)
+/** Recommend deprecating Auto EOI (end of interrupt). */
+#define GIM_HV_HINT_DEPRECATE_AUTO_EOI RT_BIT(9)
+/** Recommend using SyntheticClusterIpi hypercall. */
+#define GIM_HV_HINT_SYNTH_CLUSTER_IPI_HYPERCALL RT_BIT(10)
+/** Recommend using newer ExProcessMasks interface. */
+#define GIM_HV_HINT_EX_PROC_MASKS_INTERFACE RT_BIT(11)
+/** Indicate that Hyper-V is nested within a Hyper-V partition. */
+#define GIM_HV_HINT_NESTED_HYPERV RT_BIT(12)
+/** Recommend using INT for MBEC system calls. */
+#define GIM_HV_HINT_INT_FOR_MBEC_SYSCALLS RT_BIT(13)
+/** Recommend using the enlightened VMCS interface and nested enlightenments. */
+#define GIM_HV_HINT_NESTED_ENLIGHTENED_VMCS_INTERFACE RT_BIT(14)
+/** @} */
+
+
+/** @name Hyper-V implementation hardware features.
+ * Which hardware features are in use by the hypervisor.
+ * @{
+ */
+/** APIC overlay is used. */
+#define GIM_HV_HOST_FEAT_AVIC RT_BIT(0)
+/** MSR bitmaps are used. */
+#define GIM_HV_HOST_FEAT_MSR_BITMAP RT_BIT(1)
+/** Architectural performance counter supported. */
+#define GIM_HV_HOST_FEAT_PERF_COUNTER RT_BIT(2)
+/** Nested paging is used. */
+#define GIM_HV_HOST_FEAT_NESTED_PAGING RT_BIT(3)
+/** DMA remapping is used. */
+#define GIM_HV_HOST_FEAT_DMA_REMAPPING RT_BIT(4)
+/** Interrupt remapping is used. */
+#define GIM_HV_HOST_FEAT_INTERRUPT_REMAPPING RT_BIT(5)
+/** Memory patrol scrubber is present. */
+#define GIM_HV_HOST_FEAT_MEM_PATROL_SCRUBBER RT_BIT(6)
+/** DMA protection is in use. */
+#define GIM_HV_HOST_FEAT_DMA_PROT_IN_USE RT_BIT(7)
+/** HPET is requested. */
+#define GIM_HV_HOST_FEAT_HPET_REQUESTED RT_BIT(8)
+/** Synthetic timers are volatile. */
+#define GIM_HV_HOST_FEAT_STIMER_VOLATILE RT_BIT(9)
+/** @} */
+
+
+/** @name Hyper-V MSRs.
+ * @{
+ */
+/** Start of range 0. */
+#define MSR_GIM_HV_RANGE0_FIRST UINT32_C(0x40000000)
+/** Guest OS identification (R/W) */
+#define MSR_GIM_HV_GUEST_OS_ID UINT32_C(0x40000000)
+/** Enable hypercall interface (R/W) */
+#define MSR_GIM_HV_HYPERCALL UINT32_C(0x40000001)
+/** Virtual processor's (VCPU) index (R) */
+#define MSR_GIM_HV_VP_INDEX UINT32_C(0x40000002)
+/** Reset operation (R/W) */
+#define MSR_GIM_HV_RESET UINT32_C(0x40000003)
+/** End of range 0. */
+#define MSR_GIM_HV_RANGE0_LAST MSR_GIM_HV_RESET
+
+/** Start of range 1. */
+#define MSR_GIM_HV_RANGE1_FIRST UINT32_C(0x40000010)
+/** Virtual processor's (VCPU) runtime (R) */
+#define MSR_GIM_HV_VP_RUNTIME UINT32_C(0x40000010)
+/** End of range 1. */
+#define MSR_GIM_HV_RANGE1_LAST MSR_GIM_HV_VP_RUNTIME
+
+/** Start of range 2. */
+#define MSR_GIM_HV_RANGE2_FIRST UINT32_C(0x40000020)
+/** Per-VM reference counter (R) */
+#define MSR_GIM_HV_TIME_REF_COUNT UINT32_C(0x40000020)
+/** Per-VM TSC page (R/W) */
+#define MSR_GIM_HV_REF_TSC UINT32_C(0x40000021)
+/** Frequency of TSC in Hz as reported by the hypervisor (R) */
+#define MSR_GIM_HV_TSC_FREQ UINT32_C(0x40000022)
+/** Frequency of LAPIC in Hz as reported by the hypervisor (R) */
+#define MSR_GIM_HV_APIC_FREQ UINT32_C(0x40000023)
+/** End of range 2. */
+#define MSR_GIM_HV_RANGE2_LAST MSR_GIM_HV_APIC_FREQ
+
+/** Start of range 3. */
+#define MSR_GIM_HV_RANGE3_FIRST UINT32_C(0x40000070)
+/** Access to APIC EOI (End-Of-Interrupt) register (W) */
+#define MSR_GIM_HV_EOI UINT32_C(0x40000070)
+/** Access to APIC ICR (Interrupt Command) register (R/W) */
+#define MSR_GIM_HV_ICR UINT32_C(0x40000071)
+/** Access to APIC TPR (Task Priority) register (R/W) */
+#define MSR_GIM_HV_TPR UINT32_C(0x40000072)
+/** Enables lazy EOI processing (R/W) */
+#define MSR_GIM_HV_APIC_ASSIST_PAGE UINT32_C(0x40000073)
+/** End of range 3. */
+#define MSR_GIM_HV_RANGE3_LAST MSR_GIM_HV_APIC_ASSIST_PAGE
+
+/** Start of range 4. */
+#define MSR_GIM_HV_RANGE4_FIRST UINT32_C(0x40000080)
+/** Control behaviour of synthetic interrupt controller (R/W) */
+#define MSR_GIM_HV_SCONTROL UINT32_C(0x40000080)
+/** Synthetic interrupt controller version (R) */
+#define MSR_GIM_HV_SVERSION UINT32_C(0x40000081)
+/** Base address of synthetic interrupt event flag (R/W) */
+#define MSR_GIM_HV_SIEFP UINT32_C(0x40000082)
+/** Base address of synthetic interrupt message page (R/W) */
+#define MSR_GIM_HV_SIMP UINT32_C(0x40000083)
+/** End-Of-Message in synthetic interrupt parameter page (W) */
+#define MSR_GIM_HV_EOM UINT32_C(0x40000084)
+/** End of range 4. */
+#define MSR_GIM_HV_RANGE4_LAST MSR_GIM_HV_EOM
+
+/** Start of range 5. */
+#define MSR_GIM_HV_RANGE5_FIRST UINT32_C(0x40000090)
+/** Configures synthetic interrupt source 0 (R/W) */
+#define MSR_GIM_HV_SINT0 UINT32_C(0x40000090)
+/** Configures synthetic interrupt source 1 (R/W) */
+#define MSR_GIM_HV_SINT1 UINT32_C(0x40000091)
+/** Configures synthetic interrupt source 2 (R/W) */
+#define MSR_GIM_HV_SINT2 UINT32_C(0x40000092)
+/** Configures synthetic interrupt source 3 (R/W) */
+#define MSR_GIM_HV_SINT3 UINT32_C(0x40000093)
+/** Configures synthetic interrupt source 4 (R/W) */
+#define MSR_GIM_HV_SINT4 UINT32_C(0x40000094)
+/** Configures synthetic interrupt source 5 (R/W) */
+#define MSR_GIM_HV_SINT5 UINT32_C(0x40000095)
+/** Configures synthetic interrupt source 6 (R/W) */
+#define MSR_GIM_HV_SINT6 UINT32_C(0x40000096)
+/** Configures synthetic interrupt source 7 (R/W) */
+#define MSR_GIM_HV_SINT7 UINT32_C(0x40000097)
+/** Configures synthetic interrupt source 8 (R/W) */
+#define MSR_GIM_HV_SINT8 UINT32_C(0x40000098)
+/** Configures synthetic interrupt source 9 (R/W) */
+#define MSR_GIM_HV_SINT9 UINT32_C(0x40000099)
+/** Configures synthetic interrupt source 10 (R/W) */
+#define MSR_GIM_HV_SINT10 UINT32_C(0x4000009A)
+/** Configures synthetic interrupt source 11 (R/W) */
+#define MSR_GIM_HV_SINT11 UINT32_C(0x4000009B)
+/** Configures synthetic interrupt source 12 (R/W) */
+#define MSR_GIM_HV_SINT12 UINT32_C(0x4000009C)
+/** Configures synthetic interrupt source 13 (R/W) */
+#define MSR_GIM_HV_SINT13 UINT32_C(0x4000009D)
+/** Configures synthetic interrupt source 14 (R/W) */
+#define MSR_GIM_HV_SINT14 UINT32_C(0x4000009E)
+/** Configures synthetic interrupt source 15 (R/W) */
+#define MSR_GIM_HV_SINT15 UINT32_C(0x4000009F)
+/** End of range 5. */
+#define MSR_GIM_HV_RANGE5_LAST MSR_GIM_HV_SINT15
+
+/** Start of range 6. */
+#define MSR_GIM_HV_RANGE6_FIRST UINT32_C(0x400000B0)
+/** Configures register for synthetic timer 0 (R/W) */
+#define MSR_GIM_HV_STIMER0_CONFIG UINT32_C(0x400000B0)
+/** Expiration time or period for synthetic timer 0 (R/W) */
+#define MSR_GIM_HV_STIMER0_COUNT UINT32_C(0x400000B1)
+/** Configures register for synthetic timer 1 (R/W) */
+#define MSR_GIM_HV_STIMER1_CONFIG UINT32_C(0x400000B2)
+/** Expiration time or period for synthetic timer 1 (R/W) */
+#define MSR_GIM_HV_STIMER1_COUNT UINT32_C(0x400000B3)
+/** Configures register for synthetic timer 2 (R/W) */
+#define MSR_GIM_HV_STIMER2_CONFIG UINT32_C(0x400000B4)
+/** Expiration time or period for synthetic timer 2 (R/W) */
+#define MSR_GIM_HV_STIMER2_COUNT UINT32_C(0x400000B5)
+/** Configures register for synthetic timer 3 (R/W) */
+#define MSR_GIM_HV_STIMER3_CONFIG UINT32_C(0x400000B6)
+/** Expiration time or period for synthetic timer 3 (R/W) */
+#define MSR_GIM_HV_STIMER3_COUNT UINT32_C(0x400000B7)
+/** End of range 6. */
+#define MSR_GIM_HV_RANGE6_LAST MSR_GIM_HV_STIMER3_COUNT
+
+/** Start of range 7. */
+#define MSR_GIM_HV_RANGE7_FIRST UINT32_C(0x400000C1)
+/** Trigger to transition to power state C1 (R) */
+#define MSR_GIM_HV_POWER_STATE_TRIGGER_C1 UINT32_C(0x400000C1)
+/** Trigger to transition to power state C2 (R) */
+#define MSR_GIM_HV_POWER_STATE_TRIGGER_C2 UINT32_C(0x400000C2)
+/** Trigger to transition to power state C3 (R) */
+#define MSR_GIM_HV_POWER_STATE_TRIGGER_C3 UINT32_C(0x400000C3)
+/** End of range 7. */
+#define MSR_GIM_HV_RANGE7_LAST MSR_GIM_HV_POWER_STATE_TRIGGER_C3
+
+/** Start of range 8. */
+#define MSR_GIM_HV_RANGE8_FIRST UINT32_C(0x400000D1)
+/** Configure the recipe for power state transitions to C1 (R/W) */
+#define MSR_GIM_HV_POWER_STATE_CONFIG_C1 UINT32_C(0x400000D1)
+/** Configure the recipe for power state transitions to C2 (R/W) */
+#define MSR_GIM_HV_POWER_STATE_CONFIG_C2 UINT32_C(0x400000D2)
+/** Configure the recipe for power state transitions to C3 (R/W) */
+#define MSR_GIM_HV_POWER_STATE_CONFIG_C3 UINT32_C(0x400000D3)
+/** End of range 8. */
+#define MSR_GIM_HV_RANGE8_LAST MSR_GIM_HV_POWER_STATE_CONFIG_C3
+
+/** Start of range 9. */
+#define MSR_GIM_HV_RANGE9_FIRST UINT32_C(0x400000E0)
+/** Map the guest's retail partition stats page (R/W) */
+#define MSR_GIM_HV_STATS_PART_RETAIL_PAGE UINT32_C(0x400000E0)
+/** Map the guest's internal partition stats page (R/W) */
+#define MSR_GIM_HV_STATS_PART_INTERNAL_PAGE UINT32_C(0x400000E1)
+/** Map the guest's retail VP stats page (R/W) */
+#define MSR_GIM_HV_STATS_VP_RETAIL_PAGE UINT32_C(0x400000E2)
+/** Map the guest's internal VP stats page (R/W) */
+#define MSR_GIM_HV_STATS_VP_INTERNAL_PAGE UINT32_C(0x400000E3)
+/** End of range 9. */
+#define MSR_GIM_HV_RANGE9_LAST MSR_GIM_HV_STATS_VP_INTERNAL_PAGE
+
+/** Start of range 10. */
+#define MSR_GIM_HV_RANGE10_FIRST UINT32_C(0x400000F0)
+/** Trigger the guest's transition to idle power state (R) */
+#define MSR_GIM_HV_GUEST_IDLE UINT32_C(0x400000F0)
+/** Synthetic debug control. */
+#define MSR_GIM_HV_SYNTH_DEBUG_CONTROL UINT32_C(0x400000F1)
+/** Synthetic debug status. */
+#define MSR_GIM_HV_SYNTH_DEBUG_STATUS UINT32_C(0x400000F2)
+/** Synthetic debug send buffer. */
+#define MSR_GIM_HV_SYNTH_DEBUG_SEND_BUFFER UINT32_C(0x400000F3)
+/** Synthetic debug receive buffer. */
+#define MSR_GIM_HV_SYNTH_DEBUG_RECEIVE_BUFFER UINT32_C(0x400000F4)
+/** Synthetic debug pending buffer. */
+#define MSR_GIM_HV_SYNTH_DEBUG_PENDING_BUFFER UINT32_C(0x400000F5)
+/** End of range 10. */
+#define MSR_GIM_HV_RANGE10_LAST MSR_GIM_HV_SYNTH_DEBUG_PENDING_BUFFER
+
+/** Start of range 11. */
+#define MSR_GIM_HV_RANGE11_FIRST UINT32_C(0x400000FF)
+/** Undocumented debug options MSR. */
+#define MSR_GIM_HV_DEBUG_OPTIONS_MSR UINT32_C(0x400000FF)
+/** End of range 11. */
+#define MSR_GIM_HV_RANGE11_LAST MSR_GIM_HV_DEBUG_OPTIONS_MSR
+
+/** Start of range 12. */
+#define MSR_GIM_HV_RANGE12_FIRST UINT32_C(0x40000100)
+/** Guest crash MSR 0. */
+#define MSR_GIM_HV_CRASH_P0 UINT32_C(0x40000100)
+/** Guest crash MSR 1. */
+#define MSR_GIM_HV_CRASH_P1 UINT32_C(0x40000101)
+/** Guest crash MSR 2. */
+#define MSR_GIM_HV_CRASH_P2 UINT32_C(0x40000102)
+/** Guest crash MSR 3. */
+#define MSR_GIM_HV_CRASH_P3 UINT32_C(0x40000103)
+/** Guest crash MSR 4. */
+#define MSR_GIM_HV_CRASH_P4 UINT32_C(0x40000104)
+/** Guest crash control. */
+#define MSR_GIM_HV_CRASH_CTL UINT32_C(0x40000105)
+/** End of range 12. */
+#define MSR_GIM_HV_RANGE12_LAST MSR_GIM_HV_CRASH_CTL
+/** @} */
+
+AssertCompile(MSR_GIM_HV_RANGE0_FIRST <= MSR_GIM_HV_RANGE0_LAST);
+AssertCompile(MSR_GIM_HV_RANGE1_FIRST <= MSR_GIM_HV_RANGE1_LAST);
+AssertCompile(MSR_GIM_HV_RANGE2_FIRST <= MSR_GIM_HV_RANGE2_LAST);
+AssertCompile(MSR_GIM_HV_RANGE3_FIRST <= MSR_GIM_HV_RANGE3_LAST);
+AssertCompile(MSR_GIM_HV_RANGE4_FIRST <= MSR_GIM_HV_RANGE4_LAST);
+AssertCompile(MSR_GIM_HV_RANGE5_FIRST <= MSR_GIM_HV_RANGE5_LAST);
+AssertCompile(MSR_GIM_HV_RANGE6_FIRST <= MSR_GIM_HV_RANGE6_LAST);
+AssertCompile(MSR_GIM_HV_RANGE7_FIRST <= MSR_GIM_HV_RANGE7_LAST);
+AssertCompile(MSR_GIM_HV_RANGE8_FIRST <= MSR_GIM_HV_RANGE8_LAST);
+AssertCompile(MSR_GIM_HV_RANGE9_FIRST <= MSR_GIM_HV_RANGE9_LAST);
+AssertCompile(MSR_GIM_HV_RANGE10_FIRST <= MSR_GIM_HV_RANGE10_LAST);
+AssertCompile(MSR_GIM_HV_RANGE11_FIRST <= MSR_GIM_HV_RANGE11_LAST);
+AssertCompile(MSR_GIM_HV_RANGE12_FIRST <= MSR_GIM_HV_RANGE12_LAST);
+
+/** @name Hyper-V MSR - Reset (MSR_GIM_HV_RESET).
+ * @{
+ */
+/** The reset enable mask. */
+#define MSR_GIM_HV_RESET_ENABLE RT_BIT_64(0)
+/** Whether the reset MSR is enabled. */
+#define MSR_GIM_HV_RESET_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_RESET_ENABLE)
+/** @} */
+
+/** @name Hyper-V MSR - Hypercall (MSR_GIM_HV_HYPERCALL).
+ * @{
+ */
+/** Guest-physical page frame number of the hypercall-page. */
+#define MSR_GIM_HV_HYPERCALL_GUEST_PFN(a) ((a) >> 12)
+/** The hypercall enable mask. */
+#define MSR_GIM_HV_HYPERCALL_PAGE_ENABLE RT_BIT_64(0)
+/** Whether the hypercall-page is enabled or not. */
+#define MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_HYPERCALL_PAGE_ENABLE)
+/** @} */
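+
+/*
+ * A minimal usage sketch (not part of the original header, hence kept under
+ * '#if 0'): decoding a guest write to MSR_GIM_HV_HYPERCALL with the macros
+ * above.  The function and variable names are hypothetical.
+ */
+#if 0
+static void exampleDecodeHypercallMsr(uint64_t uRawValue)
+{
+    /* Bit 0 enables the hypercall page; bits 63:12 hold the guest page frame number. */
+    bool const     fEnabled   = MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(uRawValue);
+    uint64_t const uGuestPfn  = MSR_GIM_HV_HYPERCALL_GUEST_PFN(uRawValue);
+    uint64_t const GCPhysPage = uGuestPfn << 12;    /* 4K Hyper-V pages (GIM_HV_PAGE_SHIFT is defined further down). */
+    NOREF(fEnabled); NOREF(GCPhysPage);
+}
+#endif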
+
+/** @name Hyper-V MSR - Reference TSC (MSR_GIM_HV_REF_TSC).
+ * @{
+ */
+/** Guest-physical page frame number of the TSC-page. */
+#define MSR_GIM_HV_REF_TSC_GUEST_PFN(a) ((a) >> 12)
+/** The TSC-page enable mask. */
+#define MSR_GIM_HV_REF_TSC_ENABLE RT_BIT_64(0)
+/** Whether the TSC-page is enabled or not. */
+#define MSR_GIM_HV_REF_TSC_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_REF_TSC_ENABLE)
+/** @} */
+
+/** @name Hyper-V MSR - Guest crash control (MSR_GIM_HV_CRASH_CTL).
+ * @{
+ */
+/** The Crash Control notify mask. */
+#define MSR_GIM_HV_CRASH_CTL_NOTIFY RT_BIT_64(63)
+/** @} */
+
+/** @name Hyper-V MSR - Guest OS ID (MSR_GIM_HV_GUEST_OS_ID).
+ * @{
+ */
+/** An open-source operating system. */
+#define MSR_GIM_HV_GUEST_OS_ID_IS_OPENSOURCE(a) RT_BOOL((a) & RT_BIT_64(63))
+/** Vendor ID. */
+#define MSR_GIM_HV_GUEST_OS_ID_VENDOR(a) (uint32_t)(((a) >> 48) & 0xfff)
+/** Guest OS variant, depending on the vendor ID. */
+#define MSR_GIM_HV_GUEST_OS_ID_OS_VARIANT(a) (uint32_t)(((a) >> 40) & 0xff)
+/** Guest OS major version. */
+#define MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(a) (uint32_t)(((a) >> 32) & 0xff)
+/** Guest OS minor version. */
+#define MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(a) (uint32_t)(((a) >> 24) & 0xff)
+/** Guest OS service version (e.g. service pack number in case of Windows). */
+#define MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(a) (uint32_t)(((a) >> 16) & 0xff)
+/** Guest OS build number. */
+#define MSR_GIM_HV_GUEST_OS_ID_BUILD(a) (uint32_t)((a) & 0xffff)
+/** @} */
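+
+/*
+ * A small sketch (hypothetical helper, under '#if 0') of picking apart a value
+ * the guest wrote to MSR_GIM_HV_GUEST_OS_ID using the accessors above.
+ */
+#if 0
+static void exampleDecodeGuestOsId(uint64_t uGuestOsIdMsr)
+{
+    bool const     fOpenSource = MSR_GIM_HV_GUEST_OS_ID_IS_OPENSOURCE(uGuestOsIdMsr);
+    uint32_t const uVendor     = MSR_GIM_HV_GUEST_OS_ID_VENDOR(uGuestOsIdMsr);
+    uint32_t const uMajor      = MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(uGuestOsIdMsr);
+    uint32_t const uMinor      = MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(uGuestOsIdMsr);
+    uint32_t const uBuild      = MSR_GIM_HV_GUEST_OS_ID_BUILD(uGuestOsIdMsr);
+    NOREF(fOpenSource); NOREF(uVendor); NOREF(uMajor); NOREF(uMinor); NOREF(uBuild);
+}
+#endif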
+
+/** @name Hyper-V MSR - APIC-assist page (MSR_GIM_HV_APIC_ASSIST_PAGE).
+ * @{
+ */
+/** Guest-physical page frame number of the APIC-assist page. */
+#define MSR_GIM_HV_APICASSIST_GUEST_PFN(a) ((a) >> 12)
+/** The APIC-assist page enable mask. */
+#define MSR_GIM_HV_APICASSIST_PAGE_ENABLE RT_BIT_64(0)
+/** Whether the APIC-assist page is enabled or not. */
+#define MSR_GIM_HV_APICASSIST_PAGE_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_APICASSIST_PAGE_ENABLE)
+/** @} */
+
+/** @name Hyper-V MSR - Synthetic Interrupt Event Flags page
+ * (MSR_GIM_HV_SIEFP).
+ * @{
+ */
+/** Guest-physical page frame number of the SIEF page. */
+#define MSR_GIM_HV_SIEF_GUEST_PFN(a) ((a) >> 12)
+/** The SIEF enable mask. */
+#define MSR_GIM_HV_SIEF_PAGE_ENABLE RT_BIT_64(0)
+/** Whether the SIEF page is enabled or not. */
+#define MSR_GIM_HV_SIEF_PAGE_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_SIEF_PAGE_ENABLE)
+/** @} */
+
+/** @name Hyper-V MSR - Synthetic Interrupt Control (MSR_GIM_HV_CONTROL).
+ * @{
+ */
+/** The SControl enable mask. */
+#define MSR_GIM_HV_SCONTROL_ENABLE RT_BIT_64(0)
+/** Whether SControl is enabled or not. */
+#define MSR_GIM_HV_SCONTROL_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_SCONTROL_ENABLE)
+/** @} */
+
+/** @name Hyper-V MSR - Synthetic Timer Config (MSR_GIM_HV_STIMER_CONFIG).
+ * @{
+ */
+/** The Stimer enable mask. */
+#define MSR_GIM_HV_STIMER_ENABLE RT_BIT_64(0)
+/** Whether Stimer is enabled or not. */
+#define MSR_GIM_HV_STIMER_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_STIMER_ENABLE)
+/** The Stimer periodic mask. */
+#define MSR_GIM_HV_STIMER_PERIODIC RT_BIT_64(1)
+/** Whether Stimer is periodic. */
+#define MSR_GIM_HV_STIMER_IS_PERIODIC(a) RT_BOOL((a) & MSR_GIM_HV_STIMER_PERIODIC)
+/** The Stimer lazy mask. */
+#define MSR_GIM_HV_STIMER_LAZY RT_BIT_64(2)
+/** Whether Stimer is lazy. */
+#define MSR_GIM_HV_STIMER_IS_LAZY(a) RT_BOOL((a) & MSR_GIM_HV_STIMER_LAZY)
+/** The Stimer auto-enable mask. */
+#define MSR_GIM_HV_STIMER_AUTO_ENABLE RT_BIT_64(3)
+/** Whether Stimer is auto-enabled. */
+#define MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_STIMER_AUTO_ENABLE)
+/** The Stimer SINTx mask (bits 16:19). */
+#define MSR_GIM_HV_STIMER_SINTX UINT64_C(0xf0000)
+/** Gets the Stimer synthetic interrupt source. */
+#define MSR_GIM_HV_STIMER_GET_SINTX(a) (((a) >> 16) & 0xf)
+/** The Stimer valid read/write mask. */
+#define MSR_GIM_HV_STIMER_RW_VALID ( MSR_GIM_HV_STIMER_ENABLE | MSR_GIM_HV_STIMER_PERIODIC \
+ | MSR_GIM_HV_STIMER_LAZY | MSR_GIM_HV_STIMER_AUTO_ENABLE \
+ | MSR_GIM_HV_STIMER_SINTX)
+/** @} */
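+
+/*
+ * Sketch (hypothetical, under '#if 0') of decoding a synthetic timer config
+ * value with the macros above.  How bits outside MSR_GIM_HV_STIMER_RW_VALID
+ * are treated is a policy decision and not implied by this header.
+ */
+#if 0
+static void exampleDecodeStimerConfig(uint64_t uStimerConfig)
+{
+    bool const     fEnabled  = MSR_GIM_HV_STIMER_IS_ENABLED(uStimerConfig);
+    bool const     fPeriodic = MSR_GIM_HV_STIMER_IS_PERIODIC(uStimerConfig);
+    bool const     fLazy     = MSR_GIM_HV_STIMER_IS_LAZY(uStimerConfig);
+    bool const     fAutoEn   = MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(uStimerConfig);
+    /* Which SINTx receives the expiration message. */
+    uint32_t const uSintx    = (uint32_t)MSR_GIM_HV_STIMER_GET_SINTX(uStimerConfig);
+    NOREF(fEnabled); NOREF(fPeriodic); NOREF(fLazy); NOREF(fAutoEn); NOREF(uSintx);
+}
+#endif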
+
+/**
+ * Hyper-V APIC-assist structure placed in the APIC-assist page.
+ */
+typedef struct GIMHVAPICASSIST
+{
+ uint32_t fNoEoiRequired : 1;
+ uint32_t u31Reserved0 : 31;
+} GIMHVAPICASSIST;
+/** Pointer to the Hyper-V APIC-assist structure. */
+typedef GIMHVAPICASSIST *PGIMHVAPICASSIST;
+/** Pointer to a const Hyper-V APIC-assist structure. */
+typedef GIMHVAPICASSIST const *PCGIMHVAPICASSIST;
+AssertCompileSize(GIMHVAPICASSIST, 4);
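+
+/*
+ * Sketch (hypothetical, under '#if 0'): the guest-side check of the
+ * APIC-assist page -- when fNoEoiRequired is set, the guest may elide the
+ * EOI register write for the interrupt it is servicing.
+ */
+#if 0
+static bool exampleCanSkipEoi(PCGIMHVAPICASSIST pApicAssist)
+{
+    return RT_BOOL(pApicAssist->fNoEoiRequired);
+}
+#endif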
+
+/**
+ * Hypercall parameter type.
+ */
+typedef enum GIMHVHYPERCALLPARAM
+{
+ GIMHVHYPERCALLPARAM_IN = 0,
+ GIMHVHYPERCALLPARAM_OUT
+} GIMHVHYPERCALLPARAM;
+
+
+/** @name Hyper-V hypercall op codes.
+ * @{
+ */
+/** Post message to hypervisor or VMs. */
+#define GIM_HV_HYPERCALL_OP_POST_MESSAGE 0x5C
+/** Post debug data to hypervisor. */
+#define GIM_HV_HYPERCALL_OP_POST_DEBUG_DATA 0x69
+/** Retrieve debug data from hypervisor. */
+#define GIM_HV_HYPERCALL_OP_RETREIVE_DEBUG_DATA 0x6A
+/** Reset debug session. */
+#define GIM_HV_HYPERCALL_OP_RESET_DEBUG_SESSION 0x6B
+/** @} */
+
+/** @name Hyper-V extended hypercall op codes.
+ * @{
+ */
+/** Query extended hypercall capabilities. */
+#define GIM_HV_EXT_HYPERCALL_OP_QUERY_CAP 0x8001
+/** Query the guest-physical address range that has zero-filled memory at boot. */
+#define GIM_HV_EXT_HYPERCALL_OP_GET_BOOT_ZEROED_MEM 0x8002
+/** @} */
+
+
+/** @name Hyper-V Extended hypercall - HvExtCallQueryCapabilities.
+ * @{
+ */
+/** Boot time zeroed pages. */
+#define GIM_HV_EXT_HYPERCALL_CAP_ZERO_MEM RT_BIT_64(0)
+/** Whether boot time zeroed pages capability is enabled. */
+#define GIM_HV_EXT_HYPERCALL_CAP_IS_ZERO_MEM_ENABLED(a) RT_BOOL((a) & GIM_HV_EXT_HYPERCALL_CAP_ZERO_MEM)
+/** @} */
+
+
+/** @name Hyper-V hypercall inputs.
+ * @{
+ */
+/** The hypercall call operation code. */
+#define GIM_HV_HYPERCALL_IN_CALL_CODE(a) ((a) & UINT64_C(0xffff))
+/** Whether it's a fast (register based) hypercall or not (memory-based). */
+#define GIM_HV_HYPERCALL_IN_IS_FAST(a) RT_BOOL((a) & RT_BIT_64(16))
+/** Total number of reps for a rep hypercall. */
+#define GIM_HV_HYPERCALL_IN_REP_COUNT(a) (((a) >> 32) & UINT64_C(0xfff))
+/** Rep start index for a rep hypercall. */
+#define GIM_HV_HYPERCALL_IN_REP_START_IDX(a) (((a) >> 48) & UINT64_C(0xfff))
+/** Reserved bits range 1. */
+#define GIM_HV_HYPERCALL_IN_RSVD_1(a) (((a) >> 17) & UINT64_C(0x7fff))
+/** Reserved bits range 2. */
+#define GIM_HV_HYPERCALL_IN_RSVD_2(a) (((a) >> 44) & UINT64_C(0xf))
+/** Reserved bits range 3. */
+#define GIM_HV_HYPERCALL_IN_RSVD_3(a) (((a) >> 60) & UINT64_C(0x7))
+/** @} */
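+
+/*
+ * Sketch (hypothetical, under '#if 0') of splitting a hypercall input value
+ * (RCX for 64-bit guest callers) into its fields using the accessors above.
+ */
+#if 0
+static void exampleDecodeHypercallInput(uint64_t uHyperIn)
+{
+    uint16_t const uCallCode = (uint16_t)GIM_HV_HYPERCALL_IN_CALL_CODE(uHyperIn);
+    bool const     fFast     = GIM_HV_HYPERCALL_IN_IS_FAST(uHyperIn);   /* Register vs. memory based. */
+    uint32_t const cReps     = (uint32_t)GIM_HV_HYPERCALL_IN_REP_COUNT(uHyperIn);
+    uint32_t const iRepStart = (uint32_t)GIM_HV_HYPERCALL_IN_REP_START_IDX(uHyperIn);
+    NOREF(uCallCode); NOREF(fFast); NOREF(cReps); NOREF(iRepStart);
+}
+#endif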
+
+
+/** @name Hyper-V hypercall status codes.
+ * @{
+ */
+/** Success. */
+#define GIM_HV_STATUS_SUCCESS 0x00
+/** Unrecognized hypercall. */
+#define GIM_HV_STATUS_INVALID_HYPERCALL_CODE 0x02
+/** Invalid hypercall input (rep count, rsvd bits). */
+#define GIM_HV_STATUS_INVALID_HYPERCALL_INPUT 0x03
+/** Hypercall guest-physical address not 8-byte aligned or crosses page boundary. */
+#define GIM_HV_STATUS_INVALID_ALIGNMENT 0x04
+/** Invalid hypercall parameters. */
+#define GIM_HV_STATUS_INVALID_PARAMETER 0x05
+/** Access denied. */
+#define GIM_HV_STATUS_ACCESS_DENIED 0x06
+/** The partition state is not valid for the specified operation. */
+#define GIM_HV_STATUS_INVALID_PARTITION_STATE 0x07
+/** The hypercall operation could not be performed. */
+#define GIM_HV_STATUS_OPERATION_DENIED 0x08
+/** Specified partition property ID not recognized. */
+#define GIM_HV_STATUS_UNKNOWN_PROPERTY 0x09
+/** Specified partition property value not within range. */
+#define GIM_HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE 0x0a
+/** Insufficient memory for performing the hypercall. */
+#define GIM_HV_STATUS_INSUFFICIENT_MEMORY 0x0b
+/** Maximum partition depth has been exceeded for the partition hierarchy. */
+#define GIM_HV_STATUS_PARTITION_TOO_DEEP 0x0c
+/** The specified partition ID is not valid. */
+#define GIM_HV_STATUS_INVALID_PARTITION_ID 0x0d
+/** The specified virtual processor index is invalid. */
+#define GIM_HV_STATUS_INVALID_VP_INDEX 0x0e
+/** The specified port ID is not unique or doesn't exist. */
+#define GIM_HV_STATUS_INVALID_PORT_ID 0x11
+/** The specified connection ID is not unique or doesn't exist. */
+#define GIM_HV_STATUS_INVALID_CONNECTION_ID 0x12
+/** The target port doesn't have sufficient buffers for the caller to post a message. */
+#define GIM_HV_STATUS_INSUFFICIENT_BUFFERS 0x13
+/** External interrupt not acknowledged. */
+#define GIM_HV_STATUS_NOT_ACKNOWLEDGED 0x14
+/** External interrupt acknowledged. */
+#define GIM_HV_STATUS_ACKNOWLEDGED 0x16
+/** Invalid state due to misordering Hv[Save|Restore]PartitionState. */
+#define GIM_HV_STATUS_INVALID_SAVE_RESTORE_STATE 0x17
+/** Operation not performed because a required SynIC feature was disabled. */
+#define GIM_HV_STATUS_INVALID_SYNIC_STATE 0x18
+/** Object or value already in use. */
+#define GIM_HV_STATUS_OBJECT_IN_USE 0x19
+/** Invalid proximity domain information. */
+#define GIM_HV_STATUS_INVALID_PROXIMITY_DOMAIN_INFO 0x1A
+/** Attempt to retrieve data failed. */
+#define GIM_HV_STATUS_NO_DATA 0x1B
+/** Debug connection has not received any new data since the last time. */
+#define GIM_HV_STATUS_INACTIVE 0x1C
+/** A resource is unavailable for allocation. */
+#define GIM_HV_STATUS_NO_RESOURCES 0x1D
+/** A hypervisor feature is not available to the caller. */
+#define GIM_HV_STATUS_FEATURE_UNAVAILABLE 0x1E
+/** The debug packet returned is partial due to an I/O error. */
+#define GIM_HV_STATUS_PARTIAL_PACKET 0x1F
+/** Processor feature SSE3 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SSE3_NOT_SUPPORTED 0x20
+/** Processor feature LAHF/SAHF unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_LAHSAHF_NOT_SUPPORTED 0x21
+/** Processor feature SSSE3 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SSSE3_NOT_SUPPORTED 0x22
+/** Processor feature SSE4.1 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SSE4_1_NOT_SUPPORTED 0x23
+/** Processor feature SSE4.2 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SSE4_2_NOT_SUPPORTED 0x24
+/** Processor feature SSE4A unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SSE4A_NOT_SUPPORTED 0x25
+/** Processor feature XOP unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XOP_NOT_SUPPORTED 0x26
+/** Processor feature POPCNT unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_POPCNT_NOT_SUPPORTED 0x27
+/** Processor feature CMPXCHG16B unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_CMPXCHG16B_NOT_SUPPORTED 0x28
+/** Processor feature ALTMOVCR8 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_ALTMOVCR8_NOT_SUPPORTED 0x29
+/** Processor feature LZCNT unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_LZCNT_NOT_SUPPORTED 0x2A
+/** Processor feature misaligned SSE unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_MISALIGNED_SSE_NOT_SUPPORTED 0x2B
+/** Processor feature MMX extensions unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_MMX_EXT_NOT_SUPPORTED 0x2C
+/** Processor feature 3DNow! unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_3DNOW_NOT_SUPPORTED 0x2D
+/** Processor feature Extended 3DNow! unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_EXTENDED_3DNOW_NOT_SUPPORTED 0x2E
+/** Processor feature 1GB large page unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_PAGE_1GB_NOT_SUPPORTED 0x2F
+/** Processor cache line flush size incompatible. */
+#define GIM_HV_STATUS_PROC_CACHE_LINE_FLUSH_SIZE_INCOMPATIBLE 0x30
+/** Processor feature XSAVE unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_NOT_SUPPORTED 0x31
+/** Processor feature XSAVEOPT unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVEOPT_NOT_SUPPORTED 0x32
+/** The specified buffer was too small for all requested data. */
+#define GIM_HV_STATUS_INSUFFICIENT_BUFFER 0x33
+/** Processor feature XSAVE AVX unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_AVX_NOT_SUPPORTED 0x34
+/** Processor feature XSAVE unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_FEAT_NOT_SUPPORTED 0x35 /* Huh, isn't this the same as 0x31? */
+/** Processor XSAVE save area incompatible. */
+#define GIM_HV_STATUS_PROC_FEAT_PAGE_XSAVE_SAVE_AREA_INCOMPATIBLE 0x36
+/** Processor architecture unsupported. */
+#define GIM_HV_STATUS_INCOMPATIBLE_PROCESSOR 0x37
+/** Max. domains for platform I/O remapping reached. */
+#define GIM_HV_STATUS_INSUFFICIENT_DEVICE_DOMAINS 0x38
+/** Processor feature AES unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_AES_NOT_SUPPORTED 0x39
+/** Processor feature PCMULQDQ unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_PCMULQDQ_NOT_SUPPORTED 0x3A
+/** Processor feature XSAVE features unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_FEATURES_INCOMPATIBLE 0x3B
+/** Generic CPUID validation error. */
+#define GIM_HV_STATUS_CPUID_FEAT_VALIDATION_ERROR 0x3C
+/** XSAVE CPUID validation error. */
+#define GIM_HV_STATUS_CPUID_XSAVE_FEAT_VALIDATION_ERROR 0x3D
+/** Processor startup timed out. */
+#define GIM_HV_STATUS_PROCESSOR_STARTUP_TIMEOUT 0x3E
+/** SMX enabled by the BIOS. */
+#define GIM_HV_STATUS_SMX_ENABLED 0x3F
+/** Processor feature PCID unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_PCID_NOT_SUPPORTED 0x40
+/** Invalid LP index. */
+#define GIM_HV_STATUS_INVALID_LP_INDEX 0x41
+/** Processor feature FMA4 unsupported. */
+#define GIM_HV_STATUS_FEAT_FMA4_NOT_SUPPORTED 0x42
+/** Processor feature F16C unsupported. */
+#define GIM_HV_STATUS_FEAT_F16C_NOT_SUPPORTED 0x43
+/** Processor feature RDRAND unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_RDRAND_NOT_SUPPORTED 0x44
+/** Processor feature RDWRFSGS unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_RDWRFSGS_NOT_SUPPORTED 0x45
+/** Processor feature SMEP unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_SMEP_NOT_SUPPORTED 0x46
+/** Processor feature enhanced fast string unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_ENHANCED_FAST_STRING_NOT_SUPPORTED 0x47
+/** Processor feature MOVBE unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_MOVBE_NOT_SUPPORTED 0x48
+/** Processor feature BMI1 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_BMI1_NOT_SUPPORTED 0x49
+/** Processor feature BMI2 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_BMI2_NOT_SUPPORTED 0x4A
+/** Processor feature HLE unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_HLE_NOT_SUPPORTED 0x4B
+/** Processor feature RTM unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_RTM_NOT_SUPPORTED 0x4C
+/** Processor feature XSAVE FMA unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_FMA_NOT_SUPPORTED 0x4D
+/** Processor feature XSAVE AVX2 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_XSAVE_AVX2_NOT_SUPPORTED 0x4E
+/** Processor feature NPIEP1 unsupported. */
+#define GIM_HV_STATUS_PROC_FEAT_NPIEP1_NOT_SUPPORTED 0x4F
+/** @} */
+
+
+/** @name Hyper-V MSR - Debug control (MSR_GIM_HV_SYNTH_DEBUG_CONTROL).
+ * @{
+ */
+/** Perform debug write. */
+#define MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_WRITE(a) RT_BOOL((a) & RT_BIT_64(0))
+/** Perform debug read. */
+#define MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_READ(a) RT_BOOL((a) & RT_BIT_64(1))
+/** Returns length of the debug write buffer. */
+#define MSR_GIM_HV_SYNTH_DEBUG_CONTROL_W_LEN(a) (((a) & UINT64_C(0xffff0000)) >> 16)
+/** @} */
+
+
+/** @name Hyper-V MSR - Debug status (MSR_GIM_HV_SYNTH_DEBUG_STATUS).
+ * @{
+ */
+/** Debug send buffer operation success. */
+#define MSR_GIM_HV_SYNTH_DEBUG_STATUS_W_SUCCESS RT_BIT_64(0)
+/** Debug receive buffer operation success. */
+#define MSR_GIM_HV_SYNTH_DEBUG_STATUS_R_SUCCESS RT_BIT_64(2)
+/** Debug connection was reset. */
+#define MSR_GIM_HV_SYNTH_DEBUG_STATUS_CONN_RESET RT_BIT_64(3)
+/** @} */
+
+
+/** @name Hyper-V MSR - synthetic interrupt (MSR_GIM_HV_SINTx).
+ * @{
+ */
+/** The interrupt masked mask. */
+#define MSR_GIM_HV_SINT_MASKED RT_BIT_64(16)
+/** Whether the interrupt source is masked. */
+#define MSR_GIM_HV_SINT_IS_MASKED(a) RT_BOOL((a) & MSR_GIM_HV_SINT_MASKED)
+/** Gets the interrupt vector. */
+#define MSR_GIM_HV_SINT_GET_VECTOR(a) ((a) & UINT64_C(0xff))
+/** The AutoEoi mask. */
+#define MSR_GIM_HV_SINT_AUTOEOI RT_BIT_64(17)
+/** Gets whether AutoEoi is enabled for the synthetic interrupt. */
+#define MSR_GIM_HV_SINT_IS_AUTOEOI(a) RT_BOOL((a) & MSR_GIM_HV_SINT_AUTOEOI)
+/** @} */
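+
+/*
+ * Sketch (hypothetical, under '#if 0') of decoding a MSR_GIM_HV_SINTx value
+ * with the macros above.  Valid vectors lie within
+ * [GIM_HV_SINT_VECTOR_VALID_MIN, GIM_HV_SINT_VECTOR_VALID_MAX] (defined
+ * further down).
+ */
+#if 0
+static void exampleDecodeSint(uint64_t uSintMsr)
+{
+    uint8_t const uVector  = (uint8_t)MSR_GIM_HV_SINT_GET_VECTOR(uSintMsr);
+    bool const    fMasked  = MSR_GIM_HV_SINT_IS_MASKED(uSintMsr);
+    bool const    fAutoEoi = MSR_GIM_HV_SINT_IS_AUTOEOI(uSintMsr);
+    NOREF(uVector); NOREF(fMasked); NOREF(fAutoEoi);
+}
+#endif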
+
+
+/** @name Hyper-V MSR - synthetic interrupt message page (MSR_GIM_HV_SIMP).
+ * @{
+ */
+/** The SIMP enable mask. */
+#define MSR_GIM_HV_SIMP_ENABLE RT_BIT_64(0)
+/** Whether the SIMP is enabled. */
+#define MSR_GIM_HV_SIMP_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_HV_SIMP_ENABLE)
+/** The SIMP guest-physical address. */
+#define MSR_GIM_HV_SIMP_GPA(a) ((a) & UINT64_C(0xfffffffffffff000))
+/** @} */
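+
+/*
+ * Sketch (hypothetical, under '#if 0'): unlike the PFN-style MSRs above, the
+ * SIMP guest-physical address is obtained by masking rather than shifting.
+ */
+#if 0
+static void exampleDecodeSimp(uint64_t uSimpMsr)
+{
+    bool const     fEnabled   = MSR_GIM_HV_SIMP_IS_ENABLED(uSimpMsr);
+    uint64_t const GCPhysSimp = MSR_GIM_HV_SIMP_GPA(uSimpMsr);  /* Already page aligned. */
+    NOREF(fEnabled); NOREF(GCPhysSimp);
+}
+#endif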
+
+
+/** @name Hyper-V hypercall debug options.
+ * @{ */
+/** Maximum debug data payload size in bytes. */
+#define GIM_HV_DEBUG_MAX_DATA_SIZE 4088
+
+/** The undocumented bit for MSR_GIM_HV_DEBUG_OPTIONS_MSR that makes it all
+ * work. */
+#define GIM_HV_DEBUG_OPTIONS_USE_HYPERCALLS RT_BIT(2)
+
+/** Guest will perform the HvPostDebugData hypercall until completion. */
+#define GIM_HV_DEBUG_POST_LOOP RT_BIT_32(0)
+/** Mask of valid HvPostDebugData options. */
+#define GIM_HV_DEBUG_POST_OPTIONS_MASK RT_BIT_32(0)
+
+/** Guest will perform the HvRetrieveDebugData hypercall until completion. */
+#define GIM_HV_DEBUG_RETREIVE_LOOP RT_BIT_32(0)
+/** Guest checks if any global debug session is active. */
+#define GIM_HV_DEBUG_RETREIVE_TEST_ACTIVITY RT_BIT_32(1)
+/** Mask of valid HvRetrieveDebugData options. */
+#define GIM_HV_DEBUG_RETREIVE_OPTIONS_MASK (RT_BIT_32(0) | RT_BIT_32(1))
+
+/** Guest requests purging of incoming debug data. */
+#define GIM_HV_DEBUG_PURGE_INCOMING_DATA RT_BIT_32(0)
+/** Guest requests purging of outgoing debug data. */
+#define GIM_HV_DEBUG_PURGE_OUTGOING_DATA RT_BIT_32(1)
+/** @} */
+
+
+/** @name VMBus.
+ * These are just arbitrary definitions made up by Microsoft without
+ * any publicly available specification behind it.
+ * @{ */
+/** VMBus connection ID. */
+#define GIM_HV_VMBUS_MSG_CONNECTION_ID 1
+/** VMBus synthetic interrupt source (see VMBUS_MESSAGE_SINT in linux
+ * sources). */
+#define GIM_HV_VMBUS_MSG_SINT 2
+/** @} */
+
+/** @name SynIC.
+ * Synthetic Interrupt Controller definitions.
+ * @{ */
+/** SynIC version register. */
+#define GIM_HV_SVERSION 1
+/** Number of synthetic interrupt sources (warning, fixed in saved-states!). */
+#define GIM_HV_SINT_COUNT 16
+/** Lowest valid vector for synthetic interrupt. */
+#define GIM_HV_SINT_VECTOR_VALID_MIN 16
+/** Highest valid vector for synthetic interrupt. */
+#define GIM_HV_SINT_VECTOR_VALID_MAX 255
+/** Number of synthetic timers. */
+#define GIM_HV_STIMER_COUNT 4
+/** @} */
+
+/** @name Hyper-V synthetic interrupt message type.
+ * See 14.8.2 "SynIC Message Types"
+ * @{
+ */
+typedef enum GIMHVMSGTYPE
+{
+ GIMHVMSGTYPE_NONE = 0, /* Common messages */
+ GIMHVMSGTYPE_VMBUS = 1, /* Guest messages */
+ GIMHVMSGTYPE_UNMAPPEDGPA = 0x80000000, /* Hypervisor messages */
+ GIMHVMSGTYPE_GPAINTERCEPT = 0x80000001,
+ GIMHVMSGTYPE_TIMEREXPIRED = 0x80000010,
+ GIMHVMSGTYPE_INVALIDVPREGVAL = 0x80000020,
+ GIMHVMSGTYPE_UNRECOVERABLEXCPT = 0x80000021,
+ GIMHVMSGTYPE_UNSUPPORTEDFEAT = 0x80000022,
+ GIMHVMSGTYPE_APICEOI = 0x80000030,
+ GIMHVMSGTYPE_X64LEGACYFPERROR = 0x80000031,
+ GIMHVMSGTYPE_EVENTLOGBUFSCOMPLETE = 0x80000040,
+ GIMHVMSGTYPE_X64IOPORTINTERCEPT = 0x80010000,
+ GIMHVMSGTYPE_X64MSRINTERCEPT = 0x80010001,
+ GIMHVMSGTYPE_X64CPUIDINTERCEPT = 0x80010002,
+ GIMHVMSGTYPE_X64XCPTINTERCEPT = 0x80010003
+} GIMHVMSGTYPE;
+AssertCompileSize(GIMHVMSGTYPE, 4);
+/** @} */
+
+
+/** @name Hyper-V synthetic interrupt message format.
+ * @{ */
+#define GIM_HV_MSG_SIZE 256
+#define GIM_HV_MSG_MAX_PAYLOAD_SIZE 240
+#define GIM_HV_MSG_MAX_PAYLOAD_UNITS 30
+
+/**
+ * Synthetic interrupt message flags.
+ */
+typedef union GIMHVMSGFLAGS
+{
+ struct
+ {
+ uint8_t u1Pending : 1;
+ uint8_t u7Reserved : 7;
+ } n;
+ uint8_t u;
+} GIMHVMSGFLAGS;
+AssertCompileSize(GIMHVMSGFLAGS, sizeof(uint8_t));
+
+/**
+ * Synthetic interrupt message header.
+ *
+ * @remarks The layout of this structure differs from the Hyper-V spec
+ * (Aug 8, 2013, v4.0a); it is laid out in accordance with VMBus client
+ * expectations.
+ */
+typedef struct GIMHVMSGHDR
+{
+ GIMHVMSGTYPE enmMessageType;
+ uint8_t cbPayload;
+ GIMHVMSGFLAGS MessageFlags;
+ uint16_t uRsvd;
+ union
+ {
+ uint64_t uOriginatorId;
+ uint64_t uPartitionId;
+ uint64_t uPortId;
+ } msgid;
+} GIMHVMSGHDR;
+/** Pointer to a synthetic interrupt message header. */
+typedef GIMHVMSGHDR *PGIMHVMSGHDR;
+AssertCompileMemberOffset(GIMHVMSGHDR, cbPayload, 4);
+AssertCompileMemberOffset(GIMHVMSGHDR, MessageFlags, 5);
+AssertCompileMemberOffset(GIMHVMSGHDR, msgid, 8);
+AssertCompileSize(GIMHVMSGHDR, GIM_HV_MSG_SIZE - GIM_HV_MSG_MAX_PAYLOAD_SIZE);
+
+/**
+ * Synthetic interrupt message.
+ */
+typedef struct GIMHVMSG
+{
+ GIMHVMSGHDR MsgHdr;
+ uint64_t aPayload[GIM_HV_MSG_MAX_PAYLOAD_UNITS];
+} GIMHVMSG;
+/** Pointer to a synthetic interrupt message. */
+typedef GIMHVMSG *PGIMHVMSG;
+AssertCompileSize(GIMHVMSG, GIM_HV_MSG_SIZE);
+/** @} */
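+
+/*
+ * Sketch (hypothetical, under '#if 0') of filling in a synthetic interrupt
+ * message.  Only the documented header fields are touched; the payload layout
+ * depends entirely on the message type.
+ */
+#if 0
+static void exampleFormatMsg(PGIMHVMSG pMsg, GIMHVMSGTYPE enmType, uint64_t const *pauPayload, uint8_t cPayloadUnits)
+{
+    Assert(cPayloadUnits <= GIM_HV_MSG_MAX_PAYLOAD_UNITS);
+    RT_ZERO(*pMsg);
+    pMsg->MsgHdr.enmMessageType = enmType;
+    pMsg->MsgHdr.cbPayload      = (uint8_t)(cPayloadUnits * sizeof(uint64_t));
+    for (uint8_t i = 0; i < cPayloadUnits; i++)
+        pMsg->aPayload[i] = pauPayload[i];
+}
+#endif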
+
+
+/** @name Hyper-V hypercall parameters.
+ * @{ */
+/**
+ * HvPostMessage hypercall input.
+ */
+typedef struct GIMHVPOSTMESSAGEIN
+{
+ uint32_t uConnectionId;
+ uint32_t uPadding;
+ GIMHVMSGTYPE enmMessageType;
+ uint32_t cbPayload;
+} GIMHVPOSTMESSAGEIN;
+/** Pointer to a HvPostMessage input struct. */
+typedef GIMHVPOSTMESSAGEIN *PGIMHVPOSTMESSAGEIN;
+AssertCompileSize(GIMHVPOSTMESSAGEIN, 16);
+
+/**
+ * HvResetDebugData hypercall input.
+ */
+typedef struct GIMHVDEBUGRESETIN
+{
+ uint32_t fFlags;
+ uint32_t uPadding;
+} GIMHVDEBUGRESETIN;
+/** Pointer to a HvResetDebugData input struct. */
+typedef GIMHVDEBUGRESETIN *PGIMHVDEBUGRESETIN;
+AssertCompileSize(GIMHVDEBUGRESETIN, 8);
+
+/**
+ * HvPostDebugData hypercall input.
+ */
+typedef struct GIMHVDEBUGPOSTIN
+{
+ uint32_t cbWrite;
+ uint32_t fFlags;
+} GIMHVDEBUGPOSTIN;
+/** Pointer to a HvPostDebugData input struct. */
+typedef GIMHVDEBUGPOSTIN *PGIMHVDEBUGPOSTIN;
+AssertCompileSize(GIMHVDEBUGPOSTIN, 8);
+
+/**
+ * HvPostDebugData hypercall output.
+ */
+typedef struct GIMHVDEBUGPOSTOUT
+{
+ uint32_t cbPending;
+ uint32_t uPadding;
+} GIMHVDEBUGPOSTOUT;
+/** Pointer to a HvPostDebugData output struct. */
+typedef GIMHVDEBUGPOSTOUT *PGIMHVDEBUGPOSTOUT;
+AssertCompileSize(GIMHVDEBUGPOSTOUT, 8);
+
+/**
+ * HvRetrieveDebugData hypercall input.
+ */
+typedef struct GIMHVDEBUGRETRIEVEIN
+{
+ uint32_t cbRead;
+ uint32_t fFlags;
+ uint64_t u64Timeout;
+} GIMHVDEBUGRETRIEVEIN;
+/** Pointer to a HvRetrieveDebugData input struct. */
+typedef GIMHVDEBUGRETRIEVEIN *PGIMHVDEBUGRETRIEVEIN;
+AssertCompileSize(GIMHVDEBUGRETRIEVEIN, 16);
+
+/**
+ * HvRetrieveDebugData hypercall output.
+ */
+typedef struct GIMHVDEBUGRETRIEVEOUT
+{
+ uint32_t cbRead;
+ uint32_t cbRemaining;
+} GIMHVDEBUGRETRIEVEOUT;
+/** Pointer to a HvRetrieveDebugData output struct. */
+typedef GIMHVDEBUGRETRIEVEOUT *PGIMHVDEBUGRETRIEVEOUT;
+AssertCompileSize(GIMHVDEBUGRETRIEVEOUT, 8);
+
+/**
+ * HvExtCallQueryCapabilities hypercall output.
+ */
+typedef struct GIMHVEXTQUERYCAP
+{
+ uint64_t fCapabilities;
+} GIMHVEXTQUERYCAP;
+/** Pointer to a HvExtCallQueryCapabilities output struct. */
+typedef GIMHVEXTQUERYCAP *PGIMHVEXTQUERYCAP;
+AssertCompileSize(GIMHVEXTQUERYCAP, 8);
+
+/**
+ * HvExtCallGetBootZeroedMemory hypercall output.
+ */
+typedef struct GIMHVEXTGETBOOTZEROMEM
+{
+ RTGCPHYS GCPhysStart;
+ uint64_t cPages;
+} GIMHVEXTGETBOOTZEROMEM;
+/** Pointer to a HvExtCallGetBootZeroedMemory output struct. */
+typedef GIMHVEXTGETBOOTZEROMEM *PGIMHVEXTGETBOOTZEROMEM;
+AssertCompileSize(GIMHVEXTGETBOOTZEROMEM, 16);
+/** @} */
+
+
+/** Hyper-V page size. */
+#define GIM_HV_PAGE_SIZE 4096
+/** Hyper-V page shift. */
+#define GIM_HV_PAGE_SHIFT 12
+
+/** Microsoft Hyper-V vendor signature. */
+#define GIM_HV_VENDOR_MICROSOFT "Microsoft Hv"
+
+/**
+ * MMIO2 region indices.
+ */
+/** The hypercall page region. */
+#define GIM_HV_HYPERCALL_PAGE_REGION_IDX UINT8_C(0)
+/** The TSC page region. */
+#define GIM_HV_REF_TSC_PAGE_REGION_IDX UINT8_C(1)
+/** The maximum region index (must be <= UINT8_MAX). */
+#define GIM_HV_REGION_IDX_MAX GIM_HV_REF_TSC_PAGE_REGION_IDX
+
+/**
+ * Hyper-V TSC (HV_REFERENCE_TSC_PAGE) structure placed in the TSC reference
+ * page.
+ */
+typedef struct GIMHVREFTSC
+{
+ uint32_t u32TscSequence;
+ uint32_t uReserved0;
+ uint64_t u64TscScale;
+ int64_t i64TscOffset;
+} GIMHVTSCPAGE;
+/** Pointer to Hyper-V reference TSC. */
+typedef GIMHVREFTSC *PGIMHVREFTSC;
+/** Pointer to a const Hyper-V reference TSC. */
+typedef GIMHVREFTSC const *PCGIMHVREFTSC;
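+
+/*
+ * Sketch (hypothetical, under '#if 0') of the guest-side reference time
+ * computation: u64TscScale is a 64.64 fixed-point multiplier, so
+ * ReferenceTime = ((Tsc * TscScale) >> 64) + TscOffset.  A real reader also
+ * re-checks u32TscSequence around the calculation; the GCC/Clang __int128
+ * extension is used here purely for brevity.
+ */
+#if 0
+static uint64_t exampleHvReferenceTime(PCGIMHVREFTSC pRefTsc, uint64_t uTsc)
+{
+    uint64_t const uHigh = (uint64_t)(((unsigned __int128)uTsc * pRefTsc->u64TscScale) >> 64);
+    return uHigh + (uint64_t)pRefTsc->i64TscOffset;
+}
+#endif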
+
+/**
+ * Type of the next reply to be sent to the debug connection of the guest.
+ *
+ * @remarks This is saved as part of saved-state, so don't re-order or
+ * alter the size!
+ */
+typedef enum GIMHVDEBUGREPLY
+{
+ /** Send UDP packet. */
+ GIMHVDEBUGREPLY_UDP = 0,
+ /** Send DHCP offer for DHCP discover. */
+ GIMHVDEBUGREPLY_DHCP_OFFER,
+ /** DHCP offer sent. */
+ GIMHVDEBUGREPLY_DHCP_OFFER_SENT,
+ /** Send DHCP acknowledgement for DHCP request. */
+ GIMHVDEBUGREPLY_DHCP_ACK,
+ /** DHCP acknowledgement sent. */
+ GIMHVDEBUGREPLY_DHCP_ACK_SENT,
+ /** Sent ARP reply. */
+ GIMHVDEBUGREPLY_ARP_REPLY,
+ /** ARP reply sent. */
+ GIMHVDEBUGREPLY_ARP_REPLY_SENT,
+ /** Customary 32-bit type hack. */
+ GIMHVDEBUGREPLY_32BIT_HACK = 0x7fffffff
+} GIMHVDEBUGREPLY;
+AssertCompileSize(GIMHVDEBUGREPLY, sizeof(uint32_t));
+
+/**
+ * GIM Hyper-V VM instance data.
+ * Changes to this must checked against the padding of the gim union in VM!
+ */
+typedef struct GIMHV
+{
+ /** @name Primary MSRs.
+ * @{ */
+ /** Guest OS identity MSR. */
+ uint64_t u64GuestOsIdMsr;
+ /** Hypercall MSR. */
+ uint64_t u64HypercallMsr;
+ /** Reference TSC page MSR. */
+ uint64_t u64TscPageMsr;
+ /** @} */
+
+ /** @name CPUID features.
+ * @{ */
+ /** Basic features. */
+ uint32_t uBaseFeat;
+ /** Partition flags. */
+ uint32_t uPartFlags;
+ /** Power management. */
+ uint32_t uPowMgmtFeat;
+ /** Miscellaneous. */
+ uint32_t uMiscFeat;
+ /** Hypervisor hints to the guest. */
+ uint32_t uHyperHints;
+ /** Hypervisor capabilities. */
+ uint32_t uHyperCaps;
+ /** @} */
+
+ /** @name Guest Crash MSRs.
+ * @{
+ */
+ /** Guest crash control MSR. */
+ uint64_t uCrashCtlMsr;
+ /** Guest crash parameter 0 MSR. */
+ uint64_t uCrashP0Msr;
+ /** Guest crash parameter 1 MSR. */
+ uint64_t uCrashP1Msr;
+ /** Guest crash parameter 2 MSR. */
+ uint64_t uCrashP2Msr;
+ /** Guest crash parameter 3 MSR. */
+ uint64_t uCrashP3Msr;
+ /** Guest crash parameter 4 MSR. */
+ uint64_t uCrashP4Msr;
+ /** @} */
+
+ /** @name Time management.
+ * @{ */
+ /** Per-VM R0 Spinlock for protecting EMT writes to the TSC page. */
+ RTSPINLOCK hSpinlockR0;
+ /** The TSC frequency (in HZ) reported to the guest. */
+ uint64_t cTscTicksPerSecond;
+ /** @} */
+
+ /** @name Hypercalls.
+ * @{ */
+ /** Guest address of the hypercall input parameter page. */
+ RTGCPHYS GCPhysHypercallIn;
+ /** Guest address of the hypercall output parameter page. */
+ RTGCPHYS GCPhysHypercallOut;
+ /** Pointer to the hypercall input parameter page - R3. */
+ R3PTRTYPE(uint8_t *) pbHypercallIn;
+ /** Pointer to the hypercall output parameter page - R3. */
+ R3PTRTYPE(uint8_t *) pbHypercallOut;
+ /** @} */
+
+ /** @name Guest debugging.
+ * @{ */
+ /** Whether we're posing as the Microsoft vendor. */
+ bool fIsVendorMsHv;
+ /** Whether we're posing as the Microsoft virtualization service. */
+ bool fIsInterfaceVs;
+ /** Whether debugging support is enabled. */
+ bool fDbgEnabled;
+ /** Whether we should suggest a hypercall-based debug interface to the guest. */
+ bool fDbgHypercallInterface;
+ bool afAlignment0[4];
+ /** The action to take while sending replies. */
+ GIMHVDEBUGREPLY enmDbgReply;
+ /** The IP address chosen by/assigned to the guest. */
+ RTNETADDRIPV4 DbgGuestIp4Addr;
+ /** Transaction ID for the BOOTP+DHCP sequence. */
+ uint32_t uDbgBootpXId;
+ /** The source UDP port used by the guest while sending debug packets. */
+ uint16_t uUdpGuestSrcPort;
+ /** The destination UDP port used by the guest while sending debug packets. */
+ uint16_t uUdpGuestDstPort;
+ /** Debug send buffer MSR. */
+ uint64_t uDbgSendBufferMsr;
+ /** Debug receive buffer MSR. */
+ uint64_t uDbgRecvBufferMsr;
+ /** Debug pending buffer MSR. */
+ uint64_t uDbgPendingBufferMsr;
+ /** Debug status MSR. */
+ uint64_t uDbgStatusMsr;
+ /** Intermediate debug I/O buffer (GIM_HV_PAGE_SIZE). */
+ R3PTRTYPE(void *) pvDbgBuffer;
+ R3PTRTYPE(void *) pvAlignment0;
+ /** @} */
+
+ /** Array of MMIO2 regions. */
+ GIMMMIO2REGION aMmio2Regions[GIM_HV_REGION_IDX_MAX + 1];
+} GIMHV;
+/** Pointer to per-VM GIM Hyper-V instance data. */
+typedef GIMHV *PGIMHV;
+/** Pointer to const per-VM GIM Hyper-V instance data. */
+typedef GIMHV const *PCGIMHV;
+AssertCompileMemberAlignment(GIMHV, aMmio2Regions, 8);
+AssertCompileMemberAlignment(GIMHV, hSpinlockR0, sizeof(uintptr_t));
+
+/**
+ * Hyper-V per-VCPU synthetic timer.
+ */
+typedef struct GIMHVSTIMER
+{
+ /** Synthetic timer handle. */
+ TMTIMERHANDLE hTimer;
+ /** Virtual CPU ID this timer belongs to (for reverse mapping). */
+ VMCPUID idCpu;
+ /** The index of this timer in the auStimers array (for reverse mapping). */
+ uint32_t idxStimer;
+ /** Synthetic timer config MSR. */
+ uint64_t uStimerConfigMsr;
+ /** Synthetic timer count MSR. */
+ uint64_t uStimerCountMsr;
+} GIMHVSTIMER;
+/** Pointer to per-VCPU Hyper-V synthetic timer. */
+typedef GIMHVSTIMER *PGIMHVSTIMER;
+/** Pointer to a const per-VCPU Hyper-V synthetic timer. */
+typedef GIMHVSTIMER const *PCGIMHVSTIMER;
+AssertCompileSizeAlignment(GIMHVSTIMER, 8);
+
+/**
+ * Hyper-V VCPU instance data.
+ * Changes to this must be checked against the padding of the gim union in VMCPU!
+ */
+typedef struct GIMHVCPU
+{
+ /** @name Synthetic interrupt MSRs.
+ * @{ */
+ /** Synthetic interrupt message page MSR. */
+ uint64_t uSimpMsr;
+ /** Interrupt source MSRs. */
+ uint64_t auSintMsrs[GIM_HV_SINT_COUNT];
+ /** Synthetic interrupt event flags page MSR. */
+ uint64_t uSiefpMsr;
+ /** APIC-assist page MSR. */
+ uint64_t uApicAssistPageMsr;
+ /** Synthetic interrupt control MSR. */
+ uint64_t uSControlMsr;
+ /** Synthetic timers. */
+ GIMHVSTIMER aStimers[GIM_HV_STIMER_COUNT];
+ /** @} */
+
+ /** @name Statistics.
+ * @{ */
+ STAMCOUNTER aStatStimerFired[GIM_HV_STIMER_COUNT];
+ /** @} */
+} GIMHVCPU;
+/** Pointer to per-VCPU GIM Hyper-V instance data. */
+typedef GIMHVCPU *PGIMHVCPU;
+/** Pointer to const per-VCPU GIM Hyper-V instance data. */
+typedef GIMHVCPU const *PCGIMHVCPU;
+
+
+RT_C_DECLS_BEGIN
+
+#ifdef IN_RING0
+VMMR0_INT_DECL(int) gimR0HvInitVM(PVMCC pVM);
+VMMR0_INT_DECL(int) gimR0HvTermVM(PVMCC pVM);
+VMMR0_INT_DECL(int) gimR0HvUpdateParavirtTsc(PVMCC pVM, uint64_t u64Offset);
+#endif /* IN_RING0 */
+
+#ifdef IN_RING3
+VMMR3_INT_DECL(int) gimR3HvInit(PVM pVM, PCFGMNODE pGimCfg);
+VMMR3_INT_DECL(int) gimR3HvInitCompleted(PVM pVM);
+VMMR3_INT_DECL(int) gimR3HvTerm(PVM pVM);
+VMMR3_INT_DECL(void) gimR3HvRelocate(PVM pVM, RTGCINTPTR offDelta);
+VMMR3_INT_DECL(void) gimR3HvReset(PVM pVM);
+VMMR3_INT_DECL(int) gimR3HvSave(PVM pVM, PSSMHANDLE pSSM);
+VMMR3_INT_DECL(int) gimR3HvLoad(PVM pVM, PSSMHANDLE pSSM);
+VMMR3_INT_DECL(int) gimR3HvLoadDone(PVM pVM, PSSMHANDLE pSSM);
+VMMR3_INT_DECL(int) gimR3HvGetDebugSetup(PVM pVM, PGIMDEBUGSETUP pDbgSetup);
+
+VMMR3_INT_DECL(int) gimR3HvDisableSiefPage(PVMCPU pVCpu);
+VMMR3_INT_DECL(int) gimR3HvEnableSiefPage(PVMCPU pVCpu, RTGCPHYS GCPhysSiefPage);
+VMMR3_INT_DECL(int) gimR3HvEnableSimPage(PVMCPU pVCpu, RTGCPHYS GCPhysSimPage);
+VMMR3_INT_DECL(int) gimR3HvDisableSimPage(PVMCPU pVCpu);
+VMMR3_INT_DECL(int) gimR3HvDisableApicAssistPage(PVMCPU pVCpu);
+VMMR3_INT_DECL(int) gimR3HvEnableApicAssistPage(PVMCPU pVCpu, RTGCPHYS GCPhysTscPage);
+VMMR3_INT_DECL(int) gimR3HvDisableTscPage(PVM pVM);
+VMMR3_INT_DECL(int) gimR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage, bool fUseThisTscSeq, uint32_t uTscSeq);
+VMMR3_INT_DECL(int) gimR3HvDisableHypercallPage(PVM pVM);
+VMMR3_INT_DECL(int) gimR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage);
+
+VMMR3_INT_DECL(int) gimR3HvHypercallPostDebugData(PVM pVM, int *prcHv);
+VMMR3_INT_DECL(int) gimR3HvHypercallRetrieveDebugData(PVM pVM, int *prcHv);
+VMMR3_INT_DECL(int) gimR3HvDebugWrite(PVM pVM, void *pvData, uint32_t cbWrite, uint32_t *pcbWritten, bool fUdpPkt);
+VMMR3_INT_DECL(int) gimR3HvDebugRead(PVM pVM, void *pvBuf, uint32_t cbBuf, uint32_t cbRead, uint32_t *pcbRead,
+ uint32_t cMsTimeout, bool fUdpPkt);
+VMMR3_INT_DECL(int) gimR3HvHypercallExtQueryCap(PVM pVM, int *prcHv);
+VMMR3_INT_DECL(int) gimR3HvHypercallExtGetBootZeroedMem(PVM pVM, int *prcHv);
+
+#endif /* IN_RING3 */
+
+VMM_INT_DECL(PGIMMMIO2REGION) gimHvGetMmio2Regions(PVM pVM, uint32_t *pcRegions);
+VMM_INT_DECL(bool) gimHvIsParavirtTscEnabled(PVM pVM);
+VMM_INT_DECL(bool) gimHvAreHypercallsEnabled(PCVM pVM);
+VMM_INT_DECL(bool) gimHvShouldTrapXcptUD(PVMCPU pVCpu);
+VMM_INT_DECL(VBOXSTRICTRC) gimHvXcptUD(PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);
+VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
+VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC) gimHvReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
+VMM_INT_DECL(VBOXSTRICTRC) gimHvWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
+
+VMM_INT_DECL(void) gimHvStartStimer(PVMCPUCC pVCpu, PCGIMHVSTIMER pHvStimer);
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_GIMHvInternal_h */
+
diff --git a/src/VBox/VMM/include/GIMInternal.h b/src/VBox/VMM/include/GIMInternal.h
new file mode 100644
index 00000000..e8b59f21
--- /dev/null
+++ b/src/VBox/VMM/include/GIMInternal.h
@@ -0,0 +1,131 @@
+/* $Id: GIMInternal.h $ */
+/** @file
+ * GIM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_GIMInternal_h
+#define VMM_INCLUDED_SRC_include_GIMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/pgm.h>
+#include "GIMHvInternal.h"
+#include "GIMKvmInternal.h"
+#include "GIMMinimalInternal.h"
+
+RT_C_DECLS_BEGIN
+
+/** @defgroup grp_gim_int Internal
+ * @ingroup grp_gim
+ * @internal
+ * @{
+ */
+
+/** The saved state version. */
+#define GIM_SAVED_STATE_VERSION 1
+
+/**
+ * GIM VM Instance data.
+ */
+typedef struct GIM
+{
+ /** The provider that is active for this VM. */
+ GIMPROVIDERID enmProviderId;
+ /** The interface implementation version. */
+ uint32_t u32Version;
+
+ /** Physical access handler type for semi-read-only MMIO2 memory. Lazy creation. */
+ PGMPHYSHANDLERTYPE hSemiReadOnlyMmio2Handler;
+
+ /** Pointer to the GIM device - R3 ptr. */
+ R3PTRTYPE(PPDMDEVINS) pDevInsR3;
+ /** The debug struct - R3 ptr. */
+ R3PTRTYPE(PGIMDEBUG) pDbgR3;
+
+ /** The provider specific data. */
+ union
+ {
+ GIMHV Hv;
+ GIMKVM Kvm;
+ } u;
+
+ /** Number of hypercalls initiated. */
+ STAMCOUNTER StatHypercalls;
+ /** Debug packets sent. */
+ STAMCOUNTER StatDbgXmit;
+ /** Debug bytes sent. */
+ STAMCOUNTER StatDbgXmitBytes;
+ /** Debug packets received. */
+ STAMCOUNTER StatDbgRecv;
+ /** Debug bytes received. */
+ STAMCOUNTER StatDbgRecvBytes;
+} GIM;
+/** Pointer to GIM VM instance data. */
+typedef GIM *PGIM;
+
+/**
+ * GIM VMCPU Instance data.
+ */
+typedef struct GIMCPU
+{
+ union
+ {
+ GIMKVMCPU KvmCpu;
+ GIMHVCPU HvCpu;
+ } u;
+} GIMCPU;
+/** Pointer to GIM VMCPU instance data. */
+typedef GIMCPU *PGIMCPU;
+
+/**
+ * Callback when a debug buffer read has completed and before signalling the next
+ * read.
+ *
+ * @param pVM The cross context VM structure.
+ */
+typedef DECLCALLBACKTYPE(void, FNGIMDEBUGBUFREADCOMPLETED,(PVM pVM));
+/** Pointer to GIM debug buffer read completion callback. */
+typedef FNGIMDEBUGBUFREADCOMPLETED *PFNGIMDEBUGBUFREADCOMPLETED;
+
+#ifdef IN_RING3
+#if 0
+VMMR3_INT_DECL(int) gimR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion);
+VMMR3_INT_DECL(int) gimR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion);
+VMMR3_INT_DECL(int) gimR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion);
+VMMR3_INT_DECL(int) gimR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion);
+#endif
+
+VMMR3_INT_DECL(int) gimR3DebugRead(PVM pVM, void *pvRead, size_t *pcbRead, PFNGIMDEBUGBUFREADCOMPLETED pfnReadComplete);
+VMMR3_INT_DECL(int) gimR3DebugWrite(PVM pVM, void *pvWrite, size_t *pcbWrite);
+#endif /* IN_RING3 */
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_GIMInternal_h */
+
diff --git a/src/VBox/VMM/include/GIMKvmInternal.h b/src/VBox/VMM/include/GIMKvmInternal.h
new file mode 100644
index 00000000..2b17706d
--- /dev/null
+++ b/src/VBox/VMM/include/GIMKvmInternal.h
@@ -0,0 +1,282 @@
+/* $Id: GIMKvmInternal.h $ */
+/** @file
+ * GIM - KVM, Internal header file.
+ */
+
+/*
+ * Copyright (C) 2015-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_GIMKvmInternal_h
+#define VMM_INCLUDED_SRC_include_GIMKvmInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/cpum.h>
+
+
+/** @name KVM base features.
+ * @{
+ */
+/** Old, deprecated clock source available. */
+#define GIM_KVM_BASE_FEAT_CLOCK_OLD RT_BIT(0)
+/** No need for artificial delays on I/O operations. */
+#define GIM_KVM_BASE_FEAT_NOP_IO_DELAY RT_BIT(1)
+/** MMU op supported (deprecated, unused). */
+#define GIM_KVM_BASE_FEAT_MMU_OP RT_BIT(2)
+/** Clock source available. */
+#define GIM_KVM_BASE_FEAT_CLOCK RT_BIT(3)
+/** Asynchronous page faults supported. */
+#define GIM_KVM_BASE_FEAT_ASYNC_PF RT_BIT(4)
+/** Steal time (time during which the VCPU is not executing guest code, in ns) available. */
+#define GIM_KVM_BASE_FEAT_STEAL_TIME RT_BIT(5)
+/** Paravirtualized EOI (end-of-interrupt) supported. */
+#define GIM_KVM_BASE_FEAT_PV_EOI RT_BIT(6)
+/** Paravirtualized spinlock (unhalting VCPU) supported. */
+#define GIM_KVM_BASE_FEAT_PV_UNHALT RT_BIT(7)
+/** The TSC is stable (fixed rate, monotonic). */
+#define GIM_KVM_BASE_FEAT_TSC_STABLE RT_BIT(24)
+/** @} */
+
+
+/** @name KVM MSRs.
+ * @{
+ */
+/** Start of range 0. */
+#define MSR_GIM_KVM_RANGE0_FIRST UINT32_C(0x11)
+/** Old, deprecated wall clock. */
+#define MSR_GIM_KVM_WALL_CLOCK_OLD UINT32_C(0x11)
+/** Old, deprecated System time. */
+#define MSR_GIM_KVM_SYSTEM_TIME_OLD UINT32_C(0x12)
+/** End of range 0. */
+#define MSR_GIM_KVM_RANGE0_LAST MSR_GIM_KVM_SYSTEM_TIME_OLD
+
+/** Start of range 1. */
+#define MSR_GIM_KVM_RANGE1_FIRST UINT32_C(0x4b564d00)
+/** Wall clock. */
+#define MSR_GIM_KVM_WALL_CLOCK UINT32_C(0x4b564d00)
+/** System time. */
+#define MSR_GIM_KVM_SYSTEM_TIME UINT32_C(0x4b564d01)
+/** Asynchronous page fault. */
+#define MSR_GIM_KVM_ASYNC_PF UINT32_C(0x4b564d02)
+/** Steal time. */
+#define MSR_GIM_KVM_STEAL_TIME UINT32_C(0x4b564d03)
+/** Paravirtualized EOI (end-of-interrupt). */
+#define MSR_GIM_KVM_EOI UINT32_C(0x4b564d04)
+/** End of range 1. */
+#define MSR_GIM_KVM_RANGE1_LAST MSR_GIM_KVM_EOI
+
+AssertCompile(MSR_GIM_KVM_RANGE0_FIRST <= MSR_GIM_KVM_RANGE0_LAST);
+AssertCompile(MSR_GIM_KVM_RANGE1_FIRST <= MSR_GIM_KVM_RANGE1_LAST);
+/** @} */
+
+/** KVM page size. */
+#define GIM_KVM_PAGE_SIZE 0x1000
+
+/**
+ * MMIO2 region indices.
+ */
+/** The system time page(s) region. */
+#define GIM_KVM_SYSTEM_TIME_PAGE_REGION_IDX UINT8_C(0)
+/** The steal time page(s) region. */
+#define GIM_KVM_STEAL_TIME_PAGE_REGION_IDX UINT8_C(1)
+/** The maximum region index (must be <= UINT8_MAX). */
+#define GIM_KVM_REGION_IDX_MAX GIM_KVM_STEAL_TIME_PAGE_REGION_IDX
+
+/**
+ * KVM system-time structure flags (GIM_KVM_SYSTEM_TIME_FLAGS_XXX).
+ * See "Documentation/virtual/kvm/api.txt".
+ */
+/** The TSC is stable (monotonic). */
+#define GIM_KVM_SYSTEM_TIME_FLAGS_TSC_STABLE RT_BIT(0)
+/** The guest VCPU has been paused by the hypervisor. */
+#define GIM_KVM_SYSTEM_TIME_FLAGS_GUEST_PAUSED RT_BIT(1)
+
+/** @name KVM MSR - System time (MSR_GIM_KVM_SYSTEM_TIME and
+ * MSR_GIM_KVM_SYSTEM_TIME_OLD).
+ * @{
+ */
+/** The system-time enable bit. */
+#define MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT RT_BIT_64(0)
+/** Whether the system-time struct. is enabled or not. */
+#define MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(a) RT_BOOL((a) & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT)
+/** Guest-physical address of the system-time struct. */
+#define MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(a) ((a) & ~MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT)
+/** @} */
+
+/** @name KVM MSR - Wall clock (MSR_GIM_KVM_WALL_CLOCK and
+ * MSR_GIM_KVM_WALL_CLOCK_OLD).
+ * @{
+ */
+/** Guest-physical address of the wall-clock struct. */
+#define MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(a) (a)
+/** @} */
+
+
+/** @name KVM Hypercall operations.
+ * @{ */
+#define KVM_HYPERCALL_OP_VAPIC_POLL_IRQ 1
+#define KVM_HYPERCALL_OP_MMU 2
+#define KVM_HYPERCALL_OP_FEATURES 3
+#define KVM_HYPERCALL_OP_KICK_CPU 5
+/** @} */
+
+/** @name KVM Hypercall return values.
+ * @{ */
+/* Return values for hypercalls */
+#define KVM_HYPERCALL_RET_SUCCESS 0
+#define KVM_HYPERCALL_RET_ENOSYS (uint64_t)(-1000)
+#define KVM_HYPERCALL_RET_EFAULT (uint64_t)(-14)
+#define KVM_HYPERCALL_RET_E2BIG (uint64_t)(-7)
+#define KVM_HYPERCALL_RET_EPERM (uint64_t)(-1)
+/** @} */
+
+/**
+ * KVM per-VCPU system-time structure.
+ */
+typedef struct GIMKVMSYSTEMTIME
+{
+ /** Version (sequence number). */
+ uint32_t u32Version;
+ /** Alignment padding. */
+ uint32_t u32Padding0;
+ /** TSC time stamp. */
+ uint64_t u64Tsc;
+ /** System time in nanoseconds. */
+ uint64_t u64NanoTS;
+ /** TSC to system time scale factor. */
+ uint32_t u32TscScale;
+ /** TSC frequency shift. */
+ int8_t i8TscShift;
+ /** Clock source (GIM_KVM_SYSTEM_TIME_FLAGS_XXX) flags. */
+ uint8_t fFlags;
+ /** Alignment padding. */
+ uint8_t abPadding0[2];
+} GIMKVMSYSTEMTIME;
+/** Pointer to KVM system-time struct. */
+typedef GIMKVMSYSTEMTIME *PGIMKVMSYSTEMTIME;
+/** Pointer to a const KVM system-time struct. */
+typedef GIMKVMSYSTEMTIME const *PCGIMKVMSYSTEMTIME;
+AssertCompileSize(GIMKVMSYSTEMTIME, 32);
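+
+/*
+ * Sketch (hypothetical, under '#if 0') of the kvmclock math a guest applies to
+ * this structure: scale the TSC delta by i8TscShift, multiply by the 32.32
+ * fixed-point u32TscScale, and add the base u64NanoTS.  A real reader also
+ * re-checks u32Version (an odd value means an update is in progress).  The
+ * GCC/Clang __int128 extension keeps the example short.
+ */
+#if 0
+static uint64_t exampleKvmGuestNanoTS(PCGIMKVMSYSTEMTIME pSysTime, uint64_t uTscNow)
+{
+    uint64_t cTicks = uTscNow - pSysTime->u64Tsc;
+    if (pSysTime->i8TscShift >= 0)
+        cTicks <<= pSysTime->i8TscShift;
+    else
+        cTicks >>= -pSysTime->i8TscShift;
+    uint64_t const cNanoSecs = (uint64_t)(((unsigned __int128)cTicks * pSysTime->u32TscScale) >> 32);
+    return pSysTime->u64NanoTS + cNanoSecs;
+}
+#endif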
+
+
+/**
+ * KVM per-VM wall-clock structure.
+ */
+typedef struct GIMKVMWALLCLOCK
+{
+ /** Version (sequence number). */
+ uint32_t u32Version;
+ /** Number of seconds since boot. */
+ uint32_t u32Sec;
+ /** Number of nanoseconds since boot. */
+ uint32_t u32Nano;
+} GIMKVMWALLCLOCK;
+/** Pointer to KVM wall-clock struct. */
+typedef GIMKVMWALLCLOCK *PGIMKVMWALLCLOCK;
+/** Pointer to a const KVM wall-clock struct. */
+typedef GIMKVMWALLCLOCK const *PCGIMKVMWALLCLOCK;
+AssertCompileSize(GIMKVMWALLCLOCK, 12);
+
+
+/**
+ * GIM KVM VM instance data.
+ * Changes to this must be checked against the padding of the gim union in VM!
+ */
+typedef struct GIMKVM
+{
+ /** Wall-clock MSR. */
+ uint64_t u64WallClockMsr;
+ /** CPUID features: Basic. */
+ uint32_t uBaseFeat;
+ /** Whether GIM needs to trap \#UD exceptions. */
+ bool fTrapXcptUD;
+ /** Disassembler opcode of hypercall instruction native for this host CPU. */
+ uint16_t uOpcodeNative;
+ /** Native hypercall opcode bytes. Used for replacing. */
+ uint8_t abOpcodeNative[3];
+ /** Alignment padding. */
+ uint8_t abPadding[5];
+ /** The TSC frequency (in HZ) reported to the guest. */
+ uint64_t cTscTicksPerSecond;
+} GIMKVM;
+/** Pointer to per-VM GIM KVM instance data. */
+typedef GIMKVM *PGIMKVM;
+/** Pointer to const per-VM GIM KVM instance data. */
+typedef GIMKVM const *PCGIMKVM;
+
+/**
+ * GIM KVM VCPU instance data.
+ * Changes to this must be checked against the padding of the gim union in VMCPU!
+ */
+typedef struct GIMKVMCPU
+{
+ /** System-time MSR. */
+ uint64_t u64SystemTimeMsr;
+ /** The guest-physical address of the system-time struct. */
+ RTGCPHYS GCPhysSystemTime;
+ /** The version (sequence number) of the system-time struct. */
+ uint32_t u32SystemTimeVersion;
+ /** The guest TSC value while enabling the system-time MSR. */
+ uint64_t uTsc;
+ /** The guest virtual time while enabling the system-time MSR. */
+ uint64_t uVirtNanoTS;
+ /** The flags of the system-time struct. */
+ uint8_t fSystemTimeFlags;
+} GIMKVMCPU;
+/** Pointer to per-VCPU GIM KVM instance data. */
+typedef GIMKVMCPU *PGIMKVMCPU;
+/** Pointer to const per-VCPU GIM KVM instance data. */
+typedef GIMKVMCPU const *PCGIMKVMCPU;
+
+
+RT_C_DECLS_BEGIN
+
+#ifdef IN_RING3
+VMMR3_INT_DECL(int) gimR3KvmInit(PVM pVM);
+VMMR3_INT_DECL(int) gimR3KvmInitCompleted(PVM pVM);
+VMMR3_INT_DECL(int) gimR3KvmTerm(PVM pVM);
+VMMR3_INT_DECL(void) gimR3KvmRelocate(PVM pVM, RTGCINTPTR offDelta);
+VMMR3_INT_DECL(void) gimR3KvmReset(PVM pVM);
+VMMR3_INT_DECL(int) gimR3KvmSave(PVM pVM, PSSMHANDLE pSSM);
+VMMR3_INT_DECL(int) gimR3KvmLoad(PVM pVM, PSSMHANDLE pSSM);
+
+VMMR3_INT_DECL(int) gimR3KvmDisableSystemTime(PVM pVM);
+VMMR3_INT_DECL(int) gimR3KvmEnableSystemTime(PVM pVM, PVMCPU pVCpu, uint64_t uMsrSystemTime);
+VMMR3_INT_DECL(int) gimR3KvmEnableWallClock(PVM pVM, RTGCPHYS GCPhysSysTime);
+#endif /* IN_RING3 */
+
+VMM_INT_DECL(bool) gimKvmIsParavirtTscEnabled(PVMCC pVM);
+VMM_INT_DECL(bool) gimKvmAreHypercallsEnabled(PVMCPU pVCpu);
+VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
+VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
+VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
+VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM);
+VMM_INT_DECL(VBOXSTRICTRC) gimKvmXcptUD(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);
+VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr);
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_GIMKvmInternal_h */
+
diff --git a/src/VBox/VMM/include/GIMMinimalInternal.h b/src/VBox/VMM/include/GIMMinimalInternal.h
new file mode 100644
index 00000000..71e4eef5
--- /dev/null
+++ b/src/VBox/VMM/include/GIMMinimalInternal.h
@@ -0,0 +1,48 @@
+/* $Id: GIMMinimalInternal.h $ */
+/** @file
+ * GIM - Minimal, Internal header file.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_GIMMinimalInternal_h
+#define VMM_INCLUDED_SRC_include_GIMMinimalInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/cdefs.h>
+#include <VBox/types.h>
+
+RT_C_DECLS_BEGIN
+
+#ifdef IN_RING3
+VMMR3_INT_DECL(int) gimR3MinimalInit(PVM pVM);
+VMMR3_INT_DECL(int) gimR3MinimalInitCompleted(PVM pVM);
+VMMR3_INT_DECL(void) gimR3MinimalRelocate(PVM pVM, RTGCINTPTR offDelta);
+#endif /* IN_RING3 */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_GIMMinimalInternal_h */
+
diff --git a/src/VBox/VMM/include/HMInternal.h b/src/VBox/VMM/include/HMInternal.h
new file mode 100644
index 00000000..67d1af99
--- /dev/null
+++ b/src/VBox/VMM/include/HMInternal.h
@@ -0,0 +1,1322 @@
+/* $Id: HMInternal.h $ */
+/** @file
+ * HM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_HMInternal_h
+#define VMM_INCLUDED_SRC_include_HMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/dis.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/hm_svm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/trpm.h>
+#include <iprt/memobj.h>
+#include <iprt/cpuset.h>
+#include <iprt/mp.h>
+#include <iprt/avl.h>
+#include <iprt/string.h>
+
+#include "VMXInternal.h"
+#include "SVMInternal.h"
+
+#if HC_ARCH_BITS == 32
+# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
+#endif
+
+/** @def HM_PROFILE_EXIT_DISPATCH
+ * Enables profiling of the VM exit handler dispatching. */
+#if 0 || defined(DOXYGEN_RUNNING)
+# define HM_PROFILE_EXIT_DISPATCH
+#endif
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_hm_int Internal
+ * @ingroup grp_hm
+ * @internal
+ * @{
+ */
+
+/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
+#define HM_EPT_IDENTITY_PG_TABLE_SIZE HOST_PAGE_SIZE
+/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
+#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * X86_PAGE_SIZE + 1)
+/** Total guest mapped memory needed. */
+#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
+
+
+/** @name Macros for enabling and disabling preemption.
+ * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
+ * preemption has already been disabled when there is no context hook.
+ * @{ */
+#ifdef VBOX_STRICT
+# define HM_DISABLE_PREEMPT(a_pVCpu) \
+ RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled((a_pVCpu))); \
+ RTThreadPreemptDisable(&PreemptStateInternal)
+#else
+# define HM_DISABLE_PREEMPT(a_pVCpu) \
+ RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
+ RTThreadPreemptDisable(&PreemptStateInternal)
+#endif /* VBOX_STRICT */
+#define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)
+/** @} */
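+
+/*
+ * Usage sketch (hypothetical, under '#if 0'): the macros bracket short
+ * sections of code; HM_DISABLE_PREEMPT declares the preemption state
+ * variable, so it can appear only once per scope.
+ */
+#if 0
+static void exampleWithoutPreemption(PVMCPUCC pVCpu)
+{
+    HM_DISABLE_PREEMPT(pVCpu);
+    /* ... work that must stay on the current host CPU ... */
+    HM_RESTORE_PREEMPT();
+}
+#endif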
+
+
+/** @name HM saved state versions.
+ * @{
+ */
+#define HM_SAVED_STATE_VERSION HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
+#define HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT 6
+#define HM_SAVED_STATE_VERSION_TPR_PATCHING 5
+#define HM_SAVED_STATE_VERSION_NO_TPR_PATCHING 4
+#define HM_SAVED_STATE_VERSION_2_0_X 3
+/** @} */
+
+
+/**
+ * HM physical (host) CPU information.
+ */
+typedef struct HMPHYSCPU
+{
+ /** The CPU ID. */
+ RTCPUID idCpu;
+ /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
+ RTR0MEMOBJ hMemObj;
+ /** The physical address of the first page in hMemObj (it's a
+ * physically contiguous allocation if it spans multiple pages). */
+ RTHCPHYS HCPhysMemObj;
+ /** The address of the memory (for pfnEnable). */
+ void *pvMemObj;
+ /** Current ASID (AMD-V) / VPID (Intel). */
+ uint32_t uCurrentAsid;
+ /** TLB flush count. */
+ uint32_t cTlbFlushes;
+ /** Whether to flush each new ASID/VPID before use. */
+ bool fFlushAsidBeforeUse;
+ /** Configured for VT-x or AMD-V. */
+ bool fConfigured;
+ /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
+ bool fIgnoreAMDVInUseError;
+ /** Whether CR4.VMXE was already enabled prior to us enabling it. */
+ bool fVmxeAlreadyEnabled;
+ /** In use by our code. (for power suspend) */
+ bool volatile fInUse;
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ /** Nested-guest union (put data common to SVM/VMX outside the union). */
+ union
+ {
+ /** Nested-guest SVM data. */
+ struct
+ {
+ /** The active nested-guest MSR permission bitmap memory backing. */
+ RTR0MEMOBJ hNstGstMsrpm;
+ /** The physical address of the first page in hNstGstMsrpm (physically
+ * contiguous allocation). */
+ RTHCPHYS HCPhysNstGstMsrpm;
+ /** The address of the active nested-guest MSRPM. */
+ void *pvNstGstMsrpm;
+ } svm;
+ /** @todo Nested-VMX. */
+ } n;
+#endif
+} HMPHYSCPU;
+/** Pointer to HMPHYSCPU struct. */
+typedef HMPHYSCPU *PHMPHYSCPU;
+/** Pointer to a const HMPHYSCPU struct. */
+typedef const HMPHYSCPU *PCHMPHYSCPU;
+
+/**
+ * TPR-instruction type.
+ */
+typedef enum
+{
+ HMTPRINSTR_INVALID,
+ HMTPRINSTR_READ,
+ HMTPRINSTR_READ_SHR4,
+ HMTPRINSTR_WRITE_REG,
+ HMTPRINSTR_WRITE_IMM,
+ HMTPRINSTR_JUMP_REPLACEMENT,
+ /** The usual 32-bit paranoia. */
+ HMTPRINSTR_32BIT_HACK = 0x7fffffff
+} HMTPRINSTR;
+
+/**
+ * TPR patch information.
+ */
+typedef struct
+{
+ /** The key is the address of the patched instruction (32-bit GC pointer). */
+ AVLOU32NODECORE Core;
+ /** Original opcode. */
+ uint8_t aOpcode[16];
+ /** Instruction size. */
+ uint32_t cbOp;
+ /** Replacement opcode. */
+ uint8_t aNewOpcode[16];
+ /** Replacement instruction size. */
+ uint32_t cbNewOp;
+ /** Instruction type. */
+ HMTPRINSTR enmType;
+ /** Source operand. */
+ uint32_t uSrcOperand;
+ /** Destination operand. */
+ uint32_t uDstOperand;
+ /** Number of times the instruction caused a fault. */
+ uint32_t cFaults;
+ /** Patch address of the jump replacement. */
+ RTGCPTR32 pJumpTarget;
+} HMTPRPATCH;
+/** Pointer to HMTPRPATCH. */
+typedef HMTPRPATCH *PHMTPRPATCH;
+/** Pointer to a const HMTPRPATCH. */
+typedef const HMTPRPATCH *PCHMTPRPATCH;
+
+
+/**
+ * Makes a HMEXITSTAT::uKey value from a program counter and an exit code.
+ *
+ * @returns 64-bit key
+ * @param a_uPC The RIP + CS.BASE value of the exit.
+ * @param a_uExit The exit code.
+ * @todo Add CPL?
+ */
+#define HMEXITSTAT_MAKE_KEY(a_uPC, a_uExit) (((a_uPC) & UINT64_C(0x0000ffffffffffff)) | (uint64_t)(a_uExit) << 48)
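+/* Illustrative only (the local names are hypothetical): a caller would combine the
+ * linear PC and the raw exit code as follows.
+ * @code
+ * uint64_t const uKey = HMEXITSTAT_MAKE_KEY(pCtx->rip + pCtx->cs.u64Base, uExitReason);
+ * @endcode */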
+
+typedef struct HMEXITINFO
+{
+ /** See HMEXITSTAT_MAKE_KEY(). */
+ uint64_t uKey;
+ /** Number of recent hits (decays over time). */
+ uint32_t volatile cHits;
+ /** The age + lock. */
+ uint16_t volatile uAge;
+ /** Action or action table index. */
+ uint16_t iAction;
+} HMEXITINFO;
+AssertCompileSize(HMEXITINFO, 16); /* Lots of these guys, so don't add any unnecessary stuff! */
+
+typedef struct HMEXITHISTORY
+{
+ /** The exit timestamp. */
+ uint64_t uTscExit;
+ /** The index of the corresponding HMEXITINFO entry.
+ * UINT32_MAX if none (too many collisions, race, whatever). */
+ uint32_t iExitInfo;
+ /** Figure out later, needed for padding now. */
+ uint32_t uSomeClueOrSomething;
+} HMEXITHISTORY;
+
+/**
+ * Switcher function, HC to the special 64-bit RC.
+ *
+ * @param pVM The cross context VM structure.
+ * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
+ * @returns Return code indicating the action to take.
+ */
+typedef DECLCALLBACKTYPE(int, FNHMSWITCHERHC,(PVM pVM, uint32_t offCpumVCpu));
+/** Pointer to switcher function. */
+typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
+
+
+/**
+ * HM VM Instance data.
+ * Changes to this must be checked against the padding of the hm union in VM!
+ */
+typedef struct HM
+{
+ /** Set when the debug facility has breakpoints/events enabled that require
+ * us to use the debug execution loop in ring-0. */
+ bool fUseDebugLoop;
+ /** Set when TPR patching is allowed. */
+ bool fTprPatchingAllowed;
+ /** Set when TPR patching is active. */
+ bool fTprPatchingActive;
+ /** Alignment padding. */
+ bool afAlignment1[5];
+
+ struct
+ {
+ /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */
+ bool fSupported;
+ /** Set when we've enabled VMX. */
+ bool fEnabled;
+ /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
+ uint8_t cPreemptTimerShift;
+
+ /** @name Configuration (gets copied if problematic)
+ * @{ */
+ /** Set if Last Branch Record (LBR) is enabled. */
+ bool fLbrCfg;
+ /** Set if VT-x VPID is allowed. */
+ bool fAllowVpid;
+ /** Set if unrestricted guest execution is in use (real and protected mode
+ * without paging). */
+ bool fUnrestrictedGuestCfg;
+ /** Set if the preemption timer should be used if available. Ring-0
+ * quietly clears this if the hardware doesn't support the preemption timer. */
+ bool fUsePreemptTimerCfg;
+ /** Whether to always intercept MOV DRx: 1 (always), 0 (default), -1 (lazy).
+ * In the default case it is only always intercepted when setting DR6 to 0 on
+ * the host results in a value different from X86_DR6_RA1_MASK. */
+ int8_t fAlwaysInterceptMovDRxCfg;
+ /** @} */
+
+ /** Pause-loop exiting (PLE) gap in ticks. */
+ uint32_t cPleGapTicks;
+ /** Pause-loop exiting (PLE) window in ticks. */
+ uint32_t cPleWindowTicks;
+
+ /** Virtual address of the TSS page used for real mode emulation. */
+ R3PTRTYPE(PVBOXTSS) pRealModeTSS;
+ /** Virtual address of the identity page table used for real mode and protected
+ * mode without paging emulation in EPT mode. */
+ R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
+ } vmx;
+
+ struct
+ {
+ /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */
+ bool fSupported;
+ /** Set when we've enabled SVM. */
+ bool fEnabled;
+ /** Set when the hack to ignore VERR_SVM_IN_USE is active.
+ * @todo Safe? */
+ bool fIgnoreInUseError;
+ /** Whether to use virtualized VMSAVE/VMLOAD feature. */
+ bool fVirtVmsaveVmload;
+ /** Whether to use virtual GIF feature. */
+ bool fVGif;
+ /** Whether to use LBR virtualization feature. */
+ bool fLbrVirt;
+ bool afAlignment1[2];
+
+ /** Pause filter counter. */
+ uint16_t cPauseFilter;
+ /** Pause filter threshold in ticks. */
+ uint16_t cPauseFilterThresholdTicks;
+ uint32_t u32Alignment2;
+ } svm;
+
+ /** AVL tree with all patches (active or disabled) sorted by guest instruction address.
+ * @todo For @bugref{9217} this AVL tree must be eliminated and instead
+ * sort aPatches by address and do a safe binary search on it. */
+ AVLOU32TREE PatchTree;
+ uint32_t cPatches;
+ HMTPRPATCH aPatches[64];
+
+ /** Guest allocated memory for patching purposes. */
+ RTGCPTR pGuestPatchMem;
+ /** Current free pointer inside the patch block. */
+ RTGCPTR pFreeGuestPatchMem;
+ /** Size of the guest patch memory block. */
+ uint32_t cbGuestPatchMem;
+ uint32_t u32Alignment2;
+
+ /** For ring-3 use only. */
+ struct
+ {
+ /** Last recorded error code during HM ring-0 init. */
+ int32_t rcInit;
+ uint32_t u32Alignment3;
+
+ /** Maximum ASID allowed.
+ * This is mainly for the release log. */
+ uint32_t uMaxAsid;
+ /** World switcher flags (HM_WSF_XXX) for the release log. */
+ uint32_t fWorldSwitcher;
+
+ struct
+ {
+ /** Set if VPID is supported (ring-3 copy). */
+ bool fVpid;
+ /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
+ * init, for logging). */
+ bool fSupportsVmcsEfer;
+ /** Whether to use VMCS shadowing. */
+ bool fUseVmcsShadowing;
+ /** Whether MOV DRx is always intercepted or not (set by ring-0 VMX init, for
+ * logging). */
+ bool fAlwaysInterceptMovDRx;
+
+ /** Host CR4 value (set by ring-0 VMX init, for logging). */
+ uint64_t u64HostCr4;
+ /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
+ uint64_t u64HostSmmMonitorCtl;
+ /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
+ uint64_t u64HostMsrEfer;
+ /** Host IA32_FEATURE_CONTROL MSR (set by ring-0 VMX init, for logging). */
+ uint64_t u64HostFeatCtrl;
+ /** Host zero'ed DR6 value (set by ring-0 VMX init, for logging). */
+ uint64_t u64HostDr6Zeroed;
+
+ /** The first valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrFirst;
+ /** The last valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrLast;
+
+ /** The first valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrFirst;
+ /** The last valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrLast;
+
+ /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
+ RTHCPHYS HCPhysVmxEnableError;
+ /** VMX MSR values (only for ring-3 consumption). */
+ VMXMSRS Msrs;
+
+ /** Tagged-TLB flush type (only for ring-3 consumption). */
+ VMXTLBFLUSHTYPE enmTlbFlushType;
+ /** Flush type to use for INVEPT (only for ring-3 consumption). */
+ VMXTLBFLUSHEPT enmTlbFlushEpt;
+ /** Flush type to use for INVVPID (only for ring-3 consumption). */
+ VMXTLBFLUSHVPID enmTlbFlushVpid;
+ } vmx;
+
+ struct
+ {
+ /** SVM revision. */
+ uint32_t u32Rev;
+ /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
+ uint32_t fFeatures;
+ /** HWCR MSR (for diagnostics). */
+ uint64_t u64MsrHwcr;
+ } svm;
+ } ForR3;
+
+ /** @name Configuration not used (much) after VM setup
+ * @{ */
+ /** The maximum number of resume loops allowed in ring-0 (safety precaution).
+ * This number is set much higher when RTThreadPreemptIsPending is reliable. */
+ uint32_t cMaxResumeLoopsCfg;
+ /** Set if nested paging is enabled.
+ * Config value that is copied to HMR0PERVM::fNestedPaging on setup. */
+ bool fNestedPagingCfg;
+ /** Set if large pages are enabled (requires nested paging).
+ * Config only, passed on to PGM where it really belongs.
+ * @todo move to PGM */
+ bool fLargePages;
+ /** Set if we can support 64-bit guests or not.
+ * Config value that is copied to HMR0PERVM::fAllow64BitGuests on setup. */
+ bool fAllow64BitGuestsCfg;
+ /** Set when we initialize VT-x or AMD-V once for all CPUs. */
+ bool fGlobalInit;
+ /** Set if hardware APIC virtualization is enabled.
+ * @todo Not really used by HM, move to APIC where it's actually used. */
+ bool fVirtApicRegs;
+ /** Set if posted interrupt processing is enabled.
+ * @todo Not really used by HM, move to APIC where it's actually used. */
+ bool fPostedIntrs;
+ /** VM needs workaround for missing TLB flush in OS/2, see ticketref:20625.
+ * @note Currently only heeded by AMD-V. */
+ bool fMissingOS2TlbFlushWorkaround;
+ /** @} */
+
+ /** @name Processed into HMR0PERVCPU::fWorldSwitcher by ring-0 on VM init.
+ * @{ */
+ /** Set if indirect branch prediction barrier on VM exit. */
+ bool fIbpbOnVmExit;
+ /** Set if indirect branch prediction barrier on VM entry. */
+ bool fIbpbOnVmEntry;
+ /** Set if level 1 data cache should be flushed on VM entry. */
+ bool fL1dFlushOnVmEntry;
+ /** Set if level 1 data cache should be flushed on EMT scheduling. */
+ bool fL1dFlushOnSched;
+ /** Set if MDS related buffers should be cleared on VM entry. */
+ bool fMdsClearOnVmEntry;
+ /** Set if MDS related buffers should be cleared on EMT scheduling. */
+ bool fMdsClearOnSched;
+ /** Set if host manages speculation control settings.
+ * @todo doesn't do anything ... */
+ bool fSpecCtrlByHost;
+ /** @} */
+
+ /** Set when we've finalized the VMX / SVM initialization in ring-3
+ * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */
+ bool fInitialized;
+
+ bool afAlignment2[5];
+
+ STAMCOUNTER StatTprPatchSuccess;
+ STAMCOUNTER StatTprPatchFailure;
+ STAMCOUNTER StatTprReplaceSuccessCr8;
+ STAMCOUNTER StatTprReplaceSuccessVmc;
+ STAMCOUNTER StatTprReplaceFailure;
+} HM;
+/** Pointer to HM VM instance data. */
+typedef HM *PHM;
+AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
+AssertCompileMemberAlignment(HM, vmx, 8);
+AssertCompileMemberAlignment(HM, svm, 8);
+AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
+AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
+
+
+/**
+ * Per-VM ring-0 instance data for HM.
+ */
+typedef struct HMR0PERVM
+{
+ /** The maximum number of resume loops allowed in ring-0 (safety precaution).
+ * This number is set much higher when RTThreadPreemptIsPending is reliable. */
+ uint32_t cMaxResumeLoops;
+
+ /** Set if nested paging is enabled. */
+ bool fNestedPaging;
+ /** Set if we can support 64-bit guests or not. */
+ bool fAllow64BitGuests;
+ bool afAlignment1[1];
+
+ /** AMD-V specific data. */
+ struct HMR0SVMVM
+ {
+ /** Set if erratum 170 affects the AMD cpu. */
+ bool fAlwaysFlushTLB;
+ } svm;
+
+ /** VT-x specific data. */
+ struct HMR0VMXVM
+ {
+ /** Set if unrestricted guest execution is in use (real and protected mode
+ * without paging). */
+ bool fUnrestrictedGuest;
+ /** Set if the preemption timer is in use. */
+ bool fUsePreemptTimer;
+ /** Whether to use VMCS shadowing. */
+ bool fUseVmcsShadowing;
+ /** Set if Last Branch Record (LBR) is enabled. */
+ bool fLbr;
+ /** Set if MOV DRx accesses are always intercepted. */
+ bool fAlwaysInterceptMovDRx;
+ bool afAlignment2[2];
+
+ /** Set if VPID is supported (ring-3 copy in HM::ForR3::vmx::fVpid). */
+ bool fVpid;
+ /** Tagged-TLB flush type. */
+ VMXTLBFLUSHTYPE enmTlbFlushType;
+ /** Flush type to use for INVEPT. */
+ VMXTLBFLUSHEPT enmTlbFlushEpt;
+ /** Flush type to use for INVVPID. */
+ VMXTLBFLUSHVPID enmTlbFlushVpid;
+
+ /** The host LBR TOS (top-of-stack) MSR id. */
+ uint32_t idLbrTosMsr;
+
+ /** The first valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrFirst;
+ /** The last valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrLast;
+
+ /** The first valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrFirst;
+ /** The last valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrLast;
+
+ /** Pointer to the VMREAD bitmap. */
+ R0PTRTYPE(void *) pvVmreadBitmap;
+ /** Pointer to the VMWRITE bitmap. */
+ R0PTRTYPE(void *) pvVmwriteBitmap;
+
+ /** Pointer to the shadow VMCS read-only fields array. */
+ R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
+ /** Pointer to the shadow VMCS read/write fields array. */
+ R0PTRTYPE(uint32_t *) paShadowVmcsFields;
+ /** Number of elements in the shadow VMCS read-only fields array. */
+ uint32_t cShadowVmcsRoFields;
+ /** Number of elements in the shadow VMCS read-write fields array. */
+ uint32_t cShadowVmcsFields;
+
+ /** Host-physical address of the APIC-access page. */
+ RTHCPHYS HCPhysApicAccess;
+ /** Host-physical address of the VMREAD bitmap. */
+ RTHCPHYS HCPhysVmreadBitmap;
+ /** Host-physical address of the VMWRITE bitmap. */
+ RTHCPHYS HCPhysVmwriteBitmap;
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ /** Host-physical address of the crash-dump scratch area. */
+ RTHCPHYS HCPhysScratch;
+ /** Pointer to the crash-dump scratch bitmap. */
+ R0PTRTYPE(uint8_t *) pbScratch;
+#endif
+
+ /** Ring-0 memory object for per-VM VMX structures. */
+ RTR0MEMOBJ hMemObj;
+ /** Virtual address of the APIC-access page (not used). */
+ R0PTRTYPE(uint8_t *) pbApicAccess;
+ } vmx;
+} HMR0PERVM;
+/** Pointer to HM's per-VM ring-0 instance data. */
+typedef HMR0PERVM *PHMR0PERVM;
+
+
+/** @addtogroup grp_hm_int_svm SVM Internal
+ * @{ */
+/** SVM VMRun function, see SVMR0VMRun(). */
+typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
+/** Pointer to a SVM VMRun function. */
+typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
+
+/**
+ * SVM nested-guest VMCB cache.
+ *
+ * Contains VMCB fields from the nested-guest VMCB before they're modified by
+ * SVM R0 code for hardware-assisted SVM execution of a nested-guest.
+ *
+ * A VMCB field needs to be cached when it needs to be modified for execution using
+ * hardware-assisted SVM and any of the following are true:
+ * - If the original field needs to be inspected during execution of the
+ * nested-guest or \#VMEXIT processing.
+ * - If the field is written back to memory on \#VMEXIT by the physical CPU.
+ *
+ * A VMCB field needs to be restored only when the field is written back to
+ * memory on \#VMEXIT by the physical CPU and thus would be visible to the
+ * guest.
+ *
+ * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
+ * this structure.
+ */
+typedef struct SVMNESTEDVMCBCACHE
+{
+ /** Cache of CRX read intercepts. */
+ uint16_t u16InterceptRdCRx;
+ /** Cache of CRX write intercepts. */
+ uint16_t u16InterceptWrCRx;
+ /** Cache of DRX read intercepts. */
+ uint16_t u16InterceptRdDRx;
+ /** Cache of DRX write intercepts. */
+ uint16_t u16InterceptWrDRx;
+
+ /** Cache of the pause-filter threshold. */
+ uint16_t u16PauseFilterThreshold;
+ /** Cache of the pause-filter count. */
+ uint16_t u16PauseFilterCount;
+
+ /** Cache of exception intercepts. */
+ uint32_t u32InterceptXcpt;
+ /** Cache of control intercepts. */
+ uint64_t u64InterceptCtrl;
+
+ /** Cache of the TSC offset. */
+ uint64_t u64TSCOffset;
+
+ /** Cache of V_INTR_MASKING bit. */
+ bool fVIntrMasking;
+ /** Cache of the nested-paging bit. */
+ bool fNestedPaging;
+ /** Cache of the LBR virtualization bit. */
+ bool fLbrVirt;
+ /** Whether the VMCB is cached by HM. */
+ bool fCacheValid;
+ /** Alignment. */
+ bool afPadding0[4];
+} SVMNESTEDVMCBCACHE;
+/** Pointer to the SVMNESTEDVMCBCACHE structure. */
+typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
+/** Pointer to a const SVMNESTEDVMCBCACHE structure. */
+typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
+AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
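+
+/* Sketch of the caching pattern described for SVMNESTEDVMCBCACHE above; purely
+ * illustrative (the function name is made up and the VMCB member names are assumed from
+ * the public SVM structures), not the actual ring-0 merge code:
+ * @code
+ * static void hmR0SvmExampleSetupNstGstExec(PSVMVMCB pVmcbNstGst, PSVMNESTEDVMCBCACHE pCache)
+ * {
+ *     if (!pCache->fCacheValid)
+ *     {
+ *         // 1. Cache the fields before modifying them for hardware-assisted execution.
+ *         pCache->u64InterceptCtrl = pVmcbNstGst->ctrl.u64InterceptCtrl;
+ *         pCache->u64TSCOffset     = pVmcbNstGst->ctrl.u64TSCOffset;
+ *         pCache->fCacheValid      = true;
+ *     }
+ *     // 2. Now fold in our own intercepts, TSC offset, etc.
+ *     // 3. On #VMEXIT, any field the CPU writes back is restored from the cache so the
+ *     //    guest only ever sees its own values.
+ * }
+ * @endcode
+ */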
+
+/** @} */
+
+
+/** @addtogroup grp_hm_int_vmx VMX Internal
+ * @{ */
+
+/** @name Host-state restoration flags.
+ * @note If you change these values don't forget to update the assembly
+ * defines as well!
+ * @{
+ */
+#define VMX_RESTORE_HOST_SEL_DS RT_BIT(0)
+#define VMX_RESTORE_HOST_SEL_ES RT_BIT(1)
+#define VMX_RESTORE_HOST_SEL_FS RT_BIT(2)
+#define VMX_RESTORE_HOST_SEL_GS RT_BIT(3)
+#define VMX_RESTORE_HOST_SEL_TR RT_BIT(4)
+#define VMX_RESTORE_HOST_GDTR RT_BIT(5)
+#define VMX_RESTORE_HOST_IDTR RT_BIT(6)
+#define VMX_RESTORE_HOST_GDT_READ_ONLY RT_BIT(7)
+#define VMX_RESTORE_HOST_GDT_NEED_WRITABLE RT_BIT(8)
+#define VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE RT_BIT(9)
+/**
+ * This _must_ be the top most bit, so that we can easily check that it and
+ * something else is set w/o having to do two checks like this:
+ * @code
+ * if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
+ * && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
+ * @endcode
+ * Instead we can then do:
+ * @code
+ * if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
+ * @endcode
+ */
+#define VMX_RESTORE_HOST_REQUIRED RT_BIT(10)
+/** @} */
+
+/**
+ * Host-state restoration structure.
+ *
+ * This holds host-state fields that require manual restoration.
+ * Assembly version found in HMInternal.mac (should be automatically verified).
+ */
+typedef struct VMXRESTOREHOST
+{
+ RTSEL uHostSelDS; /**< 0x00 */
+ RTSEL uHostSelES; /**< 0x02 */
+ RTSEL uHostSelFS; /**< 0x04 */
+ X86XDTR64 HostGdtr; /**< 0x06 - should be aligned by its 64-bit member. */
+ RTSEL uHostSelGS; /**< 0x10 */
+ RTSEL uHostSelTR; /**< 0x12 */
+ RTSEL uHostSelSS; /**< 0x14 - not restored, just for fetching */
+ X86XDTR64 HostGdtrRw; /**< 0x16 - should be aligned by its 64-bit member. */
+ RTSEL uHostSelCS; /**< 0x20 - not restored, just for fetching */
+ uint8_t abPadding1[4]; /**< 0x22 */
+ X86XDTR64 HostIdtr; /**< 0x26 - should be aligned by its 64-bit member. */
+ uint64_t uHostFSBase; /**< 0x30 */
+ uint64_t uHostGSBase; /**< 0x38 */
+} VMXRESTOREHOST;
+/** Pointer to VMXRESTOREHOST. */
+typedef VMXRESTOREHOST *PVMXRESTOREHOST;
+AssertCompileSize(X86XDTR64, 10);
+AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr.uAddr, 0x08);
+AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtrRw.uAddr, 0x18);
+AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr.uAddr, 0x28);
+AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 0x30);
+AssertCompileSize(VMXRESTOREHOST, 64);
+AssertCompileSizeAlignment(VMXRESTOREHOST, 8);
+
+/**
+ * VMX StartVM function.
+ *
+ * @returns VBox status code (no informational stuff).
+ * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
+ * @param pVCpu Pointer to the cross context per-CPU structure.
+ * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
+ */
+typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume));
+/** Pointer to a VMX StartVM function. */
+typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
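+
+/* The concrete implementation (one of the hmR0VmxStartVm_* variants declared further
+ * down) is selected once by the ring-0 init code and then always invoked indirectly; a
+ * hypothetical call site, assuming the usual pVCpu->hmr0.s embedding of HMR0PERVCPU:
+ * @code
+ * int rc = pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResume);
+ * @endcode
+ */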
+/** @} */
+
+
+/**
+ * HM VMCPU Instance data.
+ *
+ * Note! If you change members of this struct, make sure to check if the
+ * assembly counterpart in HMInternal.mac needs to be updated as well.
+ *
+ * Note! The members here are ordered and aligned based on estimated frequency of
+ * usage and grouped to fit within a cache line in hot code paths. Even subtle
+ * changes here have a noticeable effect in the bootsector benchmarks. Modify with
+ * care.
+ */
+typedef struct HMCPU
+{
+ /** Set when the TLB has been checked until we return from the world switch. */
+ bool volatile fCheckedTLBFlush;
+ /** Set when we're using VT-x or AMD-V at that moment.
+ * @todo r=bird: Misleading description. For AMD-V this will be set the first
+ * time HMCanExecuteGuest() is called and only cleared again by
+ * HMR3ResetCpu(). For VT-x it will be set by HMCanExecuteGuest when we
+ * can execute something in VT-x mode, and cleared if we cannot.
+ *
+ * The field is much more about recording the last HMCanExecuteGuest
+ * return value than anything about any "moment". */
+ bool fActive;
+
+ /** Whether we should use the debug loop because single stepping is active or
+ * special debug breakpoints / events are armed. */
+ bool fUseDebugLoop;
+
+ /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
+ bool fGIMTrapXcptUD;
+ /** Whether \#GP needs to be intercepted for mesa driver workaround. */
+ bool fTrapXcptGpForLovelyMesaDrv;
+ /** Whether we're executing a single instruction. */
+ bool fSingleInstruction;
+ /** Whether \#DE needs to be intercepted (may be required by GCM). */
+ bool fGCMTrapXcptDE;
+
+ bool afAlignment0[1];
+
+ /** An additional error code used for some gurus. */
+ uint32_t u32HMError;
+ /** The last exit-to-ring-3 reason. */
+ int32_t rcLastExitToR3;
+ /** CPU-context changed flags (see HM_CHANGED_xxx). */
+ uint64_t fCtxChanged;
+
+ /** VT-x data. */
+ struct HMCPUVMX
+ {
+ /** @name Guest information.
+ * @{ */
+ /** Guest VMCS information shared with ring-3. */
+ VMXVMCSINFOSHARED VmcsInfo;
+ /** Nested-guest VMCS information shared with ring-3. */
+ VMXVMCSINFOSHARED VmcsInfoNstGst;
+ /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
+ * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs */
+ bool fSwitchedToNstGstVmcsCopyForRing3;
+ /** Whether the static guest VMCS controls have been merged with the
+ * nested-guest VMCS controls. */
+ bool fMergedNstGstCtls;
+ /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
+ bool fCopiedNstGstToShadowVmcs;
+ /** Whether flushing the TLB is required due to switching to/from the
+ * nested-guest. */
+ bool fSwitchedNstGstFlushTlb;
+ /** Alignment. */
+ bool afAlignment0[4];
+ /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
+ uint64_t u64GstMsrApicBase;
+ /** @} */
+
+ /** @name Error reporting and diagnostics.
+ * @{ */
+ /** VT-x error-reporting (mainly for ring-3 propagation). */
+ struct
+ {
+ RTCPUID idCurrentCpu;
+ RTCPUID idEnteredCpu;
+ RTHCPHYS HCPhysCurrentVmcs;
+ uint32_t u32VmcsRev;
+ uint32_t u32InstrError;
+ uint32_t u32ExitReason;
+ uint32_t u32GuestIntrState;
+ } LastError;
+ /** @} */
+ } vmx;
+
+ /** SVM data. */
+ struct HMCPUSVM
+ {
+ /** Whether to emulate long mode support for sysenter/sysexit like Intel CPUs
+ * do. This means intercepting \#UD to emulate the instructions in long mode
+ * and intercepting reads and writes to the SYSENTER MSRs in order to preserve
+ * the upper 32 bits written to them (AMD CPUs ignore and discard them). */
+ bool fEmulateLongModeSysEnterExit;
+ uint8_t au8Alignment0[7];
+
+ /** Cache of the nested-guest's VMCB fields that we modify in order to run the
+ * nested-guest using AMD-V. This will be restored on \#VMEXIT. */
+ SVMNESTEDVMCBCACHE NstGstVmcbCache;
+ } svm;
+
+ /** Event injection state. */
+ HMEVENT Event;
+
+ /** Current shadow paging mode for updating CR4.
+ * @todo move later (@bugref{9217}). */
+ PGMMODE enmShadowMode;
+ uint32_t u32TemporaryPadding;
+
+ /** The PAE PDPEs used with Nested Paging (only valid when
+ * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
+ X86PDPE aPdpes[4];
+
+ /* These come before the other statistics because they are accessed from assembly and we don't
+ want to detail all the stats in the assembly version of this structure. */
+ STAMCOUNTER StatVmxWriteHostRip;
+ STAMCOUNTER StatVmxWriteHostRsp;
+ STAMCOUNTER StatVmxVmLaunch;
+ STAMCOUNTER StatVmxVmResume;
+
+ STAMPROFILEADV StatEntry;
+ STAMPROFILEADV StatPreExit;
+ STAMPROFILEADV StatExitHandling;
+ STAMPROFILEADV StatExitIO;
+ STAMPROFILEADV StatExitMovCRx;
+ STAMPROFILEADV StatExitXcptNmi;
+ STAMPROFILEADV StatExitVmentry;
+ STAMPROFILEADV StatImportGuestState;
+ STAMPROFILEADV StatExportGuestState;
+ STAMPROFILEADV StatLoadGuestFpuState;
+ STAMPROFILEADV StatInGC;
+ STAMPROFILEADV StatPoke;
+ STAMPROFILEADV StatSpinPoke;
+ STAMPROFILEADV StatSpinPokeFailed;
+
+ STAMCOUNTER StatInjectInterrupt;
+ STAMCOUNTER StatInjectXcpt;
+ STAMCOUNTER StatInjectReflect;
+ STAMCOUNTER StatInjectConvertDF;
+ STAMCOUNTER StatInjectInterpret;
+ STAMCOUNTER StatInjectReflectNPF;
+
+ STAMCOUNTER StatImportGuestStateFallback;
+ STAMCOUNTER StatReadToTransientFallback;
+
+ STAMCOUNTER StatExitAll;
+ STAMCOUNTER StatDebugExitAll;
+ STAMCOUNTER StatNestedExitAll;
+ STAMCOUNTER StatExitShadowNM;
+ STAMCOUNTER StatExitGuestNM;
+ STAMCOUNTER StatExitShadowPF; /**< Misleading, currently used for MMIO \#PFs as well. */
+ STAMCOUNTER StatExitShadowPFEM;
+ STAMCOUNTER StatExitGuestPF;
+ STAMCOUNTER StatExitGuestUD;
+ STAMCOUNTER StatExitGuestSS;
+ STAMCOUNTER StatExitGuestNP;
+ STAMCOUNTER StatExitGuestTS;
+ STAMCOUNTER StatExitGuestOF;
+ STAMCOUNTER StatExitGuestGP;
+ STAMCOUNTER StatExitGuestDE;
+ STAMCOUNTER StatExitGuestDF;
+ STAMCOUNTER StatExitGuestBR;
+ STAMCOUNTER StatExitGuestAC;
+ STAMCOUNTER StatExitGuestACSplitLock;
+ STAMCOUNTER StatExitGuestDB;
+ STAMCOUNTER StatExitGuestMF;
+ STAMCOUNTER StatExitGuestBP;
+ STAMCOUNTER StatExitGuestXF;
+ STAMCOUNTER StatExitGuestXcpUnk;
+ STAMCOUNTER StatExitDRxWrite;
+ STAMCOUNTER StatExitDRxRead;
+ STAMCOUNTER StatExitCR0Read;
+ STAMCOUNTER StatExitCR2Read;
+ STAMCOUNTER StatExitCR3Read;
+ STAMCOUNTER StatExitCR4Read;
+ STAMCOUNTER StatExitCR8Read;
+ STAMCOUNTER StatExitCR0Write;
+ STAMCOUNTER StatExitCR2Write;
+ STAMCOUNTER StatExitCR3Write;
+ STAMCOUNTER StatExitCR4Write;
+ STAMCOUNTER StatExitCR8Write;
+ STAMCOUNTER StatExitRdmsr;
+ STAMCOUNTER StatExitWrmsr;
+ STAMCOUNTER StatExitClts;
+ STAMCOUNTER StatExitXdtrAccess;
+ STAMCOUNTER StatExitLmsw;
+ STAMCOUNTER StatExitIOWrite;
+ STAMCOUNTER StatExitIORead;
+ STAMCOUNTER StatExitIOStringWrite;
+ STAMCOUNTER StatExitIOStringRead;
+ STAMCOUNTER StatExitIntWindow;
+ STAMCOUNTER StatExitExtInt;
+ STAMCOUNTER StatExitHostNmiInGC;
+ STAMCOUNTER StatExitHostNmiInGCIpi;
+ STAMCOUNTER StatExitPreemptTimer;
+ STAMCOUNTER StatExitTprBelowThreshold;
+ STAMCOUNTER StatExitTaskSwitch;
+ STAMCOUNTER StatExitApicAccess;
+ STAMCOUNTER StatExitReasonNpf;
+
+ STAMCOUNTER StatNestedExitReasonNpf;
+
+ STAMCOUNTER StatFlushPage;
+ STAMCOUNTER StatFlushPageManual;
+ STAMCOUNTER StatFlushPhysPageManual;
+ STAMCOUNTER StatFlushTlb;
+ STAMCOUNTER StatFlushTlbNstGst;
+ STAMCOUNTER StatFlushTlbManual;
+ STAMCOUNTER StatFlushTlbWorldSwitch;
+ STAMCOUNTER StatNoFlushTlbWorldSwitch;
+ STAMCOUNTER StatFlushEntire;
+ STAMCOUNTER StatFlushAsid;
+ STAMCOUNTER StatFlushNestedPaging;
+ STAMCOUNTER StatFlushTlbInvlpgVirt;
+ STAMCOUNTER StatFlushTlbInvlpgPhys;
+ STAMCOUNTER StatTlbShootdown;
+ STAMCOUNTER StatTlbShootdownFlush;
+
+ STAMCOUNTER StatSwitchPendingHostIrq;
+ STAMCOUNTER StatSwitchTprMaskedIrq;
+ STAMCOUNTER StatSwitchGuestIrq;
+ STAMCOUNTER StatSwitchHmToR3FF;
+ STAMCOUNTER StatSwitchVmReq;
+ STAMCOUNTER StatSwitchPgmPoolFlush;
+ STAMCOUNTER StatSwitchDma;
+ STAMCOUNTER StatSwitchExitToR3;
+ STAMCOUNTER StatSwitchLongJmpToR3;
+ STAMCOUNTER StatSwitchMaxResumeLoops;
+ STAMCOUNTER StatSwitchHltToR3;
+ STAMCOUNTER StatSwitchApicAccessToR3;
+ STAMCOUNTER StatSwitchPreempt;
+ STAMCOUNTER StatSwitchNstGstVmexit;
+
+ STAMCOUNTER StatTscParavirt;
+ STAMCOUNTER StatTscOffset;
+ STAMCOUNTER StatTscIntercept;
+
+ STAMCOUNTER StatDRxArmed;
+ STAMCOUNTER StatDRxContextSwitch;
+ STAMCOUNTER StatDRxIoCheck;
+
+ STAMCOUNTER StatExportMinimal;
+ STAMCOUNTER StatExportFull;
+ STAMCOUNTER StatLoadGuestFpu;
+ STAMCOUNTER StatExportHostState;
+
+ STAMCOUNTER StatVmxCheckBadRmSelBase;
+ STAMCOUNTER StatVmxCheckBadRmSelLimit;
+ STAMCOUNTER StatVmxCheckBadRmSelAttr;
+ STAMCOUNTER StatVmxCheckBadV86SelBase;
+ STAMCOUNTER StatVmxCheckBadV86SelLimit;
+ STAMCOUNTER StatVmxCheckBadV86SelAttr;
+ STAMCOUNTER StatVmxCheckRmOk;
+ STAMCOUNTER StatVmxCheckBadSel;
+ STAMCOUNTER StatVmxCheckBadRpl;
+ STAMCOUNTER StatVmxCheckPmOk;
+
+ STAMCOUNTER StatVmxPreemptionRecalcingDeadline;
+ STAMCOUNTER StatVmxPreemptionRecalcingDeadlineExpired;
+ STAMCOUNTER StatVmxPreemptionReusingDeadline;
+ STAMCOUNTER StatVmxPreemptionReusingDeadlineExpired;
+
+#ifdef VBOX_WITH_STATISTICS
+ STAMCOUNTER aStatExitReason[MAX_EXITREASON_STAT];
+ STAMCOUNTER aStatNestedExitReason[MAX_EXITREASON_STAT];
+ STAMCOUNTER aStatInjectedIrqs[256];
+ STAMCOUNTER aStatInjectedXcpts[X86_XCPT_LAST + 1];
+#endif
+#ifdef HM_PROFILE_EXIT_DISPATCH
+ STAMPROFILEADV StatExitDispatch;
+#endif
+} HMCPU;
+/** Pointer to HM VMCPU instance data. */
+typedef HMCPU *PHMCPU;
+AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
+AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
+AssertCompileMemberAlignment(HMCPU, vmx, 8);
+AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfo, 8);
+AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfoNstGst, 8);
+AssertCompileMemberAlignment(HMCPU, svm, 8);
+AssertCompileMemberAlignment(HMCPU, Event, 8);
+
+
+/**
+ * HM per-VCpu ring-0 only instance data.
+ */
+typedef struct HMR0PERVCPU
+{
+ /** World switch exit counter. */
+ uint32_t volatile cWorldSwitchExits;
+ /** TLB flush count. */
+ uint32_t cTlbFlushes;
+ /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
+ RTCPUID idLastCpu;
+ /** The CPU ID of the CPU currently owning the VMCS. Set in
+ * HMR0Enter and cleared in HMR0Leave. */
+ RTCPUID idEnteredCpu;
+ /** Current ASID in use by the VM. */
+ uint32_t uCurrentAsid;
+
+ /** Set if we need to flush the TLB during the world switch. */
+ bool fForceTLBFlush;
+ /** Whether we've completed the inner HM leave function. */
+ bool fLeaveDone;
+ /** Whether we're using the hyper DR7 or guest DR7. */
+ bool fUsingHyperDR7;
+ /** Whether we are currently executing in the debug loop.
+ * Mainly for assertions. */
+ bool fUsingDebugLoop;
+ /** Set if we are using the debug loop and wish to intercept RDTSC. */
+ bool fDebugWantRdTscExit;
+ /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
+ * execution. */
+ bool fLoadSaveGuestXcr0;
+ /** Set if we need to clear the trap flag because of single stepping. */
+ bool fClearTrapFlag;
+
+ bool afPadding1[1];
+ /** World switcher flags (HM_WSF_XXX - was CPUMCTX::fWorldSwitcher in 6.1). */
+ uint32_t fWorldSwitcher;
+ /** The raw host TSC value from the last VM exit (set by HMR0A.asm). */
+ uint64_t uTscExit;
+
+ /** VT-x data. */
+ struct HMR0CPUVMX
+ {
+ /** Ring-0 pointer to the hardware-assisted VMX execution function. */
+ PFNHMVMXSTARTVM pfnStartVm;
+ /** Absolute TSC deadline. */
+ uint64_t uTscDeadline;
+ /** The deadline version number. */
+ uint64_t uTscDeadlineVersion;
+
+ /** @name Guest information.
+ * @{ */
+ /** Guest VMCS information. */
+ VMXVMCSINFO VmcsInfo;
+ /** Nested-guest VMCS information. */
+ VMXVMCSINFO VmcsInfoNstGst;
+ /** Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
+ * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3 */
+ bool fSwitchedToNstGstVmcs;
+ bool afAlignment0[7];
+ /** Pointer to the VMX transient info during VM-exit. */
+ PVMXTRANSIENT pVmxTransient;
+ /** @} */
+
+ /** @name Host information.
+ * @{ */
+ /** Host LSTAR MSR to restore lazily while leaving VT-x. */
+ uint64_t u64HostMsrLStar;
+ /** Host STAR MSR to restore lazily while leaving VT-x. */
+ uint64_t u64HostMsrStar;
+ /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
+ uint64_t u64HostMsrSfMask;
+ /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
+ uint64_t u64HostMsrKernelGsBase;
+ /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
+ uint32_t fLazyMsrs;
+ /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
+ bool fUpdatedHostAutoMsrs;
+ /** Alignment. */
+ uint8_t au8Alignment0[3];
+ /** Which host-state bits to restore before being preempted, see
+ * VMX_RESTORE_HOST_XXX. */
+ uint32_t fRestoreHostFlags;
+ /** Alignment. */
+ uint32_t u32Alignment0;
+ /** The host-state restoration structure. */
+ VMXRESTOREHOST RestoreHost;
+ /** @} */
+ } vmx;
+
+ /** SVM data. */
+ struct HMR0CPUSVM
+ {
+ /** Ring-0 pointer to the hardware-assisted SVM VMRUN function. */
+ PFNHMSVMVMRUN pfnVMRun;
+
+ /** Physical address of the host VMCB which holds additional host-state. */
+ RTHCPHYS HCPhysVmcbHost;
+ /** R0 memory object for the host VMCB which holds additional host-state. */
+ RTR0MEMOBJ hMemObjVmcbHost;
+
+ /** Physical address of the guest VMCB. */
+ RTHCPHYS HCPhysVmcb;
+ /** R0 memory object for the guest VMCB. */
+ RTR0MEMOBJ hMemObjVmcb;
+ /** Pointer to the guest VMCB. */
+ R0PTRTYPE(PSVMVMCB) pVmcb;
+
+ /** Physical address of the MSR bitmap (8 KB). */
+ RTHCPHYS HCPhysMsrBitmap;
+ /** R0 memory object for the MSR bitmap (8 KB). */
+ RTR0MEMOBJ hMemObjMsrBitmap;
+ /** Pointer to the MSR bitmap. */
+ R0PTRTYPE(void *) pvMsrBitmap;
+
+ /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
+ * we should check if the VTPR changed on every VM-exit. */
+ bool fSyncVTpr;
+ bool afAlignment[7];
+
+ /** Pointer to the SVM transient info during VM-exit. */
+ PSVMTRANSIENT pSvmTransient;
+ /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
+ uint64_t u64HostTscAux;
+
+ /** For saving stack space, the disassembler state is allocated here
+ * instead of on the stack. */
+ DISCPUSTATE DisState;
+ } svm;
+} HMR0PERVCPU;
+/** Pointer to HM ring-0 VMCPU instance data. */
+typedef HMR0PERVCPU *PHMR0PERVCPU;
+AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
+AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4);
+AssertCompileMemberAlignment(HMR0PERVCPU, vmx.RestoreHost, 8);
+
+
+/** @name HM_WSF_XXX - @bugref{9453}, @bugref{9087}
+ * @note If you change these values don't forget to update the assembly
+ * defines as well!
+ * @{ */
+/** Touch IA32_PRED_CMD.IBPB on VM exit. */
+#define HM_WSF_IBPB_EXIT RT_BIT_32(0)
+/** Touch IA32_PRED_CMD.IBPB on VM entry. */
+#define HM_WSF_IBPB_ENTRY RT_BIT_32(1)
+/** Touch IA32_FLUSH_CMD.L1D on VM entry. */
+#define HM_WSF_L1D_ENTRY RT_BIT_32(2)
+/** Flush MDS buffers on VM entry. */
+#define HM_WSF_MDS_ENTRY RT_BIT_32(3)
+
+/** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
+#define HM_WSF_L1D_SCHED RT_BIT_32(16)
+/** Flush MDS buffers on VM scheduling. */
+#define HM_WSF_MDS_SCHED RT_BIT_32(17)
+/** @} */
+
+
+#ifdef IN_RING0
+extern bool g_fHmVmxSupported;
+extern uint32_t g_fHmHostKernelFeatures;
+extern uint32_t g_uHmMaxAsid;
+extern bool g_fHmVmxUsePreemptTimer;
+extern uint8_t g_cHmVmxPreemptTimerShift;
+extern bool g_fHmVmxSupportsVmcsEfer;
+extern uint64_t g_uHmVmxHostCr4;
+extern uint64_t g_uHmVmxHostMsrEfer;
+extern uint64_t g_uHmVmxHostSmmMonitorCtl;
+extern bool g_fHmSvmSupported;
+extern uint32_t g_uHmSvmRev;
+extern uint32_t g_fHmSvmFeatures;
+
+extern SUPHWVIRTMSRS g_HmMsrs;
+
+
+VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
+VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPUCC pVCpu);
+
+# ifdef VBOX_STRICT
+# define HM_DUMP_REG_FLAGS_GPRS RT_BIT(0)
+# define HM_DUMP_REG_FLAGS_FPU RT_BIT(1)
+# define HM_DUMP_REG_FLAGS_MSRS RT_BIT(2)
+# define HM_DUMP_REG_FLAGS_ALL (HM_DUMP_REG_FLAGS_GPRS | HM_DUMP_REG_FLAGS_FPU | HM_DUMP_REG_FLAGS_MSRS)
+
+VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPUCC pVCpu, uint32_t fFlags);
+VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
+# endif
+
+DECLASM(void) hmR0MdsClear(void);
+#endif /* IN_RING0 */
+
+
+/** @addtogroup grp_hm_int_svm SVM Internal
+ * @{ */
+VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCC pVM, PVMCPUCC pVCpu);
+
+/**
+ * Prepares for and executes VMRUN (64-bit register context).
+ *
+ * @returns VBox status code (no informational stuff).
+ * @param pVM The cross context VM structure. (Not used.)
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param HCPhyspVMCB Physical address of the VMCB.
+ *
+ * @remarks With spectre mitigations and the usual need for speed (/ micro
+ * optimizations), we have a bunch of variations of this code depending
+ * on a few precoditions. In release builds, the code is entirely
+ * without conditionals. Debug builds have a couple of assertions that
+ * shouldn't ever be triggered.
+ *
+ * @{
+ */
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+/** @} */
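+
+/* How one of the eight variants above ends up in HMR0PERVCPU::svm.pfnVMRun: the choice is
+ * made from HMR0PERVCPU::fWorldSwitcher and fLoadSaveGuestXcr0. A hypothetical selection
+ * sketch assuming the usual pVCpu->hmr0.s embedding (only the IBPB-on-entry dimension is
+ * shown; the real code lives in the ring-0 SVM init path):
+ * @code
+ * uint32_t const fWsf = pVCpu->hmr0.s.fWorldSwitcher;
+ * if (pVCpu->hmr0.s.fLoadSaveGuestXcr0)
+ *     pVCpu->hmr0.s.svm.pfnVMRun = (fWsf & HM_WSF_IBPB_ENTRY)
+ *                                ? hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit
+ *                                : hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit;
+ * else
+ *     pVCpu->hmr0.s.svm.pfnVMRun = (fWsf & HM_WSF_IBPB_ENTRY)
+ *                                ? hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit
+ *                                : hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit;
+ * @endcode
+ */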
+
+/** @} */
+
+
+/** @addtogroup grp_hm_int_vmx VMX Internal
+ * @{ */
+VMM_INT_DECL(PVMXVMCSINFOSHARED) hmGetVmxActiveVmcsInfoShared(PVMCPUCC pVCpu);
+
+/**
+ * Used on platforms with poor inline assembly support to retrieve all the
+ * info from the CPU and put it in the @a pRestoreHost structure.
+ */
+DECLASM(void) hmR0VmxExportHostSegmentRegsAsmHlp(PVMXRESTOREHOST pRestoreHost, bool fHaveFsGsBase);
+
+/**
+ * Restores some host-state fields that need not be done on every VM-exit.
+ *
+ * @returns VBox status code.
+ * @param fRestoreHostFlags Flags of which host registers need to be
+ * restored.
+ * @param pRestoreHost Pointer to the host-restore structure.
+ */
+DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
+
+/**
+ * VMX StartVM functions.
+ *
+ * @returns VBox status code (no informational stuff).
+ * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
+ * @param pVCpu Pointer to the cross context per-CPU structure of the
+ * calling EMT.
+ * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
+ *
+ * @remarks With spectre mitigations and the usual need for speed (/ micro
+ * optimizations), we have a bunch of variations of this code depending
+ * on a few preconditions. In release builds, the code is entirely
+ * without conditionals. Debug builds have a couple of assertions that
+ * shouldn't ever be triggered.
+ *
+ * @{
+ */
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
+/** @} */
+
+/** @} */
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_HMInternal_h */
+
diff --git a/src/VBox/VMM/include/HMInternal.mac b/src/VBox/VMM/include/HMInternal.mac
new file mode 100644
index 00000000..e15aacd6
--- /dev/null
+++ b/src/VBox/VMM/include/HMInternal.mac
@@ -0,0 +1,278 @@
+;$Id: HMInternal.mac $
+;; @file
+; HM - Internal header file.
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+%ifndef VMX_VMCS_GUEST_FIELD_ES
+ %include "VBox/vmm/hm_vmx.mac" ; For VMXRESTOREHOST
+%endif
+
+struc VMXVMCSINFOSHARED
+ .fWasInRealMode resb 1
+ alignb 8
+ .RealMode.AttrCS resd 1
+ .RealMode.AttrDS resd 1
+ .RealMode.AttrES resd 1
+ .RealMode.AttrFS resd 1
+ .RealMode.AttrGS resd 1
+ .RealMode.AttrSS resd 1
+ .RealMode.Eflags resd 1 ; should be EFlags?
+ .RealMode.fRealOnV86Active resb 1
+
+ alignb 8
+ .au64LbrFromIpMsr resq 32
+ .au64LbrToIpMsr resq 32
+ .au64LbrInfoMsr resq 32
+ .u64LbrTosMsr resq 1
+ .u64LerFromIpMsr resq 1
+ .u64LerToIpMsr resq 1
+endstruc
+
+
+struc VMXVMCSINFO
+ .pShared RTR0PTR_RES 1
+
+ .HCPhysEPTP RTHCPHYS_RES 1
+ .fVmcsState resd 1
+ .fShadowVmcsState resd 1
+ .idHostCpuState resd 1
+ .idHostCpuExec resd 1
+ .cEntryMsrLoad resd 1
+ .cExitMsrStore resd 1
+ .cExitMsrLoad resd 1
+
+ .u32PinCtls resd 1
+ .u32ProcCtls resd 1
+ .u32ProcCtls2 resd 1
+ .u64ProcCtls3 resq 1
+ .u32EntryCtls resd 1
+ .u32ExitCtls resd 1
+ .u32XcptBitmap resd 1
+ .u32XcptPFMask resd 1
+ .u32XcptPFMatch resd 1
+
+ alignb 8
+ .u64TscOffset resq 1
+ .u64VmcsLinkPtr resq 1
+ .u64Cr0Mask resq 1
+ .u64Cr4Mask resq 1
+ .uHostRip resq 1
+ .uHostRsp resq 1
+
+ .pvVmcs RTR0PTR_RES 1
+ .pvShadowVmcs RTR0PTR_RES 1
+ .pbVirtApic RTR0PTR_RES 1
+ .pvMsrBitmap RTR0PTR_RES 1
+ .pvGuestMsrLoad RTR0PTR_RES 1
+ .pvGuestMsrStore RTR0PTR_RES 1
+ .pvHostMsrLoad RTR0PTR_RES 1
+
+ alignb 8
+ .HCPhysVmcs RTHCPHYS_RES 1
+ .HCPhysShadowVmcs RTHCPHYS_RES 1
+ .HCPhysVirtApic RTHCPHYS_RES 1
+ .HCPhysMsrBitmap RTHCPHYS_RES 1
+ .HCPhysGuestMsrLoad RTHCPHYS_RES 1
+ .HCPhysGuestMsrStore RTHCPHYS_RES 1
+ .HCPhysHostMsrLoad RTHCPHYS_RES 1
+
+ .hMemObj RTR0PTR_RES 1
+endstruc
+
+%define VMX_RESTORE_HOST_SEL_DS 0001h ;RT_BIT(0)
+%define VMX_RESTORE_HOST_SEL_ES 0002h ;RT_BIT(1)
+%define VMX_RESTORE_HOST_SEL_FS 0004h ;RT_BIT(2)
+%define VMX_RESTORE_HOST_SEL_GS 0008h ;RT_BIT(3)
+%define VMX_RESTORE_HOST_SEL_TR 0010h ;RT_BIT(4)
+%define VMX_RESTORE_HOST_GDTR 0020h ;RT_BIT(5)
+%define VMX_RESTORE_HOST_IDTR 0040h ;RT_BIT(6)
+%define VMX_RESTORE_HOST_GDT_READ_ONLY 0080h ;RT_BIT(7)
+%define VMX_RESTORE_HOST_GDT_NEED_WRITABLE 0100h ;RT_BIT(8)
+%define VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE 0200h ;RT_BIT(9)
+%define VMX_RESTORE_HOST_REQUIRED 0400h ;RT_BIT(10) - must be the highest bit!
+struc VMXRESTOREHOST
+ .uHostSelDS resw 1
+ .uHostSelES resw 1
+ .uHostSelFS resw 1
+ .HostGdtr resb 10
+ .uHostSelGS resw 1
+ .uHostSelTR resw 1
+ .uHostSelSS resw 1
+ .HostGdtrRw resb 10
+ .uHostSelCS resw 1
+ .abPadding1 resb 4
+ .HostIdtr resb 10
+ alignb 8
+ .uHostFSBase resq 1
+ .uHostGSBase resq 1
+endstruc
+
+struc HMCPUVMX
+ .VmcsInfo resb VMXVMCSINFOSHARED_size
+ .VmcsInfoNstGst resb VMXVMCSINFOSHARED_size
+ .fSwitchedToNstGstVmcsCopyForRing3 resb 1
+ .fMergedNstGstCtls resb 1
+ .fCopiedNstGstToShadowVmcs resb 1
+ .fSwitchedNstGstFlushTlb resb 1
+
+ alignb 8
+ .u64GstMsrApicBase resq 1
+
+ .LastError.idCurrentCpu resd 1
+ .LastError.idEnteredCpu resd 1
+ .LastError.HCPhysCurrentVmcs resq 1
+ .LastError.u32VmcsRev resd 1
+ .LastError.u32InstrError resd 1
+ .LastError.u32ExitReason resd 1
+ .LastError.u32GuestIntrState resd 1
+endstruc
+
+struc HMCPUSVM
+ .fEmulateLongModeSysEnterExit resb 1
+
+ alignb 8
+ .NstGstVmcbCache resb 40
+endstruc
+
+struc HMCPU
+ .fCheckedTLBFlush resb 1
+ .fActive resb 1
+ .fUseDebugLoop resb 1
+
+ .fGIMTrapXcptUD resb 1
+ .fTrapXcptGpForLovelyMesaDrv resb 1
+ .fSingleInstruction resb 1
+ alignb 8
+
+ .u32HMError resd 1
+ .rcLastExitToR3 resd 1
+ alignb 8
+ .fCtxChanged resq 1
+
+ alignb 8
+ .vmx resb HMCPUVMX_size
+ alignb 8
+ .svm resb HMCPUSVM_size
+
+ .Event.fPending resd 1
+ .Event.u32ErrCode resd 1
+ .Event.cbInstr resd 1
+ alignb 8
+ .Event.u64IntInfo resq 1
+ .Event.GCPtrFaultAddress RTGCPTR_RES 1
+
+ .enmShadowMode resd 1
+ alignb 8
+ .aPdpes resq 4
+
+ .StatVmxWriteHostRip resq 1
+ .StatVmxWriteHostRsp resq 1
+
+ ; The remainder is disassembly state and statistics.
+endstruc
+
+struc HMR0CPUVMX
+ .pfnStartVm RTR0PTR_RES 1
+ .uTscDeadline resq 1
+ .uTscDeadlineVersion resq 1
+
+
+ .VmcsInfo resb VMXVMCSINFO_size
+ .VmcsInfoNstGst resb VMXVMCSINFO_size
+ .fSwitchedToNstGstVmcs resb 1
+ alignb 8
+ .pVmxTransient RTR0PTR_RES 1
+
+ .u64HostMsrLStar resq 1
+ .u64HostMsrStar resq 1
+ .u64HostMsrSfMask resq 1
+ .u64HostMsrKernelGsBase resq 1
+ .fLazyMsrs resd 1
+ .fUpdatedHostAutoMsrs resb 1
+ alignb 4
+ .fRestoreHostFlags resd 1
+ alignb 8
+ .RestoreHost resb VMXRESTOREHOST_size
+endstruc
+
+struc HMR0CPUSVM
+ .pfnVMRun RTR0PTR_RES 1
+
+ alignb 8
+ .HCPhysVmcbHost RTHCPHYS_RES 1
+
+ alignb 8
+ .hMemObjVmcbHost RTR0PTR_RES 1
+
+ alignb 8
+ .HCPhysVmcb RTHCPHYS_RES 1
+ .hMemObjVmcb RTR0PTR_RES 1
+ .pVmcb RTR0PTR_RES 1
+
+ alignb 8
+ .HCPhysMsrBitmap RTHCPHYS_RES 1
+ .hMemObjMsrBitmap RTR0PTR_RES 1
+ .pvMsrBitmap RTR0PTR_RES 1
+
+ .fSyncVTpr resb 1
+
+ alignb 8
+ .pSvmTransient RTR0PTR_RES 1
+ .u64HostTscAux resq 1
+
+ alignb 8
+ .DisState resb 0d8h
+endstruc
+
+struc HMR0PERVCPU
+ .cWorldSwitchExits resd 1
+ .cTlbFlushes resd 1
+ .idLastCpu resd 1
+ .idEnteredCpu resd 1
+ .uCurrentAsid resd 1
+
+ .fForceTLBFlush resb 1
+ .fLeaveDone resb 1
+ .fUsingHyperDR7 resb 1
+ .fUsingDebugLoop resb 1
+ .fDebugWantRdTscExit resb 1
+ .fLoadSaveGuestXcr0 resb 1
+ .fClearTrapFlag resb 1
+
+ alignb 4
+ .fWorldSwitcher resd 1
+ .uTscExit resq 1
+
+ alignb 8
+ .vmx resb HMR0CPUVMX_size
+ alignb 8
+ .svm resb HMR0CPUSVM_size
+endstruc
+
+%define HM_WSF_IBPB_EXIT RT_BIT_32(0)
+%define HM_WSF_IBPB_ENTRY RT_BIT_32(1)
+%define HM_WSF_L1D_ENTRY RT_BIT_32(2)
+%define HM_WSF_MDS_ENTRY RT_BIT_32(3)
+
diff --git a/src/VBox/VMM/include/HMVMXCommon.h b/src/VBox/VMM/include/HMVMXCommon.h
new file mode 100644
index 00000000..047911a6
--- /dev/null
+++ b/src/VBox/VMM/include/HMVMXCommon.h
@@ -0,0 +1,435 @@
+/* $Id: HMVMXCommon.h $ */
+/** @file
+ * HM/VMX - Internal header file for sharing common bits between the
+ * VMX template code (which is also used with NEM on darwin) and HM.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_HMVMXCommon_h
+#define VMM_INCLUDED_SRC_include_HMVMXCommon_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_hm_int Internal
+ * @ingroup grp_hm
+ * @internal
+ * @{
+ */
+
+/** @name HM_CHANGED_XXX
+ * HM CPU-context changed flags.
+ *
+ * These flags are used to keep track of which registers and state have been
+ * modified since they were imported back into the guest-CPU context.
+ *
+ * @{
+ */
+#define HM_CHANGED_HOST_CONTEXT UINT64_C(0x0000000000000001)
+#define HM_CHANGED_GUEST_RIP UINT64_C(0x0000000000000004)
+#define HM_CHANGED_GUEST_RFLAGS UINT64_C(0x0000000000000008)
+
+#define HM_CHANGED_GUEST_RAX UINT64_C(0x0000000000000010)
+#define HM_CHANGED_GUEST_RCX UINT64_C(0x0000000000000020)
+#define HM_CHANGED_GUEST_RDX UINT64_C(0x0000000000000040)
+#define HM_CHANGED_GUEST_RBX UINT64_C(0x0000000000000080)
+#define HM_CHANGED_GUEST_RSP UINT64_C(0x0000000000000100)
+#define HM_CHANGED_GUEST_RBP UINT64_C(0x0000000000000200)
+#define HM_CHANGED_GUEST_RSI UINT64_C(0x0000000000000400)
+#define HM_CHANGED_GUEST_RDI UINT64_C(0x0000000000000800)
+#define HM_CHANGED_GUEST_R8_R15 UINT64_C(0x0000000000001000)
+#define HM_CHANGED_GUEST_GPRS_MASK UINT64_C(0x0000000000001ff0)
+
+#define HM_CHANGED_GUEST_ES UINT64_C(0x0000000000002000)
+#define HM_CHANGED_GUEST_CS UINT64_C(0x0000000000004000)
+#define HM_CHANGED_GUEST_SS UINT64_C(0x0000000000008000)
+#define HM_CHANGED_GUEST_DS UINT64_C(0x0000000000010000)
+#define HM_CHANGED_GUEST_FS UINT64_C(0x0000000000020000)
+#define HM_CHANGED_GUEST_GS UINT64_C(0x0000000000040000)
+#define HM_CHANGED_GUEST_SREG_MASK UINT64_C(0x000000000007e000)
+
+#define HM_CHANGED_GUEST_GDTR UINT64_C(0x0000000000080000)
+#define HM_CHANGED_GUEST_IDTR UINT64_C(0x0000000000100000)
+#define HM_CHANGED_GUEST_LDTR UINT64_C(0x0000000000200000)
+#define HM_CHANGED_GUEST_TR UINT64_C(0x0000000000400000)
+#define HM_CHANGED_GUEST_TABLE_MASK UINT64_C(0x0000000000780000)
+
+#define HM_CHANGED_GUEST_CR0 UINT64_C(0x0000000000800000)
+#define HM_CHANGED_GUEST_CR2 UINT64_C(0x0000000001000000)
+#define HM_CHANGED_GUEST_CR3 UINT64_C(0x0000000002000000)
+#define HM_CHANGED_GUEST_CR4 UINT64_C(0x0000000004000000)
+#define HM_CHANGED_GUEST_CR_MASK UINT64_C(0x0000000007800000)
+
+#define HM_CHANGED_GUEST_APIC_TPR UINT64_C(0x0000000008000000)
+#define HM_CHANGED_GUEST_EFER_MSR UINT64_C(0x0000000010000000)
+
+#define HM_CHANGED_GUEST_DR0_DR3 UINT64_C(0x0000000020000000)
+#define HM_CHANGED_GUEST_DR6 UINT64_C(0x0000000040000000)
+#define HM_CHANGED_GUEST_DR7 UINT64_C(0x0000000080000000)
+#define HM_CHANGED_GUEST_DR_MASK UINT64_C(0x00000000e0000000)
+
+#define HM_CHANGED_GUEST_X87 UINT64_C(0x0000000100000000)
+#define HM_CHANGED_GUEST_SSE_AVX UINT64_C(0x0000000200000000)
+#define HM_CHANGED_GUEST_OTHER_XSAVE UINT64_C(0x0000000400000000)
+#define HM_CHANGED_GUEST_XCRx UINT64_C(0x0000000800000000)
+
+#define HM_CHANGED_GUEST_KERNEL_GS_BASE UINT64_C(0x0000001000000000)
+#define HM_CHANGED_GUEST_SYSCALL_MSRS UINT64_C(0x0000002000000000)
+#define HM_CHANGED_GUEST_SYSENTER_CS_MSR UINT64_C(0x0000004000000000)
+#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR UINT64_C(0x0000008000000000)
+#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR UINT64_C(0x0000010000000000)
+#define HM_CHANGED_GUEST_SYSENTER_MSR_MASK UINT64_C(0x000001c000000000)
+#define HM_CHANGED_GUEST_TSC_AUX UINT64_C(0x0000020000000000)
+#define HM_CHANGED_GUEST_OTHER_MSRS UINT64_C(0x0000040000000000)
+#define HM_CHANGED_GUEST_ALL_MSRS ( HM_CHANGED_GUEST_EFER_MSR \
+ | HM_CHANGED_GUEST_KERNEL_GS_BASE \
+ | HM_CHANGED_GUEST_SYSCALL_MSRS \
+ | HM_CHANGED_GUEST_SYSENTER_MSR_MASK \
+ | HM_CHANGED_GUEST_TSC_AUX \
+ | HM_CHANGED_GUEST_OTHER_MSRS)
+
+#define HM_CHANGED_GUEST_HWVIRT UINT64_C(0x0000080000000000)
+#define HM_CHANGED_GUEST_MASK UINT64_C(0x00000ffffffffffc)
+
+#define HM_CHANGED_KEEPER_STATE_MASK UINT64_C(0xffff000000000000)
+
+#define HM_CHANGED_VMX_XCPT_INTERCEPTS UINT64_C(0x0001000000000000)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS UINT64_C(0x0002000000000000)
+#define HM_CHANGED_VMX_GUEST_LAZY_MSRS UINT64_C(0x0004000000000000)
+#define HM_CHANGED_VMX_ENTRY_EXIT_CTLS UINT64_C(0x0008000000000000)
+#define HM_CHANGED_VMX_MASK UINT64_C(0x000f000000000000)
+#define HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_DR_MASK \
+ | HM_CHANGED_VMX_GUEST_LAZY_MSRS)
+
+#define HM_CHANGED_SVM_XCPT_INTERCEPTS UINT64_C(0x0001000000000000)
+#define HM_CHANGED_SVM_MASK UINT64_C(0x0001000000000000)
+#define HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE HM_CHANGED_GUEST_DR_MASK
+
+#define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_MASK \
+ | HM_CHANGED_KEEPER_STATE_MASK)
+
+/** Mask of what state might have changed when IEM raised an exception.
+ * This is based on IEM_CPUMCTX_EXTRN_XCPT_MASK. */
+#define HM_CHANGED_RAISED_XCPT_MASK ( HM_CHANGED_GUEST_GPRS_MASK \
+ | HM_CHANGED_GUEST_RIP \
+ | HM_CHANGED_GUEST_RFLAGS \
+ | HM_CHANGED_GUEST_SS \
+ | HM_CHANGED_GUEST_CS \
+ | HM_CHANGED_GUEST_CR0 \
+ | HM_CHANGED_GUEST_CR3 \
+ | HM_CHANGED_GUEST_CR4 \
+ | HM_CHANGED_GUEST_APIC_TPR \
+ | HM_CHANGED_GUEST_EFER_MSR \
+ | HM_CHANGED_GUEST_DR7 \
+ | HM_CHANGED_GUEST_CR2 \
+ | HM_CHANGED_GUEST_SREG_MASK \
+ | HM_CHANGED_GUEST_TABLE_MASK)
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+/** Mask of what state might have changed when \#VMEXIT is emulated. */
+# define HM_CHANGED_SVM_VMEXIT_MASK ( HM_CHANGED_GUEST_RSP \
+ | HM_CHANGED_GUEST_RAX \
+ | HM_CHANGED_GUEST_RIP \
+ | HM_CHANGED_GUEST_RFLAGS \
+ | HM_CHANGED_GUEST_CS \
+ | HM_CHANGED_GUEST_SS \
+ | HM_CHANGED_GUEST_DS \
+ | HM_CHANGED_GUEST_ES \
+ | HM_CHANGED_GUEST_GDTR \
+ | HM_CHANGED_GUEST_IDTR \
+ | HM_CHANGED_GUEST_CR_MASK \
+ | HM_CHANGED_GUEST_EFER_MSR \
+ | HM_CHANGED_GUEST_DR6 \
+ | HM_CHANGED_GUEST_DR7 \
+ | HM_CHANGED_GUEST_OTHER_MSRS \
+ | HM_CHANGED_GUEST_HWVIRT \
+ | HM_CHANGED_SVM_MASK \
+ | HM_CHANGED_GUEST_APIC_TPR)
+
+/** Mask of what state might have changed when VMRUN is emulated. */
+# define HM_CHANGED_SVM_VMRUN_MASK HM_CHANGED_SVM_VMEXIT_MASK
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** Mask of what state might have changed when VM-exit is emulated.
+ *
+ * This is currently unused, but keeping it here in case we can get away with a bit more
+ * fine-grained state handling.
+ *
+ * @note Update IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK when this changes. */
+# define HM_CHANGED_VMX_VMEXIT_MASK ( HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 \
+ | HM_CHANGED_GUEST_DR7 | HM_CHANGED_GUEST_DR6 \
+ | HM_CHANGED_GUEST_EFER_MSR \
+ | HM_CHANGED_GUEST_SYSENTER_MSR_MASK \
+ | HM_CHANGED_GUEST_OTHER_MSRS /* for PAT MSR */ \
+ | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS \
+ | HM_CHANGED_GUEST_SREG_MASK \
+ | HM_CHANGED_GUEST_TR \
+ | HM_CHANGED_GUEST_LDTR | HM_CHANGED_GUEST_GDTR | HM_CHANGED_GUEST_IDTR \
+ | HM_CHANGED_GUEST_HWVIRT )
+#endif
+/** @} */
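
As an illustration of how dirty-state masks of this kind are typically consumed (a self-contained sketch with placeholder names, not code from the HM implementation): the export path tests a group mask, writes the corresponding hardware state, and then clears the bits.

#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values mirroring the scheme above (illustrative only). */
#define DEMO_CHANGED_GUEST_DR6      UINT64_C(0x0000000040000000)
#define DEMO_CHANGED_GUEST_DR7      UINT64_C(0x0000000080000000)
#define DEMO_CHANGED_GUEST_DR_MASK  (DEMO_CHANGED_GUEST_DR6 | DEMO_CHANGED_GUEST_DR7)

/* Export debug-register state only when a DR bit is marked dirty, then clear the group. */
static void demoExportDebugRegs(uint64_t *pfCtxChanged)
{
    if (*pfCtxChanged & DEMO_CHANGED_GUEST_DR_MASK)
    {
        /* ... write the DR values into the VMCS/VMCB here ... */
        *pfCtxChanged &= ~DEMO_CHANGED_GUEST_DR_MASK;
    }
}

int main(void)
{
    uint64_t fCtxChanged = DEMO_CHANGED_GUEST_DR7;      /* the guest touched DR7 */
    demoExportDebugRegs(&fCtxChanged);
    printf("remaining dirty bits: %#llx\n", (unsigned long long)fCtxChanged);
    return 0;
}
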
+
+
+/** Maximum number of exit reason statistics counters. */
+#define MAX_EXITREASON_STAT 0x100
+#define MASK_EXITREASON_STAT 0xff
+#define MASK_INJECT_IRQ_STAT 0xff
+
+
+/**
+ * HM event.
+ *
+ * VT-x and AMD-V common event injection structure.
+ */
+typedef struct HMEVENT
+{
+ /** Whether the event is pending. */
+ uint32_t fPending;
+ /** The error-code associated with the event. */
+ uint32_t u32ErrCode;
+ /** The length of the instruction in bytes (only relevant for software
+ * interrupts or software exceptions). */
+ uint32_t cbInstr;
+ /** Alignment. */
+ uint32_t u32Padding;
+ /** The encoded event (VM-entry interruption-information for VT-x or EVENTINJ
+ * for SVM). */
+ uint64_t u64IntInfo;
+ /** Guest virtual address if this is a page-fault event. */
+ RTGCUINTPTR GCPtrFaultAddress;
+} HMEVENT;
+/** Pointer to a HMEVENT struct. */
+typedef HMEVENT *PHMEVENT;
+/** Pointer to a const HMEVENT struct. */
+typedef const HMEVENT *PCHMEVENT;
+AssertCompileSizeAlignment(HMEVENT, 8);
+
+/** Initializer for a HMEVENT structure with only the interruption-information (u64IntInfo) member set. */
+#define HMEVENT_INIT_ONLY_INT_INFO(a_uIntInfo) { 0, 0, 0, 0, (a_uIntInfo), 0 }
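
A hedged, stand-alone sketch of how the positional initializer maps onto the member order of HMEVENT; DEMOHMEVENT below is a local copy of the layout for illustration only, and the example interruption-information value is merely a plausible #PF encoding, not taken from this header.

#include <assert.h>
#include <stdint.h>

/* Local copy of the HMEVENT member order, for illustration only. */
typedef struct DEMOHMEVENT
{
    uint32_t fPending;
    uint32_t u32ErrCode;
    uint32_t cbInstr;
    uint32_t u32Padding;
    uint64_t u64IntInfo;
    uint64_t GCPtrFaultAddress;  /* RTGCUINTPTR modelled as 64-bit here. */
} DEMOHMEVENT;

#define DEMOHMEVENT_INIT_ONLY_INT_INFO(a_uIntInfo) { 0, 0, 0, 0, (a_uIntInfo), 0 }

int main(void)
{
    /* 0x80000b0e is a plausible VT-x interruption-information value for a #PF. */
    DEMOHMEVENT Event = DEMOHMEVENT_INIT_ONLY_INT_INFO(UINT64_C(0x80000b0e));
    assert(Event.fPending == 0);
    assert(Event.u64IntInfo == UINT64_C(0x80000b0e));
    assert(Event.GCPtrFaultAddress == 0);
    return 0;
}
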
+
+/**
+ * VMX VMCS information, shared.
+ *
+ * This structure provides information maintained for and during the execution of a
+ * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
+ *
+ * Note! The members here are ordered and aligned based on estimated frequency of
+ * usage and grouped to fit within a cache line in hot code paths. Even subtle
+ * changes here have a noticeable effect in the bootsector benchmarks. Modify with
+ * care.
+ */
+typedef struct VMXVMCSINFOSHARED
+{
+ /** @name Real-mode emulation state.
+ * @{ */
+ /** Set if guest was executing in real mode (extra checks). */
+ bool fWasInRealMode;
+ /** Padding. */
+ bool afPadding0[7];
+ struct
+ {
+ X86DESCATTR AttrCS;
+ X86DESCATTR AttrDS;
+ X86DESCATTR AttrES;
+ X86DESCATTR AttrFS;
+ X86DESCATTR AttrGS;
+ X86DESCATTR AttrSS;
+ X86EFLAGS Eflags;
+ bool fRealOnV86Active;
+ bool afPadding1[3];
+ } RealMode;
+ /** @} */
+
+ /** @name LBR MSR data.
+ * @{ */
+ /** List of LastBranch-From-IP MSRs. */
+ uint64_t au64LbrFromIpMsr[32];
+ /** List of LastBranch-To-IP MSRs. */
+ uint64_t au64LbrToIpMsr[32];
+ /** List of LastBranch-Info MSRs. */
+ uint64_t au64LbrInfoMsr[32];
+ /** The MSR containing the index to the most recent branch record. */
+ uint64_t u64LbrTosMsr;
+ /** The MSR containing the last event record from IP value. */
+ uint64_t u64LerFromIpMsr;
+ /** The MSR containing the last event record to IP value. */
+ uint64_t u64LerToIpMsr;
+ /** @} */
+} VMXVMCSINFOSHARED;
+/** Pointer to a VMXVMCSINFOSHARED struct. */
+typedef VMXVMCSINFOSHARED *PVMXVMCSINFOSHARED;
+/** Pointer to a const VMXVMCSINFOSHARED struct. */
+typedef const VMXVMCSINFOSHARED *PCVMXVMCSINFOSHARED;
+AssertCompileSizeAlignment(VMXVMCSINFOSHARED, 8);
+
+
+/**
+ * VMX VMCS information, ring-0 only.
+ *
+ * This structure provides information maintained for and during the execution of a
+ * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
+ *
+ * Note! The members here are ordered and aligned based on estimated frequency of
+ * usage and grouped to fit within a cache line in hot code paths. Even subtle
+ * changes here have a noticeable effect in the bootsector benchmarks. Modify with
+ * care.
+ */
+typedef struct VMXVMCSINFO
+{
+ /** Pointer to the bits we share with ring-3. */
+ R3R0PTRTYPE(PVMXVMCSINFOSHARED) pShared;
+
+ /** @name Auxiliary information.
+ * @{ */
+ /** Host-physical address of the EPTP. */
+ RTHCPHYS HCPhysEPTP;
+ /** The VMCS launch state, see VMX_V_VMCS_LAUNCH_STATE_XXX. */
+ uint32_t fVmcsState;
+ /** The VMCS launch state of the shadow VMCS, see VMX_V_VMCS_LAUNCH_STATE_XXX. */
+ uint32_t fShadowVmcsState;
+ /** The host CPU for which its state has been exported to this VMCS. */
+ RTCPUID idHostCpuState;
+ /** The host CPU on which we last executed this VMCS. */
+ RTCPUID idHostCpuExec;
+ /** Number of guest MSRs in the VM-entry MSR-load area. */
+ uint32_t cEntryMsrLoad;
+ /** Number of guest MSRs in the VM-exit MSR-store area. */
+ uint32_t cExitMsrStore;
+ /** Number of host MSRs in the VM-exit MSR-load area. */
+ uint32_t cExitMsrLoad;
+ /** @} */
+
+ /** @name Cache of execution related VMCS fields.
+ * @{ */
+ /** Pin-based VM-execution controls. */
+ uint32_t u32PinCtls;
+ /** Processor-based VM-execution controls. */
+ uint32_t u32ProcCtls;
+ /** Secondary processor-based VM-execution controls. */
+ uint32_t u32ProcCtls2;
+ /** Tertiary processor-based VM-execution controls. */
+ uint64_t u64ProcCtls3;
+ /** VM-entry controls. */
+ uint32_t u32EntryCtls;
+ /** VM-exit controls. */
+ uint32_t u32ExitCtls;
+ /** Exception bitmap. */
+ uint32_t u32XcptBitmap;
+ /** Page-fault exception error-code mask. */
+ uint32_t u32XcptPFMask;
+ /** Page-fault exception error-code match. */
+ uint32_t u32XcptPFMatch;
+ /** Padding. */
+ uint32_t u32Alignment0;
+ /** TSC offset. */
+ uint64_t u64TscOffset;
+ /** VMCS link pointer. */
+ uint64_t u64VmcsLinkPtr;
+ /** CR0 guest/host mask. */
+ uint64_t u64Cr0Mask;
+ /** CR4 guest/host mask. */
+ uint64_t u64Cr4Mask;
+#ifndef IN_NEM_DARWIN
+ /** Current VMX_VMCS_HOST_RIP value (only used in HMR0A.asm). */
+ uint64_t uHostRip;
+ /** Current VMX_VMCS_HOST_RSP value (only used in HMR0A.asm). */
+ uint64_t uHostRsp;
+#endif
+ /** @} */
+
+ /** @name Host-virtual address of VMCS and related data structures.
+ * @{ */
+ /** The VMCS. */
+ R3R0PTRTYPE(void *) pvVmcs;
+ /** The shadow VMCS. */
+ R3R0PTRTYPE(void *) pvShadowVmcs;
+ /** The virtual-APIC page. */
+ R3R0PTRTYPE(uint8_t *) pbVirtApic;
+ /** The MSR bitmap. */
+ R3R0PTRTYPE(void *) pvMsrBitmap;
+ /** The VM-entry MSR-load area. */
+ R3R0PTRTYPE(void *) pvGuestMsrLoad;
+ /** The VM-exit MSR-store area. */
+ R3R0PTRTYPE(void *) pvGuestMsrStore;
+ /** The VM-exit MSR-load area. */
+ R3R0PTRTYPE(void *) pvHostMsrLoad;
+ /** @} */
+
+#ifndef IN_NEM_DARWIN
+ /** @name Host-physical address of VMCS and related data structures.
+ * @{ */
+ /** The VMCS. */
+ RTHCPHYS HCPhysVmcs;
+ /** The shadow VMCS. */
+ RTHCPHYS HCPhysShadowVmcs;
+ /** The virtual APIC page. */
+ RTHCPHYS HCPhysVirtApic;
+ /** The MSR bitmap. */
+ RTHCPHYS HCPhysMsrBitmap;
+ /** The VM-entry MSR-load area. */
+ RTHCPHYS HCPhysGuestMsrLoad;
+ /** The VM-exit MSR-store area. */
+ RTHCPHYS HCPhysGuestMsrStore;
+ /** The VM-exit MSR-load area. */
+ RTHCPHYS HCPhysHostMsrLoad;
+ /** @} */
+
+ /** @name R0-memory objects address for VMCS and related data structures.
+ * @{ */
+ /** R0-memory object for VMCS and related data structures. */
+ RTR0MEMOBJ hMemObj;
+ /** @} */
+#endif
+} VMXVMCSINFO;
+/** Pointer to a VMXVMCSINFO struct. */
+typedef VMXVMCSINFO *PVMXVMCSINFO;
+/** Pointer to a const VMXVMCSINFO struct. */
+typedef const VMXVMCSINFO *PCVMXVMCSINFO;
+AssertCompileSizeAlignment(VMXVMCSINFO, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 4);
+AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvShadowVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pbVirtApic, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrLoad, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrStore, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvHostMsrLoad, 8);
+#ifndef IN_NEM_DARWIN
+AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, hMemObj, 8);
+#endif
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_HMVMXCommon_h */
+
diff --git a/src/VBox/VMM/include/IEMInline.h b/src/VBox/VMM/include/IEMInline.h
new file mode 100644
index 00000000..0d6e3975
--- /dev/null
+++ b/src/VBox/VMM/include/IEMInline.h
@@ -0,0 +1,2880 @@
+/* $Id: IEMInline.h $ */
+/** @file
+ * IEM - Interpreted Execution Manager - Inlined Functions.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
+#define VMM_INCLUDED_SRC_include_IEMInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+
+/**
+ * Makes status code adjustments (pass up from I/O and access handlers)
+ * as well as maintaining statistics.
+ *
+ * @returns Strict VBox status code to pass up.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param rcStrict The status from executing an instruction.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
+{
+ if (rcStrict != VINF_SUCCESS)
+ {
+ if (RT_SUCCESS(rcStrict))
+ {
+ AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
+ || rcStrict == VINF_IOM_R3_IOPORT_READ
+ || rcStrict == VINF_IOM_R3_IOPORT_WRITE
+ || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
+ || rcStrict == VINF_IOM_R3_MMIO_READ
+ || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
+ || rcStrict == VINF_IOM_R3_MMIO_WRITE
+ || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
+ || rcStrict == VINF_CPUM_R3_MSR_READ
+ || rcStrict == VINF_CPUM_R3_MSR_WRITE
+ || rcStrict == VINF_EM_RAW_EMULATE_INSTR
+ || rcStrict == VINF_EM_RAW_TO_R3
+ || rcStrict == VINF_EM_TRIPLE_FAULT
+ || rcStrict == VINF_GIM_R3_HYPERCALL
+ /* raw-mode / virt handlers only: */
+ || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
+ || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
+ || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
+ || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
+ || rcStrict == VINF_SELM_SYNC_GDT
+ || rcStrict == VINF_CSAM_PENDING_ACTION
+ || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
+ /* nested hw.virt codes: */
+ || rcStrict == VINF_VMX_VMEXIT
+ || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
+ || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
+ || rcStrict == VINF_SVM_VMEXIT
+ , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
+ int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ if ( rcStrict == VINF_VMX_VMEXIT
+ && rcPassUp == VINF_SUCCESS)
+ rcStrict = VINF_SUCCESS;
+ else
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ if ( rcStrict == VINF_SVM_VMEXIT
+ && rcPassUp == VINF_SUCCESS)
+ rcStrict = VINF_SUCCESS;
+ else
+#endif
+ if (rcPassUp == VINF_SUCCESS)
+ pVCpu->iem.s.cRetInfStatuses++;
+ else if ( rcPassUp < VINF_EM_FIRST
+ || rcPassUp > VINF_EM_LAST
+ || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
+ {
+ Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
+ pVCpu->iem.s.cRetPassUpStatus++;
+ rcStrict = rcPassUp;
+ }
+ else
+ {
+ Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
+ pVCpu->iem.s.cRetInfStatuses++;
+ }
+ }
+ else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
+ pVCpu->iem.s.cRetAspectNotImplemented++;
+ else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+ pVCpu->iem.s.cRetInstrNotImplemented++;
+ else
+ pVCpu->iem.s.cRetErrStatuses++;
+ }
+ else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
+ {
+ pVCpu->iem.s.cRetPassUpStatus++;
+ rcStrict = pVCpu->iem.s.rcPassUp;
+ }
+
+ return rcStrict;
+}
+
+
+/**
+ * Sets the pass up status.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ * @param rcPassUp The pass up status. Must be informational.
+ * VINF_SUCCESS is not allowed.
+ */
+DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
+{
+ AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
+
+ int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
+ if (rcOldPassUp == VINF_SUCCESS)
+ pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
+ /* If both are EM scheduling codes, use EM priority rules. */
+ else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
+ && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
+ {
+ if (rcPassUp < rcOldPassUp)
+ {
+ Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
+ pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
+ }
+ else
+ Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
+ }
+ /* Override EM scheduling with specific status code. */
+ else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
+ {
+ Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
+ pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
+ }
+ /* Don't override specific status code, first come first served. */
+ else
+ Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
+ return VINF_SUCCESS;
+}
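
The merge rules implemented above boil down to: record the first non-EM status, and among EM scheduling codes keep the numerically lower (higher-priority) one. The following self-contained sketch mirrors that logic with placeholder status values standing in for VINF_SUCCESS and the VINF_EM_FIRST..VINF_EM_LAST range; it is not the IEM code itself.

#include <stdio.h>

/* Placeholder scheduling-status range standing in for VINF_EM_FIRST..VINF_EM_LAST. */
#define DEMO_EM_FIRST 100
#define DEMO_EM_LAST  199

static int demoIsEmSchedulingCode(int rc) { return rc >= DEMO_EM_FIRST && rc <= DEMO_EM_LAST; }

/* Merge a new informational status into the recorded pass-up status (0 = nothing recorded). */
static int demoMergePassUp(int rcOld, int rcNew)
{
    if (rcOld == 0)
        return rcNew;                                 /* nothing recorded yet */
    if (demoIsEmSchedulingCode(rcOld) && demoIsEmSchedulingCode(rcNew))
        return rcNew < rcOld ? rcNew : rcOld;         /* lower EM code wins */
    if (demoIsEmSchedulingCode(rcOld))
        return rcNew;                                 /* specific code overrides EM scheduling */
    return rcOld;                                     /* first specific code sticks */
}

int main(void)
{
    printf("%d\n", demoMergePassUp(150, 120));  /* -> 120, lower EM code has priority */
    printf("%d\n", demoMergePassUp(150, 250));  /* -> 250, specific status overrides EM */
    printf("%d\n", demoMergePassUp(250, 120));  /* -> 250, first specific code is kept */
    return 0;
}
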
+
+
+/**
+ * Calculates the CPU mode.
+ *
+ * This is mainly for updating IEMCPU::enmCpuMode.
+ *
+ * @returns CPU mode.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ */
+DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
+ return IEMMODE_64BIT;
+ if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
+ return IEMMODE_32BIT;
+ return IEMMODE_16BIT;
+}
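
Stripped of the CPUM accessors, the decision above amounts to: 64-bit mode when the guest is in long mode with a 64-bit (CS.L) code segment, otherwise CS.D selects 32-bit versus 16-bit. A minimal sketch under that assumption, using plain flags instead of the guest context:

#include <stdio.h>

typedef enum { DEMOMODE_16BIT, DEMOMODE_32BIT, DEMOMODE_64BIT } DEMOMODE;

/* fLongModeActive ~ EFER.LMA, fCsLongAttr ~ CS.L, fCsDefBig ~ CS.D (assumed mapping). */
static DEMOMODE demoCalcCpuMode(int fLongModeActive, int fCsLongAttr, int fCsDefBig)
{
    if (fLongModeActive && fCsLongAttr)
        return DEMOMODE_64BIT;
    if (fCsDefBig)
        return DEMOMODE_32BIT;
    return DEMOMODE_16BIT;
}

int main(void)
{
    printf("%d\n", demoCalcCpuMode(1, 1, 0));   /* 64-bit code segment in long mode */
    printf("%d\n", demoCalcCpuMode(0, 0, 1));   /* protected-mode 32-bit segment */
    printf("%d\n", demoCalcCpuMode(0, 0, 0));   /* real mode / 16-bit segment */
    return 0;
}
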
+
+
+#if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
+/**
+ * Initializes the execution state.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ * @param fBypassHandlers Whether to bypass access handlers.
+ *
+ * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
+ * side-effects in strict builds.
+ */
+DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers) RT_NOEXCEPT
+{
+ IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
+
+ pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
+ pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
+# ifdef VBOX_STRICT
+ pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
+ pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
+ pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
+ pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
+ pVCpu->iem.s.fPrefixes = 0xfeedbeef;
+ pVCpu->iem.s.uRexReg = 127;
+ pVCpu->iem.s.uRexB = 127;
+ pVCpu->iem.s.offModRm = 127;
+ pVCpu->iem.s.uRexIndex = 127;
+ pVCpu->iem.s.iEffSeg = 127;
+ pVCpu->iem.s.idxPrefix = 127;
+ pVCpu->iem.s.uVex3rdReg = 127;
+ pVCpu->iem.s.uVexLength = 127;
+ pVCpu->iem.s.fEvexStuff = 127;
+ pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
+# ifdef IEM_WITH_CODE_TLB
+ pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
+ pVCpu->iem.s.pbInstrBuf = NULL;
+ pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
+ pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
+ pVCpu->iem.s.offCurInstrStart = INT16_MAX;
+ pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
+# else
+ pVCpu->iem.s.offOpcode = 127;
+ pVCpu->iem.s.cbOpcode = 127;
+# endif
+# endif /* VBOX_STRICT */
+
+ pVCpu->iem.s.cActiveMappings = 0;
+ pVCpu->iem.s.iNextMapping = 0;
+ pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
+ pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
+ pVCpu->iem.s.fDisregardLock = false;
+ pVCpu->iem.s.fPendingInstructionBreakpoints = false;
+ pVCpu->iem.s.fPendingDataBreakpoints = false;
+ pVCpu->iem.s.fPendingIoBreakpoints = false;
+ if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
+ && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
+ { /* likely */ }
+ else
+ iemInitPendingBreakpointsSlow(pVCpu);
+}
+#endif /* VBOX_INCLUDED_vmm_dbgf_h */
+
+
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+/**
+ * Performs a minimal reinitialization of the execution state.
+ *
+ * This is intended to be used by VM-exits, SMM, LOADALL and other similar
+ * 'world-switch' types operations on the CPU. Currently only nested
+ * hardware-virtualization uses it.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
+ uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
+
+ pVCpu->iem.s.uCpl = uCpl;
+ pVCpu->iem.s.enmCpuMode = enmMode;
+ pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
+ pVCpu->iem.s.enmEffAddrMode = enmMode;
+ if (enmMode != IEMMODE_64BIT)
+ {
+ pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
+ pVCpu->iem.s.enmEffOpSize = enmMode;
+ }
+ else
+ {
+ pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
+ pVCpu->iem.s.enmEffOpSize = enmMode;
+ }
+ pVCpu->iem.s.iEffSeg = X86_SREG_DS;
+# ifndef IEM_WITH_CODE_TLB
+ /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
+ pVCpu->iem.s.offOpcode = 0;
+ pVCpu->iem.s.cbOpcode = 0;
+# endif
+ pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
+}
+#endif
+
+
+/**
+ * Counterpart to #iemInitExec that undoes evil strict-build stuff.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ */
+DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
+#ifdef VBOX_STRICT
+# ifdef IEM_WITH_CODE_TLB
+ NOREF(pVCpu);
+# else
+ pVCpu->iem.s.cbOpcode = 0;
+# endif
+#else
+ NOREF(pVCpu);
+#endif
+}
+
+
+/**
+ * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
+ *
+ * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
+ *
+ * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param rcStrict The status code to fiddle.
+ */
+DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
+{
+ iemUninitExec(pVCpu);
+ return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+}
+
+
+/**
+ * Macro used by the IEMExec* methods to check the given instruction length.
+ *
+ * Will return on failure!
+ *
+ * @param a_cbInstr The given instruction length.
+ * @param a_cbMin The minimum length.
+ */
+#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
+ AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
+ ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
+
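
The single unsigned comparison in the macro above is the usual two-sided range-check idiom: subtracting a_cbMin makes any value below the minimum wrap around to a huge unsigned number, so one compare enforces both cbMin <= cbInstr and cbInstr <= 15. A small stand-alone demonstration of the idiom (not IEM code):

#include <assert.h>

/* Returns non-zero when cbMin <= cbInstr <= 15, using a single unsigned compare. */
static int demoIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    return cbInstr - cbMin <= 15u - cbMin;   /* underflow makes too-small values huge */
}

int main(void)
{
    assert( demoIsValidInstrLen(1, 1));   /* minimum length accepted */
    assert( demoIsValidInstrLen(15, 1));  /* maximum x86 instruction length accepted */
    assert(!demoIsValidInstrLen(0, 1));   /* 0 - 1 wraps to UINT_MAX, rejected */
    assert(!demoIsValidInstrLen(16, 1));  /* beyond 15 bytes, rejected */
    return 0;
}
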
+
+#ifndef IEM_WITH_SETJMP
+
+/**
+ * Fetches the first opcode byte.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ * @param pu8 Where to return the opcode byte.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
+{
+ /*
+ * Check for hardware instruction breakpoints.
+ */
+ if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
+ { /* likely */ }
+ else
+ {
+ VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
+ pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+ if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+ { /* likely */ }
+ else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
+ return iemRaiseDebugException(pVCpu);
+ else
+ return rcStrict;
+ }
+
+ /*
+ * Fetch the first opcode byte.
+ */
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU8Slow(pVCpu, pu8);
+}
+
+#else /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the first opcode byte, longjmp on error.
+ *
+ * @returns The opcode byte.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+ /*
+ * Check for hardware instruction breakpoints.
+ */
+ if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
+ { /* likely */ }
+ else
+ {
+ VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
+ pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+ if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+ { /* likely */ }
+ else
+ {
+ if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
+ rcStrict = iemRaiseDebugException(pVCpu);
+ IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
+ }
+ }
+
+ /*
+ * Fetch the first opcode byte.
+ */
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf < pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
+ return pbBuf[offBuf];
+ }
+# else
+ uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ return pVCpu->iem.s.abOpcode[offOpcode];
+ }
+# endif
+ return iemOpcodeGetNextU8SlowJmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the first opcode byte, returns/throws automatically on failure.
+ *
+ * @param a_pu8 Where to return the opcode byte.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
+ if (rcStrict2 == VINF_SUCCESS) \
+ { /* likely */ } \
+ else \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
+#endif /* IEM_WITH_SETJMP */
+
+
+#ifndef IEM_WITH_SETJMP
+
+/**
+ * Fetches the next opcode byte.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ * @param pu8 Where to return the opcode byte.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
+{
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU8Slow(pVCpu, pu8);
+}
+
+#else /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode byte, longjmp on error.
+ *
+ * @returns The opcode byte.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf < pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
+ return pbBuf[offBuf];
+ }
+# else
+ uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ return pVCpu->iem.s.abOpcode[offOpcode];
+ }
+# endif
+ return iemOpcodeGetNextU8SlowJmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode byte, returns automatically on failure.
+ *
+ * @param a_pu8 Where to return the opcode byte.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
+ if (rcStrict2 == VINF_SUCCESS) \
+ { /* likely */ } \
+ else \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
+#endif /* IEM_WITH_SETJMP */
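
The two variants of this macro reflect two error-propagation styles: the non-setjmp build threads a strict status code out of every caller, while the setjmp build simply yields the byte and longjmps on failure. The sketch below shows only the status-threading idiom the non-setjmp variant expands to; all names are placeholders rather than IEM API.

#include <stdio.h>

#define DEMO_OK   0
#define DEMO_EOF  (-1)

/* A fetcher that can fail, in the spirit of iemOpcodeGetNextU8 (placeholder, not IEM code). */
static int demoGetNextU8(const unsigned char *pab, unsigned cb, unsigned *poff, unsigned char *pb)
{
    if (*poff < cb)
    {
        *pb = pab[(*poff)++];
        return DEMO_OK;
    }
    return DEMO_EOF;
}

static const unsigned char g_abStream[] = { 0x90, 0xcc };
static unsigned            g_offStream  = 0;

/* Expands like the non-setjmp IEM_OPCODE_GET_NEXT_U8: bail out of the caller on failure. */
#define DEMO_GET_NEXT_U8(a_pb) \
    do { \
        int rc2 = demoGetNextU8(g_abStream, sizeof(g_abStream), &g_offStream, (a_pb)); \
        if (rc2 != DEMO_OK) \
            return rc2; \
    } while (0)

static int demoDecode(void)
{
    unsigned char b1, b2, b3;
    DEMO_GET_NEXT_U8(&b1);
    DEMO_GET_NEXT_U8(&b2);
    printf("fetched %#x %#x\n", (unsigned)b1, (unsigned)b2);
    DEMO_GET_NEXT_U8(&b3);    /* runs off the end: propagates DEMO_EOF to our caller */
    return DEMO_OK;
}

int main(void)
{
    printf("decode rc=%d\n", demoDecode());
    return 0;
}
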
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed byte from the opcode stream.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pi8 Where to return the signed byte.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
+{
+ return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
+}
+#endif /* !IEM_WITH_SETJMP */
+
+
+/**
+ * Fetches the next signed byte from the opcode stream, returning automatically
+ * on failure.
+ *
+ * @param a_pi8 Where to return the signed byte.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else /* IEM_WITH_SETJMP */
+# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
+
+#endif /* IEM_WITH_SETJMP */
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed byte from the opcode stream, extending it to
+ * unsigned 16-bit.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu16 Where to return the unsigned word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
+
+ *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
+ pVCpu->iem.s.offOpcode = offOpcode + 1;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next signed byte from the opcode stream, sign-extending it to a
+ * word and returning automatically on failure.
+ *
+ * @param a_pu16 Where to return the word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed byte from the opcode stream, extending it to
+ * unsigned 32-bit.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu32 Where to return the unsigned dword.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
+
+ *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
+ pVCpu->iem.s.offOpcode = offOpcode + 1;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next signed byte from the opcode stream, sign-extending it to a
+ * double word and returning automatically on failure.
+ *
+ * @param a_pu32 Where to return the double word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed byte from the opcode stream, extending it to
+ * unsigned 64-bit.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu64 Where to return the unsigned qword.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
+
+ *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
+ pVCpu->iem.s.offOpcode = offOpcode + 1;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next signed byte from the opcode stream, sign-extending it to a
+ * quad word and returning automatically on failure.
+ *
+ * @param a_pu64 Where to return the quad word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
+#endif
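
All of the S8 sign-extension fetchers above rely on the same mechanism: the opcode byte is cast to int8_t and assigned to a wider unsigned destination, which performs the sign extension. A minimal stand-alone illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t  bOpcode = 0xf0;                 /* e.g. a disp8 of -16 */
    uint16_t u16     = (int8_t)bOpcode;      /* -> 0xfff0 */
    uint32_t u32     = (int8_t)bOpcode;      /* -> 0xfffffff0 */
    uint64_t u64     = (int8_t)bOpcode;      /* -> 0xfffffffffffffff0 */
    assert(u16 == 0xfff0);
    assert(u32 == UINT32_C(0xfffffff0));
    assert(u64 == UINT64_C(0xfffffffffffffff0));
    return 0;
}
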
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the
+ * calling thread.
+ * @param pu8 Where to return the opcode byte.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
+{
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ pVCpu->iem.s.offModRm = offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU8Slow(pVCpu, pu8);
+}
+#else /* IEM_WITH_SETJMP */
+/**
+ * Fetches the next opcode byte, which is a ModR/M byte, longjmp on error.
+ *
+ * @returns The opcode byte.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ pVCpu->iem.s.offModRm = offBuf;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf < pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
+ return pbBuf[offBuf];
+ }
+# else
+ uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
+ pVCpu->iem.s.offModRm = offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
+ return pVCpu->iem.s.abOpcode[offOpcode];
+ }
+# endif
+ return iemOpcodeGetNextU8SlowJmp(pVCpu);
+}
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
+ * on failure.
+ *
+ * Will note down the position of the ModR/M byte for VT-x exits.
+ *
+ * @param a_pbRm Where to return the RM opcode byte.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
+ if (rcStrict2 == VINF_SUCCESS) \
+ { /* likely */ } \
+ else \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
+#endif /* IEM_WITH_SETJMP */
+
+
+#ifndef IEM_WITH_SETJMP
+
+/**
+ * Fetches the next opcode word.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu16 Where to return the opcode word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
+{
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
+# endif
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU16Slow(pVCpu, pu16);
+}
+
+#else /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode word, longjmp on error.
+ *
+ * @returns The opcode word.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint16_t const *)&pbBuf[offBuf];
+# else
+ return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
+# endif
+ }
+# else /* !IEM_WITH_CODE_TLB */
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
+# endif
+ }
+# endif /* !IEM_WITH_CODE_TLB */
+ return iemOpcodeGetNextU16SlowJmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode word, returns automatically on failure.
+ *
+ * @param a_pu16 Where to return the opcode word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
+#endif
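
When IEM_USE_UNALIGNED_DATA_ACCESS is not defined, the word/dword/qword fetchers assemble the value from individual opcode bytes in little-endian order (the RT_MAKE_U16/U32/U64 path). A self-contained sketch of that assembly, with a local helper standing in for RT_MAKE_U16:

#include <assert.h>
#include <stdint.h>

/* Local stand-in for RT_MAKE_U16: low byte first, i.e. little-endian assembly. */
static uint16_t demoMakeU16(uint8_t bLo, uint8_t bHi)
{
    return (uint16_t)(bLo | ((uint16_t)bHi << 8));
}

int main(void)
{
    /* A 16-bit immediate 0x1234 appears in the instruction stream as 34 12. */
    uint8_t const abOpcode[] = { 0x34, 0x12 };
    assert(demoMakeU16(abOpcode[0], abOpcode[1]) == 0x1234);
    return 0;
}
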
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next opcode word, zero extending it to a double word.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu32 Where to return the opcode double word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
+
+ *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
+ pVCpu->iem.s.offOpcode = offOpcode + 2;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode word and zero extends it to a double word, returns
+ * automatically on failure.
+ *
+ * @param a_pu32 Where to return the opcode double word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next opcode word, zero extending it to a quad word.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu64 Where to return the opcode quad word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
+
+ *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
+ pVCpu->iem.s.offOpcode = offOpcode + 2;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode word and zero extends it to a quad word, returns
+ * automatically on failure.
+ *
+ * @param a_pu64 Where to return the opcode quad word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed word from the opcode stream.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pi16 Where to return the signed word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
+{
+ return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
+}
+#endif /* !IEM_WITH_SETJMP */
+
+
+/**
+ * Fetches the next signed word from the opcode stream, returning automatically
+ * on failure.
+ *
+ * @param a_pi16 Where to return the signed word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+
+/**
+ * Fetches the next opcode dword.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu32 Where to return the opcode double word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
+{
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3]);
+# endif
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU32Slow(pVCpu, pu32);
+}
+
+#else /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode dword, longjmp on error.
+ *
+ * @returns The opcode dword.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint32_t const *)&pbBuf[offBuf];
+# else
+ return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
+ pbBuf[offBuf + 1],
+ pbBuf[offBuf + 2],
+ pbBuf[offBuf + 3]);
+# endif
+ }
+# else
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3]);
+# endif
+ }
+# endif
+ return iemOpcodeGetNextU32SlowJmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode dword, returns automatically on failure.
+ *
+ * @param a_pu32 Where to return the opcode dword.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next opcode dword, zero extending it to a quad word.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu64 Where to return the opcode quad word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
+
+ *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3]);
+ pVCpu->iem.s.offOpcode = offOpcode + 4;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode dword and zero extends it to a quad word, returns
+ * automatically on failure.
+ *
+ * @param a_pu64 Where to return the opcode quad word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next signed double word from the opcode stream.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pi32 Where to return the signed double word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
+{
+ return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
+}
+#endif
+
+/**
+ * Fetches the next signed double word from the opcode stream, returning
+ * automatically on failure.
+ *
+ * @param a_pi32 Where to return the signed double word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+/**
+ * Fetches the next opcode dword, sign extending it into a quad word.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu64 Where to return the opcode quad word.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
+{
+ uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
+ return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
+
+ int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3]);
+ *pu64 = i32;
+ pVCpu->iem.s.offOpcode = offOpcode + 4;
+ return VINF_SUCCESS;
+}
+#endif /* !IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode double word and sign extends it to a quad word,
+ * returns automatically on failure.
+ *
+ * @param a_pu64 Where to return the opcode quad word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+
+/**
+ * Fetches the next opcode qword.
+ *
+ * @returns Strict VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pu64 Where to return the opcode qword.
+ */
+DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
+{
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
+ {
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3],
+ pVCpu->iem.s.abOpcode[offOpcode + 4],
+ pVCpu->iem.s.abOpcode[offOpcode + 5],
+ pVCpu->iem.s.abOpcode[offOpcode + 6],
+ pVCpu->iem.s.abOpcode[offOpcode + 7]);
+# endif
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
+ return VINF_SUCCESS;
+ }
+ return iemOpcodeGetNextU64Slow(pVCpu, pu64);
+}
+
+#else /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode qword, longjmp on error.
+ *
+ * @returns The opcode qword.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# ifdef IEM_WITH_CODE_TLB
+ uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
+ uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
+ if (RT_LIKELY( pbBuf != NULL
+ && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
+ {
+ pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint64_t const *)&pbBuf[offBuf];
+# else
+ return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
+ pbBuf[offBuf + 1],
+ pbBuf[offBuf + 2],
+ pbBuf[offBuf + 3],
+ pbBuf[offBuf + 4],
+ pbBuf[offBuf + 5],
+ pbBuf[offBuf + 6],
+ pbBuf[offBuf + 7]);
+# endif
+ }
+# else
+ uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
+ if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
+ {
+ pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
+# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
+ return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
+# else
+ return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
+ pVCpu->iem.s.abOpcode[offOpcode + 1],
+ pVCpu->iem.s.abOpcode[offOpcode + 2],
+ pVCpu->iem.s.abOpcode[offOpcode + 3],
+ pVCpu->iem.s.abOpcode[offOpcode + 4],
+ pVCpu->iem.s.abOpcode[offOpcode + 5],
+ pVCpu->iem.s.abOpcode[offOpcode + 6],
+ pVCpu->iem.s.abOpcode[offOpcode + 7]);
+# endif
+ }
+# endif
+ return iemOpcodeGetNextU64SlowJmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fetches the next opcode quad word, returns automatically on failure.
+ *
+ * @param a_pu64 Where to return the opcode quad word.
+ * @remark Implicitly references pVCpu.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
+#endif
+
+
+/** @name Misc Worker Functions.
+ * @{
+ */
+
+/**
+ * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
+ * not (kind of obsolete now).
+ *
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
+
+/**
+ * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
+ *
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param a_fEfl The new EFLAGS.
+ */
+#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
+
+
+/**
+ * Loads a NULL data selector into a selector register, both the hidden and
+ * visible parts, in protected mode.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pSReg Pointer to the segment register.
+ * @param uRpl The RPL.
+ */
+DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
+{
+    /** @todo Testcase: write a testcase checking what happens when loading a NULL
+ * data selector in protected mode. */
+ pSReg->Sel = uRpl;
+ pSReg->ValidSel = uRpl;
+ pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
+ if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
+ {
+ /* VT-x (Intel 3960x) observed doing something like this. */
+ pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
+ pSReg->u32Limit = UINT32_MAX;
+ pSReg->u64Base = 0;
+ }
+ else
+ {
+ pSReg->Attr.u = X86DESCATTR_UNUSABLE;
+ pSReg->u32Limit = 0;
+ pSReg->u64Base = 0;
+ }
+}
+
+/** @} */
+
+
+/*
+ *
+ * Helper routines.
+ * Helper routines.
+ * Helper routines.
+ *
+ */
+
+/**
+ * Recalculates the effective operand size.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ switch (pVCpu->iem.s.enmCpuMode)
+ {
+ case IEMMODE_16BIT:
+ pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
+ break;
+ case IEMMODE_32BIT:
+ pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
+ break;
+ case IEMMODE_64BIT:
+ switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
+ {
+ case 0:
+ pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
+ break;
+ case IEM_OP_PRF_SIZE_OP:
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
+ break;
+ case IEM_OP_PRF_SIZE_REX_W:
+ case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
+ break;
+ }
+ break;
+ default:
+ AssertFailed();
+ }
+}
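
The switch above encodes the standard operand-size rules: in 16-bit and 32-bit code the 0x66 prefix toggles between 16 and 32 bits, while in 64-bit code REX.W forces 64 bits and otherwise 0x66 selects 16 bits. The following self-contained sketch mirrors that decision table, simplifying the 64-bit default operand size to 32 bits (the common case before iemRecalEffOpSize64Default is applied); it uses plain flags rather than the IEM prefix bit-field.

#include <stdio.h>

typedef enum { DEMOOP_16, DEMOOP_32, DEMOOP_64 } DEMOOPSIZE;

static DEMOOPSIZE demoEffOpSize(DEMOOPSIZE enmCpuMode, int fOpSizePrefix, int fRexW)
{
    switch (enmCpuMode)
    {
        case DEMOOP_16: return fOpSizePrefix ? DEMOOP_32 : DEMOOP_16;
        case DEMOOP_32: return fOpSizePrefix ? DEMOOP_16 : DEMOOP_32;
        default:        /* 64-bit mode: REX.W wins, then 0x66, else the 32-bit default. */
            if (fRexW)
                return DEMOOP_64;
            if (fOpSizePrefix)
                return DEMOOP_16;
            return DEMOOP_32;
    }
}

int main(void)
{
    printf("%d\n", demoEffOpSize(DEMOOP_64, 1, 1));  /* REX.W + 0x66 -> 64-bit */
    printf("%d\n", demoEffOpSize(DEMOOP_32, 1, 0));  /* 0x66 in 32-bit code -> 16-bit */
    return 0;
}
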
+
+
+/**
+ * Sets the default operand size to 64-bit and recalculates the effective
+ * operand size.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
+ pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
+ if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
+ else
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
+}
+
+
+/**
+ * Sets the default operand size to 64-bit and recalculates the effective
+ * operand size, with Intel ignoring any operand size prefix (AMD respects it).
+ *
+ * This is for the relative jumps.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
+ pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
+ if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
+ || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
+ else
+ pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
+}
+
+
+
+
+/** @name Register Access.
+ * @{
+ */
+
+/**
+ * Gets a reference (pointer) to the specified hidden segment register.
+ *
+ * @returns Hidden register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iSegReg The segment register.
+ */
+DECLINLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
+{
+ Assert(iSegReg < X86_SREG_COUNT);
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
+
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+ return pSReg;
+}
+
+
+/**
+ * Ensures that the given hidden segment register is up to date.
+ *
+ * @returns Hidden register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pSReg The segment register.
+ */
+DECLINLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
+{
+ Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+ NOREF(pVCpu);
+ return pSReg;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified segment register (the selector
+ * value).
+ *
+ * @returns Pointer to the selector variable.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iSegReg The segment register.
+ */
+DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
+{
+ Assert(iSegReg < X86_SREG_COUNT);
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
+}
+
+
+/**
+ * Fetches the selector value of a segment register.
+ *
+ * @returns The selector value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iSegReg The segment register.
+ */
+DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
+{
+ Assert(iSegReg < X86_SREG_COUNT);
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
+}
+
+
+/**
+ * Fetches the base address value of a segment register.
+ *
+ * @returns The base address value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iSegReg The segment register.
+ */
+DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
+{
+ Assert(iSegReg < X86_SREG_COUNT);
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The general purpose register.
+ */
+DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg];
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified 8-bit general purpose register.
+ *
+ * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
+ {
+ Assert(iReg < 16);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
+ }
+ /* high 8-bit register. */
+ Assert(iReg < 8);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
+}
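
The iReg & 3 plus bHi lookup works because AH/CH/DH/BH alias bits 8..15 of the corresponding low register. A stand-alone union sketch of that aliasing on a little-endian host (which is what the guest register layout assumes); DEMOGREG is a simplified illustration, not the real CPUM type.

#include <assert.h>
#include <stdint.h>

/* Simplified little-endian GPR layout: u8 aliases AL, s.bHi aliases AH. */
typedef union DEMOGREG
{
    uint64_t u64;
    uint32_t u32;
    uint16_t u16;
    uint8_t  u8;
    struct { uint8_t bLo, bHi; } s;
} DEMOGREG;

int main(void)
{
    DEMOGREG Rax;
    Rax.u64 = 0x1122334455667788ULL;
    assert(Rax.u8    == 0x88);   /* AL = lowest byte */
    assert(Rax.s.bHi == 0x77);   /* AH = bits 8..15 */
    Rax.s.bHi = 0xab;            /* writing AH leaves the rest of RAX intact */
    assert(Rax.u16   == 0xab88);
    return 0;
}
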
+
+
+/**
+ * Gets a reference (pointer) to the specified 16-bit general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified 32-bit general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified 64-bit general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
+ *
+ * @returns Register reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
+}
+
+
+/**
+ * Gets a reference (pointer) to the specified segment register's base address.
+ *
+ * @returns Segment register base address reference.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iSegReg The segment selector.
+ */
+DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
+{
+ Assert(iSegReg < X86_SREG_COUNT);
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
+}
+
+
+/**
+ * Fetches the value of an 8-bit general purpose register.
+ *
+ * @returns The register value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ return *iemGRegRefU8(pVCpu, iReg);
+}
+
+
+/**
+ * Fetches the value of a 16-bit general purpose register.
+ *
+ * @returns The register value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
+}
+
+
+/**
+ * Fetches the value of a 32-bit general purpose register.
+ *
+ * @returns The register value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
+}
+
+
+/**
+ * Fetches the value of a 64-bit general purpose register.
+ *
+ * @returns The register value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iReg The register.
+ */
+DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
+{
+ Assert(iReg < 16);
+ return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
+}
+
+
+/**
+ * Gets the address of the top of the stack.
+ *
+ * @returns The effective RSP/ESP/SP value.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
+{
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ return pVCpu->cpum.GstCtx.rsp;
+ if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ return pVCpu->cpum.GstCtx.esp;
+ return pVCpu->cpum.GstCtx.sp;
+}
+
+
+/**
+ * Updates the RIP/EIP/IP to point to the next instruction.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbInstr The number of bytes to add.
+ */
+DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+{
+ /*
+ * Advance RIP.
+ *
+ * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
+ * while in all other modes except LM64 the updates are 32-bit. This means
+ * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
+ * 4GB and 64KB rollovers, and decide whether anything needs masking.
+ *
+ * See PC wrap around tests in bs3-cpu-weird-1.
+ */
+ uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
+ uint64_t const uRipNext = uRipPrev + cbInstr;
+ if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
+ || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT))
+ pVCpu->cpum.GstCtx.rip = uRipNext;
+ else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
+ pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
+ else
+ pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
+}
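+
+
+/* Worked example (illustrative only) of the rollover test above:
+ * @code{.c}
+ // 16-bit code with IP = 0xFFFE executing a 3 byte instruction:
+ uint64_t const uRipPrev = UINT64_C(0xfffe);
+ uint64_t const uRipNext = uRipPrev + 3;     // 0x10001
+ // Bit 16 differs between the old and new value, so the fast path is not
+ // taken: a 286 target masks the result down to 0x0001, while 386+ targets
+ // (outside 64-bit mode) truncate it to 32 bits instead.
+ bool const fNoRollover = !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)));   // false here
+ * @endcode
+ */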
+
+
+/**
+ * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
+ * following EFLAGS bits are set:
+ * - X86_EFL_RF - clear it.
+ * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
+ * - X86_EFL_TF - generate single step \#DB trap.
+ * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
+ * instruction).
+ *
+ * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
+ * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
+ * takes priority over both NMIs and hardware interrupts. So, neither is
+ * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
+ * either unsupported or will be triggered on top of any \#DB raised here.)
+ *
+ * The RF flag only needs to be cleared here as it only suppresses instruction
+ * breakpoints which are not raised here (happens synchronously during
+ * instruction fetching).
+ *
+ * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
+ * status has no bearing on whether \#DB exceptions are raised.
+ *
+ * @note This must *NOT* be called by the two instructions setting the
+ * CPUMCTX_INHIBIT_SHADOW_SS flag.
+ *
+ * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
+ * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
+ * Stacks}
+ */
+static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ /*
+ * Normally we're just here to clear RF and/or interrupt shadow bits.
+ */
+ if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
+ pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
+ else
+ {
+ /*
+ * Raise a #DB or/and DBGF event.
+ */
+ VBOXSTRICTRC rcStrict;
+ if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
+ {
+ IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+ pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
+ if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
+ pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
+ pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
+ LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
+ pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
+ pVCpu->cpum.GstCtx.rflags.uBoth));
+
+ pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
+ rcStrict = iemRaiseDebugException(pVCpu);
+
+ /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
+ if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
+ {
+ rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
+ LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ }
+ else
+ {
+ Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
+ rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
+ LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
+ return rcStrict;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ /*
+ * We assume that most of the time nothing actually needs doing here.
+ */
+ AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
+ if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
+ & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
+ return VINF_SUCCESS;
+ return iemFinishInstructionWithFlagsSet(pVCpu);
+}
+
+
+/**
+ * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
+ * and CPUMCTX_INHIBIT_SHADOW.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbInstr The number of bytes to add.
+ */
+DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+{
+ iemRegAddToRip(pVCpu, cbInstr);
+ return iemRegFinishClearingRF(pVCpu);
+}
+
+
+/**
+ * Extended version of iemFinishInstructionWithFlagsSet that goes with
+ * iemRegAddToRipAndFinishingClearingRfEx.
+ *
+ * See iemFinishInstructionWithFlagsSet() for details.
+ */
+static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ /*
+ * Raise a #DB.
+ */
+ IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+ pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
+ pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
+ | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
+ /** @todo Do we set all pending \#DB events, or just one? */
+ LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
+ pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
+ pVCpu->cpum.GstCtx.rflags.uBoth));
+ pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
+ return iemRaiseDebugException(pVCpu);
+}
+
+
+/**
+ * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
+ * others potentially updating EFLAGS.TF.
+ *
+ * The single step event must be generated using the TF value at the start of
+ * the instruction, not the new value set by it.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbInstr The number of bytes to add.
+ * @param fEflOld The EFLAGS at the start of the instruction
+ * execution.
+ */
+DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
+{
+ iemRegAddToRip(pVCpu, cbInstr);
+ if (!(fEflOld & X86_EFL_TF))
+ return iemRegFinishClearingRF(pVCpu);
+ return iemFinishInstructionWithTfSet(pVCpu);
+}
+
+
+/**
+ * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
+}
+
+
+/**
+ * Adds to the stack pointer.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbToAdd The number of bytes to add (8-bit!).
+ */
+DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
+{
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ pVCpu->cpum.GstCtx.rsp += cbToAdd;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ pVCpu->cpum.GstCtx.esp += cbToAdd;
+ else
+ pVCpu->cpum.GstCtx.sp += cbToAdd;
+}
+
+
+/**
+ * Subtracts from the stack pointer.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbToSub The number of bytes to subtract (8-bit!).
+ */
+DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
+{
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ pVCpu->cpum.GstCtx.rsp -= cbToSub;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ pVCpu->cpum.GstCtx.esp -= cbToSub;
+ else
+ pVCpu->cpum.GstCtx.sp -= cbToSub;
+}
+
+
+/**
+ * Adds to the temporary stack pointer.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pTmpRsp The temporary SP/ESP/RSP to update.
+ * @param cbToAdd The number of bytes to add (16-bit).
+ */
+DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
+{
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ pTmpRsp->u += cbToAdd;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ pTmpRsp->DWords.dw0 += cbToAdd;
+ else
+ pTmpRsp->Words.w0 += cbToAdd;
+}
+
+
+/**
+ * Subtracts from the temporary stack pointer.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pTmpRsp The temporary SP/ESP/RSP to update.
+ * @param cbToSub The number of bytes to subtract.
+ * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
+ * expecting that.
+ */
+DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
+{
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ pTmpRsp->u -= cbToSub;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ pTmpRsp->DWords.dw0 -= cbToSub;
+ else
+ pTmpRsp->Words.w0 -= cbToSub;
+}
+
+
+/**
+ * Calculates the effective stack address for a push of the specified size as
+ * well as the new RSP value (upper bits may be masked).
+ *
+ * @returns Effective stack address for the push.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbItem The size of the stack item to push.
+ * @param puNewRsp Where to return the new RSP value.
+ */
+DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
+{
+ RTUINT64U uTmpRsp;
+ RTGCPTR GCPtrTop;
+ uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ GCPtrTop = uTmpRsp.u -= cbItem;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
+ else
+ GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
+ *puNewRsp = uTmpRsp.u;
+ return GCPtrTop;
+}
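+
+
+/* Rough usage sketch (illustrative only; the actual store and its error
+ * handling are elided) showing why the new RSP is returned separately:
+ * the register is only committed once the memory write has succeeded.
+ * @code{.c}
+ uint64_t uNewRsp;
+ RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint32_t), &uNewRsp);
+ // ... store the 32-bit value to SS:GCPtrTop using the usual memory helpers ...
+ pVCpu->cpum.GstCtx.rsp = uNewRsp;   // commit the new RSP only if the store succeeded
+ * @endcode
+ */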
+
+
+/**
+ * Gets the current stack pointer and calculates the value after a pop of the
+ * specified size.
+ *
+ * @returns Current stack pointer.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param cbItem The size of the stack item to pop.
+ * @param puNewRsp Where to return the new RSP value.
+ */
+DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
+{
+ RTUINT64U uTmpRsp;
+ RTGCPTR GCPtrTop;
+ uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ {
+ GCPtrTop = uTmpRsp.u;
+ uTmpRsp.u += cbItem;
+ }
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ {
+ GCPtrTop = uTmpRsp.DWords.dw0;
+ uTmpRsp.DWords.dw0 += cbItem;
+ }
+ else
+ {
+ GCPtrTop = uTmpRsp.Words.w0;
+ uTmpRsp.Words.w0 += cbItem;
+ }
+ *puNewRsp = uTmpRsp.u;
+ return GCPtrTop;
+}
+
+
+/**
+ * Calculates the effective stack address for a push of the specified size as
+ * well as the new temporary RSP value (upper bits may be masked).
+ *
+ * @returns Effective stack address for the push.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pTmpRsp The temporary stack pointer. This is updated.
+ * @param cbItem The size of the stack item to push.
+ */
+DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
+{
+ RTGCPTR GCPtrTop;
+
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ GCPtrTop = pTmpRsp->u -= cbItem;
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
+ else
+ GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
+ return GCPtrTop;
+}
+
+
+/**
+ * Gets the effective stack address for a pop of the specified size and
+ * calculates and updates the temporary RSP.
+ *
+ * @returns Current stack pointer.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pTmpRsp The temporary stack pointer. This is updated.
+ * @param cbItem The size of the stack item to pop.
+ */
+DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
+{
+ RTGCPTR GCPtrTop;
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ {
+ GCPtrTop = pTmpRsp->u;
+ pTmpRsp->u += cbItem;
+ }
+ else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+ {
+ GCPtrTop = pTmpRsp->DWords.dw0;
+ pTmpRsp->DWords.dw0 += cbItem;
+ }
+ else
+ {
+ GCPtrTop = pTmpRsp->Words.w0;
+ pTmpRsp->Words.w0 += cbItem;
+ }
+ return GCPtrTop;
+}
+
+/** @} */
+
+
+/** @name FPU access and helpers.
+ *
+ * @{
+ */
+
+
+/**
+ * Hook for preparing to use the host FPU.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#ifdef IN_RING3
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
+#else
+ CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+}
+
+
+/**
+ * Hook for preparing to use the host FPU for SSE.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ iemFpuPrepareUsage(pVCpu);
+}
+
+
+/**
+ * Hook for preparing to use the host FPU for AVX.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ iemFpuPrepareUsage(pVCpu);
+}
+
+
+/**
+ * Hook for actualizing the guest FPU state before the interpreter reads it.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#ifdef IN_RING3
+ NOREF(pVCpu);
+#else
+ CPUMRZFpuStateActualizeForRead(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+}
+
+
+/**
+ * Hook for actualizing the guest FPU state before the interpreter changes it.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#ifdef IN_RING3
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
+#else
+ CPUMRZFpuStateActualizeForChange(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+}
+
+
+/**
+ * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
+ * only.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
+ NOREF(pVCpu);
+#else
+ CPUMRZFpuStateActualizeSseForRead(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+}
+
+
+/**
+ * Hook for actualizing the guest XMM0..15 and MXCSR register state for
+ * read+write.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
+#else
+ CPUMRZFpuStateActualizeForChange(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+
+ /* Make sure any changes are loaded the next time around. */
+ pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
+}
+
+
+/**
+ * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
+ * only.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#ifdef IN_RING3
+ NOREF(pVCpu);
+#else
+ CPUMRZFpuStateActualizeAvxForRead(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+}
+
+
+/**
+ * Hook for actualizing the guest YMM0..15 and MXCSR register state for
+ * read+write.
+ *
+ * This is necessary in ring-0 and raw-mode context (nop in ring-3).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+#ifdef IN_RING3
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
+#else
+ CPUMRZFpuStateActualizeForChange(pVCpu);
+#endif
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
+
+ /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
+ pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
+}
+
+
+/**
+ * Stores a QNaN value (the real-indefinite encoding) into an FPU register.
+ *
+ * @param pReg Pointer to the register.
+ */
+DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
+{
+ pReg->au32[0] = UINT32_C(0x00000000);
+ pReg->au32[1] = UINT32_C(0xc0000000);
+ pReg->au16[4] = UINT16_C(0xffff);
+}
+
+
+/**
+ * Updates the FOP, FPU.CS and FPUIP registers.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pFpuCtx The FPU context.
+ */
+DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
+{
+ Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
+ pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
+ /** @todo x87.CS and FPUIP need to be kept separately. */
+ if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
+ {
+ /** @todo Testcase: we're making assumptions here about how FPUIP and FPUDP
+ * are handled in real mode, based on the fnsave and fnstenv images. */
+ pFpuCtx->CS = 0;
+ pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
+ }
+ else if (!IEM_IS_LONG_MODE(pVCpu))
+ {
+ pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
+ pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
+ }
+ else
+ *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
+}
+
+
+
+
+
+/**
+ * Marks the specified stack register as free (for FFREE).
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param iStReg The register to free.
+ */
+DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
+{
+ Assert(iStReg < 8);
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+ pFpuCtx->FTW &= ~RT_BIT(iReg);
+}
+
+
+/**
+ * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t uFsw = pFpuCtx->FSW;
+ uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
+ uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
+ uFsw &= ~X86_FSW_TOP_MASK;
+ uFsw |= uTop;
+ pFpuCtx->FSW = uFsw;
+}
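+
+
+/* Worked example (illustrative only) of the modulo-8 TOP arithmetic above:
+ * @code{.c}
+ uint16_t uTop = 7 << X86_FSW_TOP_SHIFT;                         // 0x3800
+ uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;    // wraps back to 0
+ * @endcode
+ */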
+
+
+/**
+ * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t uFsw = pFpuCtx->FSW;
+ uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
+ uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
+ uFsw &= ~X86_FSW_TOP_MASK;
+ uFsw |= uTop;
+ pFpuCtx->FSW = uFsw;
+}
+
+
+
+
+DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+ if (pFpuCtx->FTW & RT_BIT(iReg))
+ return VINF_SUCCESS;
+ return VERR_NOT_FOUND;
+}
+
+
+DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+ if (pFpuCtx->FTW & RT_BIT(iReg))
+ {
+ *ppRef = &pFpuCtx->aRegs[iStReg].r80;
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_FOUND;
+}
+
+
+DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
+ uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
+ uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
+ uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
+ if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
+ {
+ *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
+ *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_FOUND;
+}
+
+
+DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
+{
+ PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+ uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
+ uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
+ uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
+ if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
+ {
+ *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_FOUND;
+}
+
+
+/**
+ * Rotates the stack registers when setting new TOS.
+ *
+ * @param pFpuCtx The FPU context.
+ * @param iNewTop New TOS value.
+ * @remarks We only do this to speed up fxsave/fxrstor which
+ * arrange the FP registers in stack order.
+ * MUST be done before writing the new TOS (FSW).
+ */
+DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
+{
+ uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
+ RTFLOAT80U ar80Temp[8];
+
+ if (iOldTop == iNewTop)
+ return;
+
+ /* Unscrew the stack and get it into 'native' order. */
+ ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
+ ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
+
+ /* Now rotate the stack to the new position. */
+ pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
+ pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
+}
+
+
+/**
+ * Updates the FPU exception status after FCW is changed.
+ *
+ * @param pFpuCtx The FPU context.
+ */
+DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
+{
+ uint16_t u16Fsw = pFpuCtx->FSW;
+ if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
+ u16Fsw |= X86_FSW_ES | X86_FSW_B;
+ else
+ u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
+ pFpuCtx->FSW = u16Fsw;
+}
+
+
+/**
+ * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
+ *
+ * @returns The full FTW.
+ * @param pFpuCtx The FPU context.
+ */
+DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
+{
+ uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
+ uint16_t u16Ftw = 0;
+ unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
+ for (unsigned iSt = 0; iSt < 8; iSt++)
+ {
+ unsigned const iReg = (iSt + iTop) & 7;
+ if (!(u8Ftw & RT_BIT(iReg)))
+ u16Ftw |= 3 << (iReg * 2); /* empty */
+ else
+ {
+ uint16_t uTag;
+ PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
+ if (pr80Reg->s.uExponent == 0x7fff)
+ uTag = 2; /* Exponent is all 1's => Special. */
+ else if (pr80Reg->s.uExponent == 0x0000)
+ {
+ if (pr80Reg->s.uMantissa == 0x0000)
+ uTag = 1; /* All bits are zero => Zero. */
+ else
+ uTag = 2; /* Must be special. */
+ }
+ else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
+ uTag = 0; /* Valid. */
+ else
+ uTag = 2; /* Must be special. */
+
+ u16Ftw |= uTag << (iReg * 2);
+ }
+ }
+
+ return u16Ftw;
+}
+
+
+/**
+ * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
+ *
+ * @returns The compressed FTW.
+ * @param u16FullFtw The full FTW to convert.
+ */
+DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
+{
+ uint8_t u8Ftw = 0;
+ for (unsigned i = 0; i < 8; i++)
+ {
+ if ((u16FullFtw & 3) != 3 /*empty*/)
+ u8Ftw |= RT_BIT(i);
+ u16FullFtw >>= 2;
+ }
+
+ return u8Ftw;
+}
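+
+/* Worked example (illustrative only): a full FTW where FPU register 0 is
+ * tagged valid (00), register 1 is tagged zero (01) and the remaining six
+ * are empty (11) compresses to a tag byte with just bits 0 and 1 set.
+ * @code{.c}
+ uint16_t const u16Full = UINT16_C(0xfff4);             // 11 11 11 11 11 11 01 00
+ uint16_t const u16Comp = iemFpuCompressFtw(u16Full);   // yields 0x0003
+ * @endcode
+ */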
+
+/** @} */
+
+
+/** @name Memory access.
+ *
+ * @{
+ */
+
+
+/**
+ * Checks whether alignment checks are enabled or not.
+ *
+ * @returns true if enabled, false if not.
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+ AssertCompile(X86_CR0_AM == X86_EFL_AC);
+ return pVCpu->iem.s.uCpl == 3
+ && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
+}
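+
+/* The AND above works because CR0.AM and EFLAGS.AC occupy the same bit
+ * position (bit 18), which is what the AssertCompile verifies: alignment
+ * checks are thus only enabled when CPL is 3 and both bits are set. */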
+
+/**
+ * Checks if the given segment can be written to, raising the appropriate
+ * exception if not.
+ *
+ * @returns VBox strict status code.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pHid Pointer to the hidden register.
+ * @param iSegReg The register number.
+ * @param pu64BaseAddr Where to return the base address to use for the
+ * segment. (In 64-bit code it may differ from the
+ * base in the hidden segment.)
+ */
+DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
+ uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
+{
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
+ else
+ {
+ if (!pHid->Attr.n.u1Present)
+ {
+ uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
+ AssertRelease(uSel == 0);
+ Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
+ return iemRaiseGeneralProtectionFault0(pVCpu);
+ }
+
+ if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
+ || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
+ && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
+ return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
+ *pu64BaseAddr = pHid->u64Base;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the given segment can be read from, raising the appropriate
+ * exception if not.
+ *
+ * @returns VBox strict status code.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param pHid Pointer to the hidden register.
+ * @param iSegReg The register number.
+ * @param pu64BaseAddr Where to return the base address to use for the
+ * segment. (In 64-bit code it may differ from the
+ * base in the hidden segment.)
+ */
+DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
+ uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
+{
+ IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
+ else
+ {
+ if (!pHid->Attr.n.u1Present)
+ {
+ uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
+ AssertRelease(uSel == 0);
+ Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
+ return iemRaiseGeneralProtectionFault0(pVCpu);
+ }
+
+ if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+ return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
+ *pu64BaseAddr = pHid->u64Base;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Maps a physical page.
+ *
+ * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param GCPhysMem The physical address.
+ * @param fAccess The intended access.
+ * @param ppvMem Where to return the mapping address.
+ * @param pLock The PGM lock.
+ */
+DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
+ void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
+{
+#ifdef IEM_LOG_MEMORY_WRITES
+ if (fAccess & IEM_ACCESS_TYPE_WRITE)
+ return VERR_PGM_PHYS_TLB_CATCH_ALL;
+#endif
+
+ /** @todo This API may require some improving later. A private deal with PGM
+ * regarding locking and unlocking needs to be struck. A couple of TLBs
+ * living in PGM, but with publicly accessible inlined access methods
+ * could perhaps be an even better solution. */
+ int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
+ GCPhysMem,
+ RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
+ pVCpu->iem.s.fBypassHandlers,
+ ppvMem,
+ pLock);
+ /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
+ AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
+
+ return rc;
+}
+
+
+/**
+ * Unmap a page previously mapped by iemMemPageMap.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param GCPhysMem The physical address.
+ * @param fAccess The intended access.
+ * @param pvMem What iemMemPageMap returned.
+ * @param pLock The PGM lock.
+ */
+DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
+ const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
+{
+ NOREF(pVCpu);
+ NOREF(GCPhysMem);
+ NOREF(fAccess);
+ NOREF(pvMem);
+ PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
+}
+
+#ifdef IEM_WITH_SETJMP
+
+/** @todo slim this down */
+DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
+ size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
+{
+ Assert(cbMem >= 1);
+ Assert(iSegReg < X86_SREG_COUNT);
+
+ /*
+ * 64-bit mode is simpler.
+ */
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ {
+ if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
+ {
+ IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
+ GCPtrMem += pSel->u64Base;
+ }
+
+ if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
+ return GCPtrMem;
+ iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+ }
+ /*
+ * 16-bit and 32-bit segmentation.
+ */
+ else if (iSegReg != UINT8_MAX)
+ {
+ /** @todo Does this apply to segments with 4G-1 limit? */
+ uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
+ if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
+ {
+ IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
+ switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
+ | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
+ | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
+ | X86_SEL_TYPE_CODE))
+ {
+ case X86DESCATTR_P: /* readonly data, expand up */
+ case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
+ case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
+ case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
+ /* expand up */
+ if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
+ return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+ Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
+ (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
+ break;
+
+ case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
+ case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
+ /* expand down */
+ if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
+ && ( pSel->Attr.n.u1DefBig
+ || GCPtrLast32 <= UINT32_C(0xffff)) ))
+ return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+ Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
+ (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
+ break;
+
+ default:
+ Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
+ iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
+ break;
+ }
+ }
+ Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
+ iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
+ }
+ /*
+ * 32-bit flat address.
+ */
+ else
+ return GCPtrMem;
+}
+
+
+/** @todo slim this down */
+DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
+ RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
+{
+ Assert(cbMem >= 1);
+ Assert(iSegReg < X86_SREG_COUNT);
+
+ /*
+ * 64-bit mode is simpler.
+ */
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+ {
+ if (iSegReg >= X86_SREG_FS)
+ {
+ IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
+ GCPtrMem += pSel->u64Base;
+ }
+
+ if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
+ return GCPtrMem;
+ }
+ /*
+ * 16-bit and 32-bit segmentation.
+ */
+ else
+ {
+ IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
+ uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
+ | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
+ if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
+ {
+ /* expand up */
+ uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
+ if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
+ && GCPtrLast32 > (uint32_t)GCPtrMem))
+ return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+ }
+ else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
+ {
+ /* expand down */
+ uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
+ if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
+ && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
+ && GCPtrLast32 > (uint32_t)GCPtrMem))
+ return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+ }
+ else
+ iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
+ iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
+ }
+ iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
+/**
+ * Fakes a long mode stack descriptor for SS = 0.
+ *
+ * @param pDescSs Where to return the fake stack descriptor.
+ * @param uDpl The DPL we want.
+ */
+DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
+{
+ pDescSs->Long.au64[0] = 0;
+ pDescSs->Long.au64[1] = 0;
+ pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
+ pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
+ pDescSs->Long.Gen.u2Dpl = uDpl;
+ pDescSs->Long.Gen.u1Present = 1;
+ pDescSs->Long.Gen.u1Long = 1;
+}
+
+/** @} */
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Gets CR0 fixed-0 bits in VMX operation.
+ *
+ * We do this rather than fetching what we report to the guest (in the
+ * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do likewise) reports the
+ * same values regardless of whether the unrestricted-guest feature is available
+ * on the CPU.
+ *
+ * @returns CR0 fixed-0 bits.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
+ * must be returned. When @c false, the CR0 fixed-0
+ * bits for VMX root mode are returned.
+ *
+ */
+DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
+{
+ Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
+
+ PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
+ if ( fVmxNonRootMode
+ && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
+ return VMX_V_CR0_FIXED0_UX;
+ return VMX_V_CR0_FIXED0;
+}
+
+
+/**
+ * Sets virtual-APIC write emulation as pending.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param offApic The offset in the virtual-APIC page that was written.
+ */
+DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
+{
+ Assert(offApic < XAPIC_OFF_END + 4);
+
+ /*
+ * Record the currently updated APIC offset, as we need this later for figuring
+ * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
+ * supplying the exit qualification when causing an APIC-write VM-exit.
+ */
+ pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
+
+ /*
+ * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
+ * virtualization or APIC-write emulation).
+ */
+ if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
+}
+
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
+#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */
diff --git a/src/VBox/VMM/include/IEMInternal.h b/src/VBox/VMM/include/IEMInternal.h
new file mode 100644
index 00000000..828673e4
--- /dev/null
+++ b/src/VBox/VMM/include/IEMInternal.h
@@ -0,0 +1,4320 @@
+/* $Id: IEMInternal.h $ */
+/** @file
+ * IEM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
+#define VMM_INCLUDED_SRC_include_IEMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/param.h>
+
+#include <iprt/setjmp-without-sigmask.h>
+
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_iem_int Internals
+ * @ingroup grp_iem
+ * @internal
+ * @{
+ */
+
+/** For expanding symbols in SlickEdit and other products when tagging and
+ * cross-referencing IEM symbols. */
+#ifndef IEM_STATIC
+# define IEM_STATIC static
+#endif
+
+/** @def IEM_WITH_SETJMP
+ * Enables alternative status code handling using setjmps.
+ *
+ * This adds a bit of expense via the setjmp() call since it saves all the
+ * non-volatile registers. However, it eliminates return code checks and allows
+ * for more optimal return value passing (return regs instead of stack buffer).
+ */
+#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
+# define IEM_WITH_SETJMP
+#endif
+
+/** @def IEM_WITH_THROW_CATCH
+ * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
+ * mode code when IEM_WITH_SETJMP is in effect.
+ *
+ * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
+ * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
+ * test result value improving by more than 1%. (Best out of three.)
+ *
+ * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
+ * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster, and all but some
+ * of the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
+ * Linux, but it should be quite a bit faster for normal code.
+ */
+#if (defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
+ || defined(DOXYGEN_RUNNING)
+# define IEM_WITH_THROW_CATCH
+#endif
+
+/** @def IEM_DO_LONGJMP
+ *
+ * Wrapper around longjmp / throw.
+ *
+ * @param a_pVCpu The CPU handle.
+ * @param a_rc The status code jump back with / throw.
+ */
+#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
+# ifdef IEM_WITH_THROW_CATCH
+# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
+# else
+# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
+# endif
+#endif
+
+/** For use with IEM function that may do a longjmp (when enabled).
+ *
+ * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
+ * attribute. So, we indicate that functions that may be part of a longjmp may
+ * throw "exceptions" and that the compiler should definitely not generate any
+ * std::terminate-calling unwind code.
+ *
+ * Here is one example of this ending in std::terminate:
+ * @code{.txt}
+00 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
+01 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
+02 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
+03 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
+04 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
+05 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
+06 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
+07 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
+08 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
+09 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
+0a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
+0b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
+0c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
+0d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
+0e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
+0f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
+10 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829] @encode
+ @endcode
+ *
+ * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
+ */
+#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
+# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
+#else
+# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
+#endif
+
+#define IEM_IMPLEMENTS_TASKSWITCH
+
+/** @def IEM_WITH_3DNOW
+ * Includes the 3DNow decoding. */
+#define IEM_WITH_3DNOW
+
+/** @def IEM_WITH_THREE_0F_38
+ * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
+#define IEM_WITH_THREE_0F_38
+
+/** @def IEM_WITH_THREE_0F_3A
+ * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
+#define IEM_WITH_THREE_0F_3A
+
+/** @def IEM_WITH_VEX
+ * Includes the VEX decoding. */
+#define IEM_WITH_VEX
+
+/** @def IEM_CFG_TARGET_CPU
+ * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
+ *
+ * By default we allow this to be configured by the user via the
+ * CPUM/GuestCpuName config string, but this comes at a slight cost during
+ * decoding. So, for applications of this code where there is no need to
+ * be dynamic wrt target CPU, just modify this define.
+ */
+#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
+# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
+#endif
+
+//#define IEM_WITH_CODE_TLB // - work in progress
+//#define IEM_WITH_DATA_TLB // - work in progress
+
+
+/** @def IEM_USE_UNALIGNED_DATA_ACCESS
+ * Use unaligned accesses instead of elaborate byte assembly. */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
+# define IEM_USE_UNALIGNED_DATA_ACCESS
+#endif
+
+//#define IEM_LOG_MEMORY_WRITES
+
+#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
+/** Instruction statistics. */
+typedef struct IEMINSTRSTATS
+{
+# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
+# include "IEMInstructionStatisticsTmpl.h"
+# undef IEM_DO_INSTR_STAT
+} IEMINSTRSTATS;
+#else
+struct IEMINSTRSTATS;
+typedef struct IEMINSTRSTATS IEMINSTRSTATS;
+#endif
+/** Pointer to IEM instruction statistics. */
+typedef IEMINSTRSTATS *PIEMINSTRSTATS;
+
+
+/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
+ * @{ */
+#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
+#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
+#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result */
+#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
+#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
+/** Selects the right variant from a_aArray.
+ * pVCpu is implicit in the caller context. */
+#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
+ (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
+/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
+ * be used because the host CPU does not support the operation. */
+#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
+ (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
+/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two-dimensional
+ * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
+ * into the two.
+ * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
+ (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
+#else
+# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
+ (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
+#endif
+/** @} */
+
+/**
+ * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
+ * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
+ *
+ * On non-x86 hosts, this will shortcut to the fallback w/o checking the
+ * indicator.
+ *
+ * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
+ */
+#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
+ (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
+#else
+# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
+#endif
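+
+
+/* Hypothetical usage sketch; the feature member and worker names below are
+ * made up for illustration only and are not taken from this file:
+ * @code{.c}
+ pfnWorker = IEM_SELECT_HOST_OR_FALLBACK(fSse42, iemAImpl_foo_native, iemAImpl_foo_fallback);
+ * @endcode
+ */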
+
+
+/**
+ * Extended operand mode that includes a representation of 8-bit.
+ *
+ * This is used for packing down modes when invoking some C instruction
+ * implementations.
+ */
+typedef enum IEMMODEX
+{
+ IEMMODEX_16BIT = IEMMODE_16BIT,
+ IEMMODEX_32BIT = IEMMODE_32BIT,
+ IEMMODEX_64BIT = IEMMODE_64BIT,
+ IEMMODEX_8BIT
+} IEMMODEX;
+AssertCompileSize(IEMMODEX, 4);
+
+
+/**
+ * Branch types.
+ */
+typedef enum IEMBRANCH
+{
+ IEMBRANCH_JUMP = 1,
+ IEMBRANCH_CALL,
+ IEMBRANCH_TRAP,
+ IEMBRANCH_SOFTWARE_INT,
+ IEMBRANCH_HARDWARE_INT
+} IEMBRANCH;
+AssertCompileSize(IEMBRANCH, 4);
+
+
+/**
+ * INT instruction types.
+ */
+typedef enum IEMINT
+{
+ /** INT n instruction (opcode 0xcd imm). */
+ IEMINT_INTN = 0,
+ /** Single byte INT3 instruction (opcode 0xcc). */
+ IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
+ /** Single byte INTO instruction (opcode 0xce). */
+ IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
+ /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
+ IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
+} IEMINT;
+AssertCompileSize(IEMINT, 4);
+
+
+/**
+ * A FPU result.
+ */
+typedef struct IEMFPURESULT
+{
+ /** The output value. */
+ RTFLOAT80U r80Result;
+ /** The output status. */
+ uint16_t FSW;
+} IEMFPURESULT;
+AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
+/** Pointer to a FPU result. */
+typedef IEMFPURESULT *PIEMFPURESULT;
+/** Pointer to a const FPU result. */
+typedef IEMFPURESULT const *PCIEMFPURESULT;
+
+
+/**
+ * A FPU result consisting of two output values and FSW.
+ */
+typedef struct IEMFPURESULTTWO
+{
+ /** The first output value. */
+ RTFLOAT80U r80Result1;
+ /** The output status. */
+ uint16_t FSW;
+ /** The second output value. */
+ RTFLOAT80U r80Result2;
+} IEMFPURESULTTWO;
+AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
+AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
+/** Pointer to a FPU result consisting of two output values and FSW. */
+typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
+/** Pointer to a const FPU result consisting of two output values and FSW. */
+typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
+
+
+/**
+ * IEM TLB entry.
+ *
+ * Lookup assembly:
+ * @code{.asm}
+ ; Calculate tag.
+ mov rax, [VA]
+ shl rax, 16
+ shr rax, 16 + X86_PAGE_SHIFT
+ or rax, [uTlbRevision]
+
+ ; Do indexing.
+ movzx ecx, al
+ lea rcx, [pTlbEntries + rcx]
+
+ ; Check tag.
+ cmp [rcx + IEMTLBENTRY.uTag], rax
+ jne .TlbMiss
+
+ ; Check access.
+ mov rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
+ and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
+ cmp rax, [uTlbPhysRev]
+ jne .TlbMiss
+
+ ; Calc address and we're done.
+ mov eax, X86_PAGE_OFFSET_MASK
+ and eax, [VA]
+ or rax, [rcx + IEMTLBENTRY.pMappingR3]
+ %ifdef VBOX_WITH_STATISTICS
+ inc qword [cTlbHits]
+ %endif
+ jmp .Done
+
+ .TlbMiss:
+ mov r8d, ACCESS_FLAGS
+ mov rdx, [VA]
+ mov rcx, [pVCpu]
+ call iemTlbTypeMiss
+ .Done:
+
+ @endcode
+ *
+ */
+typedef struct IEMTLBENTRY
+{
+ /** The TLB entry tag.
+ * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits; this
+ * is ASSUMING a virtual address width of 48 bits.
+ *
+ * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
+ *
+ * The TLB lookup code uses the current TLB revision, which won't ever be zero,
+ * enabling an extremely cheap TLB invalidation most of the time. When the TLB
+ * revision wraps around though, the tags needs to be zeroed.
+ *
+ * @note Try using the SHRD instruction? After seeing
+ * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
+ *
+ * @todo This will need to be reorganized for 57-bit wide virtual address and
+ * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
+ * have to move the TLB entry versioning entirely to the
+ * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
+ * 19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
+ * consumed by PCID and ASID (12 + 6 = 18).
+ */
+ uint64_t uTag;
+ /** Access flags and physical TLB revision.
+ *
+ * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
+ * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
+ * - Bit 2 - page tables - not user (complemented X86_PTE_US).
+ * - Bit 3 - pgm phys/virt - not directly writable.
+ * - Bit 4 - pgm phys page - not directly readable.
+ * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
+ * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
+ * - Bit 7 - tlb entry - pMappingR3 member not valid.
+ * - Bits 63 thru 8 are used for the physical TLB revision number.
+ *
+ * We're using complemented bit meanings here because it makes it easy to check
+ * whether special action is required. For instance a user mode write access
+ * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
+ * non-zero result would mean special handling needed because either it wasn't
+ * writable, or it wasn't user, or the page wasn't dirty. A user mode read
+ * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
+ * need to check any PTE flag.
+ */
+ uint64_t fFlagsAndPhysRev;
+ /** The guest physical page address. */
+ uint64_t GCPhys;
+ /** Pointer to the ring-3 mapping. */
+ R3PTRTYPE(uint8_t *) pbMappingR3;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Padding1;
+#endif
+} IEMTLBENTRY;
+AssertCompileSize(IEMTLBENTRY, 32);
+/** Pointer to an IEM TLB entry. */
+typedef IEMTLBENTRY *PIEMTLBENTRY;
+
+/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
+ * @{ */
+#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
+#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
+#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
+#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
+#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
+#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Phys tables: Not accessed (need to be marked accessed). */
+#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
+#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */
+#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
+#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffffe00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
+/** @} */
+
+
+/**
+ * An IEM TLB.
+ *
+ * We've got two of these, one for data and one for instructions.
+ */
+typedef struct IEMTLB
+{
+ /** The TLB entries.
+ * We've chosen 256 because that way we can obtain the result directly from an
+ * 8-bit register without an additional AND instruction. */
+ IEMTLBENTRY aEntries[256];
+ /** The TLB revision.
+ * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
+ * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
+ * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
+ * (The revision zero indicates an invalid TLB entry.)
+ *
+ * The initial value is chosen to cause an early wraparound. */
+ uint64_t uTlbRevision;
+ /** The TLB physical address revision - shadow of PGM variable.
+ *
+ * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
+ * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
+ * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pMappingR3 as well
+ * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
+ *
+ * The initial value is chosen to cause an early wraparound. */
+ uint64_t volatile uTlbPhysRev;
+
+ /* Statistics: */
+
+ /** TLB hits (VBOX_WITH_STATISTICS only). */
+ uint64_t cTlbHits;
+ /** TLB misses. */
+ uint32_t cTlbMisses;
+ /** Slow read path. */
+ uint32_t cTlbSlowReadPath;
+#if 0
+ /** TLB misses because of tag mismatch. */
+ uint32_t cTlbMissesTag;
+ /** TLB misses because of virtual access violation. */
+ uint32_t cTlbMissesVirtAccess;
+ /** TLB misses because of dirty bit. */
+ uint32_t cTlbMissesDirty;
+ /** TLB misses because of MMIO */
+ uint32_t cTlbMissesMmio;
+ /** TLB misses because of write access handlers. */
+ uint32_t cTlbMissesWriteHandler;
+ /** TLB misses because no r3(/r0) mapping. */
+ uint32_t cTlbMissesMapping;
+#endif
+ /** Alignment padding. */
+ uint32_t au32Padding[3+5];
+} IEMTLB;
+AssertCompileSizeAlignment(IEMTLB, 64);
+/** IEMTLB::uTlbRevision increment. */
+#define IEMTLB_REVISION_INCR RT_BIT_64(36)
+/** IEMTLB::uTlbRevision mask. */
+#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
+/** IEMTLB::uTlbPhysRev increment.
+ * @sa IEMTLBE_F_PHYS_REV */
+#define IEMTLB_PHYS_REV_INCR RT_BIT_64(9)
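+
+/*
+ * Illustrative sketch: bumping IEMTLB::uTlbRevision invalidates every entry
+ * without touching the array; only the rare wraparound has to scrub the tags,
+ * as described in the IEMTLB structure docs.  The helper name is made up.
+ *
+ * @code{.c}
+ * static void iemTlbSketchInvalidateAll(IEMTLB *pTlb)
+ * {
+ *     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;        // every existing tag now mismatches
+ *     if (!(pTlb->uTlbRevision & IEMTLB_REVISION_MASK))  // wrapped to zero: zero the tags
+ *     {
+ *         for (unsigned i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
+ *             pTlb->aEntries[i].uTag = 0;
+ *         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;     // zero means invalid, so restart here
+ *     }
+ * }
+ * @endcode
+ */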
+/**
+ * Calculates the TLB tag for a virtual address.
+ * @returns Tag value for indexing and comparing with IEMTLB::uTag.
+ * @param a_pTlb The TLB.
+ * @param a_GCPtr The virtual address.
+ */
+#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
+/**
+ * Calculates the TLB tag for a virtual address but without TLB revision.
+ * @returns Tag value for indexing and comparing with IEMTLB::uTag.
+ * @param a_GCPtr The virtual address.
+ */
+#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
+/**
+ * Converts a TLB tag value into a TLB index.
+ * @returns Index into IEMTLB::aEntries.
+ * @param a_uTag Value returned by IEMTLB_CALC_TAG.
+ */
+#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
+/**
+ * Converts a TLB tag value into a pointer to the corresponding TLB entry.
+ * @returns Pointer into IEMTLB::aEntries.
+ * @param a_pTlb The TLB.
+ * @param a_uTag Value returned by IEMTLB_CALC_TAG.
+ */
+#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
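+
+/*
+ * Illustrative sketch: the lookup from the IEMTLBENTRY description expressed in
+ * C using the macros above.  The physical-revision check is left out for brevity
+ * and the miss handler iemTlbSketchMiss is a made-up placeholder.
+ *
+ * @code{.c}
+ * extern uint8_t iemTlbSketchMiss(IEMTLB *pTlb, uint64_t GCPtr);      // hypothetical slow path
+ *
+ * static uint8_t iemTlbSketchReadByte(IEMTLB *pTlb, uint64_t GCPtr)
+ * {
+ *     uint64_t const uTag  = IEMTLB_CALC_TAG(pTlb, GCPtr);
+ *     IEMTLBENTRY   *pTlbe = IEMTLB_TAG_TO_ENTRY(pTlb, uTag);
+ *     if (   pTlbe->uTag == uTag
+ *         && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_READ | IEMTLBE_F_NO_MAPPINGR3)))
+ *         return pTlbe->pbMappingR3[GCPtr & (RT_BIT_64(GUEST_PAGE_SHIFT) - 1)];   // hit
+ *     return iemTlbSketchMiss(pTlb, GCPtr);                                       // miss / special handling
+ * }
+ * @endcode
+ */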
+
+
+/**
+ * The per-CPU IEM state.
+ */
+typedef struct IEMCPU
+{
+ /** Info status code that needs to be propagated to the IEM caller.
+ * This cannot be passed internally, as it would complicate all success
+ * checks within the interpreter making the code larger and almost impossible
+ * to get right. Instead, we'll store status codes to pass on here. Each
+ * source of these codes will perform appropriate sanity checks. */
+ int32_t rcPassUp; /* 0x00 */
+
+ /** The current CPU execution mode (CS). */
+ IEMMODE enmCpuMode; /* 0x04 */
+ /** The CPL. */
+ uint8_t uCpl; /* 0x05 */
+
+ /** Whether to bypass access handlers or not. */
+ bool fBypassHandlers : 1; /* 0x06.0 */
+ /** Whether to disregard the lock prefix (implied or not). */
+ bool fDisregardLock : 1; /* 0x06.1 */
+ /** Whether there are pending hardware instruction breakpoints. */
+ bool fPendingInstructionBreakpoints : 1; /* 0x06.2 */
+ /** Whether there are pending hardware data breakpoints. */
+ bool fPendingDataBreakpoints : 1; /* 0x06.3 */
+ /** Whether there are pending hardware I/O breakpoints. */
+ bool fPendingIoBreakpoints : 1; /* 0x06.4 */
+
+ /* Unused/padding */
+ bool fUnused; /* 0x07 */
+
+ /** @name Decoder state.
+ * @{ */
+#ifdef IEM_WITH_CODE_TLB
+ /** The offset of the next instruction byte. */
+ uint32_t offInstrNextByte; /* 0x08 */
+ /** The number of bytes available at pbInstrBuf for the current instruction.
+ * This takes the max opcode length into account so that doesn't need to be
+ * checked separately. */
+ uint32_t cbInstrBuf; /* 0x0c */
+ /** Pointer to the page containing RIP, user specified buffer or abOpcode.
+ * This can be NULL if the page isn't mappable for some reason, in which
+ * case we'll do fallback stuff.
+ *
+ * If we're executing an instruction from a user specified buffer,
+ * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
+ * aligned pointer but a pointer to the user data.
+ *
+ * For instructions crossing pages, this will start on the first page and be
+ * advanced to the next page by the time we've decoded the instruction. This
+ * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
+ */
+ uint8_t const *pbInstrBuf; /* 0x10 */
+# if ARCH_BITS == 32
+ uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
+# endif
+ /** The program counter corresponding to pbInstrBuf.
+ * This is set to a non-canonical address when we need to invalidate it. */
+ uint64_t uInstrBufPc; /* 0x18 */
+ /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
+ * This takes the CS segment limit into account. */
+ uint16_t cbInstrBufTotal; /* 0x20 */
+ /** Offset into pbInstrBuf of the first byte of the current instruction.
+ * Can be negative to efficiently handle cross page instructions. */
+ int16_t offCurInstrStart; /* 0x22 */
+
+ /** The prefix mask (IEM_OP_PRF_XXX). */
+ uint32_t fPrefixes; /* 0x24 */
+ /** The extra REX ModR/M register field bit (REX.R << 3). */
+ uint8_t uRexReg; /* 0x28 */
+ /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
+ * (REX.B << 3). */
+ uint8_t uRexB; /* 0x29 */
+ /** The extra REX SIB index field bit (REX.X << 3). */
+ uint8_t uRexIndex; /* 0x2a */
+
+ /** The effective segment register (X86_SREG_XXX). */
+ uint8_t iEffSeg; /* 0x2b */
+
+ /** The offset of the ModR/M byte relative to the start of the instruction. */
+ uint8_t offModRm; /* 0x2c */
+#else /* !IEM_WITH_CODE_TLB */
+ /** The size of what has currently been fetched into abOpcode. */
+ uint8_t cbOpcode; /* 0x08 */
+ /** The current offset into abOpcode. */
+ uint8_t offOpcode; /* 0x09 */
+ /** The offset of the ModR/M byte relative to the start of the instruction. */
+ uint8_t offModRm; /* 0x0a */
+
+ /** The effective segment register (X86_SREG_XXX). */
+ uint8_t iEffSeg; /* 0x0b */
+
+ /** The prefix mask (IEM_OP_PRF_XXX). */
+ uint32_t fPrefixes; /* 0x0c */
+ /** The extra REX ModR/M register field bit (REX.R << 3). */
+ uint8_t uRexReg; /* 0x10 */
+ /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
+ * (REX.B << 3). */
+ uint8_t uRexB; /* 0x11 */
+ /** The extra REX SIB index field bit (REX.X << 3). */
+ uint8_t uRexIndex; /* 0x12 */
+
+#endif /* !IEM_WITH_CODE_TLB */
+
+ /** The effective operand mode. */
+ IEMMODE enmEffOpSize; /* 0x2d, 0x13 */
+ /** The default addressing mode. */
+ IEMMODE enmDefAddrMode; /* 0x2e, 0x14 */
+ /** The effective addressing mode. */
+ IEMMODE enmEffAddrMode; /* 0x2f, 0x15 */
+ /** The default operand mode. */
+ IEMMODE enmDefOpSize; /* 0x30, 0x16 */
+
+ /** Prefix index (VEX.pp) for two byte and three byte tables. */
+ uint8_t idxPrefix; /* 0x31, 0x17 */
+ /** 3rd VEX/EVEX/XOP register.
+ * Please use IEM_GET_EFFECTIVE_VVVV to access. */
+ uint8_t uVex3rdReg; /* 0x32, 0x18 */
+ /** The VEX/EVEX/XOP length field. */
+ uint8_t uVexLength; /* 0x33, 0x19 */
+ /** Additional EVEX stuff. */
+ uint8_t fEvexStuff; /* 0x34, 0x1a */
+
+ /** Explicit alignment padding. */
+ uint8_t abAlignment2a[1]; /* 0x35, 0x1b */
+ /** The FPU opcode (FOP). */
+ uint16_t uFpuOpcode; /* 0x36, 0x1c */
+#ifndef IEM_WITH_CODE_TLB
+ /** Explicit alignment padding. */
+ uint8_t abAlignment2b[2]; /* 0x1e */
+#endif
+
+ /** The opcode bytes. */
+ uint8_t abOpcode[15]; /* 0x48, 0x20 */
+ /** Explicit alignment padding. */
+#ifdef IEM_WITH_CODE_TLB
+ uint8_t abAlignment2c[0x48 - 0x47]; /* 0x37 */
+#else
+ uint8_t abAlignment2c[0x48 - 0x2f]; /* 0x2f */
+#endif
+ /** @} */
+
+
+ /** The flags of the current exception / interrupt. */
+ uint32_t fCurXcpt; /* 0x48, 0x48 */
+ /** The current exception / interrupt. */
+ uint8_t uCurXcpt;
+ /** Exception / interrupt recursion depth. */
+ int8_t cXcptRecursions;
+
+ /** The number of active guest memory mappings. */
+ uint8_t cActiveMappings;
+ /** The next unused mapping index. */
+ uint8_t iNextMapping;
+ /** Records for tracking guest memory mappings. */
+ struct
+ {
+ /** The address of the mapped bytes. */
+ void *pv;
+ /** The access flags (IEM_ACCESS_XXX).
+ * IEM_ACCESS_INVALID if the entry is unused. */
+ uint32_t fAccess;
+#if HC_ARCH_BITS == 64
+ uint32_t u32Alignment4; /**< Alignment padding. */
+#endif
+ } aMemMappings[3];
+
+ /** Locking records for the mapped memory. */
+ union
+ {
+ PGMPAGEMAPLOCK Lock;
+ uint64_t au64Padding[2];
+ } aMemMappingLocks[3];
+
+ /** Bounce buffer info.
+ * This runs in parallel to aMemMappings. */
+ struct
+ {
+ /** The physical address of the first byte. */
+ RTGCPHYS GCPhysFirst;
+ /** The physical address of the second page. */
+ RTGCPHYS GCPhysSecond;
+ /** The number of bytes in the first page. */
+ uint16_t cbFirst;
+ /** The number of bytes in the second page. */
+ uint16_t cbSecond;
+ /** Whether it's unassigned memory. */
+ bool fUnassigned;
+ /** Explicit alignment padding. */
+ bool afAlignment5[3];
+ } aMemBbMappings[3];
+
+ /* Ensure that aBounceBuffers are aligned at a 32 byte boundary. */
+ uint64_t abAlignment7[1];
+
+ /** Bounce buffer storage.
+ * This runs in parallel to aMemMappings and aMemBbMappings. */
+ struct
+ {
+ uint8_t ab[512];
+ } aBounceBuffers[3];
+
+
+ /** Pointer set jump buffer - ring-3 context. */
+ R3PTRTYPE(jmp_buf *) pJmpBufR3;
+ /** Pointer set jump buffer - ring-0 context. */
+ R0PTRTYPE(jmp_buf *) pJmpBufR0;
+
+ /** @todo Should move this near @a fCurXcpt later. */
+ /** The CR2 for the current exception / interrupt. */
+ uint64_t uCurXcptCr2;
+ /** The error code for the current exception / interrupt. */
+ uint32_t uCurXcptErr;
+
+ /** @name Statistics
+ * @{ */
+ /** The number of instructions we've executed. */
+ uint32_t cInstructions;
+ /** The number of potential exits. */
+ uint32_t cPotentialExits;
+ /** The number of bytes data or stack written (mostly for IEMExecOneEx).
+ * This may contain uncommitted writes. */
+ uint32_t cbWritten;
+ /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
+ uint32_t cRetInstrNotImplemented;
+ /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
+ uint32_t cRetAspectNotImplemented;
+ /** Counts informational statuses returned (other than VINF_SUCCESS). */
+ uint32_t cRetInfStatuses;
+ /** Counts other error statuses returned. */
+ uint32_t cRetErrStatuses;
+ /** Number of times rcPassUp has been used. */
+ uint32_t cRetPassUpStatus;
+ /** Number of times RZ left with instruction commit pending for ring-3. */
+ uint32_t cPendingCommit;
+ /** Number of long jumps. */
+ uint32_t cLongJumps;
+ /** @} */
+
+ /** @name Target CPU information.
+ * @{ */
+#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
+ /** The target CPU. */
+ uint8_t uTargetCpu;
+#else
+ uint8_t bTargetCpuPadding;
+#endif
+ /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
+ * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when there is
+ * no native host support and the 2nd for when there is.
+ *
+ * The two values are typically indexed by a g_CpumHostFeatures bit.
+ *
+ * This is for instance used for the BSF & BSR instructions where AMD and
+ * Intel CPUs produce different EFLAGS. */
+ uint8_t aidxTargetCpuEflFlavour[2];
+
+ /** The CPU vendor. */
+ CPUMCPUVENDOR enmCpuVendor;
+ /** @} */
+
+ /** @name Host CPU information.
+ * @{ */
+ /** The CPU vendor. */
+ CPUMCPUVENDOR enmHostCpuVendor;
+ /** @} */
+
+ /** Counts RDMSR \#GP(0) LogRel(). */
+ uint8_t cLogRelRdMsr;
+ /** Counts WRMSR \#GP(0) LogRel(). */
+ uint8_t cLogRelWrMsr;
+ /** Alignment padding. */
+ uint8_t abAlignment8[42];
+
+ /** Data TLB.
+ * @remarks Must be 64-byte aligned. */
+ IEMTLB DataTlb;
+ /** Instruction TLB.
+ * @remarks Must be 64-byte aligned. */
+ IEMTLB CodeTlb;
+
+ /** Exception statistics. */
+ STAMCOUNTER aStatXcpts[32];
+ /** Interrupt statistics. */
+ uint32_t aStatInts[256];
+
+#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
+ /** Instruction statistics for ring-0/raw-mode. */
+ IEMINSTRSTATS StatsRZ;
+ /** Instruction statistics for ring-3. */
+ IEMINSTRSTATS StatsR3;
+#endif
+} IEMCPU;
+AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
+AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 8);
+AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 16);
+AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 32);
+AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
+AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
+AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
+
+/** Pointer to the per-CPU IEM state. */
+typedef IEMCPU *PIEMCPU;
+/** Pointer to the const per-CPU IEM state. */
+typedef IEMCPU const *PCIEMCPU;
+
+
+/** @def IEM_GET_CTX
+ * Gets the guest CPU context for the calling EMT.
+ * @returns PCPUMCTX
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
+
+/** @def IEM_CTX_ASSERT
+ * Asserts that the state specified by @a a_fExtrnMbz is present in the CPU
+ * context, i.e. that none of those CPUMCTX_EXTRN_XXX bits are set in fExtrn.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
+ */
+#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
+ ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, \
+ (a_fExtrnMbz)))
+
+/** @def IEM_CTX_IMPORT_RET
+ * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+ *
+ * Will call the keeper to import the bits as needed.
+ *
+ * Returns on import failure.
+ *
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
+ */
+#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
+ do { \
+ if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
+ { /* likely */ } \
+ else \
+ { \
+ int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
+ AssertRCReturn(rcCtxImport, rcCtxImport); \
+ } \
+ } while (0)
+
+/** @def IEM_CTX_IMPORT_NORET
+ * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+ *
+ * Will call the keeper to import the bits as needed.
+ *
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
+ */
+#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
+ do { \
+ if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
+ { /* likely */ } \
+ else \
+ { \
+ int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
+ AssertLogRelRC(rcCtxImport); \
+ } \
+ } while (0)
+
+/** @def IEM_CTX_IMPORT_JMP
+ * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+ *
+ * Will call the keeper to import the bits as needed.
+ *
+ * Jumps on import failure.
+ *
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
+ */
+#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
+ do { \
+ if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
+ { /* likely */ } \
+ else \
+ { \
+ int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
+ AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
+ } \
+ } while (0)
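+
+/*
+ * Illustrative sketch of how the import macros are typically combined with
+ * IEM_CTX_ASSERT before guest state is touched.  The function and its CR4
+ * reading body are made-up examples rather than actual IEM code.
+ *
+ * @code{.c}
+ * static VBOXSTRICTRC iemSketchReadCr4(PVMCPUCC pVCpu, uint64_t *puValue)
+ * {
+ *     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);   // pull CR4 in if it is still external
+ *     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);       // sanity: it must be present now
+ *     *puValue = pVCpu->cpum.GstCtx.cr4;
+ *     return VINF_SUCCESS;
+ * }
+ * @endcode
+ */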
+
+
+
+/** @def IEM_GET_TARGET_CPU
+ * Gets the current IEMTARGETCPU value.
+ * @returns IEMTARGETCPU value.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
+# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
+#else
+# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
+#endif
+
+/** @def IEM_GET_INSTR_LEN
+ * Gets the instruction length. */
+#ifdef IEM_WITH_CODE_TLB
+# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
+#else
+# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
+#endif
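+
+/*
+ * Illustrative sketch: the usual consumer of IEM_GET_INSTR_LEN is the code that
+ * advances the guest RIP past the instruction just decoded.  The helper below is
+ * a made-up fragment, not the real RIP updater.
+ *
+ * @code{.c}
+ * static void iemSketchAdvanceRip(PVMCPUCC pVCpu)
+ * {
+ *     uint8_t const cbInstr = (uint8_t)IEM_GET_INSTR_LEN(pVCpu);
+ *     pVCpu->cpum.GstCtx.rip += cbInstr;
+ * }
+ * @endcode
+ */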
+
+
+/**
+ * Shared per-VM IEM data.
+ */
+typedef struct IEM
+{
+ /** The VMX APIC-access page handler type. */
+ PGMPHYSHANDLERTYPE hVmxApicAccessPage;
+#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
+ /** Set if the CPUID host call functionality is enabled. */
+ bool fCpuIdHostCall;
+#endif
+} IEM;
+
+
+
+/** @name IEM_ACCESS_XXX - Access details.
+ * @{ */
+#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
+#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
+#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
+#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
+#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
+#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
+#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
+#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
+#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
+#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
+/** The writes are partial, so initialize the bounce buffer with the
+ * original RAM content. */
+#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
+/** Used in aMemMappings to indicate that the entry is bounce buffered. */
+#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
+/** Bounce buffer with ring-3 write pending, first page. */
+#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
+/** Bounce buffer with ring-3 write pending, second page. */
+#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
+/** Not locked, accessed via the TLB. */
+#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
+/** Valid bit mask. */
+#define IEM_ACCESS_VALID_MASK UINT32_C(0x00001fff)
+/** Shift count for the TLB flags (upper word). */
+#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
+
+/** Read+write data alias. */
+#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
+/** Write data alias. */
+#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
+/** Read data alias. */
+#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
+/** Instruction fetch alias. */
+#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
+/** Stack write alias. */
+#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
+/** Stack read alias. */
+#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
+/** Stack read+write alias. */
+#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
+/** Read system table alias. */
+#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
+/** Read+write system table alias. */
+#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
+/** @} */
+
+/** @name Prefix constants (IEMCPU::fPrefixes)
+ * @{ */
+#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
+#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
+#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
+#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
+#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
+#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
+#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
+
+#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
+#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
+#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
+
+#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
+#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
+#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
+
+#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
+#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
+#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
+#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
+/** Mask with all the REX prefix flags.
+ * This is generally for use when needing to undo the REX prefixes when they
+ * are followed by legacy prefixes and therefore do not immediately precede
+ * the first opcode byte.
+ * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
+#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
+
+#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indicates VEX prefix. */
+#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indicates EVEX prefix. */
+#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indicates XOP prefix. */
+/** @} */
+
+/** @name IEMOPFORM_XXX - Opcode forms
+ * @note These are ORed together with IEMOPHINT_XXX.
+ * @{ */
+/** ModR/M: reg, r/m */
+#define IEMOPFORM_RM 0
+/** ModR/M: reg, r/m (register) */
+#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
+/** ModR/M: reg, r/m (memory) */
+#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
+/** ModR/M: reg, r/m, imm */
+#define IEMOPFORM_RMI 1
+/** ModR/M: reg, r/m (register) */
+#define IEMOPFORM_RMI_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
+/** ModR/M: reg, r/m (memory) */
+#define IEMOPFORM_RMI_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
+/** ModR/M: r/m, reg */
+#define IEMOPFORM_MR 2
+/** ModR/M: r/m (register), reg */
+#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
+/** ModR/M: r/m (memory), reg */
+#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
+/** ModR/M: r/m, reg, imm */
+#define IEMOPFORM_MRI 3
+/** ModR/M: r/m (register), reg */
+#define IEMOPFORM_MRI_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
+/** ModR/M: r/m (memory), reg */
+#define IEMOPFORM_MRI_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
+/** ModR/M: r/m only */
+#define IEMOPFORM_M 4
+/** ModR/M: r/m only (register). */
+#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
+/** ModR/M: r/m only (memory). */
+#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
+/** ModR/M: reg only */
+#define IEMOPFORM_R 5
+
+/** VEX+ModR/M: reg, r/m */
+#define IEMOPFORM_VEX_RM 8
+/** VEX+ModR/M: reg, r/m (register) */
+#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
+/** VEX+ModR/M: reg, r/m (memory) */
+#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: r/m, reg */
+#define IEMOPFORM_VEX_MR 9
+/** VEX+ModR/M: r/m (register), reg */
+#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
+/** VEX+ModR/M: r/m (memory), reg */
+#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: r/m only */
+#define IEMOPFORM_VEX_M 10
+/** VEX+ModR/M: r/m only (register). */
+#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
+/** VEX+ModR/M: r/m only (memory). */
+#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: reg only */
+#define IEMOPFORM_VEX_R 11
+/** VEX+ModR/M: reg, vvvv, r/m */
+#define IEMOPFORM_VEX_RVM 12
+/** VEX+ModR/M: reg, vvvv, r/m (register). */
+#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
+/** VEX+ModR/M: reg, vvvv, r/m (memory). */
+#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: reg, r/m, vvvv */
+#define IEMOPFORM_VEX_RMV 13
+/** VEX+ModR/M: reg, r/m, vvvv (register). */
+#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
+/** VEX+ModR/M: reg, r/m, vvvv (memory). */
+#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: reg, r/m, imm8 */
+#define IEMOPFORM_VEX_RMI 14
+/** VEX+ModR/M: reg, r/m, imm8 (register). */
+#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
+/** VEX+ModR/M: reg, r/m, imm8 (memory). */
+#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M: r/m, vvvv, reg */
+#define IEMOPFORM_VEX_MVR 15
+/** VEX+ModR/M: r/m, vvvv, reg (register) */
+#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
+/** VEX+ModR/M: r/m, vvvv, reg (memory) */
+#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
+/** VEX+ModR/M+/n: vvvv, r/m */
+#define IEMOPFORM_VEX_VM 16
+/** VEX+ModR/M+/n: vvvv, r/m (register) */
+#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
+/** VEX+ModR/M+/n: vvvv, r/m (memory) */
+#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
+
+/** Fixed register instruction, no R/M. */
+#define IEMOPFORM_FIXED 32
+
+/** The r/m is a register. */
+#define IEMOPFORM_MOD3 RT_BIT_32(8)
+/** The r/m is a memory access. */
+#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
+/** @} */
+
+/** @name IEMOPHINT_XXX - Additional Opcode Hints
+ * @note These are ORed together with IEMOPFORM_XXX.
+ * @{ */
+/** Ignores the operand size prefix (66h). */
+#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
+/** Ignores REX.W (aka WIG). */
+#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
+/** Both the operand size prefixes (66h + REX.W) are ignored. */
+#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
+/** Allowed with the lock prefix. */
+#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
+/** The VEX.L value is ignored (aka LIG). */
+#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
+/** The VEX.L value must be zero (i.e. 128-bit width only). */
+#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
+/** The VEX.V value must be zero. */
+#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(14)
+
+/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
+#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
+/** @} */
+
+/**
+ * Possible hardware task switch sources.
+ */
+typedef enum IEMTASKSWITCH
+{
+ /** Task switch caused by an interrupt/exception. */
+ IEMTASKSWITCH_INT_XCPT = 1,
+ /** Task switch caused by a far CALL. */
+ IEMTASKSWITCH_CALL,
+ /** Task switch caused by a far JMP. */
+ IEMTASKSWITCH_JUMP,
+ /** Task switch caused by an IRET. */
+ IEMTASKSWITCH_IRET
+} IEMTASKSWITCH;
+AssertCompileSize(IEMTASKSWITCH, 4);
+
+/**
+ * Possible CrX load (write) sources.
+ */
+typedef enum IEMACCESSCRX
+{
+ /** CrX access caused by 'mov crX' instruction. */
+ IEMACCESSCRX_MOV_CRX,
+ /** CrX (CR0) write caused by 'lmsw' instruction. */
+ IEMACCESSCRX_LMSW,
+ /** CrX (CR0) write caused by 'clts' instruction. */
+ IEMACCESSCRX_CLTS,
+ /** CrX (CR0) read caused by 'smsw' instruction. */
+ IEMACCESSCRX_SMSW
+} IEMACCESSCRX;
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
+ *
+ * These flags provide further context to SLAT page-walk failures that could not be
+ * determined by PGM (e.g., PGM is not privy to memory access permissions).
+ *
+ * @{
+ */
+/** Translating a nested-guest linear address failed accessing a nested-guest
+ * physical address. */
+# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
+/** Translating a nested-guest linear address failed accessing a
+ * paging-structure entry or updating accessed/dirty bits. */
+# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
+/** @} */
+
+DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
+# ifndef IN_RING3
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
+# endif
+#endif
+
+/**
+ * Indicates to the verifier that the given flag set is undefined.
+ *
+ * Can be invoked again to add more flags.
+ *
+ * This is a NOOP if the verifier isn't compiled in.
+ *
+ * @note We're temporarily keeping this until code is converted to new
+ * disassembler style opcode handling.
+ */
+#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
+
+
+/** @def IEM_DECL_IMPL_TYPE
+ * For typedef'ing an instruction implementation function.
+ *
+ * @param a_RetType The return type.
+ * @param a_Name The name of the type.
+ * @param a_ArgList The argument list enclosed in parentheses.
+ */
+
+/** @def IEM_DECL_IMPL_DEF
+ * For defining an instruction implementation function.
+ *
+ * @param a_RetType The return type.
+ * @param a_Name The name of the function.
+ * @param a_ArgList The argument list enclosed in parentheses.
+ */
+
+#if defined(__GNUC__) && defined(RT_ARCH_X86)
+# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
+ __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
+# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
+ __attribute__((__fastcall__, __nothrow__)) a_RetType a_Name a_ArgList
+# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
+ __attribute__((__fastcall__, __nothrow__)) a_RetType a_Name a_ArgList
+
+#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
+# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
+ a_RetType (__fastcall a_Name) a_ArgList
+# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
+ a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
+# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
+ a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
+
+#elif __cplusplus >= 201700 /* P0012R1 support */
+# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
+ a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
+# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
+ a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT
+# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
+ a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT
+
+#else
+# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
+ a_RetType (VBOXCALL a_Name) a_ArgList
+# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
+ a_RetType VBOXCALL a_Name a_ArgList
+# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
+ a_RetType VBOXCALL a_Name a_ArgList
+
+#endif
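+
+/*
+ * Illustrative sketch of how the declaration macros above fit together.  The
+ * worker type, prototype and body are made-up examples, not real IEM workers.
+ *
+ * @code{.c}
+ * typedef IEM_DECL_IMPL_TYPE(void, FNIEMSKETCHWORKER,(uint32_t *puDst, uint32_t uSrc));
+ * IEM_DECL_IMPL_PROTO(void, iemSketchWorker,(uint32_t *puDst, uint32_t uSrc));
+ *
+ * IEM_DECL_IMPL_DEF(void, iemSketchWorker,(uint32_t *puDst, uint32_t uSrc))
+ * {
+ *     *puDst ^= uSrc;    // trivial body just to show the shape
+ * }
+ * @endcode
+ */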
+
+/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
+RT_C_DECLS_BEGIN
+extern uint8_t const g_afParity[256];
+RT_C_DECLS_END
+
+
+/** @name Arithmetic assignment operations on bytes (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
+FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
+FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
+/** @} */
+
+/** @name Arithmetic assignment operations on words (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
+FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
+FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
+/** @} */
+
+/** @name Arithmetic assignment operations on double words (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
+FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
+FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
+FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
+FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
+/** @} */
+
+/** @name Arithmetic assignment operations on quad words (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
+FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
+FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
+FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
+FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
+/** @} */
+
+/** @name Compare operations (thrown in with the binary ops).
+ * @{ */
+FNIEMAIMPLBINU8 iemAImpl_cmp_u8;
+FNIEMAIMPLBINU16 iemAImpl_cmp_u16;
+FNIEMAIMPLBINU32 iemAImpl_cmp_u32;
+FNIEMAIMPLBINU64 iemAImpl_cmp_u64;
+/** @} */
+
+/** @name Test operations (thrown in with the binary ops).
+ * @{ */
+FNIEMAIMPLBINU8 iemAImpl_test_u8;
+FNIEMAIMPLBINU16 iemAImpl_test_u16;
+FNIEMAIMPLBINU32 iemAImpl_test_u32;
+FNIEMAIMPLBINU64 iemAImpl_test_u64;
+/** @} */
+
+/** @name Bit operations (thrown in with the binary ops).
+ * @{ */
+FNIEMAIMPLBINU16 iemAImpl_bt_u16;
+FNIEMAIMPLBINU32 iemAImpl_bt_u32;
+FNIEMAIMPLBINU64 iemAImpl_bt_u64;
+FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
+FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
+FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
+FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
+FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
+FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
+FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
+FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
+FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
+/** @} */
+
+/** @name Arithmetic three operand operations on double words (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
+FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
+FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
+FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
+/** @} */
+
+/** @name Arithmetic three operand operations on quad words (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
+typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
+FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
+FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
+FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
+/** @} */
+
+/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
+typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
+FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
+/** @} */
+
+/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
+typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
+FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
+/** @} */
+
+/** @name MULX 32-bit and 64-bit.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
+typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
+FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
+typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
+FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
+/** @} */
+
+
+/** @name Exchange memory with register operations.
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
+/** @} */
+
+/** @name Exchange and add operations.
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
+/** @} */
+
+/** @name Compare and exchange.
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
+#if ARCH_BITS == 32
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
+#else
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
+#endif
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
+ uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
+ uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
+ uint32_t *pEFlags));
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
+ uint32_t *pEFlags));
+#ifndef RT_ARCH_ARM64
+IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
+ PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
+#endif
+/** @} */
+
+/** @name Memory ordering
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
+typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
+IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
+#ifndef RT_ARCH_ARM64
+IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
+#endif
+/** @} */
+
+/** @name Double precision shifts
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
+FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
+FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
+FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
+FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
+FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
+FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
+/** @} */
+
+
+/** @name Bit search operations (thrown in with the binary ops).
+ * @{ */
+FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
+FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
+FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
+FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
+FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
+FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
+FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
+FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
+FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
+FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
+FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
+FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
+FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
+FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
+FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
+/** @} */
+
+/** @name Signed multiplication operations (thrown in with the binary ops).
+ * @{ */
+FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
+FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
+FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
+/** @} */
+
+/** @name Arithmetic assignment operations on bytes (unary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
+typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
+FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
+FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
+FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
+FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
+/** @} */
+
+/** @name Arithmetic assignment operations on words (unary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
+typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
+FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
+FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
+FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
+FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
+/** @} */
+
+/** @name Arithmetic assignment operations on double words (unary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
+typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
+FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
+FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
+FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
+FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
+/** @} */
+
+/** @name Arithmetic assignment operations on quad words (unary).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
+typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
+FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
+FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
+FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
+FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
+/** @} */
+
+
+/** @name Shift operations on bytes (Group 2).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
+FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
+FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
+/** @} */
+
+/** @name Shift operations on words (Group 2).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
+FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
+FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
+/** @} */
+
+/** @name Shift operations on double words (Group 2).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
+FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
+FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
+/** @} */
+
+/** @name Shift operations on words (Group 2).
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
+FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
+FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
+/** @} */
+
+/** @name Multiplication and division operations.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
+typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
+FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
+FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
+FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
+FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
+
+typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
+typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
+FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
+FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
+FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
+FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
+
+typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
+typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
+FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
+FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
+FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
+FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
+
+typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
+typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
+FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
+FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
+FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
+FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
+/** @} */
+
+/** @name Byte Swap.
+ * @{ */
+IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
+IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
+IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
+/** @} */
+
+/** @name Misc.
+ * @{ */
+FNIEMAIMPLBINU16 iemAImpl_arpl;
+/** @} */
+
+/** @name RDRAND and RDSEED
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
+typedef FNIEMAIMPLRDRANDSEEDU16 *FNIEMAIMPLPRDRANDSEEDU16;
+typedef FNIEMAIMPLRDRANDSEEDU32 *FNIEMAIMPLPRDRANDSEEDU32;
+typedef FNIEMAIMPLRDRANDSEEDU64 *FNIEMAIMPLPRDRANDSEEDU64;
+
+FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
+FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
+FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
+FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
+FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
+FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
+/** @} */
+
+/** @name FPU operations taking a 32-bit float argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
+typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
+typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
+
+FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
+FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
+IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
+ PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
+/** @} */
+
+/** @name FPU operations taking a 64-bit float argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
+typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
+typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
+
+FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
+FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
+IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
+                                                 PRTFLOAT64U pr64Val, PCRTFLOAT80U pr80Val));
+/** @} */
+
+/** @name FPU operations taking an 80-bit float argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
+typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
+FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
+FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
+
+FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80, iemAImpl_fpatan_r80_by_r80_amd, iemAImpl_fpatan_r80_by_r80_intel;
+FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80, iemAImpl_fyl2x_r80_by_r80_amd, iemAImpl_fyl2x_r80_by_r80_intel;
+FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
+typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
+FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
+FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
+
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
+ PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
+typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
+FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
+FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
+typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
+FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
+FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
+FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
+FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
+FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
+FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
+FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
+typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
+FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
+FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
+typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
+FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
+ PCRTFLOAT80U pr80Val));
+typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
+FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
+FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
+FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
+IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
+ PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
+IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
+ PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
+
+/** @} */
+
+/** @name FPU operations taking a 16-bit signed integer argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
+ PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
+typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
+ int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
+typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
+
+FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
+FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
+FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
+FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
+FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
+FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
+ PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
+typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
+FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
+FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
+FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
+/** @} */
+
+/** @name FPU operations taking a 32-bit signed integer argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
+ PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
+typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
+ int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
+typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
+
+FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
+FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
+FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
+FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
+FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
+FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
+ PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
+typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
+FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
+FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
+FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
+/** @} */
+
+/** @name FPU operations taking a 64-bit signed integer argument
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
+ int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
+typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
+FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
+FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
+/** @} */
+
+
+/** Temporary type representing a 256-bit vector register. */
+typedef struct { uint64_t au64[4]; } IEMVMM256;
+/** Temporary type pointing to a 256-bit vector register. */
+typedef IEMVMM256 *PIEMVMM256;
+/** Temporary type pointing to a const 256-bit vector register. */
+typedef IEMVMM256 const *PCIEMVMM256;
+
+
+/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
+typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
+typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
+typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
+typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
+typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
+typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
+typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
+typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
+typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_paddd_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_paddq_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psubd_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psubq_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddwd_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pmuludq_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
+
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_paddd_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_paddq_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psubd_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psubq_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhw_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddwd_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminub_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxub_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsw_u128;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pmuludq_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
+
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
+FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsd_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
+
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
+FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
+
+FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
+FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
+/** @} */
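+
+/* The 'full1 + full2 -> full1' naming above means the first operand is both source and destination.
+ * A hedged sketch (not the actual implementation) of what a 64-bit MMX worker with the
+ * FNIEMAIMPLMEDIAF2U64 signature boils down to, using PXOR as the example:
+ *
+ *      static void examplePXorU64(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc)
+ *      {
+ *          RT_NOREF(pFpuState);    // the logical ops have no use for the FPU state
+ *          *puDst ^= *puSrc;       // full1 = full1 ^ full2
+ *      }
+ */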
+
+/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf2 -> full1.
+ * @{ */
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
+ iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
+ iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
+ iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
+ iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
+ iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
+ iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
+ iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
+ iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
+ iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
+ iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
+ iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
+ iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
+ iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
+ iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
+ * @{ */
+FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
+FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
+ iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
+ iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
+ iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
+ iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
+ iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
+ iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
+IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
+FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
+#ifndef IEM_WITHOUT_ASSEMBLY
+FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
+#endif
+FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
+/** @} */
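+
+/* Here 'bEvil' is simply the instruction's immediate byte.  For PSHUFD each 2-bit field of the
+ * immediate selects which source dword lands in the corresponding destination dword; a hedged,
+ * fallback-style sketch (not the actual implementation):
+ *
+ *      static void examplePShufDU128(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil)
+ *      {
+ *          RTUINT128U const uSrc = *puSrc;     // copy first, puDst may alias puSrc
+ *          for (unsigned i = 0; i < 4; i++)
+ *              puDst->au32[i] = uSrc.au32[(bEvil >> (i * 2)) & 3];
+ *      }
+ */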
+
+/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
+typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
+typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
+typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
+FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
+FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
+FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
+FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
+FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
+FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
+FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
+#ifndef IEM_WITHOUT_ASSEMBLY
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
+#endif
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
+typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
+typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
+typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;
+
+FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
+FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;
+
+FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
+FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;
+
+FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
+FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
+FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
+FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
+/** @} */
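+
+/* The puMask operand carries the per-element selector bits.  For PBLENDVB the most significant bit
+ * of each mask byte decides whether the destination byte is replaced by the source byte; a hedged
+ * sketch of the FNIEMAIMPLBLENDU128 shape (not the actual implementation):
+ *
+ *      static void exampleBlendVBU128(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask)
+ *      {
+ *          for (unsigned i = 0; i < 16; i++)
+ *              if (puMask->au8[i] & 0x80)      // MSB set: take the source byte
+ *                  puDst->au8[i] = puSrc->au8[i];
+ *      }
+ */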
+
+
+/** @name Media (SSE/MMX/AVX) operation: Sort this later
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u64,(uint64_t *pu64Dst, uint16_t u16Src, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u128,(PRTUINT128U puDst, uint16_t u16Src, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u64,(uint16_t *pu16Dst, uint64_t u64Src, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128_fallback,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
+
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
+
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
+FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
+
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
+
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
+
+
+typedef struct IEMPCMPISTRISRC
+{
+ RTUINT128U uSrc1;
+ RTUINT128U uSrc2;
+} IEMPCMPISTRISRC;
+typedef IEMPCMPISTRISRC *PIEMPCMPISTRISRC;
+typedef const IEMPCMPISTRISRC *PCIEMPCMPISTRISRC;
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_pcmpistri_u128,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRISRC pSrc, uint8_t bEvil));
+IEM_DECL_IMPL_DEF(void, iemAImpl_pcmpistri_u128_fallback,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRISRC pSrc, uint8_t bEvil));
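+
+/* Hedged call sketch (operand values and control byte picked purely for illustration).  The two
+ * 128-bit string operands are marshalled through IEMPCMPISTRISRC, the resulting index lands in
+ * *pu32Ecx and the status flags in *pEFlags; bEvil is the instruction's imm8 control byte
+ * (0 = unsigned bytes, equal-any aggregation).
+ *
+ *      IEMPCMPISTRISRC Src;
+ *      uint32_t        uEcx    = 0;
+ *      uint32_t        fEFlags = 0;
+ *      Src.uSrc1.au64[0] = UINT64_C(0x0000006f6c6c6568);  // "hello", NUL padded
+ *      Src.uSrc1.au64[1] = 0;
+ *      Src.uSrc2.au64[0] = UINT64_C(0x000000646c726f77);  // "world", NUL padded
+ *      Src.uSrc2.au64[1] = 0;
+ *      iemAImpl_pcmpistri_u128_fallback(&uEcx, &fEFlags, &Src, 0);
+ */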
+
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
+/** @} */
+
+/** @name Media Odds and Ends
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
+FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
+FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
+FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
+FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
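+
+/* Hedged sketch of the accumulator pattern the single destination pointer suggests: the helpers fold
+ * the source operand into *puDst in place, so a whole buffer is processed by feeding it through one
+ * byte at a time (illustration only, not from the sources).
+ *
+ *      static uint32_t exampleCrc32OfBuffer(uint8_t const *pbBuf, size_t cbBuf)
+ *      {
+ *          uint32_t uCrc = 0;      // initial accumulator (the guest's destination register value)
+ *          for (size_t off = 0; off < cbBuf; off++)
+ *              iemAImpl_crc32_u8_fallback(&uCrc, pbBuf[off]);
+ *          return uCrc;
+ *      }
+ */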
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
+FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
+FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src points to a double precision floating point value. */
+typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src points to a double precision floating point value. */
+typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src points to a single precision floating point value. */
+typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src points to a single precision floating point value. */
+typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
+
+FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
+FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
+
+FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
+FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
+
+FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
+FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
+
+FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
+FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
+typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
+typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
+
+FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
+FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
+typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
+typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
+
+FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
+FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
+
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFLMXCSR128,(uint32_t *pfMxcsr, uint32_t *pfEFlags, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
+typedef FNIEMAIMPLF2EFLMXCSR128 *PFNIEMAIMPLF2EFLMXCSR128;
+
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomiss_u128;
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
+
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomisd_u128;
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
+
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comiss_u128;
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
+
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comisd_u128;
+FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
+
+
+typedef struct IEMMEDIAF2XMMSRC
+{
+ X86XMMREG uSrc1;
+ X86XMMREG uSrc2;
+} IEMMEDIAF2XMMSRC;
+typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
+typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t *pfMxcsr, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
+typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
+
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
+
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
+FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U128,(uint32_t *pfMxcsr, uint64_t *pu64Dst, PCX86XMMREG pSrc));
+typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
+
+FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
+FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU128U64,(uint32_t *pfMxcsr, PX86XMMREG pDst, uint64_t u64Src));
+typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
+
+FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
+FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U64,(uint32_t *pfMxcsr, uint64_t *pu64Dst, uint64_t u64Src));
+typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
+
+FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
+FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
+
+/** @} */
+
+
+/** @name Function tables.
+ * @{
+ */
+
+/**
+ * Function table for a binary operator providing implementation based on
+ * operand size.
+ */
+typedef struct IEMOPBINSIZES
+{
+ PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
+ PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
+ PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
+ PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
+} IEMOPBINSIZES;
+/** Pointer to a binary operator function table. */
+typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
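+
+/* Hedged sketch of how such a table could be populated, assuming the add helpers (iemAImpl_add_u8
+ * and friends plus their _locked variants) declared earlier in this header; the table name is made
+ * up.  The locked entries back the LOCK-prefixed encodings.
+ *
+ *      static IEMOPBINSIZES const s_ExampleAddTable =
+ *      {
+ *          iemAImpl_add_u8,  iemAImpl_add_u8_locked,
+ *          iemAImpl_add_u16, iemAImpl_add_u16_locked,
+ *          iemAImpl_add_u32, iemAImpl_add_u32_locked,
+ *          iemAImpl_add_u64, iemAImpl_add_u64_locked
+ *      };
+ */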
+
+
+/**
+ * Function table for a unary operator providing implementation based on
+ * operand size.
+ */
+typedef struct IEMOPUNARYSIZES
+{
+ PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
+ PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
+ PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
+ PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
+} IEMOPUNARYSIZES;
+/** Pointer to a unary operator function table. */
+typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
+
+
+/**
+ * Function table for a shift operator providing implementation based on
+ * operand size.
+ */
+typedef struct IEMOPSHIFTSIZES
+{
+ PFNIEMAIMPLSHIFTU8 pfnNormalU8;
+ PFNIEMAIMPLSHIFTU16 pfnNormalU16;
+ PFNIEMAIMPLSHIFTU32 pfnNormalU32;
+ PFNIEMAIMPLSHIFTU64 pfnNormalU64;
+} IEMOPSHIFTSIZES;
+/** Pointer to a shift operator function table. */
+typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
+
+
+/**
+ * Function table for a multiplication or division operation.
+ */
+typedef struct IEMOPMULDIVSIZES
+{
+ PFNIEMAIMPLMULDIVU8 pfnU8;
+ PFNIEMAIMPLMULDIVU16 pfnU16;
+ PFNIEMAIMPLMULDIVU32 pfnU32;
+ PFNIEMAIMPLMULDIVU64 pfnU64;
+} IEMOPMULDIVSIZES;
+/** Pointer to a multiplication or division operation function table. */
+typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
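+
+/* Illustrative sketch (table name made up) wiring the MUL helpers declared above into such a table;
+ * the decoder would then call the entry matching the effective operand size, e.g. pfnU16 for a
+ * 16-bit MUL.
+ *
+ *      static IEMOPMULDIVSIZES const s_ExampleMulTable =
+ *      {
+ *          iemAImpl_mul_u8, iemAImpl_mul_u16, iemAImpl_mul_u32, iemAImpl_mul_u64
+ *      };
+ */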
+
+
+/**
+ * Function table for a double precision shift operator providing implementation
+ * based on operand size.
+ */
+typedef struct IEMOPSHIFTDBLSIZES
+{
+ PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
+ PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
+ PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
+} IEMOPSHIFTDBLSIZES;
+/** Pointer to a double precision shift function table. */
+typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
+
+
+/**
+ * Function table for a media instruction taking two full sized media source
+ * registers and one full sized destination register (AVX).
+ */
+typedef struct IEMOPMEDIAF3
+{
+ PFNIEMAIMPLMEDIAF3U128 pfnU128;
+ PFNIEMAIMPLMEDIAF3U256 pfnU256;
+} IEMOPMEDIAF3;
+/** Pointer to a media operation function table for 3 full sized ops (AVX). */
+typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
+
+/** @def IEMOPMEDIAF3_INIT_VARS_EX
+ * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
+ * given functions as initializers. For use in AVX functions where a pair of
+ * functions are only used once and the function table need not be public. */
+#ifndef TST_IEM_CHECK_MC
+# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
+ static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# else
+# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# endif
+#else
+# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
+#endif
+/** @def IEMOPMEDIAF3_INIT_VARS
+ * Generate AVX function tables for the @a a_InstrNm instruction.
+ * @sa IEMOPMEDIAF3_INIT_VARS_EX */
+#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
+ IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
+ RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
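+
+/* Illustrative expansion, using the vpand workers declared above.  Writing
+ * IEMOPMEDIAF3_INIT_VARS(vpand); in an instruction body yields, when assembly implementations are
+ * available, roughly:
+ *
+ *      static IEMOPMEDIAF3 const s_Host     = { iemAImpl_vpand_u128,          iemAImpl_vpand_u256 };
+ *      static IEMOPMEDIAF3 const s_Fallback = { iemAImpl_vpand_u128_fallback, iemAImpl_vpand_u256_fallback };
+ *
+ * and only the s_Fallback table otherwise; the caller then dispatches through whichever table
+ * matches the host's capabilities.
+ */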
+
+/**
+ * Function table for a media instruction taking two full sized media source
+ * registers and one full sized destination register, but no additional state
+ * (AVX).
+ */
+typedef struct IEMOPMEDIAOPTF3
+{
+ PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
+ PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
+} IEMOPMEDIAOPTF3;
+/** Pointer to a media operation function table for 3 full sized ops (AVX). */
+typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
+
+/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
+ * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
+ * given functions as initializers. For use in AVX functions where a pair of
+ * functions are only used once and the function table need not be public. */
+#ifndef TST_IEM_CHECK_MC
+# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
+ static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# else
+# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# endif
+#else
+# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
+#endif
+/** @def IEMOPMEDIAOPTF3_INIT_VARS
+ * Generate AVX function tables for the @a a_InstrNm instruction.
+ * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
+#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
+ IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
+ RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
+
+/**
+ * Function table for a media instruction taking one full sized media source
+ * register and one full sized destination register, but no additional state
+ * (AVX).
+ */
+typedef struct IEMOPMEDIAOPTF2
+{
+ PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
+ PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
+} IEMOPMEDIAOPTF2;
+/** Pointer to a media operation function table for 2 full sized ops (AVX). */
+typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
+
+/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
+ * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
+ * given functions as initializers. For use in AVX functions where a pair of
+ * functions are only used once and the function table need not be public. */
+#ifndef TST_IEM_CHECK_MC
+# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
+ static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# else
+# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# endif
+#else
+# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
+#endif
+/** @def IEMOPMEDIAOPTF2_INIT_VARS
+ * Generate AVX function tables for the @a a_InstrNm instruction.
+ * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
+#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
+ IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
+ RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
+
+/**
+ * Function table for a media instruction taking two full sized media source
+ * registers, one full sized destination register and an 8-bit immediate, but
+ * no additional state (AVX).
+ */
+typedef struct IEMOPMEDIAOPTF3IMM8
+{
+ PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
+ PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
+} IEMOPMEDIAOPTF3IMM8;
+/** Pointer to a media operation function table for 3 full sized ops (AVX). */
+typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
+
+/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
+ * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
+ * given functions as initializers. For use in AVX functions where a pair of
+ * functions are only used once and the function table need not be public. */
+#ifndef TST_IEM_CHECK_MC
+# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
+ static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# else
+# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# endif
+#else
+# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
+#endif
+/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
+ * Generate AVX function tables for the @a a_InstrNm instruction.
+ * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
+#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
+ IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
+ RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
+/** @} */
+
+
+/**
+ * Function table for a blend type instruction taking three full sized media source
+ * registers and one full sized destination register, but no additional state
+ * (AVX).
+ */
+typedef struct IEMOPBLENDOP
+{
+ PFNIEMAIMPLAVXBLENDU128 pfnU128;
+ PFNIEMAIMPLAVXBLENDU256 pfnU256;
+} IEMOPBLENDOP;
+/** Pointer to a media operation function table for 4 full sized ops (AVX). */
+typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
+
+/** @def IEMOPBLENDOP_INIT_VARS_EX
+ * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
+ * given functions as initializers. For use in AVX functions where a pair of
+ * functions are only used once and the function table need not be public. */
+#ifndef TST_IEM_CHECK_MC
+# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
+# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
+ static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# else
+# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
+ static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
+# endif
+#else
+# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
+#endif
+/** @def IEMOPBLENDOP_INIT_VARS
+ * Generate AVX function tables for the @a a_InstrNm instruction.
+ * @sa IEMOPBLENDOP_INIT_VARS_EX */
+#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
+ IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
+ RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
+
+
+/** @name SSE/AVX single/double precision floating point operations.
+ * @{ */
+/**
+ * An SSE result.
+ */
+typedef struct IEMSSERESULT
+{
+ /** The output value. */
+ X86XMMREG uResult;
+ /** The output status. */
+ uint32_t MXCSR;
+} IEMSSERESULT;
+AssertCompileMemberOffset(IEMSSERESULT, MXCSR, 128 / 8);
+/** Pointer to an SSE result. */
+typedef IEMSSERESULT *PIEMSSERESULT;
+/** Pointer to a const SSE result. */
+typedef IEMSSERESULT const *PCIEMSSERESULT;
+
+
+/**
+ * An AVX128 result.
+ */
+typedef struct IEMAVX128RESULT
+{
+ /** The output value. */
+ X86XMMREG uResult;
+ /** The output status. */
+ uint32_t MXCSR;
+} IEMAVX128RESULT;
+AssertCompileMemberOffset(IEMAVX128RESULT, MXCSR, 128 / 8);
+/** Pointer to an AVX128 result. */
+typedef IEMAVX128RESULT *PIEMAVX128RESULT;
+/** Pointer to a const AVX128 result. */
+typedef IEMAVX128RESULT const *PCIEMAVX128RESULT;
+
+
+/**
+ * An AVX256 result.
+ */
+typedef struct IEMAVX256RESULT
+{
+ /** The output value. */
+ X86YMMREG uResult;
+ /** The output status. */
+ uint32_t MXCSR;
+} IEMAVX256RESULT;
+AssertCompileMemberOffset(IEMAVX256RESULT, MXCSR, 256 / 8);
+/** Pointer to an AVX256 result. */
+typedef IEMAVX256RESULT *PIEMAVX256RESULT;
+/** Pointer to a const AVX256 result. */
+typedef IEMAVX256RESULT const *PCIEMAVX256RESULT;
+
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
+typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R32,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
+typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R64,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
+typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
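+
+/*
+ * Illustrative sketch only (pFpuState, uSrc1 and uSrc2 are hypothetical
+ * locals; the worker name matches a declaration further down): a caller
+ * invokes an FNIEMAIMPLFPSSEF2U128 worker with the guest FXSAVE state and two
+ * packed sources, then picks up the value and status from the IEMSSERESULT:
+ *
+ *     IEMSSERESULT Res;
+ *     iemAImpl_addps_u128(pFpuState, &Res, &uSrc1, &uSrc2);
+ *
+ * Res.uResult then holds the packed sums and Res.MXCSR the updated MXCSR.
+ */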
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
+typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R32,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
+typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R64,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
+typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
+
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U256,(PX86XSAVEAREA pExtState, PIEMAVX256RESULT pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
+typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
+
+FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2pd_u128;
+
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
+FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
+
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
+FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
+FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
+
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
+FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;
+
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
+FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
+FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
+
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
+FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
+/** @} */
+
+/** @name C instruction implementations for anything slightly complicated.
+ * @{ */
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * no extra arguments.
+ *
+ * @param a_Name The name of the type.
+ */
+# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
+/**
+ * For defining a C instruction implementation function taking no extra
+ * arguments.
+ *
+ * @param a_Name The name of the function
+ */
+# define IEM_CIMPL_DEF_0(a_Name) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
+/**
+ * Prototype version of IEM_CIMPL_DEF_0.
+ */
+# define IEM_CIMPL_PROTO_0(a_Name) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
+/**
+ * For calling a C instruction implementation function taking no extra
+ * arguments.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ */
+# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * one extra argument.
+ *
+ * @param a_Name The name of the type.
+ * @param a_Type0 The argument type.
+ * @param a_Arg0 The argument name.
+ */
+# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
+/**
+ * For defining a C instruction implementation function taking one extra
+ * argument.
+ *
+ * @param a_Name The name of the function
+ * @param a_Type0 The argument type.
+ * @param a_Arg0 The argument name.
+ */
+# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
+/**
+ * Prototype version of IEM_CIMPL_DEF_1.
+ */
+# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
+/**
+ * For calling a C instruction implementation function taking one extra
+ * argument.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ * @param a0 The name of the 1st argument.
+ */
+# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
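+
+/*
+ * Illustrative sketch only (iemCImplExample is a hypothetical function): the
+ * DEF and CALL macros above are used as a pair, with the call macro supplying
+ * the implicit pVCpu and cbInstr arguments:
+ *
+ *     IEM_CIMPL_DEF_1(iemCImplExample, uint8_t, u8Value)
+ *     {
+ *         RT_NOREF(u8Value);
+ *         return VINF_SUCCESS;
+ *     }
+ *
+ *     ... return IEM_CIMPL_CALL_1(iemCImplExample, 0x42); ...
+ */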
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * two extra arguments.
+ *
+ * @param a_Name The name of the type.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ */
+# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
+/**
+ * For defining a C instruction implementation function taking two extra
+ * arguments.
+ *
+ * @param a_Name The name of the function.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ */
+# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
+/**
+ * Prototype version of IEM_CIMPL_DEF_2.
+ */
+# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
+/**
+ * For calling a C instruction implementation function taking two extra
+ * arguments.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ * @param a0 The name of the 1st argument.
+ * @param a1 The name of the 2nd argument.
+ */
+# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * three extra arguments.
+ *
+ * @param a_Name The name of the type.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ */
+# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
+/**
+ * For defining a C instruction implementation function taking three extra
+ * arguments.
+ *
+ * @param a_Name The name of the function.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ */
+# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
+/**
+ * Prototype version of IEM_CIMPL_DEF_3.
+ */
+# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
+/**
+ * For calling a C instruction implementation function taking three extra
+ * arguments.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ * @param a0 The name of the 1st argument.
+ * @param a1 The name of the 2nd argument.
+ * @param a2 The name of the 3rd argument.
+ */
+# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
+
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * four extra arguments.
+ *
+ * @param a_Name The name of the type.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ * @param a_Type3 The type of the 4th argument.
+ * @param a_Arg3 The name of the 4th argument.
+ */
+# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
+/**
+ * For defining a C instruction implementation function taking four extra
+ * arguments.
+ *
+ * @param a_Name The name of the function.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ * @param a_Type3 The type of the 4th argument.
+ * @param a_Arg3 The name of the 4th argument.
+ */
+# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
+ a_Type2 a_Arg2, a_Type3 a_Arg3))
+/**
+ * Prototype version of IEM_CIMPL_DEF_4.
+ */
+# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
+ a_Type2 a_Arg2, a_Type3 a_Arg3))
+/**
+ * For calling a C instruction implementation function taking four extra
+ * arguments.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ * @param a0 The name of the 1st argument.
+ * @param a1 The name of the 2nd argument.
+ * @param a2 The name of the 3rd argument.
+ * @param a3 The name of the 4th argument.
+ */
+# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
+
+
+/**
+ * For typedef'ing or declaring a C instruction implementation function taking
+ * five extra arguments.
+ *
+ * @param a_Name The name of the type.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ * @param a_Type3 The type of the 4th argument.
+ * @param a_Arg3 The name of the 4th argument.
+ * @param a_Type4 The type of the 5th argument.
+ * @param a_Arg4 The name of the 5th argument.
+ */
+# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
+ IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
+ a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
+ a_Type3 a_Arg3, a_Type4 a_Arg4))
+/**
+ * For defining a C instruction implementation function taking five extra
+ * arguments.
+ *
+ * @param a_Name The name of the function.
+ * @param a_Type0 The type of the 1st argument
+ * @param a_Arg0 The name of the 1st argument.
+ * @param a_Type1 The type of the 2nd argument.
+ * @param a_Arg1 The name of the 2nd argument.
+ * @param a_Type2 The type of the 3rd argument.
+ * @param a_Arg2 The name of the 3rd argument.
+ * @param a_Type3 The type of the 4th argument.
+ * @param a_Arg3 The name of the 4th argument.
+ * @param a_Type4 The type of the 5th argument.
+ * @param a_Arg4 The name of the 5th argument.
+ */
+# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
+ IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
+ a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
+/**
+ * Prototype version of IEM_CIMPL_DEF_5.
+ */
+# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
+ IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
+ a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
+/**
+ * For calling a C instruction implementation function taking five extra
+ * arguments.
+ *
+ * This special call macro adds default arguments to the call and allows us to
+ * change these later.
+ *
+ * @param a_fn The name of the function.
+ * @param a0 The name of the 1st argument.
+ * @param a1 The name of the 2nd argument.
+ * @param a2 The name of the 3rd argument.
+ * @param a3 The name of the 4th argument.
+ * @param a4 The name of the 5th argument.
+ */
+# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
+
+/** @} */
+
+
+/** @name Opcode Decoder Function Types.
+ * @{ */
+
+/** @typedef PFNIEMOP
+ * Pointer to an opcode decoder function.
+ */
+
+/** @def FNIEMOP_DEF
+ * Define an opcode decoder function.
+ *
+ * We're using macros for this so that adding and removing parameters as well as
+ * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
+ *
+ * @param a_Name The function name.
+ */
+
+/** @typedef PFNIEMOPRM
+ * Pointer to an opcode decoder function with RM byte.
+ */
+
+/** @def FNIEMOPRM_DEF
+ * Define an opcode decoder function with RM byte.
+ *
+ * We're using macros for this so that adding and removing parameters as well as
+ * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
+ *
+ * @param a_Name The function name.
+ */
+
+#if defined(__GNUC__) && defined(RT_ARCH_X86)
+typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
+typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
+# define FNIEMOP_DEF(a_Name) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
+# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
+# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
+
+#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
+typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
+typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
+# define FNIEMOP_DEF(a_Name) \
+ IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
+# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
+ IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
+
+#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
+typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
+# define FNIEMOP_DEF(a_Name) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
+# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
+# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
+ IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
+
+#else
+typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
+# define FNIEMOP_DEF(a_Name) \
+ IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
+# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
+ IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
+
+#endif
+#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
+
+/**
+ * Call an opcode decoder function.
+ *
+ * We're using macros for this so that adding and removing parameters can be
+ * done as we please. See FNIEMOP_DEF.
+ */
+#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
+
+/**
+ * Call a common opcode decoder function taking one extra argument.
+ *
+ * We're using macros for this so that adding and removing parameters can be
+ * done as we please. See FNIEMOP_DEF_1.
+ */
+#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
+
+/**
+ * Call a common opcode decoder function taking two extra arguments.
+ *
+ * We're using macros for this so that adding and removing parameters can be
+ * done as we please. See FNIEMOP_DEF_2.
+ */
+#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
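+
+/*
+ * Illustrative sketch only (the opcode function names are hypothetical and
+ * IEM_OPCODE_GET_NEXT_U8 is assumed from the opcode fetching helpers): a
+ * decoder function defined with FNIEMOP_DEF typically fetches further bytes
+ * and dispatches through the call macros above:
+ *
+ *     FNIEMOP_DEF(iemOp_example)
+ *     {
+ *         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ *         return FNIEMOP_CALL_1(iemOp_example_rm, bRm);
+ *     }
+ */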
+/** @} */
+
+
+/** @name Misc Helpers
+ * @{ */
+
+/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
+ * due to GCC lacking knowledge about the value range of a switch. */
+#if RT_CPLUSPLUS_PREREQ(202000)
+# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
+#else
+# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
+#endif
+
+/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
+#if RT_CPLUSPLUS_PREREQ(202000)
+# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
+#else
+# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
+#endif
+
+/**
+ * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+ do { \
+ /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
+ return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+ } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+ return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
+ * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion using the supplied logger statement.
+ *
+ * @param a_LoggerArgs What to log on failure.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+ do { \
+ LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
+ /*LogFunc(a_LoggerArgs);*/ \
+ return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+ } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+ return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
+ * Check if we're currently executing in real or virtual 8086 mode.
+ *
+ * @returns @c true if it is, @c false if not.
+ * @param a_pVCpu The IEM state of the current CPU.
+ */
+#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if we're currently executing in virtual 8086 mode.
+ *
+ * @returns @c true if it is, @c false if not.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if we're currently executing in long mode.
+ *
+ * @returns @c true if it is, @c false if not.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if we're currently executing in a 64-bit code segment.
+ *
+ * @returns @c true if it is, @c false if not.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if we're currently executing in real mode.
+ *
+ * @returns @c true if it is, @c false if not.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
+ * @returns PCCPUMFEATURES
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
+
+/**
+ * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
+ * @returns PCCPUMFEATURES
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
+
+/**
+ * Evaluates to true if we're presenting an Intel CPU to the guest.
+ */
+#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
+
+/**
+ * Evaluates to true if we're presenting an AMD CPU to the guest.
+ */
+#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
+
+/**
+ * Check if the address is canonical.
+ */
+#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
+
+/** Checks if the ModR/M byte is in register mode or not. */
+#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
+/** Checks if the ModR/M byte is in memory mode or not. */
+#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
+
+/**
+ * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
+ *
+ * For use during decoding.
+ */
+#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
+/**
+ * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
+ *
+ * For use during decoding.
+ */
+#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
+
+/**
+ * Gets the register (reg) part of a ModR/M encoding, without REX.R.
+ *
+ * For use during decoding.
+ */
+#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
+/**
+ * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
+ *
+ * For use during decoding.
+ */
+#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
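+
+/*
+ * Worked example: for a ModR/M byte of 0xd8 (binary 11 011 000) the mod field
+ * is 3, so IEM_IS_MODRM_REG_MODE() is true, IEM_GET_MODRM_REG_8() yields 3 and
+ * IEM_GET_MODRM_RM_8() yields 0.  The REX-aware variants above additionally OR
+ * in uRexReg/uRexB when a REX prefix was decoded.
+ */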
+
+/**
+ * Gets the effective VEX.VVVV value.
+ *
+ * The 4th bit is ignored when not in 64-bit code.
+ * @returns effective V-register value.
+ * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
+ */
+#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
+ ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
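+
+/*
+ * Example: a decoded uVex3rdReg value of 12 is returned as-is in 64-bit code,
+ * but becomes 12 & 7 = 4 outside it, since only the first eight registers are
+ * addressable there.
+ */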
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Check if the guest has entered VMX root operation.
+ */
+# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if the guest has entered VMX non-root operation.
+ */
+# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if the nested-guest has the given Pin-based VM-execution control set.
+ */
+# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
+ (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
+
+/**
+ * Check if the nested-guest has the given Processor-based VM-execution control set.
+ */
+# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
+ (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
+
+/**
+ * Check if the nested-guest has the given Secondary Processor-based VM-execution
+ * control set.
+ */
+# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
+ (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
+
+/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
+# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
+
+/** Whether a shadow VMCS is present for the given VCPU. */
+# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
+
+/** Gets the VMXON region pointer. */
+# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+
+/** Gets the guest-physical address of the current VMCS for the given VCPU. */
+# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
+
+/** Whether a current VMCS is present for the given VCPU. */
+# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
+
+/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
+# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
+ do \
+ { \
+ Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
+ (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
+ } while (0)
+
+/** Clears any current VMCS for the given VCPU. */
+# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
+ do \
+ { \
+ (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
+ } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler for an instruction intercept.
+ */
+# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
+ do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler for an instruction intercept where the
+ * instruction provides additional VM-exit information.
+ */
+# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
+ do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler for a task switch.
+ */
+# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
+ do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler for MWAIT.
+ */
+# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
+ do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler for EPT faults.
+ */
+# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
+ do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
+
+/**
+ * Invokes the VMX VM-exit handler.
+ */
+# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
+ do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
+
+#else
+# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
+# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
+# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
+# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
+# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
+# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
+# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
+# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
+# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
+# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
+# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
+
+#endif
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+/**
+ * Check if an SVM control/instruction intercept is set.
+ */
+# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
+ (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
+
+/**
+ * Check if an SVM read CRx intercept is set.
+ */
+# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
+ (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
+
+/**
+ * Check if an SVM write CRx intercept is set.
+ */
+# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
+ (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
+
+/**
+ * Check if an SVM read DRx intercept is set.
+ */
+# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
+ (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
+
+/**
+ * Check if an SVM write DRx intercept is set.
+ */
+# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
+ (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
+
+/**
+ * Check if an SVM exception intercept is set.
+ */
+# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
+ (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
+
+/**
+ * Invokes the SVM \#VMEXIT handler for the nested-guest.
+ */
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+ do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
+
+/**
+ * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
+ * corresponding decode assist information.
+ */
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
+ do \
+ { \
+ uint64_t uExitInfo1; \
+ if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
+ && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
+ uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
+ else \
+ uExitInfo1 = 0; \
+ IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
+ } while (0)
+
+/** Checks and handles SVM nested-guest instruction intercepts and updates the
+ * NRIP if needed.
+ */
+# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+ do \
+ { \
+ if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+ { \
+ IEM_SVM_UPDATE_NRIP(a_pVCpu); \
+ IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+ } \
+ } while (0)
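+
+/*
+ * Illustrative sketch only (the RDTSC intercept and exit code constants are
+ * assumed from the SVM headers): a typical instruction implementation checks
+ * its control intercept before doing any real work:
+ *
+ *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC,
+ *                                   SVM_EXIT_RDTSC, 0, 0);
+ */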
+
+/** Checks and handles SVM nested-guest CR0 read intercept. */
+# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
+ do \
+ { \
+ if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
+ { /* probably likely */ } \
+ else \
+ { \
+ IEM_SVM_UPDATE_NRIP(a_pVCpu); \
+ IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
+ } \
+ } while (0)
+
+/**
+ * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
+ */
+# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
+ do { \
+ if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
+ CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
+ } while (0)
+
+#else
+# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
+# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
+# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
+# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
+# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
+# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
+# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
+# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
+
+#endif
+
+/** @} */
+
+void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu);
+
+
+/**
+ * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
+ */
+typedef union IEMSELDESC
+{
+ /** The legacy view. */
+ X86DESC Legacy;
+ /** The long mode view. */
+ X86DESC64 Long;
+} IEMSELDESC;
+/** Pointer to a selector descriptor table entry. */
+typedef IEMSELDESC *PIEMSELDESC;
+
+/** @name Raising Exceptions.
+ * @{ */
+VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
+ uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
+
+VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
+ uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
+ uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
+/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
+VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
+
+IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
+IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
+IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
+
+/**
+ * Macro for calling iemCImplRaiseDivideError().
+ *
+ * This enables us to add/remove arguments and force different levels of
+ * inlining as we wish.
+ *
+ * @return Strict VBox status code.
+ */
+#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
+
+/**
+ * Macro for calling iemCImplRaiseInvalidLockPrefix().
+ *
+ * This enables us to add/remove arguments and force different levels of
+ * inlining as we wish.
+ *
+ * @return Strict VBox status code.
+ */
+#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
+
+/**
+ * Macro for calling iemCImplRaiseInvalidOpcode().
+ *
+ * This enables us to add/remove arguments and force different levels of
+ * inlining as we wish.
+ *
+ * @return Strict VBox status code.
+ */
+#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
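+
+/*
+ * Illustrative use: a decoder function typically ends an invalid encoding
+ * path with
+ *     return IEMOP_RAISE_INVALID_OPCODE();
+ * deferring the actual exception raising to iemCImplRaiseInvalidOpcode().
+ */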
+/** @} */
+
+/** @name Register Access.
+ * @{ */
+VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+ IEMMODE enmEffOpSize) RT_NOEXCEPT;
+VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
+ IEMMODE enmEffOpSize) RT_NOEXCEPT;
+VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewRip) RT_NOEXCEPT;
+VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewRip) RT_NOEXCEPT;
+VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT;
+/** @} */
+
+/** @name FPU access and helpers.
+ * @{ */
+void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT;
+void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT;
+void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT;
+void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT;
+void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
+ uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
+ uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT;
+void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT;
+void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT;
+void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT;
+void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT;
+void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT;
+void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT;
+void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT;
+void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT;
+void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT;
+void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT;
+/** @} */
+
+/** @name SSE+AVX SIMD access and helpers.
+ * @{ */
+void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT;
+void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
+/** @} */
+
+/** @name Memory access.
+ * @{ */
+
+/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
+#define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16)
+/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
+ * when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
+#define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17)
+/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
+ * Users include FXSAVE & FXRSTOR. */
+#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
+
+VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
+ uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
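+
+/*
+ * Illustrative sketch only (iSegReg, GCPtrMem and the IEM_ACCESS_DATA_R access
+ * flag are assumptions): to map a 16 byte SSE operand that must be 16 byte
+ * aligned and should raise #GP rather than #AC on misalignment, the low bits
+ * of uAlignCtl carry the alignment mask and the flags above are ORed in:
+ *
+ *     void *pvMem;
+ *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, 16, iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+ *                                       15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
+ */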
+VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
+#ifndef IN_RING3
+VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
+#endif
+void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
+
+#ifdef IEM_WITH_CODE_TLB
+void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
+#else
+VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
+#endif
+#ifdef IEM_WITH_SETJMP
+uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+#else
+VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
+#endif
+
+VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
+ RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+
+VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
+
+VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+
+VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
+ void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
+ void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
+ void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
+/** @} */
+
+/** @name IEMAllCImpl.cpp
+ * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
+ * @{ */
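+/** @remarks Illustrative note (not from the upstream header): the sed command above
+ *  turns each worker definition in IEMAllCImpl.cpp into the matching prototype listed
+ *  below, e.g.
+ * @code
+ *     IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)    // definition in IEMAllCImpl.cpp
+ *     IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize); // generated prototype
+ * @endcode
+ */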
+IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
+IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
+IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
+IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
+IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
+IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
+IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
+IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
+IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
+IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
+IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
+IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
+IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
+IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
+IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
+IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
+IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
+IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
+IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
+IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
+IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
+IEM_CIMPL_PROTO_0(iemCImpl_syscall);
+IEM_CIMPL_PROTO_0(iemCImpl_sysret);
+IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
+IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
+IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
+IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
+IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
+IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
+IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
+IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
+IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
+IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
+IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_0(iemCImpl_clts);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
+IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
+IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
+IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
+IEM_CIMPL_PROTO_0(iemCImpl_invd);
+IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
+IEM_CIMPL_PROTO_0(iemCImpl_rsm);
+IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
+IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
+IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
+IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
+IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
+IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, bool, fImm, uint8_t, cbReg);
+IEM_CIMPL_PROTO_1(iemCImpl_in_eAX_DX, uint8_t, cbReg);
+IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, bool, fImm, uint8_t, cbReg);
+IEM_CIMPL_PROTO_1(iemCImpl_out_DX_eAX, uint8_t, cbReg);
+IEM_CIMPL_PROTO_0(iemCImpl_cli);
+IEM_CIMPL_PROTO_0(iemCImpl_sti);
+IEM_CIMPL_PROTO_0(iemCImpl_hlt);
+IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_mwait);
+IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
+IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
+IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
+IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
+IEM_CIMPL_PROTO_0(iemCImpl_daa);
+IEM_CIMPL_PROTO_0(iemCImpl_das);
+IEM_CIMPL_PROTO_0(iemCImpl_aaa);
+IEM_CIMPL_PROTO_0(iemCImpl_aas);
+IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
+IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
+IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
+IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
+IEM_CIMPL_PROTO_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
+ PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags);
+IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
+IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
+IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
+IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
+IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
+IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
+IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
+IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
+IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
+IEM_CIMPL_PROTO_1(iemCImpl_fxch_underflow, uint8_t, iStReg);
+IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop);
+/** @} */
+
+/** @name IEMAllCImplStrInstr.cpp.h
+ * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
+ * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
+ * @{ */
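+/** @remarks Illustrative note (not from the upstream header): IEMAllCImplStrInstr.cpp.h
+ *  is a template instantiated per OP_SIZE / ADDR_SIZE combination, building the names
+ *  below with RT_CONCAT4.  For OP_SIZE=32 and ADDR_SIZE=16 a definition along the lines of
+ * @code
+ *     IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
+ * @endcode
+ *  yields iemCImpl_rep_movs_op32_addr16 as prototyped below.
+ */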
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
+
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
+
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+
+IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
+IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
+IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
+IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
+IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
+/** @} */
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
+ bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
+ bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
+ RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
+uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
+void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
+VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
+ uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
+bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
+IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
+IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
+IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
+IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
+IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
+IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
+IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
+IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
+IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
+IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
+IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint32_t *, pu32Dst, uint32_t, u32VmcsField);
+IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
+IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
+IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
+IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
+IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
+#endif
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
+VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
+VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
+ uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
+VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite) RT_NOEXCEPT;
+IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
+IEM_CIMPL_PROTO_0(iemCImpl_vmload);
+IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
+IEM_CIMPL_PROTO_0(iemCImpl_clgi);
+IEM_CIMPL_PROTO_0(iemCImpl_stgi);
+IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
+IEM_CIMPL_PROTO_0(iemCImpl_skinit);
+IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
+#endif
+
+IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
+IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
+IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */
+
+
+extern const PFNIEMOP g_apfnOneByteMap[256];
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
+
diff --git a/src/VBox/VMM/include/IEMMc.h b/src/VBox/VMM/include/IEMMc.h
new file mode 100644
index 00000000..bfbb0842
--- /dev/null
+++ b/src/VBox/VMM/include/IEMMc.h
@@ -0,0 +1,1598 @@
+/* $Id: IEMMc.h $ */
+/** @file
+ * IEM - Interpreted Execution Manager - IEM_MC_XXX.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
+#define VMM_INCLUDED_SRC_include_IEMMc_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+/** @name "Microcode" macros.
+ *
+ * The idea is that we should be able to use the same code to interpret
+ * instructions as well as recompiler instructions. Thus this obfuscation.
+ *
+ * @{
+ */
+#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
+#define IEM_MC_END() }
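+
+/** @remarks Illustrative sketch (not from the upstream header): the IEM_MC_XXX macros
+ *  compose into instruction bodies inside the opcode decoder functions, roughly along
+ *  these lines for a 32-bit register-to-register move (the X86_GREG_xCX / X86_GREG_xAX
+ *  indices are picked purely for illustration).
+ * @code
+ *     IEM_MC_BEGIN(0, 1);
+ *     IEM_MC_LOCAL(uint32_t, u32Value);
+ *     IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xCX);
+ *     IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Value);
+ *     IEM_MC_ADVANCE_RIP_AND_FINISH();
+ *     IEM_MC_END();
+ * @endcode
+ */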
+
+/** Internal macro. */
+#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
+ do \
+ { \
+ VBOXSTRICTRC rcStrict2 = a_Expr; \
+ if (rcStrict2 != VINF_SUCCESS) \
+ return rcStrict2; \
+ } while (0)
+
+
+/** Advances RIP, finishes the instruction and returns.
+ * This may include raising debug exceptions and such. */
+#define IEM_MC_ADVANCE_RIP_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
+ return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
+ * @note only usable in 16-bit op size mode. */
+#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
+ return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
+ return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) return iemRegRipJumpU16AndFinishClearningRF((pVCpu), (a_u16NewIP))
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) return iemRegRipJumpU32AndFinishClearningRF((pVCpu), (a_u32NewIP))
+/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) return iemRegRipJumpU64AndFinishClearningRF((pVCpu), (a_u64NewIP))
+
+#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
+#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
+ do { \
+ if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
+ do { \
+ if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
+ do { \
+ if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
+ return iemRaiseMathFault(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_AESNI_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAesNi) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE42_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse42) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSSE3_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSsse3) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
+ return iemRaiseMathFault(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_EX(a_fSupported) \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(a_fSupported)) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
+ return iemRaiseMathFault(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
+ && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
+ return iemRaiseMathFault(pVCpu); \
+ } while (0)
+#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
+ do { \
+ if (pVCpu->iem.s.uCpl != 0) \
+ return iemRaiseGeneralProtectionFault0(pVCpu); \
+ } while (0)
+#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
+ do { \
+ if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
+ else return iemRaiseGeneralProtectionFault0(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
+ do { \
+ if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
+ do { \
+ if (!IEM_IS_CANONICAL(a_u64Addr)) \
+ return iemRaiseGeneralProtectionFault0(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
+ do { \
+ if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
+ & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) \
+ { \
+ if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
+ return iemRaiseSimdFpException(pVCpu); \
+ else \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ } \
+ } while (0)
+#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
+ do { \
+ if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
+ return iemRaiseSimdFpException(pVCpu); \
+ else \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ } while (0)
+#define IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT() \
+ do { \
+ if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+ || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+ || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPclMul) \
+ return iemRaiseUndefinedOpcode(pVCpu); \
+ if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+ return iemRaiseDeviceNotAvailable(pVCpu); \
+ } while (0)
+
+
+#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
+#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
+#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
+#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
+#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
+#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
+#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
+ uint32_t a_Name; \
+ uint32_t *a_pName = &a_Name
+#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
+ do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
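+/** @remarks Illustrative sketch (not from the upstream header): a plausible EFLAGS
+ *  round trip - IEM_MC_ARG_LOCAL_EFLAGS declares the local and its pointer argument,
+ *  IEM_MC_FETCH_EFLAGS loads the guest value, and IEM_MC_COMMIT_EFLAGS writes the
+ *  (possibly updated) value back; the worker invocation in between is elided.
+ * @code
+ *     IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 1);
+ *     IEM_MC_FETCH_EFLAGS(EFlags);
+ *     // ... hand pEFlags to the worker that updates the arithmetic flags ...
+ *     IEM_MC_COMMIT_EFLAGS(EFlags);
+ * @endcode
+ */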
+
+#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
+#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
+
+#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
+#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
+#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
+#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
+ } while (0)
+#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
+ } while (0)
+/** @note Not for IOPL or IF testing or modification. */
+#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
+#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
+#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
+#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
+
+#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
+#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
+#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
+#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
+#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
+#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
+#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
+#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
+#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
+#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
+#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
+/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
+#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
+ } while (0)
+#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
+ IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+ *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
+ } while (0)
+#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
+
+
+#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
+/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
+ * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
+#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
+#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
+/** @note Not for IOPL or IF testing or modification.
+ * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
+#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
+#define IEM_MC_REF_MXCSR(a_pfMxcsr) (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR
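+/** @remarks Illustrative sketch (not from the upstream header): how the @todo note on
+ *  IEM_MC_REF_GREG_U32 above is typically honoured - after the 32-bit operand has been
+ *  modified through the reference, the upper half of the 64-bit register is zeroed
+ *  explicitly.  The X86_GREG_xAX index and the increment are placeholders.
+ * @code
+ *     uint32_t *pu32Dst;
+ *     IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
+ *     *pu32Dst += 1;                               // stand-in for the real worker
+ *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);  // zero bits 63:32 on commit
+ * @endcode
+ */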
+
+#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
+#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
+#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
+ do { \
+ uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
+ *pu32Reg += (a_u32Value); \
+ pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
+ } while (0)
+#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
+
+#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
+#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
+#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
+ do { \
+ uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
+ *pu32Reg -= (a_u32Value); \
+ pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
+ } while (0)
+#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
+#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
+
+#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
+#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
+#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
+#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
+#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
+#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
+#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
+
+#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
+#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
+#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
+#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
+
+#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
+#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
+#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
+
+#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
+#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
+#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
+
+#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
+#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
+#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
+
+#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)
+
+#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
+#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
+#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
+
+#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
+
+#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
+
+#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
+#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
+#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
+ do { \
+ uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
+ *pu32Reg &= (a_u32Value); \
+ pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
+ } while (0)
+#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
+
+#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
+#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
+#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
+ do { \
+ uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
+ *pu32Reg |= (a_u32Value); \
+ pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
+ } while (0)
+#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
+
+#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
+#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
+#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));
+
+/** @note Not for IOPL or IF modification. */
+#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
+/** @note Not for IOPL or IF modification. */
+#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
+/** @note Not for IOPL or IF modification. */
+#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
+
+#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
+
+/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
+#define IEM_MC_FPU_TO_MMX_MODE() do { \
+ iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
+ pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
+ pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
+ } while (0)
+
+/** Switches the FPU state from MMX mode (FSW.TOP=0, all tags empty, i.e. abridged FTW=0). */
+#define IEM_MC_FPU_FROM_MMX_MODE() do { \
+ iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
+ pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
+ pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
+ } while (0)
+
+#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
+ do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
+#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
+ do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
+#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
+ pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
+ pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
+ } while (0)
+#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
+ pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
+ pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
+ } while (0)
+#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
+ (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
+#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
+ (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
+#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
+ (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
+#define IEM_MC_MODIFIED_MREG(a_iMReg) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
+#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
+ do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
+
+#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
+ do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
+ if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
+ if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
+ if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
+ } while (0)
+#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
+ do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
+ (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
+ } while (0)
+#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
+ do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
+ (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
+ } while (0)
+#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
+ do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
+#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
+ do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
+#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
+ do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
+#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
+ do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
+#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
+ } while (0)
+#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
+ } while (0)
+#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
+#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
+#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
+#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
+#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
+    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)] = (a_u16Value); } while (0)
+#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
+    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)] = (a_u8Value); } while (0)
+
+#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
+ } while (0)
+
+#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
+#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
+#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
+#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
+ } while (0)
+#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
+#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
+ (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
+#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
+ (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
+#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
+ (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
+#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
+ (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
+#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
+ (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
+#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
+ (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
+#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
+ (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
+#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
+ do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
+ = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
+ = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
+ } while (0)
+
+#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
+ do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
+ } while (0)
+#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
+ do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ } while (0)
+#define IEM_MC_FETCH_YREG_2ND_U64(a_u64Dst, a_iYRegSrc) \
+ do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
+ } while (0)
+#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
+ do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
+ } while (0)
+#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
+ do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
+ (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
+ (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
+ } while (0)
+
+#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
+#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+
+#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
+ (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
+ (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
+ (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
+#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
+ do { uintptr_t const iYRegTmp = (a_iYReg); \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
+ } while (0)
+
+#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+
+#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
+#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
+ do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
+ uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
+ pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
+ pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
+ IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
+ } while (0)
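+
+/* Illustrative note (not part of the original header): the *_ZX_VLMAX macros
+ * above model the AVX rule that a VEX-encoded write of 128 bits or less zeroes
+ * the destination register up to the maximum vector length, while a legacy SSE
+ * write leaves bits 255:128 of the corresponding YMM register untouched.  A
+ * hedged sketch using macros defined earlier in this file (the register index
+ * and the u64Src local are made up):
+ *
+ * @code
+ *  // movq xmm0, r/m64 style (legacy SSE): bits 127:64 cleared, 255:128 kept
+ *  IEM_MC_STORE_XREG_U64_ZX_U128(0, u64Src);
+ *  // vmovq xmm0, r/m64 style (VEX.128):   bits 255:64 cleared
+ *  IEM_MC_STORE_YREG_U64_ZX_VLMAX(0, u64Src);
+ * @endcode
+ */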
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
+# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
+#else
+# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
+ ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
+# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
+ ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
+#endif
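+
+/* Illustrative note (not part of the original header): the memory access
+ * helpers come in two flavours selected by IEM_WITH_SETJMP.  The status-code
+ * flavour bails out of the instruction body via IEM_MC_RETURN_ON_FAILURE,
+ * while the *Jmp flavour reports failure by longjmp'ing back to the
+ * interpreter loop, which lets the macro collapse into a plain assignment.
+ * A minimal standalone sketch of the same pattern (all DEMO_* and demo* names
+ * are hypothetical, not IEM code):
+ *
+ * @code
+ *  #include <setjmp.h>
+ *  #include <stdint.h>
+ *
+ *  extern jmp_buf g_DemoJmpBuf;                           // armed by the caller
+ *  int     demoFetchU8(uint8_t *pbDst, uintptr_t uAddr);  // returns a status code
+ *  uint8_t demoFetchU8Jmp(uintptr_t uAddr);               // longjmp()s on failure
+ *
+ *  #ifndef DEMO_WITH_SETJMP
+ *  # define DEMO_FETCH_U8(a_u8Dst, a_Addr) \
+ *      do { int rc = demoFetchU8(&(a_u8Dst), (a_Addr)); if (rc != 0) return rc; } while (0)
+ *  #else
+ *  # define DEMO_FETCH_U8(a_u8Dst, a_Addr) ((a_u8Dst) = demoFetchU8Jmp(a_Addr))
+ *  #endif
+ * @endcode
+ */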
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
+ ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifdef SOME_UNUSED_FUNCTION
+# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
+
+# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+
+# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
+ (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
+ (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
+
+# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
+#else
+# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
+
+# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
+ iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
+#endif
+
+
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u16Dst) = u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u32Dst) = u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint16_t u16Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u32Dst) = u16Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint16_t u16Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = u16Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint32_t u32Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = u32Tmp; \
+ } while (0)
+#else /* IEM_WITH_SETJMP */
+# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+#endif /* IEM_WITH_SETJMP */
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u16Dst) = (int8_t)u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u32Dst) = (int8_t)u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint8_t u8Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = (int8_t)u8Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint16_t u16Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u32Dst) = (int16_t)u16Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint16_t u16Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = (int16_t)u16Tmp; \
+ } while (0)
+# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ do { \
+ uint32_t u32Tmp; \
+ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
+ (a_u64Dst) = (int32_t)u32Tmp; \
+ } while (0)
+#else /* IEM_WITH_SETJMP */
+# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+ ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
+#endif /* IEM_WITH_SETJMP */
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
+# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
+# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
+# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
+#else
+# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
+ iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
+# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
+ iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
+# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
+ iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
+# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
+ iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
+# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
+# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
+# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
+#else
+# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
+ iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
+# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
+ iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
+# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
+ iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
+# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
+ iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
+#endif
+
+#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
+#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
+#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
+#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
+ do { \
+ (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
+ (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
+ } while (0)
+#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
+ do { \
+ (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
+ (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
+ } while (0)
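+
+/* Illustrative note (not part of the original header): the constants above are
+ * the standard "QNaN floating-point indefinite" encodings - 0xffc00000 for
+ * single precision, 0xfff8000000000000 for double precision, and sign=1,
+ * exponent=0x7fff, mantissa=0xc000000000000000 for the 80-bit real indefinite
+ * (the same byte pattern serves as the packed-BCD indefinite).  A tiny
+ * standalone check, assuming an IEEE-754 host:
+ *
+ * @code
+ *  #include <stdint.h>
+ *  #include <stdio.h>
+ *  #include <string.h>
+ *
+ *  int main(void)
+ *  {
+ *      uint32_t const u32 = UINT32_C(0xffc00000);
+ *      uint64_t const u64 = UINT64_C(0xfff8000000000000);
+ *      float  r32; memcpy(&r32, &u32, sizeof(r32));
+ *      double r64; memcpy(&r64, &u64, sizeof(r64));
+ *      printf("%f %f\n", r32, r64);   // both print as (negative) NaN on typical libcs
+ *      return 0;
+ *  }
+ * @endcode
+ */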
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
+# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
+#else
+# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
+ iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
+ iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
+# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
+#else
+# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
+ iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
+# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
+ iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
+#endif
+
+
+#define IEM_MC_PUSH_U16(a_u16Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
+#define IEM_MC_PUSH_U32(a_u32Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
+#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
+#define IEM_MC_PUSH_U64(a_u64Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
+
+#define IEM_MC_POP_U16(a_pu16Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
+#define IEM_MC_POP_U32(a_pu32Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
+#define IEM_MC_POP_U64(a_pu64Value) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
+
+/** Maps guest memory for direct or bounce buffered access.
+ * The purpose is to pass it to an operand implementation, thus the a_iArg.
+ * @remarks May return.
+ */
+#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
+ (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
+
+/** Maps guest memory for direct or bounce buffered access.
+ * The purpose is to pass it to an operand implementation, thus the a_iArg.
+ * @remarks May return.
+ */
+#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_cbAlign, a_iArg) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
+ (a_GCPtrMem), (a_fAccess), (a_cbAlign)))
+
+/** Commits the memory and unmaps the guest memory.
+ * @remarks May return.
+ */
+#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
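+
+/* Illustrative note (not part of the original header): the usual pattern is to
+ * map the destination, let the operand worker modify it in place, then commit
+ * and unmap.  A hedged sketch (the access-flag name, the effective segment and
+ * address locals, and the surrounding decoding are assumptions, abridged here):
+ *
+ * @code
+ *  uint16_t *pu16Dst;
+ *  IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
+ *  // ... operate on *pu16Dst ...
+ *  IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
+ * @endcode
+ */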
+
+/** Commits the memory and unmaps the guest memory unless the FPU status word
+ * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
+ * that would prevent the store from taking place.
+ *
+ * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
+ * store, while \#P will not.
+ *
+ * @remarks May in theory return - for now.
+ */
+#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
+ do { \
+ if ( !(a_u16FSW & X86_FSW_ES) \
+ || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
+ & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
+ IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
+ } while (0)
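+
+/* Illustrative note (not part of the original header): the test above works
+ * because the FSW exception flags (IE/OE/UE in bits 0, 3 and 4) occupy the
+ * same bit positions as the corresponding FCW mask bits (IM/OM/UM), so masked
+ * exceptions drop out of the AND.  A standalone sketch of the same predicate
+ * (DEMO_* names and values are restated from the standard x87 layout, not
+ * taken from this header):
+ *
+ * @code
+ *  #include <stdbool.h>
+ *  #include <stdint.h>
+ *
+ *  #define DEMO_FSW_IE        UINT16_C(0x0001)   // invalid operation
+ *  #define DEMO_FSW_OE        UINT16_C(0x0008)   // overflow
+ *  #define DEMO_FSW_UE        UINT16_C(0x0010)   // underflow
+ *  #define DEMO_FSW_ES        UINT16_C(0x0080)   // exception summary
+ *  #define DEMO_FCW_MASK_ALL  UINT16_C(0x003f)   // IM..PM
+ *
+ *  // True if the mapped store should be committed, mirroring the macro above.
+ *  static bool demoShouldCommitFpuStore(uint16_t fsw, uint16_t fcw)
+ *  {
+ *      if (!(fsw & DEMO_FSW_ES))
+ *          return true;                          // nothing pending at all
+ *      return !((fsw & (DEMO_FSW_UE | DEMO_FSW_OE | DEMO_FSW_IE)) & ~(fcw & DEMO_FCW_MASK_ALL));
+ *  }
+ * @endcode
+ */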
+
+/** Calculate efficient address from R/M. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
+ IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
+#else
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
+ ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
+#endif
+
+#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
+#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
+#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
+#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
+#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
+#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
+#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
+
+/**
+ * Defers the rest of the instruction emulation to a C implementation routine
+ * and returns, only taking the standard parameters.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
+ */
+#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
+
+/**
+ * Defers the rest of instruction emulation to a C implementation routine and
+ * returns, taking one argument in addition to the standard ones.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The argument.
+ */
+#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
+
+/**
+ * Defers the rest of the instruction emulation to a C implementation routine
+ * and returns, taking two arguments in addition to the standard ones.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ */
+#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
+
+/**
+ * Defers the rest of the instruction emulation to a C implementation routine
+ * and returns, taking three arguments in addition to the standard ones.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ */
+#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
+
+/**
+ * Defers the rest of the instruction emulation to a C implementation routine
+ * and returns, taking four arguments in addition to the standard ones.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ * @param a3 The fourth extra argument.
+ */
+#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
+
+/**
+ * Defers the rest of the instruction emulation to a C implementation routine
+ * and returns, taking five arguments in addition to the standard ones.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ * @param a3 The fourth extra argument.
+ * @param a4 The fifth extra argument.
+ */
+#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
+
+/**
+ * Defers the entire instruction emulation to a C implementation routine and
+ * returns, only taking the standard parameters.
+ *
+ * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
+ */
+#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
+
+/**
+ * Defers the entire instruction emulation to a C implementation routine and
+ * returns, taking one argument in addition to the standard ones.
+ *
+ * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The argument.
+ */
+#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
+
+/**
+ * Defers the entire instruction emulation to a C implementation routine and
+ * returns, taking two arguments in addition to the standard ones.
+ *
+ * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ */
+#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
+
+/**
+ * Defers the entire instruction emulation to a C implementation routine and
+ * returns, taking three arguments in addition to the standard ones.
+ *
+ * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
+ *
+ * @param a_pfnCImpl The pointer to the C routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ */
+#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
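+
+/* Illustrative note (not part of the original header): both macro families
+ * hand the work to a C worker that receives the vCPU and the decoded
+ * instruction length plus the listed extra arguments.  IEM_MC_CALL_CIMPL_* is
+ * used inside an IEM_MC_BEGIN/IEM_MC_END block after some decoding has been
+ * emitted, while IEM_MC_DEFER_TO_CIMPL_* stands alone in the decoder function.
+ * A hedged sketch (the opcode and worker names are made up; FNIEMOP_DEF and
+ * the IEM_MC_BEGIN/ARG/END plumbing are defined elsewhere in IEM):
+ *
+ * @code
+ *  // Punt the whole instruction to a C worker:
+ *  FNIEMOP_DEF(iemOp_DemoOpcode)
+ *  {
+ *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_DemoWorker);
+ *  }
+ *
+ *  // Defer after fetching an operand (note: IEM_MC_CALL_CIMPL_1 returns):
+ *  IEM_MC_BEGIN(1, 0);
+ *  IEM_MC_ARG(uint16_t, u16Port, 0);
+ *  IEM_MC_FETCH_GREG_U16(u16Port, X86_GREG_xDX);
+ *  IEM_MC_CALL_CIMPL_1(iemCImpl_DemoOutWorker, u16Port);
+ *  IEM_MC_END();
+ * @endcode
+ */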
+
+/**
+ * Calls a FPU assembly implementation taking one visible argument.
+ *
+ * @param a_pfnAImpl Pointer to the assembly FPU routine.
+ * @param a0 The first extra argument.
+ */
+#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
+ do { \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
+ } while (0)
+
+/**
+ * Calls a FPU assembly implementation taking two visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly FPU routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ */
+#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
+ } while (0)
+
+/**
+ * Calls a FPU assembly implementation taking three visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly FPU routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ */
+#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
+ } while (0)
+
+#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
+ do { \
+ (a_FpuData).FSW = (a_FSW); \
+ (a_FpuData).r80Result = *(a_pr80Value); \
+ } while (0)
+
+/** Pushes FPU result onto the stack. */
+#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
+ iemFpuPushResult(pVCpu, &a_FpuData)
+/** Pushes FPU result onto the stack and sets the FPUDP. */
+#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
+ iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
+
+/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
+#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
+ iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
+
+/** Stores FPU result in a stack register. */
+#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
+ iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
+/** Stores FPU result in a stack register and pops the stack. */
+#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
+ iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
+/** Stores FPU result in a stack register and sets the FPUDP. */
+#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
+ iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
+/** Stores FPU result in a stack register, sets the FPUDP, and pops the
+ * stack. */
+#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
+ iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
+
+/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
+#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
+ iemFpuUpdateOpcodeAndIp(pVCpu)
+/** Free a stack register (for FFREE and FFREEP). */
+#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
+ iemFpuStackFree(pVCpu, a_iStReg)
+/** Increment the FPU stack pointer. */
+#define IEM_MC_FPU_STACK_INC_TOP() \
+ iemFpuStackIncTop(pVCpu)
+/** Decrement the FPU stack pointer. */
+#define IEM_MC_FPU_STACK_DEC_TOP() \
+ iemFpuStackDecTop(pVCpu)
+
+/** Updates the FSW, FOP, FPUIP, and FPUCS. */
+#define IEM_MC_UPDATE_FSW(a_u16FSW) \
+ iemFpuUpdateFSW(pVCpu, a_u16FSW)
+/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
+#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
+ iemFpuUpdateFSW(pVCpu, a_u16FSW)
+/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
+#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
+ iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
+/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
+#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
+ iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
+/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
+ * stack. */
+#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
+ iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
+/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
+#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
+ iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
+
+/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
+#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
+ iemFpuStackUnderflow(pVCpu, a_iStDst)
+/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
+ * stack. */
+#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
+ iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
+/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
+ * FPUDS. */
+#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
+ iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
+/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
+ * FPUDS. Pops stack. */
+#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
+ iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
+/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
+ * stack twice. */
+#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
+ iemFpuStackUnderflowThenPopPop(pVCpu)
+/** Raises a FPU stack underflow exception for an instruction pushing a result
+ * value onto the stack. Sets FPUIP, FPUCS and FOP. */
+#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
+ iemFpuStackPushUnderflow(pVCpu)
+/** Raises a FPU stack underflow exception for an instruction pushing a result
+ * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
+#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
+ iemFpuStackPushUnderflowTwo(pVCpu)
+
+/** Raises a FPU stack overflow exception as part of a push attempt. Sets
+ * FPUIP, FPUCS and FOP. */
+#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
+ iemFpuStackPushOverflow(pVCpu)
+/** Raises a FPU stack overflow exception as part of a push attempt. Sets
+ * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
+#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
+ iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
+/** Prepares for using the FPU state.
+ * Ensures that we can use the host FPU in the current context (RC+R0).
+ * Ensures the guest FPU state in the CPUMCTX is up to date. */
+#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
+/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
+#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
+/** Actualizes the guest FPU state so it can be accessed and modified. */
+#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
+
+/** Stores SSE SIMD result updating MXCSR. */
+#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
+ iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
+/** Updates MXCSR. */
+#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
+ iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
+
+/** Prepares for using the SSE state.
+ * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
+ * Ensures the guest SSE state in the CPUMCTX is up to date. */
+#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
+/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
+#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
+/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
+#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
+
+/** Prepares for using the AVX state.
+ * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
+ * Ensures the guest AVX state in the CPUMCTX is up to date.
+ * @note This will include the AVX512 state too when support for it is added
+ *       due to the zero-extending feature of VEX instructions. */
+#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
+/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
+/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
+
+/**
+ * Calls a MMX assembly implementation taking two visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly MMX routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ */
+#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { \
+ IEM_MC_PREPARE_FPU_USAGE(); \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
+ } while (0)
+
+/**
+ * Calls a MMX assembly implementation taking three visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly MMX routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ */
+#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { \
+ IEM_MC_PREPARE_FPU_USAGE(); \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
+ } while (0)
+
+
+/**
+ * Calls a SSE assembly implementation taking two visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly SSE routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ */
+#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { \
+ IEM_MC_PREPARE_SSE_USAGE(); \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
+ } while (0)
+
+/**
+ * Calls a SSE assembly implementation taking three visible arguments.
+ *
+ * @param a_pfnAImpl Pointer to the assembly SSE routine.
+ * @param a0 The first extra argument.
+ * @param a1 The second extra argument.
+ * @param a2 The third extra argument.
+ */
+#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { \
+ IEM_MC_PREPARE_SSE_USAGE(); \
+ a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
+ } while (0)
+
+
+/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
+ * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
+#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
+ IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
+
+/**
+ * Calls a AVX assembly implementation taking two visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param a_pfnAImpl Pointer to the assembly AVX routine.
+ * @param a1 The first extra argument.
+ * @param a2 The second extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
+ do { \
+ IEM_MC_PREPARE_AVX_USAGE(); \
+ a_pfnAImpl(pXState, (a1), (a2)); \
+ } while (0)
+
+/**
+ * Calls a AVX assembly implementation taking three visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param a_pfnAImpl Pointer to the assembly AVX routine.
+ * @param a1 The first extra argument.
+ * @param a2 The second extra argument.
+ * @param a3 The third extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
+ do { \
+ IEM_MC_PREPARE_AVX_USAGE(); \
+ a_pfnAImpl(pXState, (a1), (a2), (a3)); \
+ } while (0)
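+
+/* Illustrative note (not part of the original header): the AVX worker gets an
+ * implicit zero'th argument pointing at the guest's extended state, which is
+ * what IEM_MC_IMPLICIT_AVX_AIMPL_ARGS declares as the local 'pXState' used by
+ * the two call macros above.  A hedged sketch of a two-operand call (the
+ * worker name is made up and the register referencing is abridged):
+ *
+ * @code
+ *  IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();            // arg 0: pXState
+ *  IEM_MC_ARG(PRTUINT128U,  puDst, 1);
+ *  IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
+ *  // ... point puDst/puSrc at the XMM registers, e.g. via IEM_MC_REF_XREG_U128 ...
+ *  IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_DemoAvxWorker_u128, puDst, puSrc);
+ * @endcode
+ */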
+
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
+ if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
+ != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
+ if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
+ == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
+ if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
+ || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
+ != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
+ if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
+ && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
+ == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
+#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
+#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
+#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.cx != 0 \
+ && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.ecx != 0 \
+ && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.rcx != 0 \
+ && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.cx != 0 \
+ && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.ecx != 0 \
+ && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+/** @note Not for IOPL or IF testing. */
+#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
+ if ( pVCpu->cpum.GstCtx.rcx != 0 \
+ && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
+#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
+#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
+
+#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
+ do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
+#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
+ if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
+#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
+ if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
+#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
+ if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
+#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
+ if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
+#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
+ if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
+#define IEM_MC_IF_FCW_IM() \
+ if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
+#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
+ if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
+ & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
+
+#define IEM_MC_ELSE() } else {
+#define IEM_MC_ENDIF() } do {} while (0)
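+
+/* Illustrative note (not part of the original header): the IEM_MC_IF_* macros
+ * open a brace which IEM_MC_ELSE()/IEM_MC_ENDIF() close again, so a microcode
+ * block reads like structured code yet expands to plain C.  The trailing
+ * 'do {} while (0)' in IEM_MC_ENDIF() merely soaks up the semicolon written
+ * after it.  Expansion example derived from the definitions above:
+ *
+ * @code
+ *  IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
+ *      // ...statements for the taken case...
+ *  IEM_MC_ELSE()
+ *      // ...statements for the not-taken case...
+ *  IEM_MC_ENDIF();
+ *
+ *  // expands to:
+ *  if (pVCpu->cpum.GstCtx.eflags.u & (X86_EFL_ZF)) {
+ *      // ...
+ *  } else {
+ *      // ...
+ *  } do {} while (0);
+ * @endcode
+ */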
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
+
diff --git a/src/VBox/VMM/include/IEMOpHlp.h b/src/VBox/VMM/include/IEMOpHlp.h
new file mode 100644
index 00000000..6a4b6564
--- /dev/null
+++ b/src/VBox/VMM/include/IEMOpHlp.h
@@ -0,0 +1,601 @@
+/* $Id: IEMOpHlp.h $ */
+/** @file
+ * IEM - Interpreted Execution Manager - Opcode Helpers.
+ */
+
+/*
+ * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IEMOpHlp_h
+#define VMM_INCLUDED_SRC_include_IEMOpHlp_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/** @name Common opcode decoders.
+ * @{
+ */
+void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT;
+
+/**
+ * Complains about a stub.
+ *
+ * Two versions of this macro are provided: one for daily use and one for use
+ * when working on IEM.
+ */
+#if 0
+# define IEMOP_BITCH_ABOUT_STUB() \
+ do { \
+ RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
+ iemOpStubMsg2(pVCpu); \
+ RTAssertPanic(); \
+ } while (0)
+#else
+# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
+#endif
+
+/** Stubs an opcode. */
+#define FNIEMOP_STUB(a_Name) \
+ FNIEMOP_DEF(a_Name) \
+ { \
+ RT_NOREF_PV(pVCpu); \
+ IEMOP_BITCH_ABOUT_STUB(); \
+ return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
+ } \
+ typedef int ignore_semicolon
+
+/** Stubs an opcode. */
+#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
+ FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ { \
+ RT_NOREF_PV(pVCpu); \
+ RT_NOREF_PV(a_Name0); \
+ IEMOP_BITCH_ABOUT_STUB(); \
+ return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
+ } \
+ typedef int ignore_semicolon
+
+/** Stubs an opcode which currently should raise \#UD. */
+#define FNIEMOP_UD_STUB(a_Name) \
+ FNIEMOP_DEF(a_Name) \
+ { \
+ Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ typedef int ignore_semicolon
+
+/** Stubs an opcode which currently should raise \#UD. */
+#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
+ FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+ { \
+ RT_NOREF_PV(pVCpu); \
+ RT_NOREF_PV(a_Name0); \
+ Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ typedef int ignore_semicolon
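+
+/* Usage sketch (illustrative only; the handler names below are made up):
+ *
+ *      FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);
+ *      FNIEMOP_UD_STUB(iemOp_SomeInvalidOpcode);
+ */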
+
+/** @} */
+
+
+/** @name Opcode Debug Helpers.
+ * @{
+ */
+#ifdef VBOX_WITH_STATISTICS
+# ifdef IN_RING3
+# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
+# else
+# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
+# endif
+#else
+# define IEMOP_INC_STATS(a_Stats) do { } while (0)
+#endif
+
+#ifdef DEBUG
+# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
+ do { \
+ IEMOP_INC_STATS(a_Stats); \
+ Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
+ pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
+ } while (0)
+
+# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
+ do { \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
+ (void)RT_CONCAT(IEMOPFORM_, a_Form); \
+ (void)RT_CONCAT(OP_,a_Upper); \
+ (void)(a_fDisHints); \
+ (void)(a_fIemHints); \
+ } while (0)
+
+# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
+ do { \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
+ (void)RT_CONCAT(IEMOPFORM_, a_Form); \
+ (void)RT_CONCAT(OP_,a_Upper); \
+ (void)RT_CONCAT(OP_PARM_,a_Op1); \
+ (void)(a_fDisHints); \
+ (void)(a_fIemHints); \
+ } while (0)
+
+# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
+ do { \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
+ (void)RT_CONCAT(IEMOPFORM_, a_Form); \
+ (void)RT_CONCAT(OP_,a_Upper); \
+ (void)RT_CONCAT(OP_PARM_,a_Op1); \
+ (void)RT_CONCAT(OP_PARM_,a_Op2); \
+ (void)(a_fDisHints); \
+ (void)(a_fIemHints); \
+ } while (0)
+
+# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
+ do { \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
+ (void)RT_CONCAT(IEMOPFORM_, a_Form); \
+ (void)RT_CONCAT(OP_,a_Upper); \
+ (void)RT_CONCAT(OP_PARM_,a_Op1); \
+ (void)RT_CONCAT(OP_PARM_,a_Op2); \
+ (void)RT_CONCAT(OP_PARM_,a_Op3); \
+ (void)(a_fDisHints); \
+ (void)(a_fIemHints); \
+ } while (0)
+
+# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
+ do { \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
+ (void)RT_CONCAT(IEMOPFORM_, a_Form); \
+ (void)RT_CONCAT(OP_,a_Upper); \
+ (void)RT_CONCAT(OP_PARM_,a_Op1); \
+ (void)RT_CONCAT(OP_PARM_,a_Op2); \
+ (void)RT_CONCAT(OP_PARM_,a_Op3); \
+ (void)RT_CONCAT(OP_PARM_,a_Op4); \
+ (void)(a_fDisHints); \
+ (void)(a_fIemHints); \
+ } while (0)
+
+#else
+# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
+
+# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
+# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
+# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
+# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
+# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
+
+#endif
+
+#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC0EX(a_Lower, \
+ #a_Lower, \
+ a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
+#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
+ #a_Lower " " #a_Op1, \
+ a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
+#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
+ #a_Lower " " #a_Op1 "," #a_Op2, \
+ a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
+#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
+ #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
+ a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
+#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
+ IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
+ #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
+ a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
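+
+/* Usage sketch (illustrative only, not from the original source; the IEMOPFORM_RM,
+ * OP_ADD, OP_PARM_* and DISOPTYPE_HARMLESS tokens are assumed to be the ones used
+ * elsewhere by IEM):
+ *
+ *      IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
+ *
+ * which bumps the per-instruction statistics and, in debug builds, logs the
+ * decoded mnemonic.
+ */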
+
+/** @} */
+
+
+/** @name Opcode Helpers.
+ * @{
+ */
+
+#ifdef IN_RING3
+# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
+ do { \
+ if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
+ else \
+ { \
+ (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ } while (0)
+#else
+# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
+ do { \
+ if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
+ else return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+#endif
+
+/** The instruction requires a 186 or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
+# define IEMOP_HLP_MIN_186() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
+#endif
+
+/** The instruction requires a 286 or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
+# define IEMOP_HLP_MIN_286() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
+#endif
+
+/** The instruction requires a 386 or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
+# define IEMOP_HLP_MIN_386() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
+#endif
+
+/** The instruction requires a 386 or later if the given expression is true. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
+# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
+#else
+# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
+#endif
+
+/** The instruction requires a 486 or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
+# define IEMOP_HLP_MIN_486() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
+#endif
+
+/** The instruction requires a Pentium (586) or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
+# define IEMOP_HLP_MIN_586() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
+#endif
+
+/** The instruction requires a PentiumPro (686) or later. */
+#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
+# define IEMOP_HLP_MIN_686() do { } while (0)
+#else
+# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
+#endif
+
+
+/** The instruction raises an \#UD in real and V8086 mode. */
+#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
+ do \
+ { \
+ if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
+ else return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** This instruction raises an \#UD in real and V8086 mode, and in long mode when
+ * not using a 64-bit code segment (applicable to all VMX instructions except
+ * VMCALL).
+ */
+#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
+ do \
+ { \
+ if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+ && ( !IEM_IS_LONG_MODE(pVCpu) \
+ || IEM_IS_64BIT_CODE(pVCpu))) \
+ { /* likely */ } \
+ else \
+ { \
+ if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
+ { \
+ pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
+ Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
+ { \
+ pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
+ Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ } \
+ } while (0)
+
+/** The instruction can only be executed in VMX operation (VMX root mode and
+ * non-root mode).
+ *
+ * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
+ */
+# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
+ do \
+ { \
+ if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
+ else \
+ { \
+ pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
+ Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } \
+ } while (0)
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
+/** The instruction is not available in 64-bit mode, throw \#UD if we're in
+ * 64-bit mode. */
+#define IEMOP_HLP_NO_64BIT() \
+ do \
+ { \
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
+ * 64-bit mode. */
+#define IEMOP_HLP_ONLY_64BIT() \
+ do \
+ { \
+ if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/** The instruction defaults to 64-bit operand size if 64-bit mode. */
+#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
+ do \
+ { \
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
+ iemRecalEffOpSize64Default(pVCpu); \
+ } while (0)
+
+/** The instruction defaults to 64-bit operand size in 64-bit mode, and Intel
+ * CPUs ignore the operand size prefix completely (e.g. relative jumps). */
+#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE_AND_INTEL_IGNORES_OP_SIZE_PREFIX() \
+ do \
+ { \
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
+ iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(pVCpu); \
+ } while (0)
+
+/** The instruction has 64-bit operand size if 64-bit mode. */
+#define IEMOP_HLP_64BIT_OP_SIZE() \
+ do \
+ { \
+ if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
+ pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
+ } while (0)
+
+/** Only a REX prefix immediately preceding the first opcode byte takes
+ * effect. This macro helps ensure this as well as logging bad guest code. */
+#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
+ do \
+ { \
+ if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
+ { \
+ Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
+ pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
+ pVCpu->iem.s.uRexB = 0; \
+ pVCpu->iem.s.uRexIndex = 0; \
+ pVCpu->iem.s.uRexReg = 0; \
+ iemRecalEffOpSize(pVCpu); \
+ } \
+ } while (0)
+
+/**
+ * Done decoding.
+ */
+#define IEMOP_HLP_DONE_DECODING() \
+ do \
+ { \
+ /*nothing for now, maybe later... */ \
+ } while (0)
+
+/**
+ * Done decoding, raise \#UD exception if lock prefix present.
+ */
+#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
+ do \
+ { \
+ if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+ } while (0)
+
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, or if in real or v8086 mode.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING() \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, if in real or v8086 mode, or if the
+ * a_fFeature is not present in the guest CPU.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_EX(a_fFeature) \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+ && IEM_GET_GUEST_CPU_FEATURES(pVCpu)->a_fFeature)) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, if in real or v8086 mode, or if
+ * VEX.L is not 0.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+ && pVCpu->iem.s.uVexLength == 0)) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, if in real or v8086 mode, if VEX.L is
+ * not 0, or if the a_fFeature is not present in the guest CPU.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_L0_EX(a_fFeature) \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+ && pVCpu->iem.s.uVexLength == 0 \
+ && IEM_GET_GUEST_CPU_FEATURES(pVCpu)->a_fFeature)) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
+ * register 0, or if in real or v8086 mode.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !pVCpu->iem.s.uVex3rdReg \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
+ * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
+ * register 0, if in real or v8086 mode, or if the a_fFeature is not present in
+ * the guest CPU.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV_EX(a_fFeature) \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
+ && !pVCpu->iem.s.uVex3rdReg \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+ && IEM_GET_GUEST_CPU_FEATURES(pVCpu)->a_fFeature )) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding VEX, no V, L=0.
+ * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
+ * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
+ do \
+ { \
+ if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
+ & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
+ && pVCpu->iem.s.uVexLength == 0 \
+ && pVCpu->iem.s.uVex3rdReg == 0 \
+ && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
+ do \
+ { \
+ if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
+ { /* likely */ } \
+ else \
+ { \
+ NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
+ return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+ } \
+ } while (0)
+#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
+ do \
+ { \
+ if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
+ { /* likely */ } \
+ else \
+ { \
+ NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
+ return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+ } \
+ } while (0)
+
+/**
+ * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
+ * are present.
+ */
+#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
+ do \
+ { \
+ if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+/**
+ * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
+ * prefixes are present.
+ */
+#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
+ do \
+ { \
+ if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
+ { /* likely */ } \
+ else \
+ return IEMOP_RAISE_INVALID_OPCODE(); \
+ } while (0)
+
+VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT;
+#ifdef IEM_WITH_SETJMP
+RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_IEMOpHlp_h */
diff --git a/src/VBox/VMM/include/IOMInline.h b/src/VBox/VMM/include/IOMInline.h
new file mode 100644
index 00000000..fd0ff011
--- /dev/null
+++ b/src/VBox/VMM/include/IOMInline.h
@@ -0,0 +1,270 @@
+/* $Id: IOMInline.h $ */
+/** @file
+ * IOM - Inlined functions.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IOMInline_h
+#define VMM_INCLUDED_SRC_include_IOMInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/errcore.h>
+
+/** @addtogroup grp_iom_int Internals
+ * @internal
+ * @{
+ */
+
+
+/**
+ * Gets the I/O port entry for the specified I/O port in the current context.
+ *
+ * @returns Pointer to I/O port entry.
+ * @returns NULL if no port registered.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uPort The I/O port to lookup.
+ * @param poffPort Where to return the port offset relative to the
+ * start of the I/O port range.
+ * @param pidxLastHint Pointer to IOMCPU::idxIoPortLastRead or
+ * IOMCPU::idxIoPortLastWrite.
+ *
+ * @note In ring-0 it is possible to get an uninitialized entry (pDevIns is
+ * NULL, cPorts is 0), in which case there should be ring-3 handlers
+ * for the entry. Use IOMIOPORTENTRYR0::idxSelf to get the ring-3
+ * entry.
+ *
+ * @note This code is almost identical to iomMmioGetEntry, so keep in sync.
+ */
+DECLINLINE(CTX_SUFF(PIOMIOPORTENTRY)) iomIoPortGetEntry(PVMCC pVM, RTIOPORT uPort, PRTIOPORT poffPort, uint16_t *pidxLastHint)
+{
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
+
+#ifdef IN_RING0
+ uint32_t iEnd = RT_MIN(pVM->iom.s.cIoPortLookupEntries, pVM->iomr0.s.cIoPortAlloc);
+ PCIOMIOPORTLOOKUPENTRY paLookup = pVM->iomr0.s.paIoPortLookup;
+#else
+ uint32_t iEnd = pVM->iom.s.cIoPortLookupEntries;
+ PCIOMIOPORTLOOKUPENTRY paLookup = pVM->iom.s.paIoPortLookup;
+#endif
+ if (iEnd > 0)
+ {
+ uint32_t iFirst = 0;
+ uint32_t i = *pidxLastHint;
+ if (i < iEnd)
+ { /* likely */ }
+ else
+ i = iEnd / 2;
+ for (;;)
+ {
+ PCIOMIOPORTLOOKUPENTRY pCur = &paLookup[i];
+ if (pCur->uFirstPort > uPort)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ break;
+ }
+ else if (pCur->uLastPort < uPort)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ break;
+ }
+ else
+ {
+ *pidxLastHint = (uint16_t)i;
+ *poffPort = uPort - pCur->uFirstPort;
+
+ /*
+ * Translate the 'idx' member into a pointer.
+ */
+ size_t const idx = pCur->idx;
+#ifdef IN_RING0
+ AssertMsg(idx < pVM->iom.s.cIoPortRegs && idx < pVM->iomr0.s.cIoPortAlloc,
+ ("%#zx vs %#x/%x (port %#x)\n", idx, pVM->iom.s.cIoPortRegs, pVM->iomr0.s.cIoPortMax, uPort));
+ if (idx < pVM->iomr0.s.cIoPortAlloc)
+ return &pVM->iomr0.s.paIoPortRegs[idx];
+#else
+ if (idx < pVM->iom.s.cIoPortRegs)
+ return &pVM->iom.s.paIoPortRegs[idx];
+ AssertMsgFailed(("%#zx vs %#x (port %#x)\n", idx, pVM->iom.s.cIoPortRegs, uPort));
+#endif
+ break;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+ }
+ *poffPort = 0;
+ return NULL;
+}
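+
+/* Usage sketch (illustrative only, not from the original source): resolving an
+ * I/O port to its registration entry while holding the shared IOM lock, reusing
+ * the per-VCPU hint so repeated accesses to the same port skip most of the
+ * binary search:
+ *
+ *      RTIOPORT offPort;
+ *      CTX_SUFF(PIOMIOPORTENTRY) pRegEntry = iomIoPortGetEntry(pVM, uPort, &offPort,
+ *                                                              &pVCpu->iom.s.idxIoPortLastRead);
+ *      if (pRegEntry)
+ *          ... dispatch via pRegEntry->pfnInCallback / pfnOutCallback using offPort ...
+ */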
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * Gets the statistics entry for an I/O port.
+ *
+ * @returns Pointer to stats. Instead of NULL, a pointer to IoPortDummyStats is
+ * returned, so the caller does not need to check for NULL.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pRegEntry The I/O port entry to get stats for.
+ * @param offPort The offset of the port relative to the start of the
+ * registration entry.
+ */
+DECLINLINE(PIOMIOPORTSTATSENTRY) iomIoPortGetStats(PVMCC pVM, CTX_SUFF(PIOMIOPORTENTRY) pRegEntry, uint16_t offPort)
+{
+ size_t idxStats = pRegEntry->idxStats;
+ idxStats += offPort;
+# ifdef IN_RING0
+ if (idxStats < pVM->iomr0.s.cIoPortStatsAllocation)
+ return &pVM->iomr0.s.paIoPortStats[idxStats];
+# else
+ if (idxStats < pVM->iom.s.cIoPortStats)
+ return &pVM->iom.s.paIoPortStats[idxStats];
+# endif
+ return &pVM->iom.s.IoPortDummyStats;
+}
+#endif
+
+
+/**
+ * Gets the MMIO region entry for the specified address in the current context.
+ *
+ * @returns Pointer to MMIO region entry.
+ * @returns NULL if no MMIO region registered for the given address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The address to lookup.
+ * @param poffRegion Where to return the byte offset into the MMIO
+ * region that corresponds to @a GCPhys.
+ * @param pidxLastHint Pointer to IOMCPU::idxMmioLastRead,
+ * IOMCPU::idxMmioLastWrite, or similar.
+ *
+ * @note In ring-0 it is possible to get an uninitialized entry (pDevIns is
+ * NULL, cbRegion is 0), in which case there should be ring-3 handlers
+ * for the entry. Use IOMMMIOENTRYR0::idxSelf to get the ring-3 entry.
+ *
+ * @note This code is almost identical to iomIoPortGetEntry, so keep in sync.
+ */
+DECLINLINE(CTX_SUFF(PIOMMMIOENTRY)) iomMmioGetEntry(PVMCC pVM, RTGCPHYS GCPhys, PRTGCPHYS poffRegion, uint16_t *pidxLastHint)
+{
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
+
+#ifdef IN_RING0
+ uint32_t iEnd = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iomr0.s.cMmioAlloc);
+ PCIOMMMIOLOOKUPENTRY paLookup = pVM->iomr0.s.paMmioLookup;
+#else
+ uint32_t iEnd = pVM->iom.s.cMmioLookupEntries;
+ PCIOMMMIOLOOKUPENTRY paLookup = pVM->iom.s.paMmioLookup;
+#endif
+ if (iEnd > 0)
+ {
+ uint32_t iFirst = 0;
+ uint32_t i = *pidxLastHint;
+ if (i < iEnd)
+ { /* likely */ }
+ else
+ i = iEnd / 2;
+ for (;;)
+ {
+ PCIOMMMIOLOOKUPENTRY pCur = &paLookup[i];
+ if (pCur->GCPhysFirst > GCPhys)
+ {
+ if (i > iFirst)
+ iEnd = i;
+ else
+ break;
+ }
+ else if (pCur->GCPhysLast < GCPhys)
+ {
+ i += 1;
+ if (i < iEnd)
+ iFirst = i;
+ else
+ break;
+ }
+ else
+ {
+ *pidxLastHint = (uint16_t)i;
+ *poffRegion = GCPhys - pCur->GCPhysFirst;
+
+ /*
+ * Translate the 'idx' member into a pointer.
+ */
+ size_t const idx = pCur->idx;
+#ifdef IN_RING0
+ AssertMsg(idx < pVM->iom.s.cMmioRegs && idx < pVM->iomr0.s.cMmioAlloc,
+ ("%#zx vs %#x/%x (GCPhys=%RGp)\n", idx, pVM->iom.s.cMmioRegs, pVM->iomr0.s.cMmioMax, GCPhys));
+ if (idx < pVM->iomr0.s.cMmioAlloc)
+ return &pVM->iomr0.s.paMmioRegs[idx];
+#else
+ if (idx < pVM->iom.s.cMmioRegs)
+ return &pVM->iom.s.paMmioRegs[idx];
+ AssertMsgFailed(("%#zx vs %#x (GCPhys=%RGp)\n", idx, pVM->iom.s.cMmioRegs, GCPhys));
+#endif
+ break;
+ }
+
+ i = iFirst + (iEnd - iFirst) / 2;
+ }
+ }
+ *poffRegion = 0;
+ return NULL;
+}
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * Gets the statistics entry for an MMIO region.
+ *
+ * @returns Pointer to stats. Instead of NULL, a pointer to MmioDummyStats is
+ * returned, so the caller does not need to check for NULL.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pRegEntry The MMIO region entry to get stats for.
+ */
+DECLINLINE(PIOMMMIOSTATSENTRY) iomMmioGetStats(PVMCC pVM, CTX_SUFF(PIOMMMIOENTRY) pRegEntry)
+{
+ size_t idxStats = pRegEntry->idxStats;
+# ifdef IN_RING0
+ if (idxStats < pVM->iomr0.s.cMmioStatsAllocation)
+ return &pVM->iomr0.s.paMmioStats[idxStats];
+# else
+ if (idxStats < pVM->iom.s.cMmioStats)
+ return &pVM->iom.s.paMmioStats[idxStats];
+# endif
+ return &pVM->iom.s.MmioDummyStats;
+}
+#endif
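+
+/* Usage sketch (illustrative only, not from the original source): with
+ * VBOX_WITH_STATISTICS a ring-3 read path might account an access roughly like
+ * this, relying on the dummy-entry fallback so no NULL check is needed:
+ *
+ *      PIOMMMIOSTATSENTRY pStats = iomMmioGetStats(pVM, pRegEntry);
+ *      STAM_PROFILE_START(&pStats->ProfReadR3, Prf);
+ *      ... invoke pRegEntry->pfnReadCallback ...
+ *      STAM_PROFILE_STOP(&pStats->ProfReadR3, Prf);
+ *      STAM_COUNTER_INC(&pStats->Reads);
+ */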
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_IOMInline_h */
+
diff --git a/src/VBox/VMM/include/IOMInternal.h b/src/VBox/VMM/include/IOMInternal.h
new file mode 100644
index 00000000..a4d3c145
--- /dev/null
+++ b/src/VBox/VMM/include/IOMInternal.h
@@ -0,0 +1,629 @@
+/* $Id: IOMInternal.h $ */
+/** @file
+ * IOM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_IOMInternal_h
+#define VMM_INCLUDED_SRC_include_IOMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#define IOM_WITH_CRIT_SECT_RW
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/pdmcritsect.h>
+#ifdef IOM_WITH_CRIT_SECT_RW
+# include <VBox/vmm/pdmcritsectrw.h>
+#endif
+#include <VBox/param.h>
+#include <iprt/assert.h>
+#include <iprt/avl.h>
+
+
+
+/** @defgroup grp_iom_int Internals
+ * @ingroup grp_iom
+ * @internal
+ * @{
+ */
+
+/**
+ * I/O port lookup table entry.
+ */
+typedef struct IOMIOPORTLOOKUPENTRY
+{
+ /** The first port in the range. */
+ RTIOPORT uFirstPort;
+ /** The last port in the range (inclusive). */
+ RTIOPORT uLastPort;
+ /** The registration handle/index. */
+ uint16_t idx;
+} IOMIOPORTLOOKUPENTRY;
+/** Pointer to an I/O port lookup table entry. */
+typedef IOMIOPORTLOOKUPENTRY *PIOMIOPORTLOOKUPENTRY;
+/** Pointer to a const I/O port lookup table entry. */
+typedef IOMIOPORTLOOKUPENTRY const *PCIOMIOPORTLOOKUPENTRY;
+
+/**
+ * Ring-0 I/O port handle table entry.
+ */
+typedef struct IOMIOPORTENTRYR0
+{
+ /** Pointer to user argument. */
+ RTR0PTR pvUser;
+ /** Pointer to the associated device instance, NULL if entry not used. */
+ R0PTRTYPE(PPDMDEVINS) pDevIns;
+ /** Pointer to OUT callback function. */
+ R0PTRTYPE(PFNIOMIOPORTNEWOUT) pfnOutCallback;
+ /** Pointer to IN callback function. */
+ R0PTRTYPE(PFNIOMIOPORTNEWIN) pfnInCallback;
+ /** Pointer to string OUT callback function. */
+ R0PTRTYPE(PFNIOMIOPORTNEWOUTSTRING) pfnOutStrCallback;
+ /** Pointer to string IN callback function. */
+ R0PTRTYPE(PFNIOMIOPORTNEWINSTRING) pfnInStrCallback;
+ /** The index of the first statistics entry, UINT16_MAX if no stats. */
+ uint16_t idxStats;
+ /** The number of ports covered by this entry, 0 if entry not used. */
+ RTIOPORT cPorts;
+ /** Same as the handle index. */
+ uint16_t idxSelf;
+ /** IOM_IOPORT_F_XXX (copied from ring-3). */
+ uint16_t fFlags;
+} IOMIOPORTENTRYR0;
+/** Pointer to a ring-0 I/O port handle table entry. */
+typedef IOMIOPORTENTRYR0 *PIOMIOPORTENTRYR0;
+/** Pointer to a const ring-0 I/O port handle table entry. */
+typedef IOMIOPORTENTRYR0 const *PCIOMIOPORTENTRYR0;
+
+/**
+ * Ring-3 I/O port handle table entry.
+ */
+typedef struct IOMIOPORTENTRYR3
+{
+ /** Pointer to user argument. */
+ RTR3PTR pvUser;
+ /** Pointer to the associated device instance. */
+ R3PTRTYPE(PPDMDEVINS) pDevIns;
+ /** Pointer to OUT callback function. */
+ R3PTRTYPE(PFNIOMIOPORTNEWOUT) pfnOutCallback;
+ /** Pointer to IN callback function. */
+ R3PTRTYPE(PFNIOMIOPORTNEWIN) pfnInCallback;
+ /** Pointer to string OUT callback function. */
+ R3PTRTYPE(PFNIOMIOPORTNEWOUTSTRING) pfnOutStrCallback;
+ /** Pointer to string IN callback function. */
+ R3PTRTYPE(PFNIOMIOPORTNEWINSTRING) pfnInStrCallback;
+ /** Description / Name. For easing debugging. */
+ R3PTRTYPE(const char *) pszDesc;
+ /** Extended port description table, optional. */
+ R3PTRTYPE(PCIOMIOPORTDESC) paExtDescs;
+ /** PCI device the registration is associated with. */
+ R3PTRTYPE(PPDMPCIDEV) pPciDev;
+ /** The PCI device region (high 16-bit word) and subregion (low word),
+ * UINT32_MAX if not applicable. */
+ uint32_t iPciRegion;
+ /** The number of ports covered by this entry. */
+ RTIOPORT cPorts;
+ /** The current port mapping (duplicates lookup table). */
+ RTIOPORT uPort;
+ /** The index of the first statistics entry, UINT16_MAX if no stats. */
+ uint16_t idxStats;
+ /** Set if mapped, clear if not.
+ * Only updated when critsect is held exclusively. */
+ bool fMapped;
+ /** Set if there is a ring-0 entry too. */
+ bool fRing0;
+ /** Set if there is a raw-mode entry too. */
+ bool fRawMode;
+ /** IOM_IOPORT_F_XXX */
+ uint8_t fFlags;
+ /** Same as the handle index. */
+ uint16_t idxSelf;
+} IOMIOPORTENTRYR3;
+AssertCompileSize(IOMIOPORTENTRYR3, 9 * sizeof(RTR3PTR) + 16);
+/** Pointer to a ring-3 I/O port handle table entry. */
+typedef IOMIOPORTENTRYR3 *PIOMIOPORTENTRYR3;
+/** Pointer to a const ring-3 I/O port handle table entry. */
+typedef IOMIOPORTENTRYR3 const *PCIOMIOPORTENTRYR3;
+
+/**
+ * I/O port statistics entry (one I/O port).
+ */
+typedef struct IOMIOPORTSTATSENTRY
+{
+ /** All accesses (only updated for the first port in a range). */
+ STAMCOUNTER Total;
+
+ /** Number of INs to this port from R3. */
+ STAMCOUNTER InR3;
+ /** Profiling IN handler overhead in R3. */
+ STAMPROFILE ProfInR3;
+ /** Number of OUTs to this port from R3. */
+ STAMCOUNTER OutR3;
+ /** Profiling OUT handler overhead in R3. */
+ STAMPROFILE ProfOutR3;
+
+ /** Number of INs to this port from R0/RC. */
+ STAMCOUNTER InRZ;
+ /** Profiling IN handler overhead in R0/RC. */
+ STAMPROFILE ProfInRZ;
+ /** Number of INs to this port from R0/RC which were serviced in R3. */
+ STAMCOUNTER InRZToR3;
+
+ /** Number of OUTs to this port from R0/RC. */
+ STAMCOUNTER OutRZ;
+ /** Profiling OUT handler overhead in R0/RC. */
+ STAMPROFILE ProfOutRZ;
+ /** Number of OUTs to this port from R0/RC which were serviced in R3. */
+ STAMCOUNTER OutRZToR3;
+} IOMIOPORTSTATSENTRY;
+/** Pointer to I/O port statistics entry. */
+typedef IOMIOPORTSTATSENTRY *PIOMIOPORTSTATSENTRY;
+
+
+
+/**
+ * MMIO lookup table entry.
+ */
+typedef struct IOMMMIOLOOKUPENTRY
+{
+ /** The first address in the range. */
+ RTGCPHYS GCPhysFirst;
+ /** The last address in the range (inclusive). */
+ RTGCPHYS GCPhysLast;
+ /** The registration handle/index.
+ * @todo bake this into the lower/upper bits of GCPhysFirst & GCPhysLast. */
+ uint16_t idx;
+ uint16_t abPadding[3];
+} IOMMMIOLOOKUPENTRY;
+/** Pointer to an MMIO lookup table entry. */
+typedef IOMMMIOLOOKUPENTRY *PIOMMMIOLOOKUPENTRY;
+/** Pointer to a const MMIO lookup table entry. */
+typedef IOMMMIOLOOKUPENTRY const *PCIOMMMIOLOOKUPENTRY;
+
+/**
+ * Ring-0 MMIO handle table entry.
+ */
+typedef struct IOMMMIOENTRYR0
+{
+ /** The number of bytes covered by this entry, 0 if entry not used. */
+ RTGCPHYS cbRegion;
+ /** Pointer to user argument. */
+ RTR0PTR pvUser;
+ /** Pointer to the associated device instance, NULL if entry not used. */
+ R0PTRTYPE(PPDMDEVINS) pDevIns;
+ /** Pointer to the write callback function. */
+ R0PTRTYPE(PFNIOMMMIONEWWRITE) pfnWriteCallback;
+ /** Pointer to the read callback function. */
+ R0PTRTYPE(PFNIOMMMIONEWREAD) pfnReadCallback;
+ /** Pointer to the fill callback function. */
+ R0PTRTYPE(PFNIOMMMIONEWFILL) pfnFillCallback;
+ /** The index of the first statistics entry, UINT16_MAX if no stats.
+ * @note For simplicity, this is always copied from ring-3 for all entries at
+ * the end of VM creation. */
+ uint16_t idxStats;
+ /** Same as the handle index. */
+ uint16_t idxSelf;
+ /** IOM_MMIO_F_XXX (copied from ring-3). */
+ uint32_t fFlags;
+} IOMMMIOENTRYR0;
+/** Pointer to a ring-0 MMIO handle table entry. */
+typedef IOMMMIOENTRYR0 *PIOMMMIOENTRYR0;
+/** Pointer to a const ring-0 MMIO handle table entry. */
+typedef IOMMMIOENTRYR0 const *PCIOMMMIOENTRYR0;
+
+/**
+ * Ring-3 MMIO handle table entry.
+ */
+typedef struct IOMMMIOENTRYR3
+{
+ /** The number of bytes covered by this entry. */
+ RTGCPHYS cbRegion;
+ /** The current mapping address (duplicates lookup table).
+ * This is set to NIL_RTGCPHYS if not mapped (exclusive lock + atomic). */
+ RTGCPHYS volatile GCPhysMapping;
+ /** Pointer to user argument. */
+ RTR3PTR pvUser;
+ /** Pointer to the associated device instance. */
+ R3PTRTYPE(PPDMDEVINS) pDevIns;
+ /** Pointer to the write callback function. */
+ R3PTRTYPE(PFNIOMMMIONEWWRITE) pfnWriteCallback;
+ /** Pointer to the read callback function. */
+ R3PTRTYPE(PFNIOMMMIONEWREAD) pfnReadCallback;
+ /** Pointer to the fill callback function. */
+ R3PTRTYPE(PFNIOMMMIONEWFILL) pfnFillCallback;
+ /** Description / Name. For easing debugging. */
+ R3PTRTYPE(const char *) pszDesc;
+ /** PCI device the registration is associated with. */
+ R3PTRTYPE(PPDMPCIDEV) pPciDev;
+ /** The PCI device region (high 16-bit word) and subregion (low word),
+ * UINT32_MAX if not applicable. */
+ uint32_t iPciRegion;
+ /** IOM_MMIO_F_XXX */
+ uint32_t fFlags;
+ /** The index of the first statistics entry, UINT16_MAX if no stats. */
+ uint16_t idxStats;
+ /** Set if mapped, clear if not.
+ * Only updated when critsect is held exclusively.
+ * @todo remove as GCPhysMapping != NIL_RTGCPHYS serves the same purpose. */
+ bool volatile fMapped;
+ /** Set if there is a ring-0 entry too. */
+ bool fRing0;
+ /** Set if there is a raw-mode entry too. */
+ bool fRawMode;
+ uint8_t bPadding;
+ /** Same as the handle index. */
+ uint16_t idxSelf;
+} IOMMMIOENTRYR3;
+AssertCompileSize(IOMMMIOENTRYR3, sizeof(RTGCPHYS) * 2 + 7 * sizeof(RTR3PTR) + 16);
+/** Pointer to a ring-3 MMIO handle table entry. */
+typedef IOMMMIOENTRYR3 *PIOMMMIOENTRYR3;
+/** Pointer to a const ring-3 MMIO handle table entry. */
+typedef IOMMMIOENTRYR3 const *PCIOMMMIOENTRYR3;
+
+/**
+ * MMIO statistics entry (one MMIO).
+ */
+typedef struct IOMMMIOSTATSENTRY
+{
+ /** Counting and profiling reads in R0/RC. */
+ STAMPROFILE ProfReadRZ;
+ /** Number of successful read accesses. */
+ STAMCOUNTER Reads;
+ /** Number of reads to this address from R0/RC which were serviced in R3. */
+ STAMCOUNTER ReadRZToR3;
+ /** Number of complicated reads. */
+ STAMCOUNTER ComplicatedReads;
+ /** Number of reads of 0xff or 0x00. */
+ STAMCOUNTER FFor00Reads;
+ /** Profiling read handler overhead in R3. */
+ STAMPROFILE ProfReadR3;
+
+ /** Counting and profiling writes in R0/RC. */
+ STAMPROFILE ProfWriteRZ;
+ /** Number of successful write accesses. */
+ STAMCOUNTER Writes;
+ /** Number of writes to this address from R0/RC which were serviced in R3. */
+ STAMCOUNTER WriteRZToR3;
+ /** Number of writes to this address from R0/RC which were committed in R3. */
+ STAMCOUNTER CommitRZToR3;
+ /** Number of complicated writes. */
+ STAMCOUNTER ComplicatedWrites;
+ /** Profiling write handler overhead in R3. */
+ STAMPROFILE ProfWriteR3;
+} IOMMMIOSTATSENTRY;
+/** Pointer to MMIO statistics entry. */
+typedef IOMMMIOSTATSENTRY *PIOMMMIOSTATSENTRY;
+
+
+/**
+ * IOM per virtual CPU instance data.
+ */
+typedef struct IOMCPU
+{
+ /**
+ * Pending I/O port write commit (VINF_IOM_R3_IOPORT_COMMIT_WRITE).
+ *
+ * This is a converted VINF_IOM_R3_IOPORT_WRITE handler return that lets the
+ * execution engine commit the instruction and then return to ring-3 to complete
+ * the I/O port write there. This avoids having to decode the instruction again
+ * in ring-3.
+ */
+ struct
+ {
+ /** The value size (0 if not pending). */
+ uint16_t cbValue;
+ /** The I/O port. */
+ RTIOPORT IOPort;
+ /** The value. */
+ uint32_t u32Value;
+ } PendingIOPortWrite;
+
+ /**
+ * Pending MMIO write commit (VINF_IOM_R3_MMIO_COMMIT_WRITE).
+ *
+ * This is a converted VINF_IOM_R3_MMIO_WRITE handler return that lets the
+ * execution engine commit the instruction, stop any more REPs, and return to
+ * ring-3 to complete the MMIO write there. This avoids the tedious decoding of
+ * the instruction again once we're in ring-3; more importantly, it allows us to
+ * correctly deal with read-modify-write instructions like XCHG, OR, and XOR.
+ */
+ struct
+ {
+ /** Guest physical MMIO address. */
+ RTGCPHYS GCPhys;
+ /** The number of bytes to write (0 if nothing pending). */
+ uint32_t cbValue;
+ /** Hint. */
+ uint32_t idxMmioRegionHint;
+ /** The value to write. */
+ uint8_t abValue[128];
+ } PendingMmioWrite;
+
+ /** @name Caching of I/O Port and MMIO ranges and statistics.
+ * (Saves quite some time in rep outs/ins instruction emulation.)
+ * @{ */
+ /** I/O port registration index for the last read operation. */
+ uint16_t idxIoPortLastRead;
+ /** I/O port registration index for the last write operation. */
+ uint16_t idxIoPortLastWrite;
+ /** I/O port registration index for the last read string operation. */
+ uint16_t idxIoPortLastReadStr;
+ /** I/O port registration index for the last write string operation. */
+ uint16_t idxIoPortLastWriteStr;
+
+ /** MMIO port registration index for the last IOMR3MmioPhysHandler call.
+ * @note pretty static as only used by APIC on AMD-V. */
+ uint16_t idxMmioLastPhysHandler;
+ uint16_t au16Padding[2];
+ /** @} */
+
+ /** MMIO recursion guard (see @bugref{10315}). */
+ uint8_t cMmioRecursionDepth;
+ uint8_t bPadding;
+ /** The MMIO recursion stack (ring-3 version). */
+ PPDMDEVINSR3 apMmioRecursionStack[2];
+} IOMCPU;
+/** Pointer to IOM per virtual CPU instance data. */
+typedef IOMCPU *PIOMCPU;
+
+
+/**
+ * IOM Data (part of VM)
+ */
+typedef struct IOM
+{
+ /** Lock serializing EMT access to IOM. */
+#ifdef IOM_WITH_CRIT_SECT_RW
+ PDMCRITSECTRW CritSect;
+#else
+ PDMCRITSECT CritSect;
+#endif
+
+ /** @name I/O ports
+ * @note The updating of these variables is done exclusively from EMT(0).
+ * @{ */
+ /** Number of I/O port registrations. */
+ uint32_t cIoPortRegs;
+ /** The size of the paIoPortRegs allocation (in entries). */
+ uint32_t cIoPortAlloc;
+ /** I/O port registration table for ring-3.
+ * There is a parallel table in ring-0, IOMR0PERVM::paIoPortRegs. */
+ R3PTRTYPE(PIOMIOPORTENTRYR3) paIoPortRegs;
+ /** I/O port lookup table. */
+ R3PTRTYPE(PIOMIOPORTLOOKUPENTRY) paIoPortLookup;
+ /** Number of entries in the lookup table. */
+ uint32_t cIoPortLookupEntries;
+ /** Set if I/O port registrations are frozen. */
+ bool fIoPortsFrozen;
+ bool afPadding1[3];
+
+ /** The number of valid entries in paIoPortStats. */
+ uint32_t cIoPortStats;
+ /** The size of the paIoPortStats allocation (in entries). */
+ uint32_t cIoPortStatsAllocation;
+ /** I/O port statistics table. */
+ R3PTRTYPE(PIOMIOPORTSTATSENTRY) paIoPortStats;
+ /** Dummy stats entry so we don't need to check for NULL pointers so much. */
+ IOMIOPORTSTATSENTRY IoPortDummyStats;
+ /** @} */
+
+ /** @name MMIO ports
+ * @note The updating of these variables is done exclusively from EMT(0).
+ * @{ */
+ /** MMIO physical access handler type, new style. */
+ PGMPHYSHANDLERTYPE hNewMmioHandlerType;
+ /** Number of MMIO registrations. */
+ uint32_t cMmioRegs;
+ /** The size of the paMmioRegs allocation (in entries). */
+ uint32_t cMmioAlloc;
+ /** MMIO registration table for ring-3.
+ * There is a parallel table in ring-0, IOMR0PERVM::paMmioRegs. */
+ R3PTRTYPE(PIOMMMIOENTRYR3) paMmioRegs;
+ /** MMIO lookup table. */
+ R3PTRTYPE(PIOMMMIOLOOKUPENTRY) paMmioLookup;
+ /** Number of entries in the lookup table. */
+ uint32_t cMmioLookupEntries;
+ /** Set if MMIO registrations are frozen. */
+ bool fMmioFrozen;
+ bool afPadding2[3];
+
+ /** The number of valid entries in paMmioStats. */
+ uint32_t cMmioStats;
+ /** The size of the paMmioStats allocation (in entries). */
+ uint32_t cMmioStatsAllocation;
+ /** MMIO statistics table. */
+ R3PTRTYPE(PIOMMMIOSTATSENTRY) paMmioStats;
+ /** Dummy stats entry so we don't need to check for NULL pointers so much. */
+ IOMMMIOSTATSENTRY MmioDummyStats;
+ /** @} */
+
+ /** @name I/O Port statistics.
+ * @{ */
+ STAMCOUNTER StatIoPortIn;
+ STAMCOUNTER StatIoPortOut;
+ STAMCOUNTER StatIoPortInS;
+ STAMCOUNTER StatIoPortOutS;
+ STAMCOUNTER StatIoPortCommits;
+ /** @} */
+
+ /** @name MMIO statistics.
+ * @{ */
+ STAMPROFILE StatMmioPfHandler;
+ STAMPROFILE StatMmioPhysHandler;
+ STAMCOUNTER StatMmioHandlerR3;
+ STAMCOUNTER StatMmioHandlerR0;
+ STAMCOUNTER StatMmioReadsR0ToR3;
+ STAMCOUNTER StatMmioWritesR0ToR3;
+ STAMCOUNTER StatMmioCommitsR0ToR3;
+ STAMCOUNTER StatMmioCommitsDirect;
+ STAMCOUNTER StatMmioCommitsPgm;
+ STAMCOUNTER StatMmioStaleMappings;
+ STAMCOUNTER StatMmioDevLockContentionR0;
+ STAMCOUNTER StatMmioTooDeepRecursion;
+ /** @} */
+} IOM;
+#ifdef IOM_WITH_CRIT_SECT_RW
+AssertCompileMemberAlignment(IOM, CritSect, 64);
+#endif
+/** Pointer to IOM instance data. */
+typedef IOM *PIOM;
+
+
+/**
+ * IOM data kept in the ring-0 GVM.
+ */
+typedef struct IOMR0PERVM
+{
+ /** @name I/O ports
+ * @{ */
+ /** The highest ring-0 I/O port registration plus one. */
+ uint32_t cIoPortMax;
+ /** The size of the paIoPortRegs allocation (in entries). */
+ uint32_t cIoPortAlloc;
+ /** I/O port registration table for ring-0.
+ * There is a parallel table for ring-3, paIoPortRing3Regs. */
+ R0PTRTYPE(PIOMIOPORTENTRYR0) paIoPortRegs;
+ /** I/O port lookup table. */
+ R0PTRTYPE(PIOMIOPORTLOOKUPENTRY) paIoPortLookup;
+ /** I/O port registration table for ring-3.
+ * Also mapped to ring-3 as IOM::paIoPortRegs. */
+ R0PTRTYPE(PIOMIOPORTENTRYR3) paIoPortRing3Regs;
+ /** Handle to the allocation backing both the ring-0 and ring-3 registration
+ * tables as well as the lookup table. */
+ RTR0MEMOBJ hIoPortMemObj;
+ /** Handle to the ring-3 mapping of the lookup and ring-3 registration table. */
+ RTR0MEMOBJ hIoPortMapObj;
+#ifdef VBOX_WITH_STATISTICS
+ /** The size of the paIoPortStats allocation (in entries). */
+ uint32_t cIoPortStatsAllocation;
+ /** Prevents paIoPortStats from growing, set by IOMR0IoPortSyncStatisticsIndices(). */
+ bool fIoPortStatsFrozen;
+ /** I/O port statistics table. */
+ R0PTRTYPE(PIOMIOPORTSTATSENTRY) paIoPortStats;
+ /** Handle to the allocation backing the I/O port statistics. */
+ RTR0MEMOBJ hIoPortStatsMemObj;
+ /** Handle to the ring-3 mapping of the I/O port statistics. */
+ RTR0MEMOBJ hIoPortStatsMapObj;
+#endif
+ /** @} */
+
+ /** @name MMIO
+ * @{ */
+ /** The highest ring-0 MMIO registration plus one. */
+ uint32_t cMmioMax;
+ /** The size of the paMmioRegs allocation (in entries). */
+ uint32_t cMmioAlloc;
+ /** MMIO registration table for ring-0.
+ * There is a parallel table for ring-3, paMmioRing3Regs. */
+ R0PTRTYPE(PIOMMMIOENTRYR0) paMmioRegs;
+ /** MMIO lookup table. */
+ R0PTRTYPE(PIOMMMIOLOOKUPENTRY) paMmioLookup;
+ /** MMIO registration table for ring-3.
+ * Also mapped to ring-3 as IOM::paMmioRegs. */
+ R0PTRTYPE(PIOMMMIOENTRYR3) paMmioRing3Regs;
+ /** Handle to the allocation backing both the ring-0 and ring-3 registration
+ * tables as well as the lookup table. */
+ RTR0MEMOBJ hMmioMemObj;
+ /** Handle to the ring-3 mapping of the lookup and ring-3 registration table. */
+ RTR0MEMOBJ hMmioMapObj;
+#ifdef VBOX_WITH_STATISTICS
+ /** The size of the paMmioStats allocation (in entries). */
+ uint32_t cMmioStatsAllocation;
+ /** Prevents paMmioStats from growing, set by IOMR0MmioSyncStatisticsIndices(). */
+ bool fMmioStatsFrozen;
+ /** MMIO statistics table. */
+ R0PTRTYPE(PIOMMMIOSTATSENTRY) paMmioStats;
+ /** Handle to the allocation backing the MMIO statistics. */
+ RTR0MEMOBJ hMmioStatsMemObj;
+ /** Handle to the ring-3 mapping of the MMIO statistics. */
+ RTR0MEMOBJ hMmioStatsMapObj;
+#endif
+ /** @} */
+
+} IOMR0PERVM;
+
+
+RT_C_DECLS_BEGIN
+
+#ifdef IN_RING3
+DECLCALLBACK(void) iomR3IoPortInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+void iomR3IoPortRegStats(PVM pVM, PIOMIOPORTENTRYR3 pRegEntry);
+DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry);
+VBOXSTRICTRC iomR3MmioCommitWorker(PVM pVM, PVMCPU pVCpu, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS offRegion); /* IOMAllMmioNew.cpp */
+#endif /* IN_RING3 */
+#ifdef IN_RING0
+void iomR0IoPortCleanupVM(PGVM pGVM);
+void iomR0IoPortInitPerVMData(PGVM pGVM);
+void iomR0MmioCleanupVM(PGVM pGVM);
+void iomR0MmioInitPerVMData(PGVM pGVM);
+#endif
+
+#ifndef IN_RING3
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iomMmioPfHandlerNew;
+#endif
+DECLCALLBACK(FNPGMPHYSHANDLER) iomMmioHandlerNew;
+
+/* IOM locking helpers. */
+#ifdef IOM_WITH_CRIT_SECT_RW
+# define IOM_LOCK_EXCL(a_pVM) PDMCritSectRwEnterExcl((a_pVM), &(a_pVM)->iom.s.CritSect, VERR_SEM_BUSY)
+# define IOM_UNLOCK_EXCL(a_pVM) do { PDMCritSectRwLeaveExcl((a_pVM), &(a_pVM)->iom.s.CritSect); } while (0)
+# if 0 /* (in case needed for debugging) */
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectRwEnterExcl(&(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectRwLeaveExcl(&(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectRwIsWriteOwner(&(a_pVM)->iom.s.CritSect)
+# else
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectRwEnterShared((a_pVM), &(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectRwLeaveShared((a_pVM), &(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectRwIsReadOwner((a_pVM), &(a_pVM)->iom.s.CritSect, true)
+# endif
+# define IOM_IS_EXCL_LOCK_OWNER(a_pVM) PDMCritSectRwIsWriteOwner((a_pVM), &(a_pVM)->iom.s.CritSect)
+#else
+# define IOM_LOCK_EXCL(a_pVM) PDMCritSectEnter((a_pVM), &(a_pVM)->iom.s.CritSect, VERR_SEM_BUSY)
+# define IOM_UNLOCK_EXCL(a_pVM) do { PDMCritSectLeave((a_pVM), &(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectEnter((a_pVM), &(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectLeave((a_pVM), &(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectIsOwner((a_pVM), &(a_pVM)->iom.s.CritSect)
+# define IOM_IS_EXCL_LOCK_OWNER(a_pVM) PDMCritSectIsOwner((a_pVM), &(a_pVM)->iom.s.CritSect)
+#endif
+#define IOM_LOCK_SHARED(a_pVM) IOM_LOCK_SHARED_EX(a_pVM, VERR_SEM_BUSY)
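+
+/* Usage sketch (illustrative only, not from the original source): the typical
+ * shared-lock pattern when dispatching an access, using a ring-3-return status
+ * as the busy code so the caller can redo the access in ring-3:
+ *
+ *      VBOXSTRICTRC rcStrict = IOM_LOCK_SHARED_EX(pVM, VINF_IOM_R3_IOPORT_READ);
+ *      if (rcStrict == VINF_SUCCESS)
+ *      {
+ *          ... look up the registration entry and call its handler ...
+ *          IOM_UNLOCK_SHARED(pVM);
+ *      }
+ *      else return rcStrict;   ... ring-3 will retry the access ...
+ */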
+
+
+RT_C_DECLS_END
+
+
+#ifdef IN_RING3
+
+#endif
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_IOMInternal_h */
+
diff --git a/src/VBox/VMM/include/MMInternal.h b/src/VBox/VMM/include/MMInternal.h
new file mode 100644
index 00000000..c80d37c9
--- /dev/null
+++ b/src/VBox/VMM/include/MMInternal.h
@@ -0,0 +1,207 @@
+/* $Id: MMInternal.h $ */
+/** @file
+ * MM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_MMInternal_h
+#define VMM_INCLUDED_SRC_include_MMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <iprt/assert.h>
+#include <iprt/avl.h>
+#include <iprt/critsect.h>
+
+
+
+/** @defgroup grp_mm_int Internals
+ * @ingroup grp_mm
+ * @internal
+ * @{
+ */
+
+
+/** @name MMR3Heap - VM Ring-3 Heap Internals
+ * @{
+ */
+
+/** @def MMR3HEAP_SIZE_ALIGNMENT
+ * The allocation size alignment of the MMR3Heap.
+ */
+#define MMR3HEAP_SIZE_ALIGNMENT 16
+
+/** @def MMR3HEAP_WITH_STATISTICS
+ * Enable MMR3Heap statistics.
+ */
+#if !defined(MMR3HEAP_WITH_STATISTICS) && defined(VBOX_WITH_STATISTICS)
+# define MMR3HEAP_WITH_STATISTICS
+#endif
+
+/**
+ * Heap statistics record.
+ * There is one global and one per allocation tag.
+ */
+typedef struct MMHEAPSTAT
+{
+ /** Core avl node, key is the tag. */
+ AVLULNODECORE Core;
+ /** Pointer to the heap the memory belongs to. */
+ struct MMHEAP *pHeap;
+#ifdef MMR3HEAP_WITH_STATISTICS
+ /** Number of bytes currently allocated. */
+ size_t cbCurAllocated;
+ /** Number of allocations. */
+ uint64_t cAllocations;
+ /** Number of reallocations. */
+ uint64_t cReallocations;
+ /** Number of frees. */
+ uint64_t cFrees;
+ /** Failures. */
+ uint64_t cFailures;
+ /** Number of bytes allocated (sum). */
+ uint64_t cbAllocated;
+ /** Number of bytes freed. */
+ uint64_t cbFreed;
+#endif
+} MMHEAPSTAT;
+#if defined(MMR3HEAP_WITH_STATISTICS) && defined(IN_RING3)
+AssertCompileMemberAlignment(MMHEAPSTAT, cAllocations, 8);
+AssertCompileSizeAlignment(MMHEAPSTAT, 8);
+#endif
+/** Pointer to heap statistics record. */
+typedef MMHEAPSTAT *PMMHEAPSTAT;
+
+
+
+
+/**
+ * Additional heap block header for relating allocations to the VM.
+ */
+typedef struct MMHEAPHDR
+{
+ /** Pointer to the next record. */
+ struct MMHEAPHDR *pNext;
+ /** Pointer to the previous record. */
+ struct MMHEAPHDR *pPrev;
+ /** Pointer to the heap statistics record.
+ * (Where a PVM can be found.) */
+ PMMHEAPSTAT pStat;
+ /** Size of the allocation (including this header). */
+ size_t cbSize;
+} MMHEAPHDR;
+/** Pointer to MM heap header. */
+typedef MMHEAPHDR *PMMHEAPHDR;
+
+
+/** MM Heap structure. */
+typedef struct MMHEAP
+{
+ /** Lock protecting the heap. */
+ RTCRITSECT Lock;
+ /** Heap block list head. */
+ PMMHEAPHDR pHead;
+ /** Heap block list tail. */
+ PMMHEAPHDR pTail;
+ /** Heap per tag statistics tree. */
+ PAVLULNODECORE pStatTree;
+ /** The VM handle. */
+ PUVM pUVM;
+ /** Heap global statistics. */
+ MMHEAPSTAT Stat;
+} MMHEAP;
+/** Pointer to MM Heap structure. */
+typedef MMHEAP *PMMHEAP;
+
+/** @} */
+
+/**
+ * MM Data (part of VM)
+ */
+typedef struct MM
+{
+ /** Set if MMR3InitPaging has been called. */
+ bool fDoneMMR3InitPaging;
+ /** Padding. */
+ bool afPadding1[7];
+
+ /** Size of the base RAM in bytes. (The CFGM RamSize value.) */
+ uint64_t cbRamBase;
+ /** Number of bytes of RAM above 4GB, starting at address 4GB. */
+ uint64_t cbRamAbove4GB;
+ /** Size of the below 4GB RAM hole. */
+ uint32_t cbRamHole;
+ /** Number of bytes of RAM below 4GB, starting at address 0. */
+ uint32_t cbRamBelow4GB;
+ /** The number of base RAM pages that PGM has reserved (GMM).
+ * @remarks Shadow ROMs will be counted twice (RAM+ROM), so it won't be 1:1 with
+ * what the guest sees. */
+ uint64_t cBasePages;
+ /** The number of handy pages that PGM has reserved (GMM).
+ * These are kept out of cBasePages and thus out of the saved state. */
+ uint32_t cHandyPages;
+ /** The number of shadow pages PGM has reserved (GMM). */
+ uint32_t cShadowPages;
+ /** The number of fixed pages we've reserved (GMM). */
+ uint32_t cFixedPages;
+ /** Padding. */
+ uint32_t u32Padding2;
+} MM;
+/** Pointer to MM Data (part of VM). */
+typedef MM *PMM;
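+
+/* Illustrative example (not taken from the source): with a CFGM RamSize of 6 GiB
+ * and a 512 MiB hole below 4 GiB, the usual layout would be cbRamBase = 6 GiB,
+ * cbRamHole = 512 MiB, cbRamBelow4GB = 3.5 GiB and cbRamAbove4GB = 2.5 GiB,
+ * i.e. the RAM displaced by the hole is relocated above the 4 GiB boundary. */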
+
+
+/**
+ * MM data kept in the UVM.
+ */
+typedef struct MMUSERPERVM
+{
+ /** Pointer to the MM R3 Heap. */
+ R3PTRTYPE(PMMHEAP) pHeap;
+} MMUSERPERVM;
+/** Pointer to the MM data kept in the UVM. */
+typedef MMUSERPERVM *PMMUSERPERVM;
+
+
+RT_C_DECLS_BEGIN
+
+int mmR3UpdateReservation(PVM pVM);
+
+int mmR3HeapCreateU(PUVM pUVM, PMMHEAP *ppHeap);
+void mmR3HeapDestroy(PMMHEAP pHeap);
+
+const char *mmGetTagName(MMTAG enmTag);
+
+RT_C_DECLS_END
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_MMInternal_h */
+
diff --git a/src/VBox/VMM/include/NEMInternal.h b/src/VBox/VMM/include/NEMInternal.h
new file mode 100644
index 00000000..e0817e21
--- /dev/null
+++ b/src/VBox/VMM/include/NEMInternal.h
@@ -0,0 +1,669 @@
+/* $Id: NEMInternal.h $ */
+/** @file
+ * NEM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2018-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_NEMInternal_h
+#define VMM_INCLUDED_SRC_include_NEMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/cpum.h> /* For CPUMCPUVENDOR. */
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/vmapi.h>
+#ifdef RT_OS_WINDOWS
+#include <iprt/nt/hyperv.h>
+#include <iprt/critsect.h>
+#elif defined(RT_OS_DARWIN)
+# include "VMXInternal.h"
+#endif
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_nem_int Internal
+ * @ingroup grp_nem
+ * @internal
+ * @{
+ */
+
+#if defined(VBOX_WITH_NATIVE_NEM) && !defined(VBOX_WITH_PGM_NEM_MODE)
+# error "VBOX_WITH_NATIVE_NEM requires VBOX_WITH_PGM_NEM_MODE to be defined"
+#endif
+
+
+#ifdef RT_OS_WINDOWS
+/*
+ * Windows: Code configuration.
+ */
+/* nothing at the moment */
+
+/**
+ * Windows VID I/O control information.
+ */
+typedef struct NEMWINIOCTL
+{
+ /** The I/O control function number. */
+ uint32_t uFunction;
+ uint32_t cbInput;
+ uint32_t cbOutput;
+} NEMWINIOCTL;
+
+/** @name Windows: Our two-bit physical page state for PGMPAGE
+ * @{ */
+# define NEM_WIN_PAGE_STATE_NOT_SET 0
+# define NEM_WIN_PAGE_STATE_UNMAPPED 1
+# define NEM_WIN_PAGE_STATE_READABLE 2
+# define NEM_WIN_PAGE_STATE_WRITABLE 3
+/** @} */
+
+/** Windows: Checks if a_GCPhys is subject to the limited A20 gate emulation. */
+# define NEM_WIN_IS_SUBJECT_TO_A20(a_GCPhys) ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K)
+/** Windows: Checks if a_GCPhys is relevant to the limited A20 gate emulation. */
+# define NEM_WIN_IS_RELEVANT_TO_A20(a_GCPhys) \
+ ( ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K) || ((RTGCPHYS)(a_GCPhys) < (RTGCPHYS)_64K) )
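+
+/* Informal reading of the two macros above: NEM_WIN_IS_SUBJECT_TO_A20 matches the
+ * 64 KiB window at 1 MiB (0x00100000..0x0010FFFF) that wraps around when the A20
+ * gate is masked, while NEM_WIN_IS_RELEVANT_TO_A20 additionally matches the
+ * aliased low 64 KiB (0x00000000..0x0000FFFF). */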
+
+/** The CPUMCTX_EXTRN_XXX mask for IEM. */
+# define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT \
+ | CPUMCTX_EXTRN_INHIBIT_NMI )
+/** The CPUMCTX_EXTRN_XXX mask for IEM when raising exceptions. */
+# define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM)
+
+/** @name Windows: Interrupt window flags (NEM_WIN_INTW_F_XXX).
+ * @{ */
+# define NEM_WIN_INTW_F_NMI UINT8_C(0x01)
+# define NEM_WIN_INTW_F_REGULAR UINT8_C(0x02)
+# define NEM_WIN_INTW_F_PRIO_MASK UINT8_C(0x3c)
+# define NEM_WIN_INTW_F_PRIO_SHIFT 2
+/** @} */
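+
+/* A minimal sketch of how these bits would typically be combined and decoded;
+ * the variable names are hypothetical:
+ *      uint8_t fIntWin = NEM_WIN_INTW_F_REGULAR
+ *                      | ((uPrio << NEM_WIN_INTW_F_PRIO_SHIFT) & NEM_WIN_INTW_F_PRIO_MASK);
+ *      uint8_t uPrio2  = (fIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT;
+ */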
+
+#endif /* RT_OS_WINDOWS */
+
+
+#ifdef RT_OS_DARWIN
+/** vCPU ID declaration to avoid dragging in HV headers here. */
+typedef unsigned hv_vcpuid_t;
+/** The HV VM memory space ID (ASID). */
+typedef unsigned hv_vm_space_t;
+
+
+/** @name Darwin: Our two-bit physical page state for PGMPAGE
+ * @{ */
+# define NEM_DARWIN_PAGE_STATE_UNMAPPED 0
+# define NEM_DARWIN_PAGE_STATE_RX 1
+# define NEM_DARWIN_PAGE_STATE_RW 2
+# define NEM_DARWIN_PAGE_STATE_RWX 3
+/** @} */
+
+/** The CPUMCTX_EXTRN_XXX mask for IEM. */
+# define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT \
+ | CPUMCTX_EXTRN_INHIBIT_NMI )
+/** The CPUMCTX_EXTRN_XXX mask for IEM when raising exceptions. */
+# define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM)
+
+#endif
+
+
+/** Trick to make slickedit see the static functions in the template. */
+#ifndef IN_SLICKEDIT
+# define NEM_TMPL_STATIC static
+#else
+# define NEM_TMPL_STATIC
+#endif
+
+
+/**
+ * Generic NEM exit type enumeration for use with EMHistoryAddExit.
+ *
+ * On Windows we've got two different sets of exit types and they are both jumping
+ * around the place value-wise, so EM can use their values.
+ *
+ * @note We only have exit types for exits not covered by EM here.
+ */
+typedef enum NEMEXITTYPE
+{
+ NEMEXITTYPE_INVALID = 0,
+
+ /* Common: */
+ NEMEXITTYPE_INTTERRUPT_WINDOW,
+ NEMEXITTYPE_HALT,
+
+ /* Windows: */
+ NEMEXITTYPE_UNRECOVERABLE_EXCEPTION,
+ NEMEXITTYPE_INVALID_VP_REGISTER_VALUE,
+ NEMEXITTYPE_XCPT_UD,
+ NEMEXITTYPE_XCPT_DB,
+ NEMEXITTYPE_XCPT_BP,
+ NEMEXITTYPE_CANCELED,
+ NEMEXITTYPE_MEMORY_ACCESS,
+
+ /* Linux: */
+ NEMEXITTYPE_INTERNAL_ERROR_EMULATION,
+ NEMEXITTYPE_INTERNAL_ERROR_FATAL,
+ NEMEXITTYPE_INTERRUPTED,
+ NEMEXITTYPE_FAILED_ENTRY,
+
+ /* End of valid types. */
+ NEMEXITTYPE_END
+} NEMEXITTYPE;
+
+
+/**
+ * NEM VM Instance data.
+ */
+typedef struct NEM
+{
+ /** NEM_MAGIC. */
+ uint32_t u32Magic;
+
+ /** Set if enabled. */
+ bool fEnabled;
+ /** Set if long mode guests are allowed. */
+ bool fAllow64BitGuests;
+ /** Set when the debug facility has breakpoints/events enabled that require
+ * us to use the debug execution loop. */
+ bool fUseDebugLoop;
+
+#if defined(RT_OS_LINUX)
+ /** The '/dev/kvm' file descriptor. */
+ int32_t fdKvm;
+ /** The KVM_CREATE_VM file descriptor. */
+ int32_t fdVm;
+
+ /** KVM_GET_VCPU_MMAP_SIZE. */
+ uint32_t cbVCpuMmap;
+ /** KVM_CAP_NR_MEMSLOTS. */
+ uint32_t cMaxMemSlots;
+ /** KVM_CAP_X86_ROBUST_SINGLESTEP. */
+ bool fRobustSingleStep;
+
+ /** Hint where there might be a free slot. */
+ uint16_t idPrevSlot;
+ /** Memory slot ID allocation bitmap. */
+ uint64_t bmSlotIds[_32K / 8 / sizeof(uint64_t)];
+
+#elif defined(RT_OS_WINDOWS)
+ /** Set if we've created the EMTs. */
+ bool fCreatedEmts : 1;
+ /** WHvRunVpExitReasonX64MsrAccess is supported. */
+ bool fExtendedMsrExit : 1;
+ /** WHvRunVpExitReasonX64Cpuid is supported. */
+ bool fExtendedCpuIdExit : 1;
+ /** WHvRunVpExitReasonException is supported. */
+ bool fExtendedXcptExit : 1;
+# ifdef NEM_WIN_WITH_A20
+ /** Set if we've started more than one CPU and cannot mess with A20. */
+ bool fA20Fixed : 1;
+ /** Set if A20 is enabled. */
+ bool fA20Enabled : 1;
+# endif
+ /** The reported CPU vendor. */
+ CPUMCPUVENDOR enmCpuVendor;
+ /** Cache line flush size as a power of two. */
+ uint8_t cCacheLineFlushShift;
+ /** The result of WHvCapabilityCodeProcessorFeatures. */
+ union
+ {
+ /** 64-bit view. */
+ uint64_t u64;
+# ifdef _WINHVAPIDEFS_H_
+ /** Interpreted features. */
+ WHV_PROCESSOR_FEATURES u;
+# endif
+ } uCpuFeatures;
+
+ /** The partition handle. */
+# ifdef _WINHVAPIDEFS_H_
+ WHV_PARTITION_HANDLE
+# else
+ RTHCUINTPTR
+# endif
+ hPartition;
+ /** The device handle for the partition, for use with Vid APIs or direct I/O
+ * controls. */
+ RTR3PTR hPartitionDevice;
+
+ /** Number of currently mapped pages. */
+ uint32_t volatile cMappedPages;
+ uint32_t u32Padding;
+ STAMCOUNTER StatMapPage;
+ STAMCOUNTER StatUnmapPage;
+ STAMCOUNTER StatMapPageFailed;
+ STAMCOUNTER StatUnmapPageFailed;
+ STAMPROFILE StatProfMapGpaRange;
+ STAMPROFILE StatProfUnmapGpaRange;
+ STAMPROFILE StatProfMapGpaRangePage;
+ STAMPROFILE StatProfUnmapGpaRangePage;
+
+ /** Statistics updated by NEMR0UpdateStatistics. */
+ struct
+ {
+ uint64_t cPagesAvailable;
+ uint64_t cPagesInUse;
+ } R0Stats;
+
+#elif defined(RT_OS_DARWIN)
+ /** Set if we've created the EMTs. */
+ bool fCreatedEmts : 1;
+ /** Set if hv_vm_create() was called successfully. */
+ bool fCreatedVm : 1;
+ /** Set if hv_vm_space_create() was called successfully. */
+ bool fCreatedAsid : 1;
+ /** Set if Last Branch Record (LBR) is enabled. */
+ bool fLbr;
+ /** The ASID for this VM (only valid if fCreatedAsid is true). */
+ hv_vm_space_t uVmAsid;
+ /** Number of Mach time units per nanosecond, for hv_vcpu_run_until(). */
+ uint64_t cMachTimePerNs;
+ /** Pause-loop exiting (PLE) gap in ticks. */
+ uint32_t cPleGapTicks;
+ /** Pause-loop exiting (PLE) window in ticks. */
+ uint32_t cPleWindowTicks;
+
+ /** The host LBR TOS (top-of-stack) MSR id. */
+ uint32_t idLbrTosMsr;
+ /** The host LBR select MSR id. */
+ uint32_t idLbrSelectMsr;
+ /** The host last event record from IP MSR id. */
+ uint32_t idLerFromIpMsr;
+ /** The host last event record to IP MSR id. */
+ uint32_t idLerToIpMsr;
+
+ /** The first valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrFirst;
+ /** The last valid host LBR branch-from-IP stack range. */
+ uint32_t idLbrFromIpMsrLast;
+
+ /** The first valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrFirst;
+ /** The last valid host LBR branch-to-IP stack range. */
+ uint32_t idLbrToIpMsrLast;
+
+ /** The first valid host LBR info stack range. */
+ uint32_t idLbrInfoMsrFirst;
+ /** The last valid host LBR info stack range. */
+ uint32_t idLbrInfoMsrLast;
+
+ STAMCOUNTER StatMapPage;
+ STAMCOUNTER StatUnmapPage;
+ STAMCOUNTER StatMapPageFailed;
+ STAMCOUNTER StatUnmapPageFailed;
+#endif /* RT_OS_DARWIN */
+} NEM;
+/** Pointer to NEM VM instance data. */
+typedef NEM *PNEM;
+
+/** NEM::u32Magic value. */
+#define NEM_MAGIC UINT32_C(0x004d454e)
+/** NEM::u32Magic value after termination. */
+#define NEM_MAGIC_DEAD UINT32_C(0xdead1111)
+
+
+/**
+ * NEM VMCPU Instance data.
+ */
+typedef struct NEMCPU
+{
+ /** NEMCPU_MAGIC. */
+ uint32_t u32Magic;
+ /** Whether \#UD needs to be intercepted and presented to GIM. */
+ bool fGIMTrapXcptUD : 1;
+ /** Whether \#GP needs to be intercepted for the Mesa driver workaround. */
+ bool fTrapXcptGpForLovelyMesaDrv: 1;
+ /** Whether we should use the debug loop because single stepping or special
+ * debug breakpoints / events are armed. */
+ bool fUseDebugLoop : 1;
+ /** Whether we're executing a single instruction. */
+ bool fSingleInstruction : 1;
+ /** Set if we are using the debug loop and wish to intercept RDTSC. */
+ bool fDebugWantRdTscExit : 1;
+ /** Whether we are currently executing in the debug loop.
+ * Mainly for assertions. */
+ bool fUsingDebugLoop : 1;
+ /** Set if we need to clear the trap flag because of single stepping. */
+ bool fClearTrapFlag : 1;
+ /** Whether we're using the hyper DR7 or guest DR7. */
+ bool fUsingHyperDR7 : 1;
+ /** Whether \#DE needs to be intercepted for GIM. */
+ bool fGCMTrapXcptDE : 1;
+
+#if defined(RT_OS_LINUX)
+ uint8_t abPadding[3];
+ /** The KVM VCpu file descriptor. */
+ int32_t fdVCpu;
+ /** Pointer to the KVM_RUN data exchange region. */
+ R3PTRTYPE(struct kvm_run *) pRun;
+ /** The MSR_IA32_APICBASE value known to KVM. */
+ uint64_t uKvmApicBase;
+
+ /** @name Statistics
+ * @{ */
+ STAMCOUNTER StatExitTotal;
+ STAMCOUNTER StatExitIo;
+ STAMCOUNTER StatExitMmio;
+ STAMCOUNTER StatExitSetTpr;
+ STAMCOUNTER StatExitTprAccess;
+ STAMCOUNTER StatExitRdMsr;
+ STAMCOUNTER StatExitWrMsr;
+ STAMCOUNTER StatExitIrqWindowOpen;
+ STAMCOUNTER StatExitHalt;
+ STAMCOUNTER StatExitIntr;
+ STAMCOUNTER StatExitHypercall;
+ STAMCOUNTER StatExitDebug;
+ STAMCOUNTER StatExitBusLock;
+ STAMCOUNTER StatExitInternalErrorEmulation;
+ STAMCOUNTER StatExitInternalErrorFatal;
+# if 0
+ STAMCOUNTER StatExitCpuId;
+ STAMCOUNTER StatExitUnrecoverable;
+ STAMCOUNTER StatGetMsgTimeout;
+ STAMCOUNTER StatStopCpuSuccess;
+ STAMCOUNTER StatStopCpuPending;
+ STAMCOUNTER StatStopCpuPendingAlerts;
+ STAMCOUNTER StatStopCpuPendingOdd;
+ STAMCOUNTER StatCancelChangedState;
+ STAMCOUNTER StatCancelAlertedThread;
+# endif
+ STAMCOUNTER StatBreakOnCancel;
+ STAMCOUNTER StatBreakOnFFPre;
+ STAMCOUNTER StatBreakOnFFPost;
+ STAMCOUNTER StatBreakOnStatus;
+ STAMCOUNTER StatFlushExitOnReturn;
+ STAMCOUNTER StatFlushExitOnReturn1Loop;
+ STAMCOUNTER StatFlushExitOnReturn2Loops;
+ STAMCOUNTER StatFlushExitOnReturn3Loops;
+ STAMCOUNTER StatFlushExitOnReturn4PlusLoops;
+ STAMCOUNTER StatImportOnDemand;
+ STAMCOUNTER StatImportOnReturn;
+ STAMCOUNTER StatImportOnReturnSkipped;
+ STAMCOUNTER StatImportPendingInterrupt;
+ STAMCOUNTER StatExportPendingInterrupt;
+ STAMCOUNTER StatQueryCpuTick;
+ /** @} */
+
+
+#elif defined(RT_OS_WINDOWS)
+ /** The current state of the interrupt windows (NEM_WIN_INTW_F_XXX). */
+ uint8_t fCurrentInterruptWindows;
+ /** The desired state of the interrupt windows (NEM_WIN_INTW_F_XXX). */
+ uint8_t fDesiredInterruptWindows;
+ /** Last copy of HV_X64_VP_EXECUTION_STATE::InterruptShadow. */
+ bool fLastInterruptShadow : 1;
+ uint32_t uPadding;
+ /** The VID_MSHAGN_F_XXX flags.
+ * Either VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE or zero. */
+ uint32_t fHandleAndGetFlags;
+ /** What VidMessageSlotMap returns and is used for passing exit info. */
+ RTR3PTR pvMsgSlotMapping;
+ /** The windows thread handle. */
+ RTR3PTR hNativeThreadHandle;
+
+ /** @name Statistics
+ * @{ */
+ STAMCOUNTER StatExitPortIo;
+ STAMCOUNTER StatExitMemUnmapped;
+ STAMCOUNTER StatExitMemIntercept;
+ STAMCOUNTER StatExitHalt;
+ STAMCOUNTER StatExitInterruptWindow;
+ STAMCOUNTER StatExitCpuId;
+ STAMCOUNTER StatExitMsr;
+ STAMCOUNTER StatExitException;
+ STAMCOUNTER StatExitExceptionBp;
+ STAMCOUNTER StatExitExceptionDb;
+ STAMCOUNTER StatExitExceptionGp;
+ STAMCOUNTER StatExitExceptionGpMesa;
+ STAMCOUNTER StatExitExceptionUd;
+ STAMCOUNTER StatExitExceptionUdHandled;
+ STAMCOUNTER StatExitUnrecoverable;
+ STAMCOUNTER StatGetMsgTimeout;
+ STAMCOUNTER StatStopCpuSuccess;
+ STAMCOUNTER StatStopCpuPending;
+ STAMCOUNTER StatStopCpuPendingAlerts;
+ STAMCOUNTER StatStopCpuPendingOdd;
+ STAMCOUNTER StatCancelChangedState;
+ STAMCOUNTER StatCancelAlertedThread;
+ STAMCOUNTER StatBreakOnCancel;
+ STAMCOUNTER StatBreakOnFFPre;
+ STAMCOUNTER StatBreakOnFFPost;
+ STAMCOUNTER StatBreakOnStatus;
+ STAMCOUNTER StatImportOnDemand;
+ STAMCOUNTER StatImportOnReturn;
+ STAMCOUNTER StatImportOnReturnSkipped;
+ STAMCOUNTER StatQueryCpuTick;
+ /** @} */
+
+#elif defined(RT_OS_DARWIN)
+ /** The vCPU handle associated with the EMT executing this vCPU. */
+ hv_vcpuid_t hVCpuId;
+
+ /** @name State shared with the VT-x code.
+ * @{ */
+ /** An additional error code used for some guru meditations. */
+ uint32_t u32HMError;
+ /** The last exit-to-ring-3 reason. */
+ int32_t rcLastExitToR3;
+ /** CPU-context changed flags (see HM_CHANGED_xxx). */
+ uint64_t fCtxChanged;
+
+ /** The guest VMCS information. */
+ VMXVMCSINFO VmcsInfo;
+
+ /** VT-x data. */
+ struct HMCPUVMX
+ {
+ /** @name Guest information.
+ * @{ */
+ /** Guest VMCS information shared with ring-3. */
+ VMXVMCSINFOSHARED VmcsInfo;
+ /** Nested-guest VMCS information shared with ring-3. */
+ VMXVMCSINFOSHARED VmcsInfoNstGst;
+ /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
+ * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs */
+ bool fSwitchedToNstGstVmcsCopyForRing3;
+ /** Whether the static guest VMCS controls have been merged with the
+ * nested-guest VMCS controls. */
+ bool fMergedNstGstCtls;
+ /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
+ bool fCopiedNstGstToShadowVmcs;
+ /** Whether flushing the TLB is required due to switching to/from the
+ * nested-guest. */
+ bool fSwitchedNstGstFlushTlb;
+ /** Alignment. */
+ bool afAlignment0[4];
+ /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
+ uint64_t u64GstMsrApicBase;
+ /** @} */
+
+ /** @name Error reporting and diagnostics.
+ * @{ */
+ /** VT-x error-reporting (mainly for ring-3 propagation). */
+ struct
+ {
+ RTCPUID idCurrentCpu;
+ RTCPUID idEnteredCpu;
+ RTHCPHYS HCPhysCurrentVmcs;
+ uint32_t u32VmcsRev;
+ uint32_t u32InstrError;
+ uint32_t u32ExitReason;
+ uint32_t u32GuestIntrState;
+ } LastError;
+ /** @} */
+ } vmx;
+
+ /** Event injection state. */
+ HMEVENT Event;
+
+ /** Current shadow paging mode for updating CR4.
+ * @todo move later (@bugref{9217}). */
+ PGMMODE enmShadowMode;
+ uint32_t u32TemporaryPadding;
+
+ /** The PAE PDPEs used with Nested Paging (only valid when
+ * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
+ X86PDPE aPdpes[4];
+ /** Pointer to the VMX statistics. */
+ PVMXSTATISTICS pVmxStats;
+
+ /** @name Statistics
+ * @{ */
+ STAMCOUNTER StatExitAll;
+ STAMCOUNTER StatBreakOnCancel;
+ STAMCOUNTER StatBreakOnFFPre;
+ STAMCOUNTER StatBreakOnFFPost;
+ STAMCOUNTER StatBreakOnStatus;
+ STAMCOUNTER StatImportOnDemand;
+ STAMCOUNTER StatImportOnReturn;
+ STAMCOUNTER StatImportOnReturnSkipped;
+ STAMCOUNTER StatQueryCpuTick;
+#ifdef VBOX_WITH_STATISTICS
+ STAMPROFILEADV StatProfGstStateImport;
+ STAMPROFILEADV StatProfGstStateExport;
+#endif
+ /** @} */
+
+ /** @} */
+#endif /* RT_OS_DARWIN */
+} NEMCPU;
+/** Pointer to NEM VMCPU instance data. */
+typedef NEMCPU *PNEMCPU;
+
+/** NEMCPU::u32Magic value. */
+#define NEMCPU_MAGIC UINT32_C(0x4d454e20)
+/** NEMCPU::u32Magic value after termination. */
+#define NEMCPU_MAGIC_DEAD UINT32_C(0xdead2222)
+
+
+#ifdef IN_RING0
+# ifdef RT_OS_WINDOWS
+/**
+ * Windows: Hypercall input/output page info.
+ */
+typedef struct NEMR0HYPERCALLDATA
+{
+ /** Host physical address of the hypercall input/output page. */
+ RTHCPHYS HCPhysPage;
+ /** Pointer to the hypercall input/output page. */
+ uint8_t *pbPage;
+ /** Handle to the memory object of the hypercall input/output page. */
+ RTR0MEMOBJ hMemObj;
+} NEMR0HYPERCALLDATA;
+/** Pointer to a Windows hypercall input/output page info. */
+typedef NEMR0HYPERCALLDATA *PNEMR0HYPERCALLDATA;
+# endif /* RT_OS_WINDOWS */
+
+/**
+ * NEM GVMCPU instance data.
+ */
+typedef struct NEMR0PERVCPU
+{
+ uint32_t uDummy;
+} NEMR0PERVCPU;
+
+/**
+ * NEM GVM instance data.
+ */
+typedef struct NEMR0PERVM
+{
+ uint32_t uDummy;
+} NEMR0PERVM;
+
+#endif /* IN_RING0 */
+
+
+#ifdef IN_RING3
+
+int nemR3DisableCpuIsaExt(PVM pVM, const char *pszIsaExt);
+
+int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced);
+int nemR3NativeInitAfterCPUM(PVM pVM);
+int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
+int nemR3NativeTerm(PVM pVM);
+void nemR3NativeReset(PVM pVM);
+void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi);
+VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu);
+bool nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu);
+bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
+
+/**
+ * Forced flag notification call from VMEmt.h.
+ *
+ * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the CPU
+ * to be notified.
+ * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_XXX.
+ */
+void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags);
+
+/**
+ * Called by NEMR3NotifyDebugEventChanged() to let the native backend take the final decision
+ * on whether to switch to the debug loop.
+ *
+ * @returns Final flag whether to switch to the debug loop.
+ * @param pVM The cross context VM structure.
+ * @param fUseDebugLoop The current value determined by NEMR3NotifyDebugEventChanged().
+ * @thread EMT(0)
+ */
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop);
+
+
+/**
+ * Called by NEMR3NotifyDebugEventChangedPerCpu() to let the native backend take the final decision
+ * on whether to switch to the debug loop.
+ *
+ * @returns Final flag whether to switch to the debug loop.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param fUseDebugLoop The current value determined by NEMR3NotifyDebugEventChangedPerCpu().
+ */
+DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop);
+
+#endif /* IN_RING3 */
+
+void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb);
+void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
+ RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM);
+int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
+ PGMPAGETYPE enmType, uint8_t *pu2State);
+
+
+#ifdef RT_OS_WINDOWS
+/** Maximum number of pages we can map in a single NEMR0MapPages call. */
+# define NEM_MAX_MAP_PAGES ((HOST_PAGE_SIZE - RT_UOFFSETOF(HV_INPUT_MAP_GPA_PAGES, PageList)) / sizeof(HV_SPA_PAGE_NUMBER))
+/** Maximum number of pages we can unmap in a single NEMR0UnmapPages call. */
+# define NEM_MAX_UNMAP_PAGES 4095
+
+#endif
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_NEMInternal_h */
+
diff --git a/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h b/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h
new file mode 100644
index 00000000..f6213b24
--- /dev/null
+++ b/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h
@@ -0,0 +1,576 @@
+/* $Id: PDMAsyncCompletionFileInternal.h $ */
+/** @file
+ * PDM Async I/O - Transport data asynchronously in R3 using EMT.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PDMAsyncCompletionFileInternal_h
+#define VMM_INCLUDED_SRC_include_PDMAsyncCompletionFileInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/tm.h>
+#include <iprt/types.h>
+#include <iprt/file.h>
+#include <iprt/thread.h>
+#include <iprt/semaphore.h>
+#include <iprt/critsect.h>
+#include <iprt/avl.h>
+#include <iprt/list.h>
+#include <iprt/spinlock.h>
+#include <iprt/memcache.h>
+
+#include "PDMAsyncCompletionInternal.h"
+
+/** @todo Revise the caching of tasks. We currently have four caches:
+ * Per endpoint task cache
+ * Per class cache
+ * Per endpoint task segment cache
+ * Per class task segment cache
+ *
+ * We could probably use the RT heap for this, or extend MMR3Heap (which uses
+ * RTMemAlloc instead of managing larger blocks) to make this global for the whole VM.
+ */
+
+/** Enable for delay injection from the debugger. */
+#if 0
+# define PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+#endif
+
+RT_C_DECLS_BEGIN
+
+/**
+ * A few forward declarations.
+ */
+typedef struct PDMASYNCCOMPLETIONENDPOINTFILE *PPDMASYNCCOMPLETIONENDPOINTFILE;
+/** Pointer to a request segment. */
+typedef struct PDMACTASKFILE *PPDMACTASKFILE;
+/** Pointer to the per-task data. */
+typedef struct PDMASYNCCOMPLETIONTASKFILE *PPDMASYNCCOMPLETIONTASKFILE;
+/** Pointer to a cache LRU list. */
+typedef struct PDMACFILELRULIST *PPDMACFILELRULIST;
+/** Pointer to the global cache structure. */
+typedef struct PDMACFILECACHEGLOBAL *PPDMACFILECACHEGLOBAL;
+/** Pointer to a task segment. */
+typedef struct PDMACFILETASKSEG *PPDMACFILETASKSEG;
+
+/**
+ * Blocking event types.
+ */
+typedef enum PDMACEPFILEAIOMGRBLOCKINGEVENT
+{
+ /** Invalid type. */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID = 0,
+ /** An endpoint is added to the manager. */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT,
+ /** An endpoint is removed from the manager. */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT,
+ /** An endpoint is about to be closed. */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT,
+ /** The manager is requested to terminate */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN,
+ /** The manager is requested to suspend */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND,
+ /** The manager is requested to resume */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME,
+ /** 32bit hack */
+ PDMACEPFILEAIOMGRBLOCKINGEVENT_32BIT_HACK = 0x7fffffff
+} PDMACEPFILEAIOMGRBLOCKINGEVENT;
+
+/**
+ * I/O manager type.
+ */
+typedef enum PDMACEPFILEMGRTYPE
+{
+ /** Simple aka failsafe */
+ PDMACEPFILEMGRTYPE_SIMPLE = 0,
+ /** Async I/O with host cache enabled. */
+ PDMACEPFILEMGRTYPE_ASYNC,
+ /** 32bit hack */
+ PDMACEPFILEMGRTYPE_32BIT_HACK = 0x7fffffff
+} PDMACEPFILEMGRTYPE;
+/** Pointer to an I/O manager type. */
+typedef PDMACEPFILEMGRTYPE *PPDMACEPFILEMGRTYPE;
+
+/**
+ * States of the I/O manager.
+ */
+typedef enum PDMACEPFILEMGRSTATE
+{
+ /** Invalid state. */
+ PDMACEPFILEMGRSTATE_INVALID = 0,
+ /** Normal running state accepting new requests
+ * and processing them.
+ */
+ PDMACEPFILEMGRSTATE_RUNNING,
+ /** Fault state - not accepting new tasks for endpoints but waiting for
+ * remaining ones to finish.
+ */
+ PDMACEPFILEMGRSTATE_FAULT,
+ /** Suspending state - not accepting new tasks for endpoints but waiting
+ * for remaining ones to finish.
+ */
+ PDMACEPFILEMGRSTATE_SUSPENDING,
+ /** Shutdown state - not accepting new tasks for endpoints but waiting
+ * for remaining ones to finish.
+ */
+ PDMACEPFILEMGRSTATE_SHUTDOWN,
+ /** The I/O manager waits for all active requests to complete and doesn't queue
+ * new ones because it needs to grow to handle more requests.
+ */
+ PDMACEPFILEMGRSTATE_GROWING,
+ /** 32bit hack */
+ PDMACEPFILEMGRSTATE_32BIT_HACK = 0x7fffffff
+} PDMACEPFILEMGRSTATE;
+
+/**
+ * State of an async I/O manager.
+ */
+typedef struct PDMACEPFILEMGR
+{
+ /** Next Aio manager in the list. */
+ R3PTRTYPE(struct PDMACEPFILEMGR *) pNext;
+ /** Previous Aio manager in the list. */
+ R3PTRTYPE(struct PDMACEPFILEMGR *) pPrev;
+ /** Manager type */
+ PDMACEPFILEMGRTYPE enmMgrType;
+ /** Current state of the manager. */
+ PDMACEPFILEMGRSTATE enmState;
+ /** Event semaphore the manager sleeps on when waiting for new requests. */
+ RTSEMEVENT EventSem;
+ /** Flag whether the thread waits in the event semaphore. */
+ volatile bool fWaitingEventSem;
+ /** Thread data */
+ RTTHREAD Thread;
+ /** The async I/O context for this manager. */
+ RTFILEAIOCTX hAioCtx;
+ /** Flag whether the I/O manager was woken up. */
+ volatile bool fWokenUp;
+ /** List of endpoints assigned to this manager. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINTFILE) pEndpointsHead;
+ /** Number of endpoints assigned to the manager. */
+ unsigned cEndpoints;
+ /** Number of requests active currently. */
+ unsigned cRequestsActive;
+ /** Number of maximum requests active. */
+ uint32_t cRequestsActiveMax;
+ /** Pointer to an array of free async I/O request handles. */
+ RTFILEAIOREQ *pahReqsFree;
+ /** Index of the next free entry in the cache. */
+ uint32_t iFreeEntry;
+ /** Size of the array. */
+ unsigned cReqEntries;
+ /** Memory cache for file range locks. */
+ RTMEMCACHE hMemCacheRangeLocks;
+ /** Number of milliseconds to wait until the bandwidth is refreshed for at least
+ * one endpoint and it is possible to process more requests. */
+ RTMSINTERVAL msBwLimitExpired;
+ /** Critical section protecting the blocking event handling. */
+ RTCRITSECT CritSectBlockingEvent;
+ /** Event semaphore for blocking external events.
+ * The caller waits on it until the async I/O manager
+ * finished processing the event. */
+ RTSEMEVENT EventSemBlock;
+ /** Flag whether a blocking event is pending and needs
+ * processing by the I/O manager. */
+ volatile bool fBlockingEventPending;
+ /** Blocking event type */
+ volatile PDMACEPFILEAIOMGRBLOCKINGEVENT enmBlockingEvent;
+ /** Event type data */
+ union
+ {
+ /** Add endpoint event. */
+ struct
+ {
+ /** The endpoint to be added */
+ volatile PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
+ } AddEndpoint;
+ /** Remove endpoint event. */
+ struct
+ {
+ /** The endpoint to be removed */
+ volatile PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
+ } RemoveEndpoint;
+ /** Close endpoint event. */
+ struct
+ {
+ /** The endpoint to be closed */
+ volatile PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
+ } CloseEndpoint;
+ } BlockingEventData;
+} PDMACEPFILEMGR;
+/** Pointer to an async I/O manager state. */
+typedef PDMACEPFILEMGR *PPDMACEPFILEMGR;
+/** Pointer to an async I/O manager state pointer. */
+typedef PPDMACEPFILEMGR *PPPDMACEPFILEMGR;
+
+/**
+ * A file access range lock.
+ */
+typedef struct PDMACFILERANGELOCK
+{
+ /** AVL node in the locked range tree of the endpoint. */
+ AVLRFOFFNODECORE Core;
+ /** How many tasks have locked this range. */
+ uint32_t cRefs;
+ /** Flag whether this is a read or write lock. */
+ bool fReadLock;
+ /** List of tasks which are waiting that the range gets unlocked. */
+ PPDMACTASKFILE pWaitingTasksHead;
+ /** List of tasks which are waiting that the range gets unlocked. */
+ PPDMACTASKFILE pWaitingTasksTail;
+} PDMACFILERANGELOCK, *PPDMACFILERANGELOCK;
+
+/**
+ * Backend type for the endpoint.
+ */
+typedef enum PDMACFILEEPBACKEND
+{
+ /** Non buffered. */
+ PDMACFILEEPBACKEND_NON_BUFFERED = 0,
+ /** Buffered (i.e. host cache enabled). */
+ PDMACFILEEPBACKEND_BUFFERED,
+ /** 32bit hack */
+ PDMACFILEEPBACKEND_32BIT_HACK = 0x7fffffff
+} PDMACFILEEPBACKEND;
+/** Pointer to a backend type. */
+typedef PDMACFILEEPBACKEND *PPDMACFILEEPBACKEND;
+
+/**
+ * Global data for the file endpoint class.
+ */
+typedef struct PDMASYNCCOMPLETIONEPCLASSFILE
+{
+ /** Common data. */
+ PDMASYNCCOMPLETIONEPCLASS Core;
+ /** Override I/O manager type - set to SIMPLE after failure. */
+ PDMACEPFILEMGRTYPE enmMgrTypeOverride;
+ /** Default backend type for the endpoint. */
+ PDMACFILEEPBACKEND enmEpBackendDefault;
+ RTCRITSECT CritSect;
+ /** Pointer to the head of the async I/O managers. */
+ R3PTRTYPE(PPDMACEPFILEMGR) pAioMgrHead;
+ /** Number of async I/O managers currently running. */
+ unsigned cAioMgrs;
+ /** Maximum number of segments to cache per endpoint */
+ unsigned cTasksCacheMax;
+ /** Maximum number of simultaneous outstanding requests. */
+ uint32_t cReqsOutstandingMax;
+ /** Bitmask for checking the alignment of a buffer. */
+ RTR3UINTPTR uBitmaskAlignment;
+ /** Flag whether the out of resources warning was printed already. */
+ bool fOutOfResourcesWarningPrinted;
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ /** Timer for delayed request completion. */
+ TMTIMERHANDLE hTimer;
+ /** Milliseconds until the next delay expires. */
+ volatile uint64_t cMilliesNext;
+#endif
+} PDMASYNCCOMPLETIONEPCLASSFILE;
+/** Pointer to the endpoint class data. */
+typedef PDMASYNCCOMPLETIONEPCLASSFILE *PPDMASYNCCOMPLETIONEPCLASSFILE;
+
+typedef enum PDMACEPFILEBLOCKINGEVENT
+{
+ /** The invalid event type */
+ PDMACEPFILEBLOCKINGEVENT_INVALID = 0,
+ /** A task is about to be canceled */
+ PDMACEPFILEBLOCKINGEVENT_CANCEL,
+ /** Usual 32bit hack */
+ PDMACEPFILEBLOCKINGEVENT_32BIT_HACK = 0x7fffffff
+} PDMACEPFILEBLOCKINGEVENT;
+
+/**
+ * States of the endpoint.
+ */
+typedef enum PDMASYNCCOMPLETIONENDPOINTFILESTATE
+{
+ /** Invalid state. */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_INVALID = 0,
+ /** Normal running state accepting new requests
+ * and processing them.
+ */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
+ /** The endpoint is about to be closed - not accepting new tasks for endpoints but waiting for
+ * remaining ones to finish.
+ */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING,
+ /** Removing from current I/O manager state - not processing new tasks for endpoints but waiting
+ * for remaining ones to finish.
+ */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING,
+ /** The current endpoint will be migrated to another I/O manager. */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_MIGRATING,
+ /** 32bit hack */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE_32BIT_HACK = 0x7fffffff
+} PDMASYNCCOMPLETIONENDPOINTFILESTATE;
+
+typedef enum PDMACFILEREQTYPEDELAY
+{
+ PDMACFILEREQTYPEDELAY_ANY = 0,
+ PDMACFILEREQTYPEDELAY_READ,
+ PDMACFILEREQTYPEDELAY_WRITE,
+ PDMACFILEREQTYPEDELAY_FLUSH,
+ PDMACFILEREQTYPEDELAY_32BIT_HACK = 0x7fffffff
+} PDMACFILEREQTYPEDELAY;
+
+/**
+ * Data for the file endpoint.
+ */
+typedef struct PDMASYNCCOMPLETIONENDPOINTFILE
+{
+ /** Common data. */
+ PDMASYNCCOMPLETIONENDPOINT Core;
+ /** Current state of the endpoint. */
+ PDMASYNCCOMPLETIONENDPOINTFILESTATE enmState;
+ /** The backend to use for this endpoint. */
+ PDMACFILEEPBACKEND enmBackendType;
+ /** The async I/O manager this endpoint is assigned to. */
+ R3PTRTYPE(volatile PPDMACEPFILEMGR) pAioMgr;
+ /** Flags for opening the file. */
+ unsigned fFlags;
+ /** File handle. */
+ RTFILE hFile;
+ /** Real size of the file. Only updated if data is appended. */
+ volatile uint64_t cbFile;
+ /** List of new tasks. */
+ R3PTRTYPE(volatile PPDMACTASKFILE) pTasksNewHead;
+
+ /** Head of the small cache for allocated task segments for exclusive
+ * use by this endpoint. */
+ R3PTRTYPE(volatile PPDMACTASKFILE) pTasksFreeHead;
+ /** Tail of the small cache for allocated task segments for exclusive
+ * use by this endpoint. */
+ R3PTRTYPE(volatile PPDMACTASKFILE) pTasksFreeTail;
+ /** Number of elements in the cache. */
+ volatile uint32_t cTasksCached;
+
+ /** The currently active flush request, or NULL if none is pending. */
+ PPDMACTASKFILE pFlushReq;
+
+#ifdef VBOX_WITH_STATISTICS
+ /** Time spent in a read. */
+ STAMPROFILEADV StatRead;
+ /** Time spent in a write. */
+ STAMPROFILEADV StatWrite;
+#endif
+
+ /** Event semaphore for blocking external events.
+ * The caller waits on it until the async I/O manager
+ * finished processing the event. */
+ RTSEMEVENT EventSemBlock;
+ /** Flag whether caching is enabled for this file. */
+ bool fCaching;
+ /** Flag whether the file was opened readonly. */
+ bool fReadonly;
+ /** Flag whether the host supports the async flush API. */
+ bool fAsyncFlushSupported;
+#ifdef VBOX_WITH_DEBUGGER
+ /** Status code to inject for the next complete read. */
+ volatile int rcReqRead;
+ /** Status code to inject for the next complete write. */
+ volatile int rcReqWrite;
+#endif
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ /** Request delay. */
+ volatile uint32_t msDelay;
+ /** Request delay jitter. */
+ volatile uint32_t msJitter;
+ /** Number of requests to delay. */
+ volatile uint32_t cReqsDelay;
+ /** Task type to delay. */
+ PDMACFILEREQTYPEDELAY enmTypeDelay;
+ /** Head of the list of currently delayed tasks. */
+ PPDMASYNCCOMPLETIONTASKFILE pDelayedHead;
+#endif
+ /** Flag whether a blocking event is pending and needs
+ * processing by the I/O manager. */
+ bool fBlockingEventPending;
+ /** Blocking event type */
+ PDMACEPFILEBLOCKINGEVENT enmBlockingEvent;
+
+ /** Additional data needed for the event types. */
+ union
+ {
+ /** Cancellation event. */
+ struct
+ {
+ /** The task to cancel. */
+ PPDMACTASKFILE pTask;
+ } Cancel;
+ } BlockingEventData;
+ /** Data for exclusive use by the assigned async I/O manager. */
+ struct
+ {
+ /** Pointer to the next endpoint assigned to the manager. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINTFILE) pEndpointNext;
+ /** Pointer to the previous endpoint assigned to the manager. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINTFILE) pEndpointPrev;
+ /** List of pending requests (not submitted due to usage restrictions
+ * or a pending flush request) */
+ R3PTRTYPE(PPDMACTASKFILE) pReqsPendingHead;
+ /** Tail of pending requests. */
+ R3PTRTYPE(PPDMACTASKFILE) pReqsPendingTail;
+ /** Tree of currently locked ranges.
+ * If a write task is enqueued the range gets locked and any other
+ * task writing to that range has to wait until the task completes.
+ */
+ PAVLRFOFFTREE pTreeRangesLocked;
+ /** Number of requests with a range lock active. */
+ unsigned cLockedReqsActive;
+ /** Number of requests currently being processed for this endpoint
+ * (excluding flush requests). */
+ unsigned cRequestsActive;
+ /** Number of requests processed during the last second. */
+ unsigned cReqsPerSec;
+ /** Current number of processed requests for the current update period. */
+ unsigned cReqsProcessed;
+ /** Flag whether the endpoint is about to be moved to another manager. */
+ bool fMoving;
+ /** Destination I/O manager. */
+ PPDMACEPFILEMGR pAioMgrDst;
+ } AioMgr;
+} PDMASYNCCOMPLETIONENDPOINTFILE;
+/** Pointer to the file endpoint data. */
+typedef PDMASYNCCOMPLETIONENDPOINTFILE *PPDMASYNCCOMPLETIONENDPOINTFILE;
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(PDMASYNCCOMPLETIONENDPOINTFILE, StatRead, sizeof(uint64_t));
+#endif
+
+/** Request completion function */
+typedef DECLCALLBACKTYPE(void, FNPDMACTASKCOMPLETED,(PPDMACTASKFILE pTask, void *pvUser, int rc));
+/** Pointer to a request completion function. */
+typedef FNPDMACTASKCOMPLETED *PFNPDMACTASKCOMPLETED;
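+
+/* A minimal sketch of a completion callback matching FNPDMACTASKCOMPLETED; the
+ * function name is hypothetical and pvUser is assumed to carry the owning
+ * PDMASYNCCOMPLETIONTASKFILE:
+ * @code
+ *  static DECLCALLBACK(void) myTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
+ *  {
+ *      PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;
+ *      if (RT_FAILURE(rc) && RT_SUCCESS(pTaskFile->rc))
+ *          pTaskFile->rc = rc;   // remember the first failure
+ *      RT_NOREF(pTask);
+ *  }
+ * @endcode
+ * The function pointer is stored in PDMACTASKFILE::pfnCompleted together with pvUser. */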
+
+/**
+ * Transfer type.
+ */
+typedef enum PDMACTASKFILETRANSFER
+{
+ /** Invalid. */
+ PDMACTASKFILETRANSFER_INVALID = 0,
+ /** Read transfer. */
+ PDMACTASKFILETRANSFER_READ,
+ /** Write transfer. */
+ PDMACTASKFILETRANSFER_WRITE,
+ /** Flush transfer. */
+ PDMACTASKFILETRANSFER_FLUSH
+} PDMACTASKFILETRANSFER;
+
+/**
+ * Data of a request.
+ */
+typedef struct PDMACTASKFILE
+{
+ /** Pointer to the range lock we are waiting for */
+ PPDMACFILERANGELOCK pRangeLock;
+ /** Next task in the list. (Depending on the state) */
+ struct PDMACTASKFILE *pNext;
+ /** Endpoint */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
+ /** Transfer type. */
+ PDMACTASKFILETRANSFER enmTransferType;
+ /** Start offset */
+ RTFOFF Off;
+ /** Amount of data transferred so far. */
+ size_t cbTransfered;
+ /** Data segment. */
+ RTSGSEG DataSeg;
+ /** When non-zero the segment uses a bounce buffer because the provided buffer
+ * doesn't meet host requirements. */
+ size_t cbBounceBuffer;
+ /** Pointer to the used bounce buffer if any. */
+ void *pvBounceBuffer;
+ /** Start offset in the bounce buffer to copy from. */
+ uint32_t offBounceBuffer;
+ /** Flag whether this is a prefetch request. */
+ bool fPrefetch;
+ /** Already prepared native I/O request.
+ * Used if the request is already prepared but
+ * was not queued because the host does not have
+ * enough resources. */
+ RTFILEAIOREQ hReq;
+ /** Completion function to call on completion. */
+ PFNPDMACTASKCOMPLETED pfnCompleted;
+ /** User data */
+ void *pvUser;
+} PDMACTASKFILE;
+
+/**
+ * Per task data.
+ */
+typedef struct PDMASYNCCOMPLETIONTASKFILE
+{
+ /** Common data. */
+ PDMASYNCCOMPLETIONTASK Core;
+ /** Number of bytes to transfer until this task completes. */
+ volatile int32_t cbTransferLeft;
+ /** Flag whether the task completed. */
+ volatile bool fCompleted;
+ /** Return code. */
+ volatile int rc;
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
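+ /** Next task in the list of delayed tasks (see
+ * PDMASYNCCOMPLETIONENDPOINTFILE::pDelayedHead). */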
+ volatile PPDMASYNCCOMPLETIONTASKFILE pDelayedNext;
+ /** Timestamp when the delay expires. */
+ uint64_t tsDelayEnd;
+#endif
+} PDMASYNCCOMPLETIONTASKFILE;
+
+DECLCALLBACK(int) pdmacFileAioMgrFailsafe(RTTHREAD hThreadSelf, void *pvUser);
+DECLCALLBACK(int) pdmacFileAioMgrNormal(RTTHREAD hThreadSelf, void *pvUser);
+
+int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr);
+void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr);
+
+int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr, PDMACEPFILEMGRTYPE enmMgrType);
+
+int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+
+PPDMACTASKFILE pdmacFileEpGetNewTasks(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+PPDMACTASKFILE pdmacFileTaskAlloc(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+void pdmacFileTaskFree(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
+ PPDMACTASKFILE pTask);
+
+int pdmacFileEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask);
+
+int pdmacFileCacheInit(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile, PCFGMNODE pCfgNode);
+void pdmacFileCacheDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile);
+int pdmacFileEpCacheInit(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile);
+void pdmacFileEpCacheDestroy(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+
+int pdmacFileEpCacheRead(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
+ RTFOFF off, PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbRead);
+int pdmacFileEpCacheWrite(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMASYNCCOMPLETIONTASKFILE pTask,
+ RTFOFF off, PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbWrite);
+int pdmacFileEpCacheFlush(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_PDMAsyncCompletionFileInternal_h */
+
diff --git a/src/VBox/VMM/include/PDMAsyncCompletionInternal.h b/src/VBox/VMM/include/PDMAsyncCompletionInternal.h
new file mode 100644
index 00000000..2438897d
--- /dev/null
+++ b/src/VBox/VMM/include/PDMAsyncCompletionInternal.h
@@ -0,0 +1,291 @@
+/* $Id: PDMAsyncCompletionInternal.h $ */
+/** @file
+ * PDM - Pluggable Device Manager, Async I/O Completion internal header.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PDMAsyncCompletionInternal_h
+#define VMM_INCLUDED_SRC_include_PDMAsyncCompletionInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/critsect.h>
+#include <iprt/memcache.h>
+#include <iprt/sg.h>
+#include <VBox/types.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pdmasynccompletion.h>
+#include "PDMInternal.h"
+
+RT_C_DECLS_BEGIN
+
+
+/**
+ * PDM Async completion endpoint operations.
+ */
+typedef struct PDMASYNCCOMPLETIONEPCLASSOPS
+{
+ /** Version identifier. */
+ uint32_t u32Version;
+ /** Name of the endpoint class. */
+ const char *pszName;
+ /** Class type. */
+ PDMASYNCCOMPLETIONEPCLASSTYPE enmClassType;
+ /** Size of the global endpoint class data in bytes. */
+ size_t cbEndpointClassGlobal;
+ /** Size of an endpoint in bytes. */
+ size_t cbEndpoint;
+ /** Size of a task in bytes. */
+ size_t cbTask;
+
+ /**
+ * Initializes the global data for an endpoint class.
+ *
+ * @returns VBox status code.
+ * @param pClassGlobals Pointer to the uninitialized globals data.
+ * @param pCfgNode Node for querying configuration data.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnInitialize, (PPDMASYNCCOMPLETIONEPCLASS pClassGlobals, PCFGMNODE pCfgNode));
+
+ /**
+ * Frees all resources allocated during init.
+ *
+ * @returns VBox status code.
+ * @param pClassGlobals Pointer to the globals data.
+ */
+ DECLR3CALLBACKMEMBER(void, pfnTerminate, (PPDMASYNCCOMPLETIONEPCLASS pClassGlobals));
+
+ /**
+ * Initializes a given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint Pointer to the uninitialized endpoint.
+ * @param pszUri Pointer to the string containing the endpoint
+ * destination (filename, IP address, ...)
+ * @param fFlags Creation flags.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpInitialize, (PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+ const char *pszUri, uint32_t fFlags));
+
+ /**
+ * Closes an endpoint, finishing all tasks.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint Pointer to the endpoint to be closed.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpClose, (PPDMASYNCCOMPLETIONENDPOINT pEndpoint));
+
+ /**
+ * Initiates a read request from the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pTask Pointer to the task object associated with the request.
+ * @param pEndpoint Endpoint the request is for.
+ * @param off Where to start reading from.
+ * @param paSegments Scatter gather list to store the data in.
+ * @param cSegments Number of segments in the list.
+ * @param cbRead The overall number of bytes to read.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpRead, (PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbRead));
+
+ /**
+ * Initiates a write request to the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pTask Pointer to the task object associated with the request.
+ * @param pEndpoint Endpoint the request is for.
+ * @param off Where to start writing to.
+ * @param paSegments Scatter gather list of the data to write.
+ * @param cSegments Number of segments in the list.
+ * @param cbWrite The overall number of bytes to write.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpWrite, (PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
+ PCRTSGSEG paSegments, size_t cSegments,
+ size_t cbWrite));
+
+ /**
+ * Initiates a flush request on the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pTask Pointer to the task object associated with the request.
+ * @param pEndpoint Endpoint the request is for.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpFlush, (PPDMASYNCCOMPLETIONTASK pTask,
+ PPDMASYNCCOMPLETIONENDPOINT pEndpoint));
+
+ /**
+ * Queries the size of the endpoint. Optional.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint Endpoint the request is for.
+ * @param pcbSize Where to store the size of the endpoint.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpGetSize, (PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+ uint64_t *pcbSize));
+
+ /**
+ * Sets the size of the endpoint. Optional.
+ * This is a synchronous operation.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint Endpoint the request is for.
+ * @param cbSize New size for the endpoint.
+ */
+ DECLR3CALLBACKMEMBER(int, pfnEpSetSize, (PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+ uint64_t cbSize));
+
+ /** Initialization safety marker. */
+ uint32_t u32VersionEnd;
+} PDMASYNCCOMPLETIONEPCLASSOPS;
+/** Pointer to an async completion endpoint class operation table. */
+typedef PDMASYNCCOMPLETIONEPCLASSOPS *PPDMASYNCCOMPLETIONEPCLASSOPS;
+/** Const pointer to an async completion endpoint class operation table. */
+typedef const PDMASYNCCOMPLETIONEPCLASSOPS *PCPDMASYNCCOMPLETIONEPCLASSOPS;
+
+/** Version for the endpoint class operations structure. */
+#define PDMAC_EPCLASS_OPS_VERSION 0x00000001
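+
+/* A minimal sketch of how an endpoint class would populate this operations table;
+ * all "my*" / "MY*" names are hypothetical and the class type is just an example:
+ * @code
+ *  static const PDMASYNCCOMPLETIONEPCLASSOPS g_MyEpClassOps =
+ *  {
+ *      PDMAC_EPCLASS_OPS_VERSION,              // u32Version
+ *      "MyBackend",                            // pszName
+ *      PDMASYNCCOMPLETIONEPCLASSTYPE_FILE,     // enmClassType
+ *      sizeof(MYEPCLASSGLOBALS),               // cbEndpointClassGlobal
+ *      sizeof(MYENDPOINT),                     // cbEndpoint
+ *      sizeof(MYTASK),                         // cbTask
+ *      myEpClassInitialize,                    // pfnInitialize
+ *      myEpClassTerminate,                     // pfnTerminate
+ *      myEpInitialize,                         // pfnEpInitialize
+ *      myEpClose,                              // pfnEpClose
+ *      myEpRead,                               // pfnEpRead
+ *      myEpWrite,                              // pfnEpWrite
+ *      myEpFlush,                              // pfnEpFlush
+ *      myEpGetSize,                            // pfnEpGetSize
+ *      myEpSetSize,                            // pfnEpSetSize
+ *      PDMAC_EPCLASS_OPS_VERSION               // u32VersionEnd
+ *  };
+ * @endcode */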
+
+/** Pointer to a bandwidth control manager. */
+typedef struct PDMACBWMGR *PPDMACBWMGR;
+
+/**
+ * PDM Async completion endpoint class.
+ * Common data.
+ */
+typedef struct PDMASYNCCOMPLETIONEPCLASS
+{
+ /** Pointer to the VM. */
+ PVM pVM;
+ /** Critical section protecting the lists below. */
+ RTCRITSECT CritSect;
+ /** Number of endpoints in the list. */
+ volatile unsigned cEndpoints;
+ /** Head of endpoints with this class. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINT) pEndpointsHead;
+ /** Head of the bandwidth managers for this class. */
+ R3PTRTYPE(PPDMACBWMGR) pBwMgrsHead;
+ /** Pointer to the callback table. */
+ R3PTRTYPE(PCPDMASYNCCOMPLETIONEPCLASSOPS) pEndpointOps;
+ /** Task cache. */
+ RTMEMCACHE hMemCacheTasks;
+ /** Flag whether to gather advanced statistics about requests. */
+ bool fGatherAdvancedStatistics;
+} PDMASYNCCOMPLETIONEPCLASS;
+/** Pointer to the PDM async completion endpoint class data. */
+typedef PDMASYNCCOMPLETIONEPCLASS *PPDMASYNCCOMPLETIONEPCLASS;
+
+/**
+ * A PDM Async completion endpoint.
+ * Common data.
+ */
+typedef struct PDMASYNCCOMPLETIONENDPOINT
+{
+ /** Next endpoint in the list. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINT) pNext;
+ /** Previous endpoint in the list. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINT) pPrev;
+ /** Pointer to the class this endpoint belongs to. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONEPCLASS) pEpClass;
+ /** Template associated with this endpoint. */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ /** Statistics ID for endpoints having a similar URI (same filename for example)
+ * to avoid assertions. */
+ unsigned iStatId;
+ /** URI describing the endpoint */
+ char *pszUri;
+ /** Pointer to the assigned bandwidth manager. */
+ volatile PPDMACBWMGR pBwMgr;
+ /** Aligns the following statistics counters on an 8 byte boundary. */
+ uint32_t u32Alignment;
+ /** @name Request size statistics.
+ * @{ */
+ STAMCOUNTER StatReqSizeSmaller512;
+ STAMCOUNTER StatReqSize512To1K;
+ STAMCOUNTER StatReqSize1KTo2K;
+ STAMCOUNTER StatReqSize2KTo4K;
+ STAMCOUNTER StatReqSize4KTo8K;
+ STAMCOUNTER StatReqSize8KTo16K;
+ STAMCOUNTER StatReqSize16KTo32K;
+ STAMCOUNTER StatReqSize32KTo64K;
+ STAMCOUNTER StatReqSize64KTo128K;
+ STAMCOUNTER StatReqSize128KTo256K;
+ STAMCOUNTER StatReqSize256KTo512K;
+ STAMCOUNTER StatReqSizeOver512K;
+ STAMCOUNTER StatReqsUnaligned512;
+ STAMCOUNTER StatReqsUnaligned4K;
+ STAMCOUNTER StatReqsUnaligned8K;
+ /** @} */
+ /** @name Request completion time statistics.
+ * @{ */
+ STAMCOUNTER StatTaskRunTimesNs[10];
+ STAMCOUNTER StatTaskRunTimesUs[10];
+ STAMCOUNTER StatTaskRunTimesMs[10];
+ STAMCOUNTER StatTaskRunTimesSec[10];
+ STAMCOUNTER StatTaskRunOver100Sec;
+ STAMCOUNTER StatIoOpsPerSec;
+ STAMCOUNTER StatIoOpsStarted;
+ STAMCOUNTER StatIoOpsCompleted;
+ uint64_t tsIntervalStartMs;
+ uint64_t cIoOpsCompleted;
+ /** @} */
+} PDMASYNCCOMPLETIONENDPOINT;
+AssertCompileMemberAlignment(PDMASYNCCOMPLETIONENDPOINT, StatReqSizeSmaller512, sizeof(uint64_t));
+AssertCompileMemberAlignment(PDMASYNCCOMPLETIONENDPOINT, StatTaskRunTimesNs, sizeof(uint64_t));
+
+/**
+ * A PDM async completion task handle.
+ * Common data.
+ */
+typedef struct PDMASYNCCOMPLETIONTASK
+{
+ /** Next task in the list
+ * (for free and assigned tasks). */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONTASK) pNext;
+ /** Previous task in the list
+ * (for free and assigned tasks). */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONTASK) pPrev;
+ /** Endpoint this task is assigned to. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINT) pEndpoint;
+ /** Opaque user data for this task. */
+ void *pvUser;
+ /** Start timestamp. */
+ uint64_t tsNsStart;
+} PDMASYNCCOMPLETIONTASK;
+
+void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler);
+bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext);
+
+RT_C_DECLS_END
+
+extern const PDMASYNCCOMPLETIONEPCLASSOPS g_PDMAsyncCompletionEndpointClassFile;
+
+#endif /* !VMM_INCLUDED_SRC_include_PDMAsyncCompletionInternal_h */
+
diff --git a/src/VBox/VMM/include/PDMBlkCacheInternal.h b/src/VBox/VMM/include/PDMBlkCacheInternal.h
new file mode 100644
index 00000000..dc703d34
--- /dev/null
+++ b/src/VBox/VMM/include/PDMBlkCacheInternal.h
@@ -0,0 +1,344 @@
+/* $Id: PDMBlkCacheInternal.h $ */
+/** @file
+ * PDM Block Cache.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PDMBlkCacheInternal_h
+#define VMM_INCLUDED_SRC_include_PDMBlkCacheInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/tm.h>
+#include <VBox/vmm/pdmblkcache.h>
+#include <iprt/types.h>
+#include <iprt/file.h>
+#include <iprt/thread.h>
+#include <iprt/semaphore.h>
+#include <iprt/critsect.h>
+#include <iprt/avl.h>
+#include <iprt/list.h>
+#include <iprt/spinlock.h>
+#include <iprt/memcache.h>
+
+RT_C_DECLS_BEGIN
+
+/**
+ * A few forward declarations.
+ */
+/** Pointer to a cache LRU list. */
+typedef struct PDMBLKLRULIST *PPDMBLKLRULIST;
+/** Pointer to the global cache structure. */
+typedef struct PDMBLKCACHEGLOBAL *PPDMBLKCACHEGLOBAL;
+/** Pointer to a cache entry waiter structure. */
+typedef struct PDMBLKCACHEWAITER *PPDMBLKCACHEWAITER;
+
+/**
+ * A cache entry
+ */
+typedef struct PDMBLKCACHEENTRY
+{
+ /** The AVL entry data. */
+ AVLRU64NODECORE Core;
+ /** Pointer to the previous element. Used in one of the LRU lists.*/
+ struct PDMBLKCACHEENTRY *pPrev;
+ /** Pointer to the next element. Used in one of the LRU lists.*/
+ struct PDMBLKCACHEENTRY *pNext;
+ /** Pointer to the list the entry is in. */
+ PPDMBLKLRULIST pList;
+ /** Cache the entry belongs to. */
+ PPDMBLKCACHE pBlkCache;
+ /** Flags for this entry. Combinations of the PDMBLKCACHE_ENTRY_* \#defines. */
+ volatile uint32_t fFlags;
+ /** Reference counter. Prevents eviction of the entry if > 0. */
+ volatile uint32_t cRefs;
+ /** Size of the entry. */
+ uint32_t cbData;
+ /** Pointer to the memory containing the data. */
+ uint8_t *pbData;
+ /** Head of list of tasks waiting for this one to finish. */
+ PPDMBLKCACHEWAITER pWaitingHead;
+ /** Tail of list of tasks waiting for this one to finish. */
+ PPDMBLKCACHEWAITER pWaitingTail;
+ /** Node for dirty but not yet committed entries list per endpoint. */
+ RTLISTNODE NodeNotCommitted;
+} PDMBLKCACHEENTRY, *PPDMBLKCACHEENTRY;
+/** I/O is still in progress for this entry. This entry is not evictable. */
+#define PDMBLKCACHE_ENTRY_IO_IN_PROGRESS RT_BIT(0)
+/** Entry is locked and thus not evictable. */
+#define PDMBLKCACHE_ENTRY_LOCKED RT_BIT(1)
+/** Entry is dirty */
+#define PDMBLKCACHE_ENTRY_IS_DIRTY RT_BIT(2)
+/** Entry is not evictable. */
+#define PDMBLKCACHE_NOT_EVICTABLE (PDMBLKCACHE_ENTRY_LOCKED | PDMBLKCACHE_ENTRY_IO_IN_PROGRESS | PDMBLKCACHE_ENTRY_IS_DIRTY)
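+
+/* Illustrative check only: an entry is a candidate for eviction when none of the
+ * PDMBLKCACHE_NOT_EVICTABLE bits are set and nobody holds a reference, roughly:
+ *      bool fEvictable = !(pEntry->fFlags & PDMBLKCACHE_NOT_EVICTABLE)
+ *                     && pEntry->cRefs == 0;
+ */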
+
+/**
+ * LRU list data
+ */
+typedef struct PDMBLKLRULIST
+{
+ /** Head of the list. */
+ PPDMBLKCACHEENTRY pHead;
+ /** Tail of the list. */
+ PPDMBLKCACHEENTRY pTail;
+ /** Number of bytes cached in the list. */
+ uint32_t cbCached;
+} PDMBLKLRULIST;
+
+/**
+ * Global cache data.
+ */
+typedef struct PDMBLKCACHEGLOBAL
+{
+ /** Pointer to the owning VM instance. */
+ PVM pVM;
+ /** Maximum size of the cache in bytes. */
+ uint32_t cbMax;
+ /** Current size of the cache in bytes. */
+ uint32_t cbCached;
+ /** Critical section protecting the cache. */
+ RTCRITSECT CritSect;
+ /** Maximum number of bytes in the recently used (in) list. */
+ uint32_t cbRecentlyUsedInMax;
+ /** Maximum number of bytes in the paged out list. */
+ uint32_t cbRecentlyUsedOutMax;
+ /** Recently used cache entries list */
+ PDMBLKLRULIST LruRecentlyUsedIn;
+ /** Scorecard cache entry list. */
+ PDMBLKLRULIST LruRecentlyUsedOut;
+ /** List of frequently used cache entries */
+ PDMBLKLRULIST LruFrequentlyUsed;
+ /** Commit timeout in milliseconds. */
+ uint32_t u32CommitTimeoutMs;
+ /** Number of dirty bytes needed to start a commit of the data to the disk. */
+ uint32_t cbCommitDirtyThreshold;
+ /** Current number of dirty bytes in the cache. */
+ volatile uint32_t cbDirty;
+ /** Flag whether the VM was suspended because of an I/O error. */
+ volatile bool fIoErrorVmSuspended;
+ /** Flag whether a commit is currently in progress. */
+ volatile bool fCommitInProgress;
+ /** Commit interval timer */
+ TMTIMERHANDLE hTimerCommit;
+ /** Number of endpoints using the cache. */
+ uint32_t cRefs;
+ /** List of all users of this cache. */
+ RTLISTANCHOR ListUsers;
+#ifdef VBOX_WITH_STATISTICS
+ /** Hit counter. */
+ STAMCOUNTER cHits;
+ /** Partial hit counter. */
+ STAMCOUNTER cPartialHits;
+ /** Miss counter. */
+ STAMCOUNTER cMisses;
+ /** Bytes read from cache. */
+ STAMCOUNTER StatRead;
+ /** Bytes written to the cache. */
+ STAMCOUNTER StatWritten;
+ /** Time spent looking up an entry in the AVL tree. */
+ STAMPROFILEADV StatTreeGet;
+ /** Time spent inserting an entry into the AVL tree. */
+ STAMPROFILEADV StatTreeInsert;
+ /** Time spent removing an entry from the AVL tree. */
+ STAMPROFILEADV StatTreeRemove;
+ /** Number of times a buffer could be reused. */
+ STAMCOUNTER StatBuffersReused;
+#endif
+} PDMBLKCACHEGLOBAL;
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(PDMBLKCACHEGLOBAL, cHits, sizeof(uint64_t));
+#endif
+
+/**
+ * Block cache type.
+ */
+typedef enum PDMBLKCACHETYPE
+{
+ /** Device consumer. */
+ PDMBLKCACHETYPE_DEV = 1,
+ /** Driver consumer. */
+ PDMBLKCACHETYPE_DRV,
+ /** Internal consumer. */
+ PDMBLKCACHETYPE_INTERNAL,
+ /** USB consumer. */
+ PDMBLKCACHETYPE_USB
+} PDMBLKCACHETYPE;
+
+/**
+ * Per user cache data.
+ */
+typedef struct PDMBLKCACHE
+{
+ /** Pointer to the id for the cache. */
+ char *pszId;
+ /** AVL tree managing cache entries. */
+ PAVLRU64TREE pTree;
+ /** R/W semaphore protecting cached entries for this endpoint. */
+ RTSEMRW SemRWEntries;
+ /** Pointer to the global cache data. */
+ PPDMBLKCACHEGLOBAL pCache;
+ /** Lock protecting the dirty entries list. */
+ RTSPINLOCK LockList;
+ /** List of dirty but not committed entries for this endpoint. */
+ RTLISTANCHOR ListDirtyNotCommitted;
+ /** Node of the cache user list. */
+ RTLISTNODE NodeCacheUser;
+ /** Block cache type. */
+ PDMBLKCACHETYPE enmType;
+ /** Type specific data. */
+ union
+ {
+ /** PDMBLKCACHETYPE_DEV */
+ struct
+ {
+ /** Pointer to the device instance owning the block cache. */
+ R3PTRTYPE(PPDMDEVINS) pDevIns;
+ /** Complete callback to the user. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERCOMPLETEDEV) pfnXferComplete;
+ /** I/O enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDEV) pfnXferEnqueue;
+ /** Discard enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDISCARDDEV) pfnXferEnqueueDiscard;
+ } Dev;
+ /** PDMBLKCACHETYPE_DRV */
+ struct
+ {
+ /** Pointer to the driver instance owning the block cache. */
+ R3PTRTYPE(PPDMDRVINS) pDrvIns;
+ /** Complete callback to the user. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERCOMPLETEDRV) pfnXferComplete;
+ /** I/O enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDRV) pfnXferEnqueue;
+ /** Discard enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDISCARDDRV) pfnXferEnqueueDiscard;
+ } Drv;
+ /** PDMBLKCACHETYPE_INTERNAL */
+ struct
+ {
+ /** Pointer to user data. */
+ R3PTRTYPE(void *) pvUser;
+ /** Complete callback to the user. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERCOMPLETEINT) pfnXferComplete;
+ /** I/O enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEINT) pfnXferEnqueue;
+ /** Discard enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDISCARDINT) pfnXferEnqueueDiscard;
+ } Int;
+ /** PDMASYNCCOMPLETIONTEMPLATETYPE_USB */
+ struct
+ {
+ /** Pointer to the usb instance owning the template. */
+ R3PTRTYPE(PPDMUSBINS) pUsbIns;
+ /** Complete callback to the user. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERCOMPLETEUSB) pfnXferComplete;
+ /** I/O enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEUSB) pfnXferEnqueue;
+ /** Discard enqueue callback. */
+ R3PTRTYPE(PFNPDMBLKCACHEXFERENQUEUEDISCARDUSB) pfnXferEnqueueDiscard;
+ } Usb;
+ } u;
+
+#ifdef VBOX_WITH_STATISTICS
+
+#if HC_ARCH_BITS == 64
+ uint32_t u32Alignment;
+#endif
+ /** Number of times a write was deferred because the cache entry was still in progress */
+ STAMCOUNTER StatWriteDeferred;
+    /** Number of appended cache entries. */
+ STAMCOUNTER StatAppendedWrites;
+#endif
+
+ /** Flag whether the cache was suspended. */
+ volatile bool fSuspended;
+ /** Number of outstanding I/O transfers. */
+ volatile uint32_t cIoXfersActive;
+
+} PDMBLKCACHE, *PPDMBLKCACHE;
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(PDMBLKCACHE, StatWriteDeferred, sizeof(uint64_t));
+#endif
+
+/**
+ * I/O task.
+ */
+typedef struct PDMBLKCACHEREQ
+{
+ /** Opaque user data returned on completion. */
+ void *pvUser;
+ /** Number of pending transfers (waiting for a cache entry and passed through). */
+ volatile uint32_t cXfersPending;
+ /** Status code. */
+ volatile int rcReq;
+} PDMBLKCACHEREQ, *PPDMBLKCACHEREQ;
+
+/**
+ * I/O transfer from the cache to the underlying medium.
+ */
+typedef struct PDMBLKCACHEIOXFER
+{
+ /** Flag whether the I/O xfer updates a cache entry or updates the request directly. */
+ bool fIoCache;
+ /** Type dependent data. */
+ union
+ {
+ /** Pointer to the entry the transfer updates. */
+ PPDMBLKCACHEENTRY pEntry;
+ /** Pointer to the request the transfer updates. */
+ PPDMBLKCACHEREQ pReq;
+ };
+ /** Transfer direction. */
+ PDMBLKCACHEXFERDIR enmXferDir;
+ /** Segment used if a cache entry is updated. */
+ RTSGSEG SgSeg;
+ /** S/G buffer. */
+ RTSGBUF SgBuf;
+} PDMBLKCACHEIOXFER;
+
+/**
+ * Cache waiter
+ */
+typedef struct PDMBLKCACHEWAITER
+{
+    /** Next waiter in the list. */
+ struct PDMBLKCACHEWAITER *pNext;
+ /** S/G buffer holding or receiving data. */
+ RTSGBUF SgBuf;
+ /** Offset into the cache entry to start the transfer. */
+ uint32_t offCacheEntry;
+ /** How many bytes to transfer. */
+ size_t cbTransfer;
+ /** Flag whether the task wants to read or write into the entry. */
+ bool fWrite;
+ /** Task the waiter is for. */
+ PPDMBLKCACHEREQ pReq;
+} PDMBLKCACHEWAITER;
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_PDMBlkCacheInternal_h */
+
diff --git a/src/VBox/VMM/include/PDMInline.h b/src/VBox/VMM/include/PDMInline.h
new file mode 100644
index 00000000..d0ba8984
--- /dev/null
+++ b/src/VBox/VMM/include/PDMInline.h
@@ -0,0 +1,52 @@
+/* $Id: PDMInline.h $ */
+/** @file
+ * PDM - Internal header file containing the inlined functions.
+ */
+
+/*
+ * Copyright (C) 2012-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PDMInline_h
+#define VMM_INCLUDED_SRC_include_PDMInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+/**
+ * Calculates the next IRQ tag.
+ *
+ * @returns IRQ tag.
+ * @param pVM The cross context VM structure.
+ * @param idTracer The ID of the source device.
+ */
+DECLINLINE(uint32_t) pdmCalcIrqTag(PVM pVM, uint32_t idTracer)
+{
+ uint32_t uTag = (pVM->pdm.s.uIrqTag + 1) & 0x3ff; /* {0..1023} */
+ if (!uTag)
+ uTag++;
+ pVM->pdm.s.uIrqTag = uTag |= (idTracer << 16);
+ return uTag;
+}
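+
+/*
+ * Worked example for pdmCalcIrqTag (illustrative numbers): with
+ * pVM->pdm.s.uIrqTag holding 0x003f03ff and idTracer == 0x2a, the sequence
+ * part (low 10 bits) wraps from 0x3ff to 0 and is bumped to 1 since a tag of
+ * zero means "no tag", while the tracer ID lands in bits 16 and up, so the
+ * function stores and returns 0x002a0001.  The read-modify-write of uIrqTag is
+ * not atomic, so callers are assumed to serialise (e.g. on the PDM lock).
+ */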
+
+#endif /* !VMM_INCLUDED_SRC_include_PDMInline_h */
+
diff --git a/src/VBox/VMM/include/PDMInternal.h b/src/VBox/VMM/include/PDMInternal.h
new file mode 100644
index 00000000..ed38676e
--- /dev/null
+++ b/src/VBox/VMM/include/PDMInternal.h
@@ -0,0 +1,1906 @@
+/* $Id: PDMInternal.h $ */
+/** @file
+ * PDM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PDMInternal_h
+#define VMM_INCLUDED_SRC_include_PDMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/types.h>
+#include <VBox/param.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vusb.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/pdmasynccompletion.h>
+#ifdef VBOX_WITH_NETSHAPER
+# include <VBox/vmm/pdmnetshaper.h>
+#endif
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+# include <VBox/vmm/pdmasynccompletion.h>
+#endif
+#include <VBox/vmm/pdmblkcache.h>
+#include <VBox/vmm/pdmcommon.h>
+#include <VBox/vmm/pdmtask.h>
+#include <VBox/sup.h>
+#include <VBox/msi.h>
+#include <iprt/assert.h>
+#include <iprt/critsect.h>
+#ifdef IN_RING3
+# include <iprt/thread.h>
+#endif
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_pdm_int Internal
+ * @ingroup grp_pdm
+ * @internal
+ * @{
+ */
+
+/** @def PDM_WITH_R3R0_CRIT_SECT
+ * Enables or disables ring-3/ring-0 critical sections. */
+#if defined(DOXYGEN_RUNNING) || 1
+# define PDM_WITH_R3R0_CRIT_SECT
+#endif
+
+/** @def PDMCRITSECT_STRICT
+ * Enables/disables PDM critsect strictness like deadlock detection. */
+#if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(PDMCRITSECT_STRICT)) \
+ || defined(DOXYGEN_RUNNING)
+# define PDMCRITSECT_STRICT
+#endif
+
+/** @def PDMCRITSECTRW_STRICT
+ * Enables/disables PDM read/write critsect strictness like deadlock
+ * detection. */
+#if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(PDMCRITSECTRW_STRICT)) \
+ || defined(DOXYGEN_RUNNING)
+# define PDMCRITSECTRW_STRICT
+#endif
+
+/** The maximum device instance (total) size, ring-0/raw-mode capable devices. */
+#define PDM_MAX_DEVICE_INSTANCE_SIZE _4M
+/** The maximum device instance (total) size, ring-3 only devices. */
+#define PDM_MAX_DEVICE_INSTANCE_SIZE_R3 _8M
+/** The maximum size for the DBGF tracing tracking structure allocated for each device. */
+#define PDM_MAX_DEVICE_DBGF_TRACING_TRACK HOST_PAGE_SIZE
+
+
+
+/*******************************************************************************
+* Structures and Typedefs *
+*******************************************************************************/
+
+/** Pointer to a PDM Device. */
+typedef struct PDMDEV *PPDMDEV;
+/** Pointer to a pointer to a PDM Device. */
+typedef PPDMDEV *PPPDMDEV;
+
+/** Pointer to a PDM USB Device. */
+typedef struct PDMUSB *PPDMUSB;
+/** Pointer to a pointer to a PDM USB Device. */
+typedef PPDMUSB *PPPDMUSB;
+
+/** Pointer to a PDM Driver. */
+typedef struct PDMDRV *PPDMDRV;
+/** Pointer to a pointer to a PDM Driver. */
+typedef PPDMDRV *PPPDMDRV;
+
+/** Pointer to a PDM Logical Unit. */
+typedef struct PDMLUN *PPDMLUN;
+/** Pointer to a pointer to a PDM Logical Unit. */
+typedef PPDMLUN *PPPDMLUN;
+
+/** Pointer to a DMAC instance. */
+typedef struct PDMDMAC *PPDMDMAC;
+/** Pointer to a RTC instance. */
+typedef struct PDMRTC *PPDMRTC;
+
+/** Pointer to an USB HUB registration record. */
+typedef struct PDMUSBHUB *PPDMUSBHUB;
+
+/**
+ * Supported asynchronous completion endpoint classes.
+ */
+typedef enum PDMASYNCCOMPLETIONEPCLASSTYPE
+{
+ /** File class. */
+ PDMASYNCCOMPLETIONEPCLASSTYPE_FILE = 0,
+ /** Number of supported classes. */
+ PDMASYNCCOMPLETIONEPCLASSTYPE_MAX,
+ /** 32bit hack. */
+ PDMASYNCCOMPLETIONEPCLASSTYPE_32BIT_HACK = 0x7fffffff
+} PDMASYNCCOMPLETIONEPCLASSTYPE;
+
+
+/**
+ * MMIO/IO port registration tracking structure for DBGF tracing.
+ */
+typedef struct PDMDEVINSDBGFTRACK
+{
+    /** Flag whether this tracks an I/O port or MMIO registration. */
+ bool fMmio;
+ /** Opaque user data passed during registration. */
+ void *pvUser;
+ /** Type dependent data. */
+ union
+ {
+ /** I/O port registration. */
+ struct
+ {
+ /** IOM I/O port handle. */
+ IOMIOPORTHANDLE hIoPorts;
+ /** Original OUT handler of the device. */
+ PFNIOMIOPORTNEWOUT pfnOut;
+ /** Original IN handler of the device. */
+ PFNIOMIOPORTNEWIN pfnIn;
+ /** Original string OUT handler of the device. */
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr;
+ /** Original string IN handler of the device. */
+ PFNIOMIOPORTNEWINSTRING pfnInStr;
+ } IoPort;
+ /** MMIO registration. */
+ struct
+ {
+ /** IOM MMIO region handle. */
+ IOMMMIOHANDLE hMmioRegion;
+ /** Original MMIO write handler of the device. */
+ PFNIOMMMIONEWWRITE pfnWrite;
+ /** Original MMIO read handler of the device. */
+ PFNIOMMMIONEWREAD pfnRead;
+ /** Original MMIO fill handler of the device. */
+ PFNIOMMMIONEWFILL pfnFill;
+ } Mmio;
+ } u;
+} PDMDEVINSDBGFTRACK;
+/** Pointer to a MMIO/IO port registration tracking structure. */
+typedef PDMDEVINSDBGFTRACK *PPDMDEVINSDBGFTRACK;
+/** Pointer to a const MMIO/IO port registration tracking structure. */
+typedef const PDMDEVINSDBGFTRACK *PCPDMDEVINSDBGFTRACK;
+
+
+/**
+ * Private device instance data, ring-3.
+ */
+typedef struct PDMDEVINSINTR3
+{
+ /** Pointer to the next instance.
+ * (Head is pointed to by PDM::pDevInstances.) */
+ R3PTRTYPE(PPDMDEVINS) pNextR3;
+ /** Pointer to the next per device instance.
+ * (Head is pointed to by PDMDEV::pInstances.) */
+ R3PTRTYPE(PPDMDEVINS) pPerDeviceNextR3;
+ /** Pointer to device structure. */
+ R3PTRTYPE(PPDMDEV) pDevR3;
+ /** Pointer to the list of logical units associated with the device. (FIFO) */
+ R3PTRTYPE(PPDMLUN) pLunsR3;
+ /** Pointer to the asynchronous notification callback set while in
+ * FNPDMDEVSUSPEND or FNPDMDEVPOWEROFF. */
+ R3PTRTYPE(PFNPDMDEVASYNCNOTIFY) pfnAsyncNotify;
+ /** Configuration handle to the instance node. */
+ R3PTRTYPE(PCFGMNODE) pCfgHandle;
+
+ /** R3 pointer to the VM this instance was created for. */
+ PVMR3 pVMR3;
+ /** DBGF trace event source handle if tracing is configured. */
+ DBGFTRACEREVTSRC hDbgfTraceEvtSrc;
+ /** Pointer to the base of the page containing the DBGF tracing tracking structures. */
+ PPDMDEVINSDBGFTRACK paDbgfTraceTrack;
+ /** Index of the next entry to use for tracking. */
+ uint32_t idxDbgfTraceTrackNext;
+ /** Maximum number of records fitting into the single page. */
+ uint32_t cDbgfTraceTrackMax;
+
+ /** Flags, see PDMDEVINSINT_FLAGS_XXX. */
+ uint32_t fIntFlags;
+ /** The last IRQ tag (for tracing it thru clearing). */
+ uint32_t uLastIrqTag;
+ /** The ring-0 device index (for making ring-0 calls). */
+ uint32_t idxR0Device;
+} PDMDEVINSINTR3;
+
+
+/**
+ * Private device instance data, ring-0.
+ */
+typedef struct PDMDEVINSINTR0
+{
+ /** Pointer to the VM this instance was created for. */
+ R0PTRTYPE(PGVM) pGVM;
+ /** Pointer to device structure. */
+ R0PTRTYPE(struct PDMDEVREGR0 const *) pRegR0;
+ /** The ring-0 module reference. */
+ RTR0PTR hMod;
+ /** Pointer to the ring-0 mapping of the ring-3 internal data (for uLastIrqTag). */
+ R0PTRTYPE(PDMDEVINSINTR3 *) pIntR3R0;
+ /** Pointer to the ring-0 mapping of the ring-3 instance (for idTracing). */
+ R0PTRTYPE(struct PDMDEVINSR3 *) pInsR3R0;
+ /** DBGF trace event source handle if tracing is configured. */
+ DBGFTRACEREVTSRC hDbgfTraceEvtSrc;
+ /** The device instance memory. */
+ RTR0MEMOBJ hMemObj;
+ /** The ring-3 mapping object. */
+ RTR0MEMOBJ hMapObj;
+ /** The page memory object for tracking MMIO and I/O port registrations when tracing is configured. */
+ RTR0MEMOBJ hDbgfTraceObj;
+ /** Pointer to the base of the page containing the DBGF tracing tracking structures. */
+ PPDMDEVINSDBGFTRACK paDbgfTraceTrack;
+ /** Index of the next entry to use for tracking. */
+ uint32_t idxDbgfTraceTrackNext;
+ /** Maximum number of records fitting into the single page. */
+ uint32_t cDbgfTraceTrackMax;
+ /** Index into PDMR0PERVM::apDevInstances. */
+ uint32_t idxR0Device;
+} PDMDEVINSINTR0;
+
+
+/**
+ * Private device instance data, raw-mode
+ */
+typedef struct PDMDEVINSINTRC
+{
+ /** Pointer to the VM this instance was created for. */
+ RGPTRTYPE(PVM) pVMRC;
+} PDMDEVINSINTRC;
+
+
+/**
+ * Private device instance data.
+ */
+typedef struct PDMDEVINSINT
+{
+ /** Pointer to the next instance (HC Ptr).
+ * (Head is pointed to by PDM::pDevInstances.) */
+ R3PTRTYPE(PPDMDEVINS) pNextR3;
+ /** Pointer to the next per device instance (HC Ptr).
+ * (Head is pointed to by PDMDEV::pInstances.) */
+ R3PTRTYPE(PPDMDEVINS) pPerDeviceNextR3;
+ /** Pointer to device structure - HC Ptr. */
+ R3PTRTYPE(PPDMDEV) pDevR3;
+ /** Pointer to the list of logical units associated with the device. (FIFO) */
+ R3PTRTYPE(PPDMLUN) pLunsR3;
+ /** Pointer to the asynchronous notification callback set while in
+ * FNPDMDEVSUSPEND or FNPDMDEVPOWEROFF. */
+ R3PTRTYPE(PFNPDMDEVASYNCNOTIFY) pfnAsyncNotify;
+ /** Configuration handle to the instance node. */
+ R3PTRTYPE(PCFGMNODE) pCfgHandle;
+
+ /** R3 pointer to the VM this instance was created for. */
+ PVMR3 pVMR3;
+
+ /** R0 pointer to the VM this instance was created for. */
+ R0PTRTYPE(PVMCC) pVMR0;
+
+ /** RC pointer to the VM this instance was created for. */
+ PVMRC pVMRC;
+
+ /** Flags, see PDMDEVINSINT_FLAGS_XXX. */
+ uint32_t fIntFlags;
+ /** The last IRQ tag (for tracing it thru clearing). */
+ uint32_t uLastIrqTag;
+} PDMDEVINSINT;
+
+/** @name PDMDEVINSINT::fIntFlags
+ * @{ */
+/** Used by pdmR3Load to mark device instances it found in the saved state. */
+#define PDMDEVINSINT_FLAGS_FOUND RT_BIT_32(0)
+/** Indicates that the device hasn't been powered on or resumed.
+ * This is used by PDMR3PowerOn, PDMR3Resume, PDMR3Suspend and PDMR3PowerOff
+ * to make sure each device gets exactly one notification for each of those
+ * events. PDMR3Resume and PDMR3PowerOn also makes use of it to bail out on
+ * a failure (already resumed/powered-on devices are suspended).
+ * PDMR3PowerOff resets this flag once before going through the devices to make sure
+ * every device gets the power off notification even if it was suspended before with
+ * PDMR3Suspend.
+ */
+#define PDMDEVINSINT_FLAGS_SUSPENDED RT_BIT_32(1)
+/** Indicates that the device has been reset already. Used by PDMR3Reset. */
+#define PDMDEVINSINT_FLAGS_RESET RT_BIT_32(2)
+#define PDMDEVINSINT_FLAGS_R0_ENABLED RT_BIT_32(3)
+#define PDMDEVINSINT_FLAGS_RC_ENABLED RT_BIT_32(4)
+/** Set if we've called the ring-0 constructor. */
+#define PDMDEVINSINT_FLAGS_R0_CONTRUCT RT_BIT_32(5)
+/** Set if using non-default critical section. */
+#define PDMDEVINSINT_FLAGS_CHANGED_CRITSECT RT_BIT_32(6)
+/** @} */
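+
+/*
+ * Minimal sketch (an assumption based on the flag descriptions above, not the
+ * actual PDMR3PowerOn code) of how PDMDEVINSINT_FLAGS_SUSPENDED is consumed
+ * when powering on: every instance carrying the flag gets exactly one
+ * notification and the flag is cleared so it is not notified twice.
+ *
+ * @code
+ *     for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ *         if (pDevIns->Internal.s.fIntFlags & PDMDEVINSINT_FLAGS_SUSPENDED)
+ *         {
+ *             pDevIns->Internal.s.fIntFlags &= ~PDMDEVINSINT_FLAGS_SUSPENDED;
+ *             // ... invoke the device's power-on / resume callback here ...
+ *         }
+ * @endcode
+ */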
+
+
+/**
+ * Private USB device instance data.
+ */
+typedef struct PDMUSBINSINT
+{
+ /** The UUID of this instance. */
+ RTUUID Uuid;
+ /** Pointer to the next instance.
+ * (Head is pointed to by PDM::pUsbInstances.) */
+ R3PTRTYPE(PPDMUSBINS) pNext;
+ /** Pointer to the next per USB device instance.
+ * (Head is pointed to by PDMUSB::pInstances.) */
+ R3PTRTYPE(PPDMUSBINS) pPerDeviceNext;
+
+ /** Pointer to device structure. */
+ R3PTRTYPE(PPDMUSB) pUsbDev;
+
+ /** Pointer to the VM this instance was created for. */
+ PVMR3 pVM;
+ /** Pointer to the list of logical units associated with the device. (FIFO) */
+ R3PTRTYPE(PPDMLUN) pLuns;
+ /** The per instance device configuration. */
+ R3PTRTYPE(PCFGMNODE) pCfg;
+ /** Same as pCfg if the configuration should be deleted when detaching the device. */
+ R3PTRTYPE(PCFGMNODE) pCfgDelete;
+ /** The global device configuration. */
+ R3PTRTYPE(PCFGMNODE) pCfgGlobal;
+
+ /** Pointer to the USB hub this device is attached to.
+ * This is NULL if the device isn't connected to any HUB. */
+ R3PTRTYPE(PPDMUSBHUB) pHub;
+ /** The port number that we're connected to. */
+ uint32_t iPort;
+ /** Indicates that the USB device hasn't been powered on or resumed.
+ * See PDMDEVINSINT_FLAGS_SUSPENDED.
+     * @note Runtime attached USB devices get a pfnHotPlugged callback rather than
+ * a pfnVMResume one. */
+ bool fVMSuspended;
+ /** Indicates that the USB device has been reset. */
+ bool fVMReset;
+ /** Pointer to the asynchronous notification callback set while in
+ * FNPDMDEVSUSPEND or FNPDMDEVPOWEROFF. */
+ R3PTRTYPE(PFNPDMUSBASYNCNOTIFY) pfnAsyncNotify;
+} PDMUSBINSINT;
+
+
+/**
+ * Private driver instance data.
+ */
+typedef struct PDMDRVINSINT
+{
+ /** Pointer to the driver instance above.
+     * This is NULL for the topmost driver. */
+ R3PTRTYPE(PPDMDRVINS) pUp;
+ /** Pointer to the driver instance below.
+ * This is NULL for the bottommost driver. */
+ R3PTRTYPE(PPDMDRVINS) pDown;
+ /** Pointer to the logical unit this driver chained on. */
+ R3PTRTYPE(PPDMLUN) pLun;
+ /** Pointer to driver structure from which this was instantiated. */
+ R3PTRTYPE(PPDMDRV) pDrv;
+ /** Pointer to the VM this instance was created for, ring-3 context. */
+ PVMR3 pVMR3;
+ /** Pointer to the VM this instance was created for, ring-0 context. */
+ R0PTRTYPE(PVMCC) pVMR0;
+ /** Pointer to the VM this instance was created for, raw-mode context. */
+ PVMRC pVMRC;
+ /** Flag indicating that the driver is being detached and destroyed.
+ * (Helps detect potential recursive detaching.) */
+ bool fDetaching;
+ /** Indicates that the driver hasn't been powered on or resumed.
+ * See PDMDEVINSINT_FLAGS_SUSPENDED. */
+ bool fVMSuspended;
+ /** Indicates that the driver has been reset already. */
+ bool fVMReset;
+    /** Set if allocated on the hyper heap, clear if on the ring-3 heap. */
+ bool fHyperHeap;
+ /** Pointer to the asynchronous notification callback set while in
+ * PDMUSBREG::pfnVMSuspend or PDMUSBREG::pfnVMPowerOff. */
+ R3PTRTYPE(PFNPDMDRVASYNCNOTIFY) pfnAsyncNotify;
+ /** Configuration handle to the instance node. */
+ R3PTRTYPE(PCFGMNODE) pCfgHandle;
+ /** Pointer to the ring-0 request handler function. */
+ PFNPDMDRVREQHANDLERR0 pfnReqHandlerR0;
+} PDMDRVINSINT;
+
+
+/**
+ * Private critical section data.
+ */
+typedef struct PDMCRITSECTINT
+{
+ /** The critical section core which is shared with IPRT.
+ * @note The semaphore is a SUPSEMEVENT. */
+ RTCRITSECT Core;
+ /** Pointer to the next critical section.
+ * This chain is used for device cleanup and the dbgf info item. */
+ R3PTRTYPE(struct PDMCRITSECTINT *) pNext;
+ /** Owner identifier.
+ * This is pDevIns if the owner is a device. Similarly for a driver or service.
+ * PDMR3CritSectInit() sets this to point to the critsect itself. */
+ RTR3PTR pvKey;
+ /** Set if this critical section is the automatically created default
+ * section of a device. */
+ bool fAutomaticDefaultCritsect;
+ /** Set if the critical section is used by a timer or similar.
+ * See PDMR3DevGetCritSect. */
+ bool fUsedByTimerOrSimilar;
+ /** Alignment padding. */
+ bool afPadding[2+4];
+ /** Support driver event semaphore that is scheduled to be signaled upon leaving
+ * the critical section. This is only for Ring-3 and Ring-0. */
+ SUPSEMEVENT volatile hEventToSignal;
+ /** The lock name. */
+ R3PTRTYPE(const char *) pszName;
+ /** The ring-3 pointer to this critical section, for leave queueing. */
+ R3PTRTYPE(PPDMCRITSECT) pSelfR3;
+ /** R0/RC lock contention. */
+ STAMCOUNTER StatContentionRZLock;
+ /** R0/RC lock contention: returning rcBusy or VERR_SEM_BUSY (try). */
+ STAMCOUNTER StatContentionRZLockBusy;
+ /** R0/RC lock contention: Profiling waiting time. */
+ STAMPROFILE StatContentionRZWait;
+ /** R0/RC unlock contention. */
+ STAMCOUNTER StatContentionRZUnlock;
+ /** R3 lock contention. */
+ STAMCOUNTER StatContentionR3;
+ /** R3 lock contention: Profiling waiting time. */
+ STAMPROFILE StatContentionR3Wait;
+ /** Profiling the time the section is locked. */
+ STAMPROFILEADV StatLocked;
+} PDMCRITSECTINT;
+AssertCompileMemberAlignment(PDMCRITSECTINT, StatContentionRZLock, 8);
+/** Pointer to private critical section data. */
+typedef PDMCRITSECTINT *PPDMCRITSECTINT;
+
+/** Special magic value set when we failed to abort entering in ring-0 due to a
+ * timeout, interruption or pending thread termination. */
+#define PDMCRITSECT_MAGIC_FAILED_ABORT UINT32_C(0x0bad0326)
+/** Special magic value set if we detected data/state corruption. */
+#define PDMCRITSECT_MAGIC_CORRUPTED UINT32_C(0x0bad2603)
+
+/** Indicates that the critical section is queued for unlock.
+ * PDMCritSectIsOwner and PDMCritSectIsOwned optimizations. */
+#define PDMCRITSECT_FLAGS_PENDING_UNLOCK RT_BIT_32(17)
+
+
+/**
+ * Private critical section data.
+ */
+typedef struct PDMCRITSECTRWINT
+{
+ /** The read/write critical section core which is shared with IPRT.
+ * @note The semaphores are SUPSEMEVENT and SUPSEMEVENTMULTI. */
+ RTCRITSECTRW Core;
+
+ /** Pointer to the next critical section.
+ * This chain is used for device cleanup and the dbgf info item. */
+ R3PTRTYPE(struct PDMCRITSECTRWINT *) pNext;
+ /** Self pointer. */
+ R3PTRTYPE(PPDMCRITSECTRW) pSelfR3;
+ /** Owner identifier.
+ * This is pDevIns if the owner is a device. Similarly for a driver or service.
+ * PDMR3CritSectRwInit() sets this to point to the critsect itself. */
+ RTR3PTR pvKey;
+ /** The lock name. */
+ R3PTRTYPE(const char *) pszName;
+
+ /** R0/RC write lock contention. */
+ STAMCOUNTER StatContentionRZEnterExcl;
+ /** R0/RC write unlock contention. */
+ STAMCOUNTER StatContentionRZLeaveExcl;
+ /** R0/RC read lock contention. */
+ STAMCOUNTER StatContentionRZEnterShared;
+ /** R0/RC read unlock contention. */
+ STAMCOUNTER StatContentionRZLeaveShared;
+ /** R0/RC writes. */
+ STAMCOUNTER StatRZEnterExcl;
+ /** R0/RC reads. */
+ STAMCOUNTER StatRZEnterShared;
+ /** R3 write lock contention. */
+ STAMCOUNTER StatContentionR3EnterExcl;
+ /** R3 write unlock contention. */
+ STAMCOUNTER StatContentionR3LeaveExcl;
+ /** R3 read lock contention. */
+ STAMCOUNTER StatContentionR3EnterShared;
+ /** R3 writes. */
+ STAMCOUNTER StatR3EnterExcl;
+ /** R3 reads. */
+ STAMCOUNTER StatR3EnterShared;
+ /** Profiling the time the section is write locked. */
+ STAMPROFILEADV StatWriteLocked;
+} PDMCRITSECTRWINT;
+AssertCompileMemberAlignment(PDMCRITSECTRWINT, StatContentionRZEnterExcl, 8);
+AssertCompileMemberAlignment(PDMCRITSECTRWINT, Core.u, 16);
+AssertCompileMemberAlignment(PDMCRITSECTRWINT, Core.u.s.u64State, 8);
+/** Pointer to private critical section data. */
+typedef PDMCRITSECTRWINT *PPDMCRITSECTRWINT;
+
+/** Special magic value set when we detect that the structure has become corrupted. */
+#define PDMCRITSECTRW_MAGIC_CORRUPT UINT32_C(0x0bad0620)
+
+
+/**
+ * The usual device/driver/internal/external stuff.
+ */
+typedef enum
+{
+ /** The usual invalid entry. */
+ PDMTHREADTYPE_INVALID = 0,
+ /** Device type. */
+ PDMTHREADTYPE_DEVICE,
+ /** USB Device type. */
+ PDMTHREADTYPE_USB,
+ /** Driver type. */
+ PDMTHREADTYPE_DRIVER,
+ /** Internal type. */
+ PDMTHREADTYPE_INTERNAL,
+ /** External type. */
+ PDMTHREADTYPE_EXTERNAL,
+ /** The usual 32-bit hack. */
+ PDMTHREADTYPE_32BIT_HACK = 0x7fffffff
+} PDMTHREADTYPE;
+
+
+/**
+ * The internal structure for the thread.
+ */
+typedef struct PDMTHREADINT
+{
+ /** The VM pointer. */
+ PVMR3 pVM;
+ /** The event semaphore the thread blocks on when not running. */
+ RTSEMEVENTMULTI BlockEvent;
+ /** The event semaphore the thread sleeps on while running. */
+ RTSEMEVENTMULTI SleepEvent;
+ /** Pointer to the next thread. */
+ R3PTRTYPE(struct PDMTHREAD *) pNext;
+ /** The thread type. */
+ PDMTHREADTYPE enmType;
+} PDMTHREADINT;
+
+
+
+/* Must be included after PDMDEVINSINT is defined. */
+#define PDMDEVINSINT_DECLARED
+#define PDMUSBINSINT_DECLARED
+#define PDMDRVINSINT_DECLARED
+#define PDMCRITSECTINT_DECLARED
+#define PDMCRITSECTRWINT_DECLARED
+#define PDMTHREADINT_DECLARED
+#ifdef ___VBox_pdm_h
+# error "Invalid header PDM order. Include PDMInternal.h before VBox/vmm/pdm.h!"
+#endif
+RT_C_DECLS_END
+#include <VBox/vmm/pdm.h>
+RT_C_DECLS_BEGIN
+
+/**
+ * PDM Logical Unit.
+ *
+ * This is typically the representation of a physical port on a
+ * device, like for instance the PS/2 keyboard port on the
+ * keyboard controller device. The LUNs are chained on the
+ * device they belong to (PDMDEVINSINT::pLunsR3).
+ */
+typedef struct PDMLUN
+{
+ /** The LUN - The Logical Unit Number. */
+ RTUINT iLun;
+ /** Pointer to the next LUN. */
+ PPDMLUN pNext;
+ /** Pointer to the top driver in the driver chain. */
+ PPDMDRVINS pTop;
+ /** Pointer to the bottom driver in the driver chain. */
+ PPDMDRVINS pBottom;
+ /** Pointer to the device instance which the LUN belongs to.
+     * Either this is set or pUsbIns is set. Both are never set at the same time. */
+ PPDMDEVINS pDevIns;
+ /** Pointer to the USB device instance which the LUN belongs to. */
+ PPDMUSBINS pUsbIns;
+ /** Pointer to the device base interface. */
+ PPDMIBASE pBase;
+ /** Description of this LUN. */
+ const char *pszDesc;
+} PDMLUN;
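+
+/*
+ * Illustrative sketch (assumption; pLun stands in for any PPDMLUN with drivers
+ * attached): walking a LUN's driver chain from the topmost attached driver
+ * down to the bottom one using the pDown links kept in PDMDRVINSINT.
+ *
+ * @code
+ *     for (PPDMDRVINS pDrvIns = pLun->pTop; pDrvIns; pDrvIns = pDrvIns->Internal.s.pDown)
+ *     {
+ *         // pLun->pBottom is visited last; its Internal.s.pDown is NULL.
+ *     }
+ * @endcode
+ */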
+
+
+/**
+ * PDM Device, ring-3.
+ */
+typedef struct PDMDEV
+{
+ /** Pointer to the next device (R3 Ptr). */
+ R3PTRTYPE(PPDMDEV) pNext;
+ /** Device name length. (search optimization) */
+ uint32_t cchName;
+ /** Registration structure. */
+ R3PTRTYPE(const struct PDMDEVREGR3 *) pReg;
+ /** Number of instances. */
+ uint32_t cInstances;
+ /** Pointer to chain of instances (R3 Ptr). */
+ PPDMDEVINSR3 pInstances;
+ /** The search path for raw-mode context modules (';' as separator). */
+ char *pszRCSearchPath;
+ /** The search path for ring-0 context modules (';' as separator). */
+ char *pszR0SearchPath;
+} PDMDEV;
+
+
+#if 0
+/**
+ * PDM Device, ring-0.
+ */
+typedef struct PDMDEVR0
+{
+ /** Pointer to the next device. */
+ R0PTRTYPE(PPDMDEVR0) pNext;
+ /** Device name length. (search optimization) */
+ uint32_t cchName;
+ /** Registration structure. */
+ R3PTRTYPE(const struct PDMDEVREGR0 *) pReg;
+ /** Number of instances. */
+ uint32_t cInstances;
+ /** Pointer to chain of instances. */
+ PPDMDEVINSR0 pInstances;
+} PDMDEVR0;
+#endif
+
+
+/**
+ * PDM USB Device.
+ */
+typedef struct PDMUSB
+{
+ /** Pointer to the next device (R3 Ptr). */
+ R3PTRTYPE(PPDMUSB) pNext;
+ /** Device name length. (search optimization) */
+ RTUINT cchName;
+ /** Registration structure. */
+ R3PTRTYPE(const struct PDMUSBREG *) pReg;
+ /** Next instance number. */
+ uint32_t iNextInstance;
+ /** Pointer to chain of instances (R3 Ptr). */
+ R3PTRTYPE(PPDMUSBINS) pInstances;
+} PDMUSB;
+
+
+/**
+ * PDM Driver.
+ */
+typedef struct PDMDRV
+{
+ /** Pointer to the next device. */
+ PPDMDRV pNext;
+ /** Registration structure. */
+ const struct PDMDRVREG * pReg;
+ /** Current number of instances. */
+ uint32_t cInstances;
+ /** The next instance number. */
+ uint32_t iNextInstance;
+ /** The search path for raw-mode context modules (';' as separator). */
+ char *pszRCSearchPath;
+ /** The search path for ring-0 context modules (';' as separator). */
+ char *pszR0SearchPath;
+} PDMDRV;
+
+
+/**
+ * PDM IOMMU, shared ring-3.
+ */
+typedef struct PDMIOMMUR3
+{
+ /** IOMMU index. */
+ uint32_t idxIommu;
+ uint32_t uPadding0; /**< Alignment padding.*/
+
+ /** Pointer to the IOMMU device instance - R3. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** @copydoc PDMIOMMUREGR3::pfnMemAccess */
+ DECLR3CALLBACKMEMBER(int, pfnMemAccess,(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
+ uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContig));
+ /** @copydoc PDMIOMMUREGR3::pfnMemBulkAccess */
+ DECLR3CALLBACKMEMBER(int, pfnMemBulkAccess,(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
+ uint32_t fFlags, PRTGCPHYS paGCPhysSpa));
+ /** @copydoc PDMIOMMUREGR3::pfnMsiRemap */
+ DECLR3CALLBACKMEMBER(int, pfnMsiRemap,(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut));
+} PDMIOMMUR3;
+/** Pointer to a PDM IOMMU instance. */
+typedef PDMIOMMUR3 *PPDMIOMMUR3;
+/** Pointer to a const PDM IOMMU instance. */
+typedef const PDMIOMMUR3 *PCPDMIOMMUR3;
+
+
+/**
+ * PDM IOMMU, ring-0.
+ */
+typedef struct PDMIOMMUR0
+{
+ /** IOMMU index. */
+ uint32_t idxIommu;
+ uint32_t uPadding0; /**< Alignment padding.*/
+
+ /** Pointer to IOMMU device instance. */
+ PPDMDEVINSR0 pDevInsR0;
+ /** @copydoc PDMIOMMUREGR3::pfnMemAccess */
+ DECLR0CALLBACKMEMBER(int, pfnMemAccess,(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
+ uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContig));
+ /** @copydoc PDMIOMMUREGR3::pfnMemBulkAccess */
+ DECLR0CALLBACKMEMBER(int, pfnMemBulkAccess,(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
+ uint32_t fFlags, PRTGCPHYS paGCPhysSpa));
+ /** @copydoc PDMIOMMUREGR3::pfnMsiRemap */
+ DECLR0CALLBACKMEMBER(int, pfnMsiRemap,(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut));
+} PDMIOMMUR0;
+/** Pointer to a ring-0 IOMMU data. */
+typedef PDMIOMMUR0 *PPDMIOMMUR0;
+/** Pointer to a const ring-0 IOMMU data. */
+typedef const PDMIOMMUR0 *PCPDMIOMMUR0;
+
+/** Pointer to a PDM IOMMU for the current context. */
+#ifdef IN_RING3
+typedef PPDMIOMMUR3 PPDMIOMMU;
+#else
+typedef PPDMIOMMUR0 PPDMIOMMU;
+#endif
+
+
+/**
+ * PDM registered PIC device.
+ */
+typedef struct PDMPIC
+{
+ /** Pointer to the PIC device instance - R3. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** @copydoc PDMPICREG::pfnSetIrq */
+ DECLR3CALLBACKMEMBER(void, pfnSetIrqR3,(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMPICREG::pfnGetInterrupt */
+ DECLR3CALLBACKMEMBER(int, pfnGetInterruptR3,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+
+ /** Pointer to the PIC device instance - R0. */
+ PPDMDEVINSR0 pDevInsR0;
+ /** @copydoc PDMPICREG::pfnSetIrq */
+ DECLR0CALLBACKMEMBER(void, pfnSetIrqR0,(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMPICREG::pfnGetInterrupt */
+ DECLR0CALLBACKMEMBER(int, pfnGetInterruptR0,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+
+ /** Pointer to the PIC device instance - RC. */
+ PPDMDEVINSRC pDevInsRC;
+ /** @copydoc PDMPICREG::pfnSetIrq */
+ DECLRCCALLBACKMEMBER(void, pfnSetIrqRC,(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMPICREG::pfnGetInterrupt */
+ DECLRCCALLBACKMEMBER(int, pfnGetInterruptRC,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+ /** Alignment padding. */
+ RTRCPTR RCPtrPadding;
+} PDMPIC;
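+
+/*
+ * Dispatch sketch (illustrative): the R3/R0/RC callback and device pointer
+ * pairs above are typically selected with the CTX_SUFF() macro so the same
+ * source line works in every context.  Assumes the caller already owns the PDM
+ * lock, since interrupt delivery is serialised on it.
+ *
+ * @code
+ *     pVM->pdm.s.Pic.CTX_SUFF(pfnSetIrq)(pVM->pdm.s.Pic.CTX_SUFF(pDevIns), iIrq, iLevel, uTagSrc);
+ * @endcode
+ */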
+
+
+/**
+ * PDM registered APIC device.
+ */
+typedef struct PDMAPIC
+{
+ /** Pointer to the APIC device instance - R3 Ptr. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** Pointer to the APIC device instance - R0 Ptr. */
+ PPDMDEVINSR0 pDevInsR0;
+ /** Pointer to the APIC device instance - RC Ptr. */
+ PPDMDEVINSRC pDevInsRC;
+ uint8_t Alignment[4];
+} PDMAPIC;
+
+
+/**
+ * PDM registered I/O APIC device.
+ */
+typedef struct PDMIOAPIC
+{
+ /** Pointer to the I/O APIC device instance - R3 Ptr. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** @copydoc PDMIOAPICREG::pfnSetIrq */
+ DECLR3CALLBACKMEMBER(void, pfnSetIrqR3,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMIOAPICREG::pfnSendMsi */
+ DECLR3CALLBACKMEMBER(void, pfnSendMsiR3,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, PCMSIMSG pMsi, uint32_t uTagSrc));
+ /** @copydoc PDMIOAPICREG::pfnSetEoi */
+ DECLR3CALLBACKMEMBER(void, pfnSetEoiR3,(PPDMDEVINS pDevIns, uint8_t u8Vector));
+
+ /** Pointer to the I/O APIC device instance - R0. */
+ PPDMDEVINSR0 pDevInsR0;
+ /** @copydoc PDMIOAPICREG::pfnSetIrq */
+ DECLR0CALLBACKMEMBER(void, pfnSetIrqR0,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMIOAPICREG::pfnSendMsi */
+ DECLR0CALLBACKMEMBER(void, pfnSendMsiR0,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, PCMSIMSG pMsi, uint32_t uTagSrc));
+ /** @copydoc PDMIOAPICREG::pfnSetEoi */
+ DECLR0CALLBACKMEMBER(void, pfnSetEoiR0,(PPDMDEVINS pDevIns, uint8_t u8Vector));
+
+ /** Pointer to the I/O APIC device instance - RC Ptr. */
+ PPDMDEVINSRC pDevInsRC;
+ /** @copydoc PDMIOAPICREG::pfnSetIrq */
+ DECLRCCALLBACKMEMBER(void, pfnSetIrqRC,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, int iIrq, int iLevel, uint32_t uTagSrc));
+ /** @copydoc PDMIOAPICREG::pfnSendMsi */
+ DECLRCCALLBACKMEMBER(void, pfnSendMsiRC,(PPDMDEVINS pDevIns, PCIBDF uBusDevFn, PCMSIMSG pMsi, uint32_t uTagSrc));
+    /** @copydoc PDMIOAPICREG::pfnSetEoi */
+ DECLRCCALLBACKMEMBER(void, pfnSetEoiRC,(PPDMDEVINS pDevIns, uint8_t u8Vector));
+} PDMIOAPIC;
+/** Pointer to a PDM IOAPIC instance. */
+typedef PDMIOAPIC *PPDMIOAPIC;
+/** Pointer to a const PDM IOAPIC instance. */
+typedef PDMIOAPIC const *PCPDMIOAPIC;
+
+/** Maximum number of PCI busses for a VM. */
+#define PDM_PCI_BUSSES_MAX 8
+/** Maximum number of IOMMUs (at most one per PCI bus). */
+#define PDM_IOMMUS_MAX PDM_PCI_BUSSES_MAX
+
+
+#ifdef IN_RING3
+/**
+ * PDM registered firmware device.
+ */
+typedef struct PDMFW
+{
+ /** Pointer to the firmware device instance. */
+ PPDMDEVINSR3 pDevIns;
+ /** Copy of the registration structure. */
+ PDMFWREG Reg;
+} PDMFW;
+/** Pointer to a firmware instance. */
+typedef PDMFW *PPDMFW;
+#endif
+
+
+/**
+ * PDM PCI bus instance.
+ */
+typedef struct PDMPCIBUS
+{
+ /** PCI bus number. */
+ uint32_t iBus;
+ uint32_t uPadding0; /**< Alignment padding.*/
+
+ /** Pointer to PCI bus device instance. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** @copydoc PDMPCIBUSREGR3::pfnSetIrqR3 */
+ DECLR3CALLBACKMEMBER(void, pfnSetIrqR3,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel, uint32_t uTagSrc));
+
+ /** @copydoc PDMPCIBUSREGR3::pfnRegisterR3 */
+ DECLR3CALLBACKMEMBER(int, pfnRegister,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t fFlags,
+ uint8_t uPciDevNo, uint8_t uPciFunNo, const char *pszName));
+ /** @copydoc PDMPCIBUSREGR3::pfnRegisterMsiR3 */
+ DECLR3CALLBACKMEMBER(int, pfnRegisterMsi,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, PPDMMSIREG pMsiReg));
+ /** @copydoc PDMPCIBUSREGR3::pfnIORegionRegisterR3 */
+ DECLR3CALLBACKMEMBER(int, pfnIORegionRegister,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS cbRegion, PCIADDRESSSPACE enmType, uint32_t fFlags,
+ uint64_t hHandle, PFNPCIIOREGIONMAP pfnCallback));
+ /** @copydoc PDMPCIBUSREGR3::pfnInterceptConfigAccesses */
+ DECLR3CALLBACKMEMBER(void, pfnInterceptConfigAccesses,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
+ PFNPCICONFIGREAD pfnRead, PFNPCICONFIGWRITE pfnWrite));
+ /** @copydoc PDMPCIBUSREGR3::pfnConfigWrite */
+ DECLR3CALLBACKMEMBER(VBOXSTRICTRC, pfnConfigWrite,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
+ uint32_t uAddress, unsigned cb, uint32_t u32Value));
+ /** @copydoc PDMPCIBUSREGR3::pfnConfigRead */
+ DECLR3CALLBACKMEMBER(VBOXSTRICTRC, pfnConfigRead,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
+ uint32_t uAddress, unsigned cb, uint32_t *pu32Value));
+} PDMPCIBUS;
+/** Pointer to a PDM PCI Bus instance. */
+typedef PDMPCIBUS *PPDMPCIBUS;
+/** Pointer to a const PDM PCI Bus instance. */
+typedef const PDMPCIBUS *PCPDMPCIBUS;
+
+
+/**
+ * Ring-0 PDM PCI bus instance data.
+ */
+typedef struct PDMPCIBUSR0
+{
+ /** PCI bus number. */
+ uint32_t iBus;
+ uint32_t uPadding0; /**< Alignment padding.*/
+ /** Pointer to PCI bus device instance. */
+ PPDMDEVINSR0 pDevInsR0;
+ /** @copydoc PDMPCIBUSREGR0::pfnSetIrq */
+ DECLR0CALLBACKMEMBER(void, pfnSetIrqR0,(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel, uint32_t uTagSrc));
+} PDMPCIBUSR0;
+/** Pointer to the ring-0 PCI bus data. */
+typedef PDMPCIBUSR0 *PPDMPCIBUSR0;
+/** Pointer to the const ring-0 PCI bus data. */
+typedef const PDMPCIBUSR0 *PCPDMPCIBUSR0;
+
+
+#ifdef IN_RING3
+/**
+ * PDM registered DMAC (DMA Controller) device.
+ */
+typedef struct PDMDMAC
+{
+ /** Pointer to the DMAC device instance. */
+ PPDMDEVINSR3 pDevIns;
+ /** Copy of the registration structure. */
+ PDMDMACREG Reg;
+} PDMDMAC;
+
+
+/**
+ * PDM registered RTC (Real Time Clock) device.
+ */
+typedef struct PDMRTC
+{
+ /** Pointer to the RTC device instance. */
+ PPDMDEVINSR3 pDevIns;
+ /** Copy of the registration structure. */
+ PDMRTCREG Reg;
+} PDMRTC;
+
+#endif /* IN_RING3 */
+
+/**
+ * Module type.
+ */
+typedef enum PDMMODTYPE
+{
+ /** Raw-mode (RC) context module. */
+ PDMMOD_TYPE_RC,
+ /** Ring-0 (host) context module. */
+ PDMMOD_TYPE_R0,
+ /** Ring-3 (host) context module. */
+ PDMMOD_TYPE_R3
+} PDMMODTYPE;
+
+
+/** The module name length including the terminator. */
+#define PDMMOD_NAME_LEN 32
+
+/**
+ * Loaded module instance.
+ */
+typedef struct PDMMOD
+{
+ /** Module name. This is used for referring to
+ * the module internally, sort of like a handle. */
+ char szName[PDMMOD_NAME_LEN];
+ /** Module type. */
+ PDMMODTYPE eType;
+ /** Loader module handle. Not used for R0 modules. */
+ RTLDRMOD hLdrMod;
+ /** Loaded address.
+ * This is the 'handle' for R0 modules. */
+ RTUINTPTR ImageBase;
+ /** Old loaded address.
+ * This is used during relocation of GC modules. Not used for R0 modules. */
+ RTUINTPTR OldImageBase;
+ /** Where the R3 HC bits are stored.
+ * This can be equal to ImageBase but doesn't have to. Not used for R0 modules. */
+ void *pvBits;
+
+ /** Pointer to next module. */
+ struct PDMMOD *pNext;
+ /** Module filename. */
+ char szFilename[1];
+} PDMMOD;
+/** Pointer to loaded module instance. */
+typedef PDMMOD *PPDMMOD;
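+
+/*
+ * Allocation sketch (illustrative only; pszName and pszFilename are stand-ins
+ * for whatever the loader code receives): PDMMOD ends in a 1-byte szFilename
+ * array, so instances are over-allocated to hold the full filename.
+ *
+ * @code
+ *     size_t const cchFilename = strlen(pszFilename);
+ *     PPDMMOD pModule = (PPDMMOD)RTMemAllocZ(RT_UOFFSETOF_DYN(PDMMOD, szFilename[cchFilename + 1]));
+ *     if (pModule)
+ *     {
+ *         RTStrCopy(pModule->szName, sizeof(pModule->szName), pszName);
+ *         memcpy(pModule->szFilename, pszFilename, cchFilename + 1);
+ *     }
+ * @endcode
+ */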
+
+
+
+/** Max number of items in a queue. */
+#define PDMQUEUE_MAX_ITEMS _16K
+/** Max item size. */
+#define PDMQUEUE_MAX_ITEM_SIZE _1M
+/** Max total queue item size for ring-0 capable queues. */
+#define PDMQUEUE_MAX_TOTAL_SIZE_R0 _8M
+/** Max total queue item size for ring-3 only queues. */
+#define PDMQUEUE_MAX_TOTAL_SIZE_R3 _32M
+
+/**
+ * Queue type.
+ */
+typedef enum PDMQUEUETYPE
+{
+ /** Device consumer. */
+ PDMQUEUETYPE_DEV = 1,
+ /** Driver consumer. */
+ PDMQUEUETYPE_DRV,
+ /** Internal consumer. */
+ PDMQUEUETYPE_INTERNAL,
+ /** External consumer. */
+ PDMQUEUETYPE_EXTERNAL
+} PDMQUEUETYPE;
+
+/**
+ * PDM Queue.
+ */
+typedef struct PDMQUEUE
+{
+ /** Magic value (PDMQUEUE_MAGIC). */
+ uint32_t u32Magic;
+ /** Item size (bytes). */
+ uint32_t cbItem;
+ /** Number of items in the queue. */
+ uint32_t cItems;
+    /** Offset of the queue items relative to the PDMQUEUE structure. */
+ uint32_t offItems;
+
+ /** Interval timer. Only used if cMilliesInterval is non-zero. */
+ TMTIMERHANDLE hTimer;
+ /** The interval between checking the queue for events.
+     * The interval timer above (hTimer) is used to do the waiting.
+ * If 0, the queue will use the VM_FF_PDM_QUEUE forced action. */
+ uint32_t cMilliesInterval;
+
+ /** This is VINF_SUCCESS if the queue is okay, error status if not. */
+ int32_t rcOkay;
+ uint32_t u32Padding;
+
+ /** Queue type. */
+ PDMQUEUETYPE enmType;
+ /** Type specific data. */
+ union
+ {
+ /** PDMQUEUETYPE_DEV */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMQUEUEDEV) pfnCallback;
+ /** Pointer to the device instance owning the queue. */
+ R3PTRTYPE(PPDMDEVINS) pDevIns;
+ } Dev;
+ /** PDMQUEUETYPE_DRV */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMQUEUEDRV) pfnCallback;
+ /** Pointer to the driver instance owning the queue. */
+ R3PTRTYPE(PPDMDRVINS) pDrvIns;
+ } Drv;
+ /** PDMQUEUETYPE_INTERNAL */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMQUEUEINT) pfnCallback;
+ } Int;
+ /** PDMQUEUETYPE_EXTERNAL */
+ struct
+ {
+ /** Pointer to consumer function. */
+ R3PTRTYPE(PFNPDMQUEUEEXT) pfnCallback;
+ /** Pointer to user argument. */
+ R3PTRTYPE(void *) pvUser;
+ } Ext;
+ struct
+ {
+ /** Generic callback pointer. */
+ RTR3PTR pfnCallback;
+ /** Generic owner pointer. */
+ RTR3PTR pvOwner;
+ } Gen;
+ } u;
+
+ /** Unique queue name. */
+ char szName[40];
+
+ /** LIFO of pending items (item index), UINT32_MAX if empty. */
+ uint32_t volatile iPending;
+
+ /** State: Pending items. */
+ uint32_t volatile cStatPending;
+ /** Stat: Times PDMQueueAlloc fails. */
+ STAMCOUNTER StatAllocFailures;
+ /** Stat: PDMQueueInsert calls. */
+ STAMCOUNTER StatInsert;
+ /** Stat: Queue flushes. */
+ STAMCOUNTER StatFlush;
+ /** Stat: Queue flushes with pending items left over. */
+ STAMCOUNTER StatFlushLeftovers;
+ /** State: Profiling the flushing. */
+ STAMPROFILE StatFlushPrf;
+ uint64_t au64Padding[3];
+
+    /** Allocation bitmap: Set bits mean free, clear bits mean allocated. */
+ RT_FLEXIBLE_ARRAY_EXTENSION
+ uint64_t bmAlloc[RT_FLEXIBLE_ARRAY];
+    /* The items follow after the end of the bitmap. */
+} PDMQUEUE;
+AssertCompileMemberAlignment(PDMQUEUE, bmAlloc, 64);
+/** Pointer to a PDM Queue. */
+typedef struct PDMQUEUE *PPDMQUEUE;
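+
+/*
+ * Layout sketch (illustrative; the helper name is made up): the queue items
+ * live after the bmAlloc bitmap, cbItem bytes apart, starting offItems bytes
+ * from the structure.  This merely shows how an item index, as used in the
+ * bmAlloc bitmap and the iPending LIFO, translates into an item pointer.
+ *
+ * @code
+ *     DECLINLINE(PPDMQUEUEITEMCORE) pdmQueueSketchItem(PPDMQUEUE pQueue, uint32_t idxItem)
+ *     {
+ *         Assert(idxItem < pQueue->cItems);
+ *         return (PPDMQUEUEITEMCORE)((uint8_t *)pQueue + pQueue->offItems + idxItem * pQueue->cbItem);
+ *     }
+ * @endcode
+ */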
+
+/** Magic value PDMQUEUE::u32Magic (Bud Powell). */
+#define PDMQUEUE_MAGIC UINT32_C(0x19240927)
+/** Magic value PDMQUEUE::u32Magic after destroy. */
+#define PDMQUEUE_MAGIC_DEAD UINT32_C(0x19660731)
+
+/** @name PDM::fQueueFlushing
+ * @{ */
+/** Used to make sure only one EMT will flush the queues.
+ * Set when an EMT is flushing queues, clear otherwise. */
+#define PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT 0
+/** Indicating there are queues with items pending.
+ * This is to make sure we don't miss inserts happening during flushing. The FF
+ * cannot be used for this since it has to be cleared immediately to prevent
+ * other EMTs from spinning. */
+#define PDM_QUEUE_FLUSH_FLAG_PENDING_BIT 1
+/** @} */
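+
+/*
+ * Sketch of the intended use of the two bits above (an assumption based on
+ * their descriptions, not the actual flush code): only the EMT that wins the
+ * ACTIVE bit flushes, and it keeps going for as long as racing inserts set the
+ * PENDING bit.
+ *
+ * @code
+ *     if (!ASMAtomicBitTestAndSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT))
+ *     {
+ *         do
+ *         {
+ *             ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT);
+ *             // ... flush the pending items of every queue here ...
+ *         } while (ASMAtomicReadU32(&pVM->pdm.s.fQueueFlushing) & RT_BIT_32(PDM_QUEUE_FLUSH_FLAG_PENDING_BIT));
+ *         ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT);
+ *     }
+ * @endcode
+ */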
+
+/**
+ * Ring-0 queue
+ *
+ * @author bird (2022-02-04)
+ */
+typedef struct PDMQUEUER0
+{
+ /** Pointer to the shared queue data. */
+ R0PTRTYPE(PPDMQUEUE) pQueue;
+ /** The memory allocation. */
+ RTR0MEMOBJ hMemObj;
+ /** The ring-3 mapping object. */
+ RTR0MEMOBJ hMapObj;
+ /** The owner pointer. This is NULL if not allocated. */
+ RTR0PTR pvOwner;
+ /** Queue item size. */
+ uint32_t cbItem;
+ /** Number of queue items. */
+ uint32_t cItems;
+    /** Offset of the queue items relative to the PDMQUEUE structure. */
+ uint32_t offItems;
+ uint32_t u32Reserved;
+} PDMQUEUER0;
+
+
+/** @name PDM task structures.
+ * @{ */
+
+/**
+ * An asynchronous user mode task.
+ */
+typedef struct PDMTASK
+{
+ /** Task owner type. */
+ PDMTASKTYPE volatile enmType;
+ /** Queue flags. */
+ uint32_t volatile fFlags;
+ /** User argument for the callback. */
+ R3PTRTYPE(void *) volatile pvUser;
+ /** The callback (will be cast according to enmType before callout). */
+ R3PTRTYPE(PFNRT) volatile pfnCallback;
+ /** The owner identifier. */
+ R3PTRTYPE(void *) volatile pvOwner;
+ /** Task name. */
+ R3PTRTYPE(const char *) pszName;
+ /** Number of times already triggered when PDMTaskTrigger was called. */
+ uint32_t volatile cAlreadyTrigged;
+ /** Number of runs. */
+ uint32_t cRuns;
+} PDMTASK;
+/** Pointer to a PDM task. */
+typedef PDMTASK *PPDMTASK;
+
+/**
+ * A task set.
+ *
+ * This is served by one task executor thread.
+ */
+typedef struct PDMTASKSET
+{
+ /** Magic value (PDMTASKSET_MAGIC). */
+ uint32_t u32Magic;
+ /** Set if this task set works for ring-0 and raw-mode. */
+ bool fRZEnabled;
+    /** Number of allocated tasks. */
+ uint8_t volatile cAllocated;
+ /** Base handle value for this set. */
+ uint16_t uHandleBase;
+ /** The task executor thread. */
+ R3PTRTYPE(RTTHREAD) hThread;
+ /** Event semaphore for waking up the thread when fRZEnabled is set. */
+ SUPSEMEVENT hEventR0;
+ /** Event semaphore for waking up the thread when fRZEnabled is clear. */
+ R3PTRTYPE(RTSEMEVENT) hEventR3;
+ /** The VM pointer. */
+ PVM pVM;
+ /** Padding so fTriggered is in its own cacheline. */
+ uint64_t au64Padding2[3];
+
+ /** Bitmask of triggered tasks. */
+ uint64_t volatile fTriggered;
+ /** Shutdown thread indicator. */
+ bool volatile fShutdown;
+ /** Padding. */
+ bool volatile afPadding3[3];
+ /** Task currently running, UINT32_MAX if idle. */
+ uint32_t volatile idxRunning;
+ /** Padding so fTriggered and fShutdown are in their own cacheline. */
+ uint64_t volatile au64Padding3[6];
+
+ /** The individual tasks. (Unallocated tasks have NULL pvOwner.) */
+ PDMTASK aTasks[64];
+} PDMTASKSET;
+AssertCompileMemberAlignment(PDMTASKSET, fTriggered, 64);
+AssertCompileMemberAlignment(PDMTASKSET, aTasks, 64);
+/** Magic value for PDMTASKSET::u32Magic (Quincy Delight Jones Jr.). */
+#define PDMTASKSET_MAGIC UINT32_C(0x19330314)
+/** Pointer to a task set. */
+typedef PDMTASKSET *PPDMTASKSET;
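+
+/*
+ * Trigger sketch (illustrative; hTask and pTaskSet are stand-ins): a task
+ * handle maps into its set by subtracting uHandleBase, and triggering boils
+ * down to setting the matching bit in fTriggered before waking the executor
+ * thread via hEventR0 or hEventR3, depending on fRZEnabled.
+ *
+ * @code
+ *     uint32_t const idxTask = (uint32_t)hTask - pTaskSet->uHandleBase;
+ *     AssertReturn(idxTask < RT_ELEMENTS(pTaskSet->aTasks), VERR_INVALID_HANDLE);
+ *     ASMAtomicOrU64(&pTaskSet->fTriggered, RT_BIT_64(idxTask));
+ *     // ... then signal the executor thread so it scans fTriggered ...
+ * @endcode
+ */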
+
+/** @} */
+
+
+/** @name PDM Network Shaper
+ * @{ */
+
+/**
+ * Bandwidth group.
+ */
+typedef struct PDMNSBWGROUP
+{
+ /** Critical section protecting all members below. */
+ PDMCRITSECT Lock;
+ /** List of filters in this group (PDMNSFILTER). */
+ RTLISTANCHORR3 FilterList;
+ /** Reference counter - How many filters are associated with this group. */
+ volatile uint32_t cRefs;
+ uint32_t uPadding1;
+ /** The group name. */
+ char szName[PDM_NET_SHAPER_MAX_NAME_LEN + 1];
+    /** Maximum number of bytes the filters are allowed to transfer per second. */
+ volatile uint64_t cbPerSecMax;
+ /** Number of bytes we are allowed to transfer in one burst. */
+ volatile uint32_t cbBucket;
+ /** Number of bytes we were allowed to transfer at the last update. */
+ volatile uint32_t cbTokensLast;
+ /** Timestamp of the last update */
+ volatile uint64_t tsUpdatedLast;
+ /** Number of times a filter was choked. */
+ volatile uint64_t cTotalChokings;
+ /** Pad the structure to a multiple of 64 bytes. */
+ uint64_t au64Padding[1];
+} PDMNSBWGROUP;
+AssertCompileSizeAlignment(PDMNSBWGROUP, 64);
+/** Pointer to a bandwidth group. */
+typedef PDMNSBWGROUP *PPDMNSBWGROUP;
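+
+/*
+ * Token-bucket sketch (illustrative; assumes cbPerSecMax is a bytes-per-second
+ * rate as the name suggests, and pBwGroup/cbXfer are stand-ins): how the
+ * fields above combine when a filter asks to transfer cbXfer bytes while the
+ * group's Lock is held.
+ *
+ * @code
+ *     uint64_t const tsNow    = RTTimeSystemNanoTS();
+ *     uint64_t const cbRefill = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / RT_NS_1SEC;
+ *     uint32_t const cbTokens = (uint32_t)RT_MIN(pBwGroup->cbTokensLast + cbRefill, pBwGroup->cbBucket);
+ *     bool     const fAllowed = cbTokens >= cbXfer;
+ *     pBwGroup->cbTokensLast  = fAllowed ? cbTokens - (uint32_t)cbXfer : cbTokens;
+ *     pBwGroup->tsUpdatedLast = tsNow;
+ *     // A denied filter is "choked" and released later once tokens have accumulated again.
+ * @endcode
+ */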
+
+/** @} */
+
+
+/**
+ * Queue device helper task operation.
+ */
+typedef enum PDMDEVHLPTASKOP
+{
+ /** The usual invalid 0 entry. */
+ PDMDEVHLPTASKOP_INVALID = 0,
+ /** IsaSetIrq, IoApicSetIrq */
+ PDMDEVHLPTASKOP_ISA_SET_IRQ,
+ /** PciSetIrq */
+ PDMDEVHLPTASKOP_PCI_SET_IRQ,
+    /** IoApicSetIrq */
+ PDMDEVHLPTASKOP_IOAPIC_SET_IRQ,
+ /** IoApicSendMsi */
+ PDMDEVHLPTASKOP_IOAPIC_SEND_MSI,
+    /** IoApicSetEoi */
+ PDMDEVHLPTASKOP_IOAPIC_SET_EOI,
+ /** The usual 32-bit hack. */
+ PDMDEVHLPTASKOP_32BIT_HACK = 0x7fffffff
+} PDMDEVHLPTASKOP;
+
+/**
+ * Queued Device Helper Task.
+ */
+typedef struct PDMDEVHLPTASK
+{
+ /** The queue item core (don't touch). */
+ PDMQUEUEITEMCORE Core;
+ /** Pointer to the device instance (R3 Ptr). */
+ PPDMDEVINSR3 pDevInsR3;
+ /** This operation to perform. */
+ PDMDEVHLPTASKOP enmOp;
+#if HC_ARCH_BITS == 64
+ uint32_t Alignment0;
+#endif
+ /** Parameters to the operation. */
+ union PDMDEVHLPTASKPARAMS
+ {
+ /**
+ * PDMDEVHLPTASKOP_ISA_SET_IRQ and PDMDEVHLPTASKOP_IOAPIC_SET_IRQ.
+ */
+ struct PDMDEVHLPTASKISASETIRQ
+ {
+ /** The bus:device:function of the device initiating the IRQ. Can be NIL_PCIBDF. */
+ PCIBDF uBusDevFn;
+ /** The IRQ */
+ int iIrq;
+ /** The new level. */
+ int iLevel;
+ /** The IRQ tag and source. */
+ uint32_t uTagSrc;
+ } IsaSetIrq, IoApicSetIrq;
+
+ /**
+ * PDMDEVHLPTASKOP_PCI_SET_IRQ
+ */
+ struct PDMDEVHLPTASKPCISETIRQ
+ {
+ /** Index of the PCI device (into PDMDEVINSR3::apPciDevs). */
+ uint32_t idxPciDev;
+ /** The IRQ */
+ int32_t iIrq;
+ /** The new level. */
+ int32_t iLevel;
+ /** The IRQ tag and source. */
+ uint32_t uTagSrc;
+ } PciSetIrq;
+
+ /**
+ * PDMDEVHLPTASKOP_IOAPIC_SEND_MSI
+ */
+ struct PDMDEVHLPTASKIOAPICSENDMSI
+ {
+ /** The bus:device:function of the device sending the MSI. */
+ PCIBDF uBusDevFn;
+ /** The MSI. */
+ MSIMSG Msi;
+ /** The IRQ tag and source. */
+ uint32_t uTagSrc;
+ } IoApicSendMsi;
+
+ /**
+ * PDMDEVHLPTASKOP_IOAPIC_SET_EOI
+ */
+ struct PDMDEVHLPTASKIOAPICSETEOI
+ {
+ /** The vector corresponding to the EOI. */
+ uint8_t uVector;
+ } IoApicSetEoi;
+
+ /** Expanding the structure. */
+ uint64_t au64[3];
+ } u;
+} PDMDEVHLPTASK;
+/** Pointer to a queued Device Helper Task. */
+typedef PDMDEVHLPTASK *PPDMDEVHLPTASK;
+/** Pointer to a const queued Device Helper Task. */
+typedef const PDMDEVHLPTASK *PCPDMDEVHLPTASK;
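+
+/*
+ * Fill-in sketch (illustrative; pTask, pDevIns, iIrq, iLevel and uTagSrc are
+ * stand-ins): what a queued ISA SetIrq request looks like once a task has been
+ * allocated from the hDevHlpQueue queue.  The allocation and insertion calls
+ * themselves are omitted here.
+ *
+ * @code
+ *     pTask->pDevInsR3             = pDevIns;
+ *     pTask->enmOp                 = PDMDEVHLPTASKOP_ISA_SET_IRQ;
+ *     pTask->u.IsaSetIrq.uBusDevFn = NIL_PCIBDF;
+ *     pTask->u.IsaSetIrq.iIrq      = iIrq;
+ *     pTask->u.IsaSetIrq.iLevel    = iLevel;
+ *     pTask->u.IsaSetIrq.uTagSrc   = uTagSrc;
+ * @endcode
+ */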
+
+
+
+/**
+ * An USB hub registration record.
+ */
+typedef struct PDMUSBHUB
+{
+    /** The USB versions this hub supports.
+ * Note that 1.1 hubs can take on 2.0 devices. */
+ uint32_t fVersions;
+ /** The number of ports on the hub. */
+ uint32_t cPorts;
+ /** The number of available ports (0..cPorts). */
+ uint32_t cAvailablePorts;
+ /** The driver instance of the hub. */
+ PPDMDRVINS pDrvIns;
+    /** Copy of the registration structure. */
+ PDMUSBHUBREG Reg;
+
+ /** Pointer to the next hub in the list. */
+ struct PDMUSBHUB *pNext;
+} PDMUSBHUB;
+
+/** Pointer to a const USB HUB registration record. */
+typedef const PDMUSBHUB *PCPDMUSBHUB;
+
+/** Pointer to a PDM Async I/O template. */
+typedef struct PDMASYNCCOMPLETIONTEMPLATE *PPDMASYNCCOMPLETIONTEMPLATE;
+
+/** Pointer to the main PDM Async completion endpoint class. */
+typedef struct PDMASYNCCOMPLETIONEPCLASS *PPDMASYNCCOMPLETIONEPCLASS;
+
+/** Pointer to the global block cache structure. */
+typedef struct PDMBLKCACHEGLOBAL *PPDMBLKCACHEGLOBAL;
+
+/**
+ * PDM VMCPU Instance data.
+ * Changes to this must be checked against the padding of the pdm union in VMCPU!
+ */
+typedef struct PDMCPU
+{
+    /** The number of entries in the apQueuedCritSectLeaves table that's currently
+ * in use. */
+ uint32_t cQueuedCritSectLeaves;
+ uint32_t uPadding0; /**< Alignment padding.*/
+ /** Critical sections queued in RC/R0 because of contention preventing leave to
+ * complete. (R3 Ptrs)
+ * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
+ R3PTRTYPE(PPDMCRITSECT) apQueuedCritSectLeaves[8];
+
+ /** The number of entries in the apQueuedCritSectRwExclLeaves table that's
+ * currently in use. */
+ uint32_t cQueuedCritSectRwExclLeaves;
+ uint32_t uPadding1; /**< Alignment padding.*/
+ /** Read/write critical sections queued in RC/R0 because of contention
+ * preventing exclusive leave to complete. (R3 Ptrs)
+ * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
+ R3PTRTYPE(PPDMCRITSECTRW) apQueuedCritSectRwExclLeaves[8];
+
+    /** The number of entries in the apQueuedCritSectRwShrdLeaves table that's
+ * currently in use. */
+ uint32_t cQueuedCritSectRwShrdLeaves;
+ uint32_t uPadding2; /**< Alignment padding.*/
+ /** Read/write critical sections queued in RC/R0 because of contention
+ * preventing shared leave to complete. (R3 Ptrs)
+ * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
+ R3PTRTYPE(PPDMCRITSECTRW) apQueuedCritSectRwShrdLeaves[8];
+} PDMCPU;
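+
+/*
+ * Queueing sketch (illustrative; pVCpu and pCritSect are stand-ins, and the
+ * real code lives in the critical section implementation): how a ring-0 leave
+ * that cannot complete is parked in the per-VCPU table above for ring-3 to
+ * finish.
+ *
+ * @code
+ *     uint32_t const i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
+ *     AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
+ *     pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
+ *     VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);  // Get back to ring-3 soon to do the actual leave.
+ * @endcode
+ */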
+
+
+/** Max number of ring-0 device instances. */
+#define PDM_MAX_RING0_DEVICE_INSTANCES 190
+
+
+/**
+ * PDM VM Instance data.
+ * Changes to this must be checked against the padding of the pdm union in VM!
+ */
+typedef struct PDM
+{
+ /** The PDM lock.
+ * This is used to protect everything that deals with interrupts, i.e.
+ * the PIC, APIC, IOAPIC and PCI devices plus some PDM functions. */
+ PDMCRITSECT CritSect;
+ /** The NOP critical section.
+ * This is a dummy critical section that will not do any thread
+ * serialization but instead let all threads enter immediately and
+ * concurrently. */
+ PDMCRITSECT NopCritSect;
+
+ /** The ring-0 capable task sets (max 128). */
+ PDMTASKSET aTaskSets[2];
+ /** Pointer to task sets (max 512). */
+ R3PTRTYPE(PPDMTASKSET) apTaskSets[8];
+
+ /** PCI Buses. */
+ PDMPCIBUS aPciBuses[PDM_PCI_BUSSES_MAX];
+ /** IOMMU devices. */
+ PDMIOMMUR3 aIommus[PDM_IOMMUS_MAX];
+    /** The registered PIC device. */
+ PDMPIC Pic;
+ /** The registered APIC device. */
+ PDMAPIC Apic;
+ /** The registered I/O APIC device. */
+ PDMIOAPIC IoApic;
+ /** The registered HPET device. */
+ PPDMDEVINSR3 pHpet;
+
+ /** List of registered devices. (FIFO) */
+ R3PTRTYPE(PPDMDEV) pDevs;
+    /** List of device instances. (FIFO) */
+ PPDMDEVINSR3 pDevInstances;
+ /** This runs parallel to PDMR0PERVM::apDevInstances and is used with
+ * physical access handlers to get the ring-3 device instance for passing down
+ * as uUser. */
+ PPDMDEVINSR3 apDevRing0Instances[PDM_MAX_RING0_DEVICE_INSTANCES];
+
+ /** List of registered USB devices. (FIFO) */
+ R3PTRTYPE(PPDMUSB) pUsbDevs;
+    /** List of USB device instances. (FIFO) */
+ R3PTRTYPE(PPDMUSBINS) pUsbInstances;
+ /** List of registered drivers. (FIFO) */
+ R3PTRTYPE(PPDMDRV) pDrvs;
+ /** The registered firmware device (can be NULL). */
+ R3PTRTYPE(PPDMFW) pFirmware;
+ /** The registered DMAC device. */
+ R3PTRTYPE(PPDMDMAC) pDmac;
+ /** The registered RTC device. */
+ R3PTRTYPE(PPDMRTC) pRtc;
+ /** The registered USB HUBs. (FIFO) */
+ R3PTRTYPE(PPDMUSBHUB) pUsbHubs;
+
+ /** @name Queues
+ * @{ */
+ /** Number of ring-0 capable queues in apQueues. */
+ uint32_t cRing0Queues;
+ uint32_t u32Padding1;
+ /** Array of ring-0 capable queues running in parallel to PDMR0PERVM::aQueues. */
+ R3PTRTYPE(PPDMQUEUE) apRing0Queues[16];
+
+ /** Number of ring-3 only queues.
+ * PDMUSERPERVM::ListCritSect protects this and the next two members. */
+ uint32_t cRing3Queues;
+ /** The allocation size of the ring-3 queue handle table. */
+ uint32_t cRing3QueuesAlloc;
+ /** Handle table for the ring-3 only queues. */
+ R3PTRTYPE(PPDMQUEUE *) papRing3Queues;
+
+ /** Queue in which devhlp tasks are queued for R3 execution. */
+ PDMQUEUEHANDLE hDevHlpQueue;
+ /** Bitmask controlling the queue flushing.
+ * See PDM_QUEUE_FLUSH_FLAG_ACTIVE and PDM_QUEUE_FLUSH_FLAG_PENDING. */
+ uint32_t volatile fQueueFlushing;
+ /** @} */
+
+ /** The current IRQ tag (tracing purposes). */
+ uint32_t volatile uIrqTag;
+
+ /** Pending reset flags (PDMVMRESET_F_XXX). */
+ uint32_t volatile fResetFlags;
+
+ /** Set by pdmR3LoadExec for use in assertions. */
+ bool fStateLoaded;
+ /** Alignment padding. */
+ bool afPadding1[3];
+
+ /** The tracing ID of the next device instance.
+ *
+     * @remarks We keep the device tracing ID separate from the rest as these are
+ * then more likely to end up with the same ID from one run to
+ * another, making analysis somewhat easier. Drivers and USB devices
+ * are more volatile and can be changed at runtime, thus these are much
+ * less likely to remain stable, so just heap them all together. */
+ uint32_t idTracingDev;
+ /** The tracing ID of the next driver instance, USB device instance or other
+ * PDM entity requiring an ID. */
+ uint32_t idTracingOther;
+
+ /** @name VMM device heap
+ * @{ */
+ /** The heap size. */
+ uint32_t cbVMMDevHeap;
+ /** Free space. */
+ uint32_t cbVMMDevHeapLeft;
+ /** Pointer to the heap base (MMIO2 ring-3 mapping). NULL if not registered. */
+ RTR3PTR pvVMMDevHeap;
+ /** Ring-3 mapping/unmapping notification callback for the user. */
+ PFNPDMVMMDEVHEAPNOTIFY pfnVMMDevHeapNotify;
+ /** The current mapping. NIL_RTGCPHYS if not mapped or registered. */
+ RTGCPHYS GCPhysVMMDevHeap;
+ /** @} */
+
+ /** @name Network Shaper
+ * @{ */
+ /** Thread that processes choked filter drivers after
+     * a PDM_NETSHAPER_MAX_LATENCY period has elapsed. */
+ PPDMTHREAD pNsUnchokeThread;
+ /** Semaphore that the TX thread waits on. */
+ RTSEMEVENT hNsUnchokeEvt;
+ /** Timer handle for waking up pNsUnchokeThread. */
+ TMTIMERHANDLE hNsUnchokeTimer;
+ /** Indicates whether the unchoke timer has been armed already or not. */
+ bool volatile fNsUnchokeTimerArmed;
+ /** Align aNsGroups on a cacheline. */
+ bool afPadding2[19+16];
+ /** Number of network shaper groups.
+ * @note Marked volatile to prevent re-reading after validation. */
+ uint32_t volatile cNsGroups;
+ /** The network shaper groups. */
+ PDMNSBWGROUP aNsGroups[PDM_NET_SHAPER_MAX_GROUPS];
+ /** Critical section protecting attaching, detaching and unchoking.
+     * This helps make sure pNsUnchokeThread can do unchoking w/o needing to lock the
+ * individual groups and cause unnecessary contention. */
+ RTCRITSECT NsLock;
+ /** @} */
+
+ /** Number of times a critical section leave request needed to be queued for ring-3 execution. */
+ STAMCOUNTER StatQueuedCritSectLeaves;
+ /** Number of times we've successfully aborted a wait in ring-0. */
+ STAMCOUNTER StatAbortedCritSectEnters;
+ /** Number of times we've got the critical section ownership while trying to
+ * abort a wait due to VERR_INTERRUPTED. */
+ STAMCOUNTER StatCritSectEntersWhileAborting;
+ STAMCOUNTER StatCritSectVerrTimeout;
+ STAMCOUNTER StatCritSectVerrInterrupted;
+ STAMCOUNTER StatCritSectNonInterruptibleWaits;
+
+ STAMCOUNTER StatCritSectRwExclVerrTimeout;
+ STAMCOUNTER StatCritSectRwExclVerrInterrupted;
+ STAMCOUNTER StatCritSectRwExclNonInterruptibleWaits;
+
+ STAMCOUNTER StatCritSectRwEnterSharedWhileAborting;
+ STAMCOUNTER StatCritSectRwSharedVerrTimeout;
+ STAMCOUNTER StatCritSectRwSharedVerrInterrupted;
+ STAMCOUNTER StatCritSectRwSharedNonInterruptibleWaits;
+} PDM;
+AssertCompileMemberAlignment(PDM, CritSect, 8);
+AssertCompileMemberAlignment(PDM, aTaskSets, 64);
+AssertCompileMemberAlignment(PDM, aNsGroups, 8);
+AssertCompileMemberAlignment(PDM, aNsGroups, 16);
+AssertCompileMemberAlignment(PDM, aNsGroups, 32);
+AssertCompileMemberAlignment(PDM, aNsGroups, 64);
+AssertCompileMemberAlignment(PDM, StatQueuedCritSectLeaves, 8);
+AssertCompileMemberAlignment(PDM, GCPhysVMMDevHeap, sizeof(RTGCPHYS));
+/** Pointer to PDM VM instance data. */
+typedef PDM *PPDM;
+
+
+/**
+ * PDM data kept in the ring-0 GVM.
+ */
+typedef struct PDMR0PERVM
+{
+ /** PCI Buses, ring-0 data. */
+ PDMPCIBUSR0 aPciBuses[PDM_PCI_BUSSES_MAX];
+ /** IOMMUs, ring-0 data. */
+ PDMIOMMUR0 aIommus[PDM_IOMMUS_MAX];
+ /** Number of valid ring-0 device instances (apDevInstances). */
+ uint32_t cDevInstances;
+ uint32_t u32Padding1;
+ /** Pointer to ring-0 device instances. */
+ R0PTRTYPE(struct PDMDEVINSR0 *) apDevInstances[PDM_MAX_RING0_DEVICE_INSTANCES];
+ /** Number of valid ring-0 queue instances (aQueues). */
+ uint32_t cQueues;
+ uint32_t u32Padding2;
+ /** Array of ring-0 queues. */
+ PDMQUEUER0 aQueues[16];
+} PDMR0PERVM;
+
+
+/**
+ * PDM data kept in the UVM.
+ */
+typedef struct PDMUSERPERVM
+{
+ /** @todo move more stuff over here. */
+
+ /** Lock protecting the lists below it and the queue list. */
+ RTCRITSECT ListCritSect;
+ /** Pointer to list of loaded modules. */
+ PPDMMOD pModules;
+ /** List of initialized critical sections. (LIFO) */
+ R3PTRTYPE(PPDMCRITSECTINT) pCritSects;
+ /** List of initialized read/write critical sections. (LIFO) */
+ R3PTRTYPE(PPDMCRITSECTRWINT) pRwCritSects;
+ /** Head of the PDM Thread list. (singly linked) */
+ R3PTRTYPE(PPDMTHREAD) pThreads;
+ /** Tail of the PDM Thread list. (singly linked) */
+ R3PTRTYPE(PPDMTHREAD) pThreadsTail;
+
+ /** @name PDM Async Completion
+ * @{ */
+ /** Pointer to the array of supported endpoint classes. */
+ PPDMASYNCCOMPLETIONEPCLASS apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_MAX];
+ /** Head of the templates. Singly linked, protected by ListCritSect. */
+ R3PTRTYPE(PPDMASYNCCOMPLETIONTEMPLATE) pAsyncCompletionTemplates;
+ /** @} */
+
+ /** Global block cache data. */
+ R3PTRTYPE(PPDMBLKCACHEGLOBAL) pBlkCacheGlobal;
+} PDMUSERPERVM;
+/** Pointer to the PDM data kept in the UVM. */
+typedef PDMUSERPERVM *PPDMUSERPERVM;
+
+
+
+/*******************************************************************************
+* Global Variables *
+*******************************************************************************/
+#ifdef IN_RING3
+extern const PDMDRVHLPR3 g_pdmR3DrvHlp;
+extern const PDMDEVHLPR3 g_pdmR3DevHlpTrusted;
+# ifdef VBOX_WITH_DBGF_TRACING
+extern const PDMDEVHLPR3 g_pdmR3DevHlpTracing;
+# endif
+extern const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted;
+extern const PDMPICHLP g_pdmR3DevPicHlp;
+extern const PDMIOAPICHLP g_pdmR3DevIoApicHlp;
+extern const PDMFWHLPR3 g_pdmR3DevFirmwareHlp;
+extern const PDMPCIHLPR3 g_pdmR3DevPciHlp;
+extern const PDMIOMMUHLPR3 g_pdmR3DevIommuHlp;
+extern const PDMDMACHLP g_pdmR3DevDmacHlp;
+extern const PDMRTCHLP g_pdmR3DevRtcHlp;
+extern const PDMHPETHLPR3 g_pdmR3DevHpetHlp;
+extern const PDMPCIRAWHLPR3 g_pdmR3DevPciRawHlp;
+#endif
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+/** @def PDMDEV_ASSERT_DEVINS
+ * Asserts the validity of the device instance.
+ */
+#ifdef VBOX_STRICT
+# define PDMDEV_ASSERT_DEVINS(pDevIns) \
+ do { \
+ AssertPtr(pDevIns); \
+ Assert(pDevIns->u32Version == PDM_DEVINS_VERSION); \
+ Assert(pDevIns->CTX_SUFF(pvInstanceDataFor) == (void *)&pDevIns->achInstanceData[0]); \
+ } while (0)
+#else
+# define PDMDEV_ASSERT_DEVINS(pDevIns) do { } while (0)
+#endif
+
+/** @def PDMDRV_ASSERT_DRVINS
+ * Asserts the validity of the driver instance.
+ */
+#ifdef VBOX_STRICT
+# define PDMDRV_ASSERT_DRVINS(pDrvIns) \
+ do { \
+ AssertPtr(pDrvIns); \
+ Assert(pDrvIns->u32Version == PDM_DRVINS_VERSION); \
+ Assert(pDrvIns->CTX_SUFF(pvInstanceData) == (void *)&pDrvIns->achInstanceData[0]); \
+ } while (0)
+#else
+# define PDMDRV_ASSERT_DRVINS(pDrvIns) do { } while (0)
+#endif
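+
+/* Usage sketch (illustrative only, not part of the original header): both
+ * assertion macros are intended as the first statement of a device/driver
+ * helper implementation, so a corrupt instance pointer is caught early in
+ * strict builds while the check compiles to nothing otherwise.  The helper
+ * name below is hypothetical.
+ *
+ *     static DECLCALLBACK(int) pdmR3DevHlp_ExampleHelper(PPDMDEVINS pDevIns)
+ *     {
+ *         PDMDEV_ASSERT_DEVINS(pDevIns);
+ *         // ... the actual helper work goes here ...
+ *         return VINF_SUCCESS;
+ *     }
+ */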
+
+
+/*******************************************************************************
+* Internal Functions *
+*******************************************************************************/
+#ifdef IN_RING3
+bool pdmR3IsValidName(const char *pszName);
+
+int pdmR3CritSectBothInitStatsAndInfo(PVM pVM);
+int pdmR3CritSectBothDeleteDevice(PVM pVM, PPDMDEVINS pDevIns);
+int pdmR3CritSectBothDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns);
+int pdmR3CritSectInitDevice( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va);
+int pdmR3CritSectInitDeviceAuto( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectInitDriver( PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectRwInitDevice( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va);
+int pdmR3CritSectRwInitDeviceAuto( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectRwInitDriver( PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+
+int pdmR3DevInit(PVM pVM);
+int pdmR3DevInitComplete(PVM pVM);
+PPDMDEV pdmR3DevLookup(PVM pVM, const char *pszName);
+int pdmR3DevFindLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMLUN *ppLun);
+DECLCALLBACK(bool) pdmR3DevHlpQueueConsumer(PVM pVM, PPDMQUEUEITEMCORE pItem);
+
+int pdmR3UsbLoadModules(PVM pVM);
+int pdmR3UsbInstantiateDevices(PVM pVM);
+PPDMUSB pdmR3UsbLookup(PVM pVM, const char *pszName);
+int pdmR3UsbRegisterHub(PVM pVM, PPDMDRVINS pDrvIns, uint32_t fVersions, uint32_t cPorts, PCPDMUSBHUBREG pUsbHubReg, PPCPDMUSBHUBHLP ppUsbHubHlp);
+int pdmR3UsbVMInitComplete(PVM pVM);
+
+int pdmR3DrvInit(PVM pVM);
+int pdmR3DrvInstantiate(PVM pVM, PCFGMNODE pNode, PPDMIBASE pBaseInterface, PPDMDRVINS pDrvAbove,
+ PPDMLUN pLun, PPDMIBASE *ppBaseInterface);
+int pdmR3DrvDetach(PPDMDRVINS pDrvIns, uint32_t fFlags);
+void pdmR3DrvDestroyChain(PPDMDRVINS pDrvIns, uint32_t fFlags);
+PPDMDRV pdmR3DrvLookup(PVM pVM, const char *pszName);
+
+int pdmR3LdrInitU(PUVM pUVM);
+void pdmR3LdrTermU(PUVM pUVM, bool fFinal);
+char *pdmR3FileR3(const char *pszFile, bool fShared);
+int pdmR3LoadR3U(PUVM pUVM, const char *pszFilename, const char *pszName);
+#endif /* IN_RING3 */
+
+void pdmQueueInit(PPDMQUEUE pQueue, uint32_t cbBitmap, uint32_t cbItem, uint32_t cItems,
+ const char *pszName, PDMQUEUETYPE enmType, RTR3PTR pfnCallback, RTR3PTR pvOwner);
+
+#ifdef IN_RING3
+int pdmR3TaskInit(PVM pVM);
+void pdmR3TaskTerm(PVM pVM);
+
+int pdmR3ThreadCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDEV pfnThread,
+ PFNPDMTHREADWAKEUPDEV pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName);
+int pdmR3ThreadCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADUSB pfnThread,
+ PFNPDMTHREADWAKEUPUSB pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName);
+int pdmR3ThreadCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMTHREAD ppThread, void *pvUser, PFNPDMTHREADDRV pfnThread,
+ PFNPDMTHREADWAKEUPDRV pfnWakeup, size_t cbStack, RTTHREADTYPE enmType, const char *pszName);
+int pdmR3ThreadDestroyDevice(PVM pVM, PPDMDEVINS pDevIns);
+int pdmR3ThreadDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns);
+int pdmR3ThreadDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns);
+void pdmR3ThreadDestroyAll(PVM pVM);
+int pdmR3ThreadResumeAll(PVM pVM);
+int pdmR3ThreadSuspendAll(PVM pVM);
+
+# ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+int pdmR3AsyncCompletionInit(PVM pVM);
+int pdmR3AsyncCompletionTerm(PVM pVM);
+void pdmR3AsyncCompletionResume(PVM pVM);
+int pdmR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEDEV pfnCompleted, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEUSB pfnCompleted, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDevIns);
+int pdmR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns);
+int pdmR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns);
+# endif
+
+# ifdef VBOX_WITH_NETSHAPER
+int pdmR3NetShaperInit(PVM pVM);
+void pdmR3NetShaperTerm(PVM pVM);
+# endif
+
+int pdmR3BlkCacheInit(PVM pVM);
+void pdmR3BlkCacheTerm(PVM pVM);
+int pdmR3BlkCacheResume(PVM pVM);
+
+DECLHIDDEN(void) pdmR3QueueTerm(PVM pVM);
+#endif /* IN_RING3 */
+
+void pdmLock(PVMCC pVM);
+int pdmLockEx(PVMCC pVM, int rcBusy);
+void pdmUnlock(PVMCC pVM);
+bool pdmLockIsOwner(PVMCC pVM);
+
+#if defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL)
+bool pdmIommuIsPresent(PPDMDEVINS pDevIns);
+int pdmIommuMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut);
+int pdmIommuMemAccessRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags);
+int pdmIommuMemAccessWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags);
+# ifdef IN_RING3
+int pdmR3IommuMemAccessReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void const **ppv, PPGMPAGEMAPLOCK pLock);
+int pdmR3IommuMemAccessWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void **ppv, PPGMPAGEMAPLOCK pLock);
+int pdmR3IommuMemAccessBulkReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages, uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks);
+int pdmR3IommuMemAccessBulkWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages, uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks);
+# endif
+#endif
+
+#if defined(IN_RING3) || defined(IN_RING0)
+void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis);
+void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis);
+#endif
+
+#ifdef IN_RING0
+DECLHIDDEN(bool) pdmR0IsaSetIrq(PGVM pGVM, int iIrq, int iLevel, uint32_t uTagSrc);
+DECLHIDDEN(void) pdmR0QueueDestroy(PGVM pGVM, uint32_t iQueue);
+
+#endif
+
+#ifdef VBOX_WITH_DBGF_TRACING
+# ifdef IN_RING3
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_IoPortCreateEx(PPDMDEVINS pDevIns, RTIOPORT cPorts, uint32_t fFlags, PPDMPCIDEV pPciDev,
+ uint32_t iPciRegion, PFNIOMIOPORTNEWOUT pfnOut, PFNIOMIOPORTNEWIN pfnIn,
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr, PFNIOMIOPORTNEWINSTRING pfnInStr, RTR3PTR pvUser,
+ const char *pszDesc, PCIOMIOPORTDESC paExtDescs, PIOMIOPORTHANDLE phIoPorts);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_IoPortMap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts, RTIOPORT Port);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_IoPortUnmap(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_MmioCreateEx(PPDMDEVINS pDevIns, RTGCPHYS cbRegion,
+ uint32_t fFlags, PPDMPCIDEV pPciDev, uint32_t iPciRegion,
+ PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill,
+ void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_MmioMap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_MmioUnmap(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_PCIPhysRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR3DevHlpTracing_PCIPhysWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_PCISetIrq(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_PCISetIrqNoWait(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_ISASetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR3DevHlpTracing_ISASetIrqNoWait(PPDMDEVINS pDevIns, int iIrq, int iLevel);
+# elif defined(IN_RING0)
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_IoPortSetUpContextEx(PPDMDEVINS pDevIns, IOMIOPORTHANDLE hIoPorts,
+ PFNIOMIOPORTNEWOUT pfnOut, PFNIOMIOPORTNEWIN pfnIn,
+ PFNIOMIOPORTNEWOUTSTRING pfnOutStr, PFNIOMIOPORTNEWINSTRING pfnInStr,
+ void *pvUser);
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_MmioSetUpContextEx(PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, PFNIOMMMIONEWWRITE pfnWrite,
+ PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill, void *pvUser);
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_PCIPhysRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(int) pdmR0DevHlpTracing_PCIPhysWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, uint32_t fFlags);
+DECL_HIDDEN_CALLBACK(void) pdmR0DevHlpTracing_PCISetIrq(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR0DevHlpTracing_PCISetIrqNoWait(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR0DevHlpTracing_ISASetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel);
+DECL_HIDDEN_CALLBACK(void) pdmR0DevHlpTracing_ISASetIrqNoWait(PPDMDEVINS pDevIns, int iIrq, int iLevel);
+# else
+# error "Invalid environment selected"
+# endif
+#endif
+
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_PDMInternal_h */
+
diff --git a/src/VBox/VMM/include/PGMGstDefs.h b/src/VBox/VMM/include/PGMGstDefs.h
new file mode 100644
index 00000000..0d09f20f
--- /dev/null
+++ b/src/VBox/VMM/include/PGMGstDefs.h
@@ -0,0 +1,254 @@
+/* $Id: PGMGstDefs.h $ */
+/** @file
+ * VBox - Page Manager, Guest Paging Template - All context code.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
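+/* Note on usage (illustrative, reflecting how the PGM template headers are
+ * typically consumed): this file is included once per guest/shadow paging
+ * mode combination with PGM_GST_TYPE (and PGM_SHW_TYPE) defined beforehand,
+ * which is why every template macro is #undef'ed below before being
+ * redefined for the current instantiation, e.g.:
+ *
+ *     #define PGM_GST_TYPE  PGM_TYPE_PAE
+ *     #define PGM_SHW_TYPE  PGM_TYPE_PAE
+ *     #include "PGMGstDefs.h"   // (re)defines GSTPT, GST_PD_SHIFT, ...
+ *     #include "PGMAllGst.h"    // template code that uses those macros
+ */
+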
+#undef GST_ATOMIC_OR
+#undef GSTPT
+#undef PGSTPT
+#undef GSTPTE
+#undef PGSTPTE
+#undef GSTPD
+#undef PGSTPD
+#undef GSTPDE
+#undef PGSTPDE
+#undef GSTPTWALK
+#undef PGSTPTWALK
+#undef PCGSTPTWALK
+#undef GST_BIG_PAGE_SIZE
+#undef GST_BIG_PAGE_OFFSET_MASK
+#undef GST_GIGANT_PAGE_SIZE
+#undef GST_GIGANT_PAGE_OFFSET_MASK
+#undef GST_PDPE_BIG_PG_MASK
+#undef GST_PDE_PG_MASK
+#undef GST_PDE_BIG_PG_MASK
+#undef GST_PD_SHIFT
+#undef GST_PD_MASK
+#undef GST_PTE_PG_MASK
+#undef GST_GET_PTE_SHW_FLAGS
+#undef GST_PT_SHIFT
+#undef GST_PT_MASK
+#undef GST_CR3_PAGE_MASK
+#undef GST_PDPE_ENTRIES
+#undef GST_PDPT_SHIFT
+#undef GST_PDPT_MASK
+#undef GST_PDPE_PG_MASK
+#undef GST_GET_PTE_GCPHYS
+#undef GST_GET_PDE_GCPHYS
+#undef GST_GET_BIG_PDE_GCPHYS
+#undef GST_GET_BIG_PDPE_GCPHYS
+#undef GST_GET_PDE_SHW_FLAGS
+#undef GST_GET_BIG_PDE_SHW_FLAGS
+#undef GST_GET_BIG_PDE_SHW_FLAGS_4_PTE
+#undef GST_IS_PTE_VALID
+#undef GST_IS_PDE_VALID
+#undef GST_IS_BIG_PDE_VALID
+#undef GST_IS_PDPE_VALID
+#undef GST_IS_BIG_PDPE_VALID
+#undef GST_IS_PML4E_VALID
+#undef GST_IS_PGENTRY_PRESENT
+#undef GST_IS_PSE_ACTIVE
+#undef GST_IS_NX_ACTIVE
+#undef BTH_IS_NP_ACTIVE
+
+#if PGM_GST_TYPE == PGM_TYPE_REAL \
+ || PGM_GST_TYPE == PGM_TYPE_PROT
+
+# if PGM_SHW_TYPE == PGM_TYPE_EPT
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU64((a_pu), (a_fFlags))
+# define GSTPT X86PTPAE
+# define PGSTPT PX86PTPAE
+# define GSTPTE X86PTEPAE
+# define PGSTPTE PX86PTEPAE
+# define GSTPD X86PDPAE
+# define PGSTPD PX86PDPAE
+# define GSTPDE X86PDEPAE
+# define PGSTPDE PX86PDEPAE
+# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# define GST_IS_NX_ACTIVE(pVCpu) (true && This_should_perhaps_not_be_used_in_this_context)
+# define BTH_IS_NP_ACTIVE(pVM) (true)
+# else
+# if PGM_SHW_TYPE == PGM_TYPE_32BIT /* Same as shadow paging, but no PGMSHWPTEPAE. */
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU32((a_pu), (a_fFlags))
+# define GSTPT X86PT
+# define PGSTPT PX86PT
+# define GSTPTE X86PTE
+# define PGSTPTE PX86PTE
+# define GSTPD X86PD
+# define PGSTPD PX86PD
+# define GSTPDE X86PDE
+# define PGSTPDE PX86PDE
+# define GST_PTE_PG_MASK X86_PTE_PG_MASK
+# else
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU64((a_pu), (a_fFlags))
+# define GSTPT X86PTPAE
+# define PGSTPT PX86PTPAE
+# define GSTPTE X86PTEPAE
+# define PGSTPTE PX86PTEPAE
+# define GSTPD X86PDPAE
+# define PGSTPD PX86PDPAE
+# define GSTPDE X86PDEPAE
+# define PGSTPDE PX86PDEPAE
+# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# endif
+# define GST_IS_NX_ACTIVE(pVCpu) (pgmGstIsNoExecuteActive(pVCpu))
+# if PGM_GST_TYPE == PGM_TYPE_PROT /* (comment at top of PGMAllBth.h) */
+# define BTH_IS_NP_ACTIVE(pVM) (pVM->pgm.s.fNestedPaging)
+# else
+# define BTH_IS_NP_ACTIVE(pVM) (false)
+# endif
+# endif
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PTE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
+# define GST_GET_BIG_PDE_GCPHYS(Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
+# define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte) ((Pte).u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G)) /**< @todo Could return P|RW|US|A|D here without consulting the PTE. */
+# define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
+# define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
+# define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
+# define GST_IS_PTE_VALID(pVCpu, Pte) (true)
+# define GST_IS_PDE_VALID(pVCpu, Pde) (true)
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde) (true)
+# define GST_IS_PDPE_VALID(pVCpu, Pdpe) (true)
+# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe) (true)
+# define GST_IS_PML4E_VALID(pVCpu, Pml4e) (true)
+# define GST_IS_PGENTRY_PRESENT(pVCpu, Pge) ((Pge.u) & X86_PTE_P)
+# define GST_IS_PSE_ACTIVE(pVCpu) (false && This_should_not_be_used_in_this_context)
+
+#elif PGM_GST_TYPE == PGM_TYPE_32BIT
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU32((a_pu), (a_fFlags))
+# define GSTPT X86PT
+# define PGSTPT PX86PT
+# define GSTPTE X86PTE
+# define PGSTPTE PX86PTE
+# define GSTPD X86PD
+# define PGSTPD PX86PD
+# define GSTPDE X86PDE
+# define PGSTPDE PX86PDE
+# define GSTPTWALK PGMPTWALKGST32BIT
+# define PGSTPTWALK PPGMPTWALKGST32BIT
+# define PCGSTPTWALK PCPGMPTWALKGST32BIT
+# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
+# define GST_PDE_PG_MASK X86_PDE_PG_MASK
+# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PDE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) PGM_A20_APPLY(pVCpu, pgmGstGet4MBPhysPage((pVM), Pde))
+# define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A))
+# define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) \
+ ( ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A)) | PGM_PDFLAGS_BIG_PAGE )
+# define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) \
+ ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_G))
+# define GST_PD_SHIFT X86_PD_SHIFT
+# define GST_PD_MASK X86_PD_MASK
+# define GST_PTE_PG_MASK X86_PTE_PG_MASK
+# define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte) ((Pte).u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G))
+# define GST_PT_SHIFT X86_PT_SHIFT
+# define GST_PT_MASK X86_PT_MASK
+# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
+# define GST_IS_PTE_VALID(pVCpu, Pte) (true)
+# define GST_IS_PDE_VALID(pVCpu, Pde) (true)
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde) (!( (Pde).u & (pVCpu)->pgm.s.fGst32BitMbzBigPdeMask ))
+//# define GST_IS_PDPE_VALID(pVCpu, Pdpe) (false)
+//# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe) (false)
+//# define GST_IS_PML4E_VALID(pVCpu, Pml4e) (false)
+# define GST_IS_PGENTRY_PRESENT(pVCpu, Pge) ((Pge.u) & X86_PTE_P)
+# define GST_IS_PSE_ACTIVE(pVCpu) pgmGst32BitIsPageSizeExtActive(pVCpu)
+# define GST_IS_NX_ACTIVE(pVCpu) (false)
+# define BTH_IS_NP_ACTIVE(pVM) (false)
+
+#elif PGM_GST_TYPE == PGM_TYPE_PAE \
+ || PGM_GST_TYPE == PGM_TYPE_AMD64
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU64((a_pu), (a_fFlags))
+# define GSTPT X86PTPAE
+# define PGSTPT PX86PTPAE
+# define GSTPTE X86PTEPAE
+# define PGSTPTE PX86PTEPAE
+# define GSTPD X86PDPAE
+# define PGSTPD PX86PDPAE
+# define GSTPDE X86PDEPAE
+# define PGSTPDE PX86PDEPAE
+# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
+# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK
+# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PTE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_BIG_PG_MASK))
+# define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte) ((Pte).u & (pVCpu)->pgm.s.fGst64ShadowedPteMask )
+# define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedPdeMask )
+# define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) ( ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask ) | PGM_PDFLAGS_BIG_PAGE)
+# define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPde4PteMask )
+
+# define GST_PD_SHIFT X86_PD_PAE_SHIFT
+# define GST_PD_MASK X86_PD_PAE_MASK
+# if PGM_GST_TYPE == PGM_TYPE_PAE
+# define GSTPTWALK PGMPTWALKGSTPAE
+# define PGSTPTWALK PPGMPTWALKGSTPAE
+# define PCGSTPTWALK PCPGMPTWALKGSTPAE
+# define GST_PDPE_ENTRIES X86_PG_PAE_PDPE_ENTRIES
+# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK
+# define GST_PDPT_SHIFT X86_PDPT_SHIFT
+# define GST_PDPT_MASK X86_PDPT_MASK_PAE
+# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
+# define GST_IS_PTE_VALID(pVCpu, Pte) (!( (Pte).u & (pVCpu)->pgm.s.fGstPaeMbzPteMask ))
+# define GST_IS_PDE_VALID(pVCpu, Pde) (!( (Pde).u & (pVCpu)->pgm.s.fGstPaeMbzPdeMask ))
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde) (!( (Pde).u & (pVCpu)->pgm.s.fGstPaeMbzBigPdeMask ))
+# define GST_IS_PDPE_VALID(pVCpu, Pdpe) (!( (Pdpe).u & (pVCpu)->pgm.s.fGstPaeMbzPdpeMask ))
+//# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe) (false)
+//# define GST_IS_PML4E_VALID(pVCpu, Pml4e) (false)
+# else
+# define GSTPTWALK PGMPTWALKGSTAMD64
+# define PGSTPTWALK PPGMPTWALKGSTAMD64
+# define PCGSTPTWALK PCPGMPTWALKGSTAMD64
+# define GST_PDPE_ENTRIES X86_PG_AMD64_PDPE_ENTRIES
+# define GST_PDPT_SHIFT X86_PDPT_SHIFT
+# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK
+# define GST_PDPT_MASK X86_PDPT_MASK_AMD64
+# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# define GST_CR3_PAGE_MASK X86_CR3_AMD64_PAGE_MASK
+# define GST_IS_PTE_VALID(pVCpu, Pte) (!( (Pte).u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask ))
+# define GST_IS_PDE_VALID(pVCpu, Pde) (!( (Pde).u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask ))
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde) (!( (Pde).u & (pVCpu)->pgm.s.fGstAmd64MbzBigPdeMask ))
+# define GST_IS_PDPE_VALID(pVCpu, Pdpe) (!( (Pdpe).u & (pVCpu)->pgm.s.fGstAmd64MbzPdpeMask ))
+# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe) (!( (Pdpe).u & (pVCpu)->pgm.s.fGstAmd64MbzBigPdpeMask ))
+# define GST_IS_PML4E_VALID(pVCpu, Pml4e) (!( (Pml4e).u & (pVCpu)->pgm.s.fGstAmd64MbzPml4eMask ))
+# endif
+# define GST_IS_PGENTRY_PRESENT(pVCpu, Pge) ((Pge.u) & X86_PTE_P)
+# define GST_PT_SHIFT X86_PT_PAE_SHIFT
+# define GST_PT_MASK X86_PT_PAE_MASK
+# define GST_IS_PSE_ACTIVE(pVCpu) (true)
+# define GST_IS_NX_ACTIVE(pVCpu) (pgmGstIsNoExecuteActive(pVCpu))
+# define BTH_IS_NP_ACTIVE(pVM) (false)
+
+#else
+# error "Unknown PGM_GST_TYPE."
+#endif
+
diff --git a/src/VBox/VMM/include/PGMInline.h b/src/VBox/VMM/include/PGMInline.h
new file mode 100644
index 00000000..b727e888
--- /dev/null
+++ b/src/VBox/VMM/include/PGMInline.h
@@ -0,0 +1,1210 @@
+/* $Id: PGMInline.h $ */
+/** @file
+ * PGM - Inlined functions.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
+#define VMM_INCLUDED_SRC_include_PGMInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/err.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/param.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/dis.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/log.h>
+#include <VBox/vmm/gmm.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/nem.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/avl.h>
+#include <iprt/critsect.h>
+#include <iprt/sha.h>
+
+
+
+/** @addtogroup grp_pgm_int Internals
+ * @internal
+ * @{
+ */
+
+/**
+ * Gets the PGMRAMRANGE structure for a guest page.
+ *
+ * @returns Pointer to the RAM range on success.
+ * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ */
+DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
+{
+ PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
+ return pgmPhysGetRangeSlow(pVM, GCPhys);
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ return pRam;
+}
+
+
+/**
+ * Gets the PGMRAMRANGE structure for a guest page; if unassigned, gets the RAM
+ * range above it.
+ *
+ * @returns Pointer to the RAM range on success.
+ * @returns NULL if the address is located after the last range.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ */
+DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
+{
+ PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ if ( !pRam
+ || (GCPhys - pRam->GCPhys) >= pRam->cb)
+ return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ return pRam;
+}
+
+
+/**
+ * Gets the PGMPAGE structure for a guest page.
+ *
+ * @returns Pointer to the page on success.
+ * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ */
+DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
+{
+ PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ RTGCPHYS off;
+ if ( pRam
+ && (off = GCPhys - pRam->GCPhys) < pRam->cb)
+ {
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
+ }
+ return pgmPhysGetPageSlow(pVM, GCPhys);
+}
+
+
+/**
+ * Gets the PGMPAGE structure for a guest page.
+ *
+ * Old Phys code: Will make sure the page is present.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS and a valid *ppPage on success.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ * @param ppPage Where to store the page pointer on success.
+ */
+DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
+{
+ PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ RTGCPHYS off;
+ if ( !pRam
+ || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
+ return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
+ *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ return VINF_SUCCESS;
+}
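+
+
+/* Usage sketch (illustrative only): the status-code variant is preferred in
+ * code paths that must distinguish "no such guest physical address" from a
+ * valid page.  GCPhys is a hypothetical caller variable here.
+ *
+ *     PPGMPAGE pPage;
+ *     int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         // inspect the page, e.g. PGM_PAGE_GET_TYPE(pPage) / state checks
+ *     }
+ *     else
+ *         Assert(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
+ */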
+
+
+/**
+ * Gets the PGMPAGE structure for a guest page.
+ *
+ * Old Phys code: Will make sure the page is present.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS and a valid *ppPage on success.
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ * @param ppPage Where to store the page pointer on success.
+ * @param ppRamHint Where to read and store the ram list hint.
+ * The caller initializes this to NULL before the call.
+ */
+DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
+{
+ RTGCPHYS off;
+ PPGMRAMRANGE pRam = *ppRamHint;
+ if ( !pRam
+ || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
+ {
+ pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ if ( !pRam
+ || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
+ return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
+
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ *ppRamHint = pRam;
+ }
+ *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
+ return VINF_SUCCESS;
+}
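+
+
+/* Usage sketch (illustrative only): the hint variant pays off when scanning a
+ * run of nearby guest physical pages, since the RAM range found for the first
+ * page usually covers the following ones too.  GCPhysFirst and cPages are
+ * hypothetical caller variables.
+ *
+ *     PPGMRAMRANGE pRamHint = NULL;   // must start out NULL
+ *     for (uint32_t i = 0; i < cPages; i++)
+ *     {
+ *         PPGMPAGE pPage;
+ *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)i << GUEST_PAGE_SHIFT),
+ *                                           &pPage, &pRamHint);
+ *         if (RT_FAILURE(rc))
+ *             break;
+ *         // ... per-page work ...
+ *     }
+ */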
+
+
+/**
+ * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS and a valid *ppPage and *ppRam on success.
+ * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ * @param ppPage Where to store the pointer to the PGMPAGE structure.
+ * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
+ */
+DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
+{
+ PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
+ RTGCPHYS off;
+ if ( !pRam
+ || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
+ return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
+
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
+ *ppRam = pRam;
+ *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Convert GC Phys to HC Phys.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address.
+ * @param pHCPhys Where to store the corresponding HC physical address.
+ *
+ * @deprecated Doesn't deal with zero, shared or write monitored pages.
+ * Avoid when writing new code!
+ */
+DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
+{
+ PPGMPAGE pPage;
+ int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
+ if (RT_FAILURE(rc))
+ return rc;
+ *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Queries the Physical TLB entry for a physical guest page,
+ * attempting to load the TLB entry if necessary.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The address of the guest page.
+ * @param ppTlbe Where to store the pointer to the TLB entry.
+ */
+DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
+{
+ int rc;
+ PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
+ if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
+ {
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
+ *ppTlbe = pTlbe;
+ return rc;
+}
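+
+
+/* Usage sketch (illustrative only; assumes the PGM lock is held and that the
+ * TLB entry exposes the page pointer and current-context mapping as the
+ * pPage and pv members):
+ *
+ *     PPGMPAGEMAPTLBE pTlbe;
+ *     int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         PPGMPAGE pPage = pTlbe->pPage;
+ *         void    *pv    = pTlbe->pv;     // page-aligned mapping
+ *         // ... access the page contents via pv ...
+ *     }
+ */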
+
+
+/**
+ * Queries the Physical TLB entry for a physical guest page,
+ * attempting to load the TLB entry if necessary.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS on success
+ * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pPage Pointer to the PGMPAGE structure corresponding to
+ * GCPhys.
+ * @param GCPhys The address of the guest page.
+ * @param ppTlbe Where to store the pointer to the TLB entry.
+ */
+DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
+{
+ int rc;
+ PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
+ if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
+ {
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
+ rc = VINF_SUCCESS;
+ AssertPtr(pTlbe->pv);
+#ifdef IN_RING3
+ Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
+#endif
+ }
+ else
+ rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
+ *ppTlbe = pTlbe;
+ return rc;
+}
+
+
+/**
+ * Calculates NEM page protection flags.
+ */
+DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
+{
+ /*
+ * Deal with potentially writable pages first.
+ */
+ if (PGMPAGETYPE_IS_RWX(enmType))
+ {
+ if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+ {
+ if (PGM_PAGE_IS_ALLOCATED(pPage))
+ return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
+ return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
+ }
+ if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+ return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
+ }
+ /*
+ * Potentially readable & executable pages.
+ */
+ else if ( PGMPAGETYPE_IS_ROX(enmType)
+ && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+ return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
+
+ /*
+ * The rest needs special access handling.
+ */
+ return NEM_PAGE_PROT_NONE;
+}
+
+
+/**
+ * Enables write monitoring for an allocated page.
+ *
+ * The caller is responsible for updating the shadow page tables.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pPage The page to write monitor.
+ * @param GCPhysPage The address of the page.
+ */
+DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
+{
+ Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
+ PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
+ pVM->pgm.s.cMonitoredPages++;
+
+ /* Large pages must be disabled. */
+ if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
+ {
+ PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
+ AssertFatal(pFirstPage);
+ if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
+ {
+ PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+ pVM->pgm.s.cLargePagesDisabled++;
+ }
+ else
+ Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+ }
+
+#ifdef VBOX_WITH_NATIVE_NEM
+ /* Tell NEM. */
+ if (VM_IS_NEM_ENABLED(pVM))
+ {
+ uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
+ PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
+ PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
+ NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
+ pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
+ pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
+ PGM_PAGE_SET_NEM_STATE(pPage, u2State);
+ }
+#endif
+}
+
+
+/**
+ * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
+ *
+ * Only used when the guest is in PAE or long mode. This is inlined so that we
+ * can perform consistency checks in debug builds.
+ *
+ * @returns true if it is, false if it isn't.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
+{
+ Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
+ Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
+ return pVCpu->pgm.s.fNoExecuteEnabled;
+}
+
+
+/**
+ * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
+ *
+ * Only used when the guest is in paged 32-bit mode. This is inlined so that
+ * we can perform consistency checks in debug builds.
+ *
+ * @returns true if it is, false if it isn't.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
+{
+ Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
+ Assert(!CPUMIsGuestInPAEMode(pVCpu));
+ Assert(!CPUMIsGuestInLongMode(pVCpu));
+ return pVCpu->pgm.s.fGst32BitPageSizeExtension;
+}
+
+
+/**
+ * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
+ * Takes PSE-36 into account.
+ *
+ * @returns guest physical address
+ * @param pVM The cross context VM structure.
+ * @param Pde Guest Pde
+ */
+DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
+{
+ RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
+ GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
+
+ return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
+}
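+
+
+/* Worked sketch (illustrative only) of a 32-bit PSE walk using the helper
+ * above: the 4 MB frame base comes from PDE bits 31:22 and, with PSE-36, the
+ * extra physical address bits come from the PDE's high-address field (bit 13
+ * and up); the helper also applies the VM's PSE address mask.  pVM, pVCpu and
+ * GCPtr are assumed to be in scope in the caller.
+ *
+ *     PX86PD pPd = pgmGstGet32bitPDPtr(pVCpu);
+ *     if (pPd)
+ *     {
+ *         X86PDE Pde = pPd->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
+ *         if (   Pde.n.u1Present
+ *             && Pde.b.u1Size
+ *             && pgmGst32BitIsPageSizeExtActive(pVCpu))
+ *         {
+ *             RTGCPHYS GCPhys = pgmGstGet4MBPhysPage(pVM, Pde)
+ *                             | (GCPtr & X86_PAGE_4M_OFFSET_MASK);
+ *             // GCPhys now addresses the byte within the 4 MB page
+ *         }
+ *     }
+ */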
+
+
+/**
+ * Gets the address of the guest page directory (32-bit paging).
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param ppPd Where to return the mapping. This is always set.
+ */
+DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
+{
+ *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
+ if (RT_UNLIKELY(!*ppPd))
+ return pgmGstLazyMap32BitPD(pVCpu, ppPd);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the address of the guest page directory (32-bit paging).
+ *
+ * @returns Pointer to the page directory in question.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
+{
+ PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
+ if (RT_UNLIKELY(!pGuestPD))
+ {
+ int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
+ if (RT_FAILURE(rc))
+ return NULL;
+ }
+ return pGuestPD;
+}
+
+
+/**
+ * Gets the guest page directory pointer table.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param ppPdpt Where to return the mapping. This is always set.
+ */
+DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
+{
+ *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
+ if (RT_UNLIKELY(!*ppPdpt))
+ return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the guest page directory pointer table.
+ *
+ * @returns Pointer to the page directory pointer table in question.
+ * @returns NULL if the page directory pointer table is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
+{
+ PX86PDPT pGuestPdpt;
+ int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
+ AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
+ return pGuestPdpt;
+}
+
+
+/**
+ * Gets the guest page directory pointer table entry for the specified address.
+ *
+ * @returns Pointer to the page directory pointer table entry in question.
+ * @returns NULL if the page directory pointer table is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ AssertGCPtr32(GCPtr);
+
+ PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
+ if (RT_UNLIKELY(!pGuestPDPT))
+ {
+ int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
+ if (RT_FAILURE(rc))
+ return NULL;
+ }
+ return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
+}
+
+
+/**
+ * Gets the page directory entry for the specified address.
+ *
+ * @returns The page directory entry in question.
+ * @returns A non-present entry if the page directory is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param GCPtr The address.
+ */
+DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ AssertGCPtr32(GCPtr);
+ PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
+ if (RT_LIKELY(pGuestPDPT))
+ {
+ const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
+ if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
+ {
+ const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
+ if ( !pGuestPD
+ || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
+ pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
+ if (pGuestPD)
+ return pGuestPD->a[iPD];
+ }
+ }
+
+ X86PDEPAE ZeroPde = {0};
+ return ZeroPde;
+}
+
+
+/**
+ * Gets the guest PAE page directory for the specified address and returns the
+ * index into it; optionally also returns the page directory pointer table entry.
+ *
+ * @returns Pointer to the page directory in question.
+ * @returns NULL if the page directory is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ * @param piPD Receives the index into the returned page directory
+ * @param pPdpe Receives the page directory pointer entry. Optional.
+ */
+DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
+{
+ AssertGCPtr32(GCPtr);
+
+ /* The PDPE. */
+ PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
+ if (pGuestPDPT)
+ {
+ const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
+ X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
+ if (pPdpe)
+ pPdpe->u = uPdpe;
+ if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
+ {
+
+ /* The PDE. */
+ PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
+ if ( !pGuestPD
+ || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
+ pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
+ *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ return pGuestPD;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Gets the page map level-4 pointer for the guest.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param ppPml4 Where to return the mapping. Always set.
+ */
+DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
+{
+ *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
+ if (RT_UNLIKELY(!*ppPml4))
+ return pgmGstLazyMapPml4(pVCpu, ppPml4);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the page map level-4 pointer for the guest.
+ *
+ * @returns Pointer to the PML4 page.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
+{
+ PX86PML4 pGuestPml4;
+ int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
+ AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
+ return pGuestPml4;
+}
+
+
+/**
+ * Gets the pointer to a page map level-4 entry.
+ *
+ * @returns Pointer to the PML4 entry.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param iPml4 The index.
+ * @remarks Only used by AssertCR3.
+ */
+DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
+{
+ PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
+ if (pGuestPml4)
+ { /* likely */ }
+ else
+ {
+ int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
+ AssertRCReturn(rc, NULL);
+ }
+ return &pGuestPml4->a[iPml4];
+}
+
+
+/**
+ * Gets the page directory entry for the specified address.
+ *
+ * @returns The page directory entry in question.
+ * @returns A non-present entry if the page directory is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
+{
+ /*
+ * Note! To keep things simple, ASSUME invalid physical addresses will
+ * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
+ * supporting 52-bit wide physical guest addresses.
+ */
+ PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
+ if (RT_LIKELY(pGuestPml4))
+ {
+ const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
+ if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
+ {
+ PCX86PDPT pPdptTemp;
+ int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
+ if (RT_SUCCESS(rc))
+ {
+ const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+ X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
+ if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
+ {
+ PCX86PDPAE pPD;
+ rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
+ if (RT_SUCCESS(rc))
+ {
+ const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ return pPD->a[iPD];
+ }
+ }
+ }
+ AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+ }
+ }
+
+ X86PDEPAE ZeroPde = {0};
+ return ZeroPde;
+}
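+
+
+/* Usage sketch (illustrative only): callers typically fetch the PDE and then
+ * branch on the PS bit to tell a 2 MB mapping from a further page table walk.
+ * GCPtr is a hypothetical guest linear address.
+ *
+ *     X86PDEPAE Pde = pgmGstGetLongModePDE(pVCpu, GCPtr);
+ *     if (Pde.n.u1Present)
+ *     {
+ *         if (Pde.b.u1Size)
+ *         {
+ *             // 2 MB page: frame base in the big-page bits of Pde.u
+ *         }
+ *         else
+ *         {
+ *             // 4 KB pages: walk the page table at (Pde.u & X86_PDE_PAE_PG_MASK)
+ *         }
+ *     }
+ */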
+
+
+/**
+ * Gets the GUEST page directory pointer for the specified address.
+ *
+ * @returns The page directory in question.
+ * @returns NULL if the page directory is not present or on an invalid page.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ * @param ppPml4e Page Map Level-4 Entry (out)
+ * @param pPdpe Page directory pointer table entry (out)
+ * @param piPD Receives the index into the returned page directory
+ */
+DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
+{
+ /* The PML4E. */
+ PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
+ if (pGuestPml4)
+ {
+ const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ *ppPml4e = &pGuestPml4->a[iPml4];
+ X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
+ if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
+ {
+ /* The PDPE. */
+ PCX86PDPT pPdptTemp;
+ int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
+ if (RT_SUCCESS(rc))
+ {
+ const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+ X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
+ pPdpe->u = uPdpe;
+ if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
+ {
+ /* The PDE. */
+ PX86PDPAE pPD;
+ rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
+ if (RT_SUCCESS(rc))
+ {
+ *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ return pPD;
+ }
+ AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+ }
+ }
+ else
+ AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+ }
+ }
+ return NULL;
+}
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# if 0
+/**
+ * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
+ *
+ * @returns Pointer to the PML4 entry.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param iPml4 The index.
+ * @remarks Only used by AssertCR3.
+ */
+DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
+{
+ PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
+ if (pEptPml4)
+ { /* likely */ }
+ else
+ {
+ int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
+ AssertRCReturn(rc, NULL);
+ }
+ return &pEptPml4->a[iPml4];
+}
+# endif
+
+
+/**
+ * Gets the page map level-4 pointer for the guest when the guest is using EPT
+ * paging.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param ppEptPml4 Where to return the mapping. Always set.
+ */
+DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
+{
+ /* Shadow CR3 might not have been mapped at this point, see PGMHCChangeMode. */
+ *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
+ if (!*ppEptPml4)
+ return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
+ return VINF_SUCCESS;
+}
+
+
+# if 0
+/**
+ * Gets the page map level-4 pointer for the guest when the guest is using EPT
+ * paging.
+ *
+ * @returns Pointer to the EPT PML4 page.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
+{
+ PEPTPML4 pEptPml4;
+ int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
+ AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
+ return pEptPml4;
+}
+# endif
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
+/**
+ * Gets the shadow page directory, 32-bit.
+ *
+ * @returns Pointer to the shadow 32-bit PD.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
+{
+ return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+}
+
+
+/**
+ * Gets the shadow page directory entry for the specified address, 32-bit.
+ *
+ * @returns Shadow 32-bit PDE.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
+ if (!pShwPde)
+ {
+ X86PDE ZeroPde = {0};
+ return ZeroPde;
+ }
+ return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
+}
+
+
+/**
+ * Gets the pointer to the shadow page directory entry for the specified
+ * address, 32-bit.
+ *
+ * @returns Pointer to the shadow 32-bit PDE.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
+ AssertReturn(pPde, NULL);
+ return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
+}
+
+
+/**
+ * Gets the shadow page directory pointer table, PAE.
+ *
+ * @returns Pointer to the shadow PAE PDPT.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
+{
+ return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+}
+
+
+/**
+ * Gets the shadow page directory for the specified address, PAE.
+ *
+ * @returns Pointer to the shadow PD.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pPdpt Pointer to the page directory pointer table.
+ * @param GCPtr The address.
+ */
+DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
+{
+ const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
+ if (pPdpt->a[iPdpt].u & X86_PDPE_P)
+ {
+ /* Fetch the pgm pool shadow descriptor. */
+ PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+ PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
+ AssertReturn(pShwPde, NULL);
+
+ return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
+ }
+ return NULL;
+}
+
+
+/**
+ * Gets the shadow page directory for the specified address, PAE.
+ *
+ * @returns Pointer to the shadow PD.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
+}
+
+
+/**
+ * Gets the shadow page directory entry, PAE.
+ *
+ * @returns PDE.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
+ if (pShwPde)
+ return pShwPde->a[iPd];
+
+ X86PDEPAE ZeroPde = {0};
+ return ZeroPde;
+}
+
+
+/**
+ * Gets the pointer to the shadow page directory entry for an address, PAE.
+ *
+ * @returns Pointer to the PDE.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ * @remarks Only used by AssertCR3.
+ */
+DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+ PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
+ AssertReturn(pShwPde, NULL);
+ return &pShwPde->a[iPd];
+}
+
+
+/**
+ * Gets the shadow page map level-4 pointer.
+ *
+ * @returns Pointer to the shadow PML4.
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
+{
+ return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+}
+
+
+/**
+ * Gets the shadow page map level-4 entry for the specified address.
+ *
+ * @returns The entry.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCPtr The address.
+ */
+DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
+{
+ const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
+ if (pShwPml4)
+ return pShwPml4->a[iPml4];
+
+ X86PML4E ZeroPml4e = {0};
+ return ZeroPml4e;
+}
+
+
+/**
+ * Gets the pointer to the specified shadow page map level-4 entry.
+ *
+ * @returns The entry.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param iPml4 The PML4 index.
+ */
+DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
+{
+ PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
+ if (pShwPml4)
+ return &pShwPml4->a[iPml4];
+ return NULL;
+}
+
+
+/**
+ * Cached physical handler lookup.
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_FOUND if no handler.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The lookup address.
+ * @param ppHandler Where to return the handler pointer.
+ */
+DECLINLINE(int) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys, PPGMPHYSHANDLER *ppHandler)
+{
+ PPGMPHYSHANDLER pHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrFromInt(pVM->pgm.s.idxLastPhysHandler);
+ if ( pHandler
+ && pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.isPtrRetOkay(pHandler)
+ && GCPhys >= pHandler->Key
+ && GCPhys < pHandler->KeyLast
+ && pHandler->hType != NIL_PGMPHYSHANDLERTYPE
+ && pHandler->hType != 0)
+ {
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
+ *ppHandler = pHandler;
+ return VINF_SUCCESS;
+ }
+
+ STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
+ AssertPtrReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
+ int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pHandler);
+ if (RT_SUCCESS(rc))
+ {
+ *ppHandler = pHandler;
+ pVM->pgm.s.idxLastPhysHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrToInt(pHandler);
+ return VINF_SUCCESS;
+ }
+ *ppHandler = NULL;
+ return rc;
+}
+
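The lookup above keeps a single "last hit" index (idxLastPhysHandler) in front of the AVL tree and only falls back to the tree on a miss, refreshing the cache on success. A minimal sketch of the same pattern, with invented RANGECACHE/RangeLookup names and a sorted array standing in for the tree:

#include <stdint.h>
#include <stddef.h>

typedef struct RANGE { uint64_t uFirst, uLast; int iPayload; } RANGE;

typedef struct RANGECACHE
{
    RANGE  *paRanges;   /* sorted, non-overlapping ranges (stands in for the AVL tree) */
    size_t  cRanges;
    size_t  idxLastHit; /* single-entry cache, like PGM::idxLastPhysHandler */
} RANGECACHE;

static RANGE *RangeLookup(RANGECACHE *pCache, uint64_t uKey)
{
    /* 1. Try the cached entry first. */
    if (pCache->idxLastHit < pCache->cRanges)
    {
        RANGE *pHit = &pCache->paRanges[pCache->idxLastHit];
        if (uKey >= pHit->uFirst && uKey <= pHit->uLast)
            return pHit;
    }
    /* 2. Fall back to a binary search and refresh the cache on success. */
    size_t iLo = 0, iHi = pCache->cRanges;
    while (iLo < iHi)
    {
        size_t const iMid = iLo + (iHi - iLo) / 2;
        RANGE *pMid = &pCache->paRanges[iMid];
        if (uKey < pMid->uFirst)
            iHi = iMid;
        else if (uKey > pMid->uLast)
            iLo = iMid + 1;
        else
        {
            pCache->idxLastHit = iMid;
            return pMid;
        }
    }
    return NULL;
}

Refreshing idxLastHit on the slow path is what makes repeated accesses to the same range cheap.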
+
+/**
+ * Converts a handle to a pointer.
+ *
+ * @returns Pointer on success, NULL on failure (asserted).
+ * @param pVM The cross context VM structure.
+ * @param hType Physical access handler type handle.
+ */
+DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
+{
+#ifdef IN_RING0
+ PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
+#elif defined(IN_RING3)
+ PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
+#else
+# error "Invalid context"
+#endif
+ AssertReturn(pType->hType == hType, NULL);
+ return pType;
+}
+
+
+/**
+ * Converts a handle to a pointer, never returns NULL.
+ *
+ * @returns Pointer on success, dummy on failure (asserted).
+ * @param pVM The cross context VM structure.
+ * @param hType Physical access handler type handle.
+ */
+DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr2(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
+{
+#ifdef IN_RING0
+ PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
+#elif defined(IN_RING3)
+ PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
+#else
+# error "Invalid context"
+#endif
+ AssertReturn(pType->hType == hType, &g_pgmHandlerPhysicalDummyType);
+ return pType;
+}
+
+
+/**
+ * Internal worker for finding an 'in-use' shadow page given by its pool page index.
+ *
+ * @returns Pointer to the shadow page structure.
+ * @param pPool The pool.
+ * @param idx The pool page index.
+ */
+DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
+{
+ AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
+ return &pPool->aPages[idx];
+}
+
+
+/**
+ * Clear references to guest physical memory.
+ *
+ * @param pPool The pool.
+ * @param pPoolPage The pool page.
+ * @param pPhysPage The physical guest page tracking structure.
+ * @param iPte Shadow PTE index
+ */
+DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
+{
+ /*
+ * Just deal with the simple case here.
+ */
+#ifdef VBOX_STRICT
+ PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
+#endif
+#ifdef LOG_ENABLED
+ const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
+#endif
+ const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
+ if (cRefs == 1)
+ {
+ Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
+ Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
+ /* Invalidate the tracking data. */
+ PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
+ }
+ else
+ pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
+ Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
+}
+
+
+/**
+ * Moves the page to the head of the age list.
+ *
+ * This is done when the cached page is used in one way or another.
+ *
+ * @param pPool The pool.
+ * @param pPage The cached page.
+ */
+DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+ PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
+
+ /*
+ * Move to the head of the age list.
+ */
+ if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
+ {
+ /* unlink */
+ pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
+ if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
+ pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
+ else
+ pPool->iAgeTail = pPage->iAgePrev;
+
+ /* insert at head */
+ pPage->iAgePrev = NIL_PGMPOOL_IDX;
+ pPage->iAgeNext = pPool->iAgeHead;
+ Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
+ pPool->iAgeHead = pPage->idx;
+ pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
+ }
+}
+
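pgmPoolCacheUsed is a classic move-to-head operation on a doubly linked list that stores array indices instead of pointers. A self-contained sketch of that list manipulation (invented LRULIST/LruTouch names, NIL_IDX standing in for NIL_PGMPOOL_IDX):

#include <stdint.h>

#define NIL_IDX UINT16_MAX

typedef struct LRUNODE { uint16_t iPrev, iNext; } LRUNODE;
typedef struct LRULIST { LRUNODE aNodes[64]; uint16_t iHead, iTail; } LRULIST;

/* Move aNodes[idx] to the head of the age list (most recently used). */
static void LruTouch(LRULIST *pList, uint16_t idx)
{
    LRUNODE *pNode = &pList->aNodes[idx];
    if (pNode->iPrev == NIL_IDX)
        return; /* already at the head */

    /* Unlink from the current position. */
    pList->aNodes[pNode->iPrev].iNext = pNode->iNext;
    if (pNode->iNext != NIL_IDX)
        pList->aNodes[pNode->iNext].iPrev = pNode->iPrev;
    else
        pList->iTail = pNode->iPrev;

    /* Insert at the head. */
    pNode->iPrev = NIL_IDX;
    pNode->iNext = pList->iHead;
    pList->aNodes[pList->iHead].iPrev = idx;
    pList->iHead = idx;
}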
+
+/**
+ * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
+ *
+ * @param pPool The pool.
+ * @param pPage PGM pool page
+ */
+DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+ PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
+ ASMAtomicIncU32(&pPage->cLocked);
+}
+
+
+/**
+ * Unlocks a page to allow flushing again
+ *
+ * @param pPool The pool.
+ * @param pPage PGM pool page
+ */
+DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+ PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
+ Assert(pPage->cLocked);
+ ASMAtomicDecU32(&pPage->cLocked);
+}
+
+
+/**
+ * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
+ *
+ * @returns true if locked, false if not.
+ * @param pPage PGM pool page
+ */
+DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
+{
+ if (pPage->cLocked)
+ {
+ LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
+ if (pPage->cModifications)
+ pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
+ return true;
+ }
+ return false;
+}
+
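The page "lock" above is a flush-prevention reference count rather than a mutex. A minimal sketch of the same idea using C11 atomics (the real code uses IPRT's ASMAtomicIncU32/ASMAtomicDecU32 helpers; POOLPAGE and the function names below are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <assert.h>

typedef struct POOLPAGE { atomic_uint cLocked; } POOLPAGE;

static void PoolPageLock(POOLPAGE *pPage)
{
    atomic_fetch_add(&pPage->cLocked, 1);  /* take another flush-prevention reference */
}

static void PoolPageUnlock(POOLPAGE *pPage)
{
    unsigned const cOld = atomic_fetch_sub(&pPage->cLocked, 1);
    assert(cOld > 0); (void)cOld;          /* must not underflow */
}

static bool PoolPageIsLocked(POOLPAGE *pPage)
{
    return atomic_load(&pPage->cLocked) != 0;
}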
+
+/**
+ * Checks if the specified page is dirty (not write monitored).
+ *
+ * @returns true if dirty, false if not.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys Guest physical address
+ */
+DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
+{
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ PGM_LOCK_ASSERT_OWNER(pVM);
+ if (!pPool->cDirtyPages)
+ return false;
+ return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
+}
+
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
+
diff --git a/src/VBox/VMM/include/PGMInternal.h b/src/VBox/VMM/include/PGMInternal.h
new file mode 100644
index 00000000..b19c7c5b
--- /dev/null
+++ b/src/VBox/VMM/include/PGMInternal.h
@@ -0,0 +1,3884 @@
+/* $Id: PGMInternal.h $ */
+/** @file
+ * PGM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_PGMInternal_h
+#define VMM_INCLUDED_SRC_include_PGMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/err.h>
+#include <VBox/dbg.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/param.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/dis.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/log.h>
+#include <VBox/vmm/gmm.h>
+#include <VBox/vmm/hm.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/avl.h>
+#include <iprt/critsect.h>
+#include <iprt/list-off32.h>
+#include <iprt/sha.h>
+#include <iprt/cpp/hardavlrange.h>
+
+
+
+/** @defgroup grp_pgm_int Internals
+ * @ingroup grp_pgm
+ * @internal
+ * @{
+ */
+
+
+/** @name PGM Compile Time Config
+ * @{
+ */
+
+/**
+ * Check and skip global PDEs for non-global flushes
+ */
+#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
+
+/**
+ * Optimization for PAE page tables that are modified often
+ */
+//#if 0 /* disabled again while debugging */
+#define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+//#endif
+
+/**
+ * Large page support enabled only on 64 bits hosts; applies to nested paging only.
+ */
+#define PGM_WITH_LARGE_PAGES
+
+/**
+ * Enables optimizations for MMIO handlers that exploits X86_TRAP_PF_RSVD and
+ * VMX_EXIT_EPT_MISCONFIG.
+ */
+#define PGM_WITH_MMIO_OPTIMIZATIONS
+
+/**
+ * Sync N pages instead of a whole page table
+ */
+#define PGM_SYNC_N_PAGES
+
+/**
+ * Number of pages to sync during a page fault
+ *
+ * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here
+ * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
+ *
+ * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
+ * world switch overhead, so let's sync more.
+ */
+#ifdef IN_RING0
+/* Chose 32 based on the compile test in @bugref{4219}; 64 shows worse stats.
+ * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
+ * but ~5% fewer faults.
+ */
+# define PGM_SYNC_NR_PAGES 32
+#else
+# define PGM_SYNC_NR_PAGES 8
+#endif
+
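PGM_SYNC_NR_PAGES bounds how many page-table entries around the faulting one get synced in a single \#PF. One plausible way such a window can be clamped to the page-table boundaries, shown as a sketch with invented names and sizes:

#include <stdint.h>

#define MY_SYNC_NR_PAGES   32u  /* assumed window size, mirrors the ring-0 PGM_SYNC_NR_PAGES */
#define MY_PT_ENTRIES     512u  /* PAE/long-mode page table entries */

/* Compute the [iFirst, iLast] PTE window centred on the faulting entry,
   clamped so it never crosses into a neighbouring page table. */
static void SyncWindow(unsigned iPteFault, unsigned *piFirst, unsigned *piLast)
{
    unsigned iFirst = iPteFault > MY_SYNC_NR_PAGES / 2 ? iPteFault - MY_SYNC_NR_PAGES / 2 : 0;
    unsigned iLast  = iFirst + MY_SYNC_NR_PAGES - 1;
    if (iLast >= MY_PT_ENTRIES)
    {
        iLast  = MY_PT_ENTRIES - 1;
        iFirst = iLast >= MY_SYNC_NR_PAGES - 1 ? iLast - (MY_SYNC_NR_PAGES - 1) : 0;
    }
    *piFirst = iFirst;
    *piLast  = iLast;
}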
+/**
+ * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t))
+ */
+#define PGM_MAX_PHYSCACHE_ENTRIES 64
+#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)
+
+
+/** @def PGMPOOL_CFG_MAX_GROW
+ * The maximum number of pages to add to the pool in one go.
+ */
+#define PGMPOOL_CFG_MAX_GROW (_2M >> GUEST_PAGE_SHIFT) /** @todo or HOST_PAGE_SHIFT ? */
+
+/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
+ * Enables some extra assertions for virtual handlers (mainly phys2virt related).
+ */
+#ifdef VBOX_STRICT
+# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
+#endif
+
+/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
+ * Enables the experimental lazy page allocation code. */
+#ifdef DOXYGEN_RUNNING
+# define VBOX_WITH_NEW_LAZY_PAGE_ALLOC
+#endif
+
+/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
+ * Enables real write monitoring of pages, i.e. mapping them read-only and
+ * only making them writable when getting a write access \#PF. */
+#define VBOX_WITH_REAL_WRITE_MONITORED_PAGES
+
+/** @def VBOX_WITH_PGM_NEM_MODE
+ * Enabled the NEM memory management mode in PGM. See PGM::fNemMode for
+ * details. */
+#ifdef DOXYGEN_RUNNING
+# define VBOX_WITH_PGM_NEM_MODE
+#endif
+
+/** @} */
+
+
+/** @name PDPT and PML4 flags.
+ * These are placed in the three bits available for system programs in
+ * the PDPT and PML4 entries.
+ * @{ */
+/** The entry is a permanent one and must always be present.
+ * Never free such an entry. */
+#define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10)
+/** PGM specific bits in PML4 entries. */
+#define PGM_PML4_FLAGS 0
+/** PGM specific bits in PDPT entries. */
+#define PGM_PDPT_FLAGS (PGM_PLXFLAGS_PERMANENT)
+/** @} */
+
+/** @name Page directory flags.
+ * These are placed in the three bits available for system programs in
+ * the page directory entries.
+ * @{ */
+/** Indicates the original entry was a big page.
+ * @remarks This is currently only used for statistics and can be recycled. */
+#define PGM_PDFLAGS_BIG_PAGE RT_BIT_64(9)
+/** Made read-only to facilitate dirty bit tracking. */
+#define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11)
+/** @} */
+
+/** @name Page flags.
+ * These are placed in the three bits available for system programs in
+ * the page entries.
+ * @{ */
+/** Made read-only to facilitate dirty bit tracking. */
+#define PGM_PTFLAGS_TRACK_DIRTY RT_BIT_64(9)
+
+#ifndef PGM_PTFLAGS_CSAM_VALIDATED
+/** Scanned and approved by CSAM (tm).
+ * NOTE: Must be identical to the one defined in CSAMInternal.h!!
+ * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/vmm/pgm.h. */
+#define PGM_PTFLAGS_CSAM_VALIDATED RT_BIT_64(11)
+#endif
+
+/** @} */
+
+/** @name Defines used to indicate the shadow and guest paging in the templates.
+ * @{ */
+#define PGM_TYPE_REAL 1
+#define PGM_TYPE_PROT 2
+#define PGM_TYPE_32BIT 3
+#define PGM_TYPE_PAE 4
+#define PGM_TYPE_AMD64 5
+#define PGM_TYPE_NESTED_32BIT 6
+#define PGM_TYPE_NESTED_PAE 7
+#define PGM_TYPE_NESTED_AMD64 8
+#define PGM_TYPE_EPT 9
+#define PGM_TYPE_NONE 10 /**< Dummy shadow paging mode for NEM. */
+#define PGM_TYPE_END (PGM_TYPE_NONE + 1)
+#define PGM_TYPE_FIRST_SHADOW PGM_TYPE_32BIT /**< The first type used by shadow paging. */
+/** @} */
+
+/** @name Defines used to indicate the second-level
+ * address translation (SLAT) modes in the templates.
+ * @{ */
+#define PGM_SLAT_TYPE_DIRECT (PGM_TYPE_END + 1)
+#define PGM_SLAT_TYPE_EPT (PGM_TYPE_END + 2)
+#define PGM_SLAT_TYPE_32BIT (PGM_TYPE_END + 3)
+#define PGM_SLAT_TYPE_PAE (PGM_TYPE_END + 4)
+#define PGM_SLAT_TYPE_AMD64 (PGM_TYPE_END + 5)
+/** @} */
+
+/** Macro for checking if the guest is using paging.
+ * @param uGstType PGM_TYPE_*
+ * @param uShwType PGM_TYPE_*
+ * @remark ASSUMES certain order of the PGM_TYPE_* values.
+ */
+#define PGM_WITH_PAGING(uGstType, uShwType) \
+ ( (uGstType) >= PGM_TYPE_32BIT \
+ && (uShwType) < PGM_TYPE_NESTED_32BIT)
+
+/** Macro for checking if the guest supports the NX bit.
+ * @param uGstType PGM_TYPE_*
+ * @param uShwType PGM_TYPE_*
+ * @remark ASSUMES certain order of the PGM_TYPE_* values.
+ */
+#define PGM_WITH_NX(uGstType, uShwType) \
+ ( (uGstType) >= PGM_TYPE_PAE \
+ && (uShwType) < PGM_TYPE_NESTED_32BIT)
+
+/** Macro for checking for nested.
+ * @param uType PGM_TYPE_*
+ */
+#define PGM_TYPE_IS_NESTED(uType) \
+ ( (uType) == PGM_TYPE_NESTED_32BIT \
+ || (uType) == PGM_TYPE_NESTED_PAE \
+ || (uType) == PGM_TYPE_NESTED_AMD64)
+
+/** Macro for checking for nested or EPT.
+ * @param uType PGM_TYPE_*
+ */
+#define PGM_TYPE_IS_NESTED_OR_EPT(uType) \
+ ( (uType) == PGM_TYPE_NESTED_32BIT \
+ || (uType) == PGM_TYPE_NESTED_PAE \
+ || (uType) == PGM_TYPE_NESTED_AMD64 \
+ || (uType) == PGM_TYPE_EPT)
+
+
+
+/** @def PGM_HCPHYS_2_PTR
+ * Maps a HC physical page pool address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param HCPhys The HC physical address to map to a virtual one.
+ * @param ppv Where to store the virtual address. No need to cast
+ * this.
+ *
+ * @remark There is no need to assert on the result.
+ */
+#define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) pgmPoolHCPhys2Ptr(pVM, HCPhys, (void **)(ppv))
+
+/** @def PGM_GCPHYS_2_PTR_V2
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param GCPhys The GC physical address to map to a virtual one.
+ * @param ppv Where to store the virtual address. No need to cast this.
+ *
+ * @remark Use with care as we don't have so much dynamic mapping space in
+ * ring-0 on 32-bit darwin and in RC.
+ * @remark There is no need to assert on the result.
+ */
+#define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+ pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
+
+/** @def PGM_GCPHYS_2_PTR
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address to map to a virtual one.
+ * @param ppv Where to store the virtual address. No need to cast this.
+ *
+ * @remark Use with care as we don't have so much dynamic mapping space in
+ * ring-0 on 32-bit darwin and in RC.
+ * @remark There is no need to assert on the result.
+ */
+#define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2(pVM, VMMGetCpu(pVM), GCPhys, ppv)
+
+/** @def PGM_GCPHYS_2_PTR_BY_VMCPU
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param GCPhys The GC physical address to map to a virtual one.
+ * @param ppv Where to store the virtual address. No need to cast this.
+ *
+ * @remark Use with care as we don't have so much dynamic mapping space in
+ * ring-0 on 32-bit darwin and in RC.
+ * @remark There is no need to assert on the result.
+ */
+#define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2((pVCpu)->CTX_SUFF(pVM), pVCpu, GCPhys, ppv)
+
+/** @def PGM_GCPHYS_2_PTR_EX
+ * Maps an unaligned GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param GCPhys The GC physical address to map to a virtual one.
+ * @param ppv Where to store the virtual address. No need to cast this.
+ *
+ * @remark Use with care as we don't have so much dynamic mapping space in
+ * ring-0 on 32-bit darwin and in RC.
+ * @remark There is no need to assert on the result.
+ */
+#define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
+ pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
+
+/** @def PGM_DYNMAP_UNUSED_HINT
+ * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
+ * is no longer used.
+ *
+ * For best effect only apply this to the page that was mapped most recently.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pvPage The pool page.
+ */
+#define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage) do {} while (0)
+
+/** @def PGM_DYNMAP_UNUSED_HINT_VM
+ * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
+ * is no longer used.
+ *
+ * For best effect only apply this to the page that was mapped most recently.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pvPage The pool page.
+ */
+#define PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvPage) PGM_DYNMAP_UNUSED_HINT(VMMGetCpu(pVM), pvPage)
+
+
+/** @def PGM_INVL_PG
+ * Invalidates a page.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCVirt The virtual address of the page to invalidate.
+ */
+#ifdef IN_RING0
+# define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
+#elif defined(IN_RING3)
+# define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
+#else
+# error "Not IN_RING0 or IN_RING3!"
+#endif
+
+/** @def PGM_INVL_PG_ALL_VCPU
+ * Invalidates a page on all VCPUs
+ *
+ * @param pVM The cross context VM structure.
+ * @param GCVirt The virtual address of the page to invalidate.
+ */
+#ifdef IN_RING0
+# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
+#else
+# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
+#endif
+
+/** @def PGM_INVL_BIG_PG
+ * Invalidates a 4MB page directory entry.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param GCVirt The virtual address within the page directory to invalidate.
+ */
+#ifdef IN_RING0
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTlb(pVCpu)
+#else
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTlb(pVCpu)
+#endif
+
+/** @def PGM_INVL_VCPU_TLBS()
+ * Invalidates the TLBs of the specified VCPU
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+#ifdef IN_RING0
+# define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTlb(pVCpu)
+#else
+# define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTlb(pVCpu)
+#endif
+
+/** @def PGM_INVL_ALL_VCPU_TLBS()
+ * Invalidates the TLBs of all VCPUs
+ *
+ * @param pVM The cross context VM structure.
+ */
+#ifdef IN_RING0
+# define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTlbOnAllVCpus(pVM)
+#else
+# define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTlbOnAllVCpus(pVM)
+#endif
+
+
+/** @name Safer Shadow PAE PT/PTE
+ * For helping avoid misinterpreting invalid PAE/AMD64 page table entries as
+ * present.
+ *
+ * @{
+ */
+#if 1
+/**
+ * For making sure that u1Present and X86_PTE_P checks don't mistake
+ * invalid entries for present.
+ * @sa X86PTEPAE.
+ */
+typedef union PGMSHWPTEPAE
+{
+ /** Unsigned integer view */
+ X86PGPAEUINT uCareful;
+ /* No other views. */
+} PGMSHWPTEPAE;
+
+# define PGMSHWPTEPAE_IS_P(Pte) ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
+# define PGMSHWPTEPAE_IS_RW(Pte) ( !!((Pte).uCareful & X86_PTE_RW))
+# define PGMSHWPTEPAE_IS_US(Pte) ( !!((Pte).uCareful & X86_PTE_US))
+# define PGMSHWPTEPAE_IS_A(Pte) ( !!((Pte).uCareful & X86_PTE_A))
+# define PGMSHWPTEPAE_IS_D(Pte) ( !!((Pte).uCareful & X86_PTE_D))
+# define PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte) ( !!((Pte).uCareful & PGM_PTFLAGS_TRACK_DIRTY) )
+# define PGMSHWPTEPAE_IS_P_RW(Pte) ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_RW | X86_PTE_PAE_MBZ_MASK_NX)) == (X86_PTE_P | X86_PTE_RW) )
+# define PGMSHWPTEPAE_GET_LOG(Pte) ( (Pte).uCareful )
+# define PGMSHWPTEPAE_GET_HCPHYS(Pte) ( (Pte).uCareful & X86_PTE_PAE_PG_MASK )
+# define PGMSHWPTEPAE_GET_U(Pte) ( (Pte).uCareful ) /**< Use with care. */
+# define PGMSHWPTEPAE_SET(Pte, uVal) do { (Pte).uCareful = (uVal); } while (0)
+# define PGMSHWPTEPAE_SET2(Pte, Pte2) do { (Pte).uCareful = (Pte2).uCareful; } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET(Pte, uVal) do { ASMAtomicWriteU64(&(Pte).uCareful, (uVal)); } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).uCareful, (Pte2).uCareful); } while (0)
+# define PGMSHWPTEPAE_SET_RO(Pte) do { (Pte).uCareful &= ~(X86PGPAEUINT)X86_PTE_RW; } while (0)
+# define PGMSHWPTEPAE_SET_RW(Pte) do { (Pte).uCareful |= X86_PTE_RW; } while (0)
+
+/**
+ * For making sure that u1Present and X86_PTE_P checks don't mistake
+ * invalid entries for present.
+ * @sa X86PTPAE.
+ */
+typedef struct PGMSHWPTPAE
+{
+ PGMSHWPTEPAE a[X86_PG_PAE_ENTRIES];
+} PGMSHWPTPAE;
+
+#else
+typedef X86PTEPAE PGMSHWPTEPAE;
+typedef X86PTPAE PGMSHWPTPAE;
+# define PGMSHWPTEPAE_IS_P(Pte) ( (Pte).n.u1Present )
+# define PGMSHWPTEPAE_IS_RW(Pte) ( (Pte).n.u1Write )
+# define PGMSHWPTEPAE_IS_US(Pte) ( (Pte).n.u1User )
+# define PGMSHWPTEPAE_IS_A(Pte) ( (Pte).n.u1Accessed )
+# define PGMSHWPTEPAE_IS_D(Pte) ( (Pte).n.u1Dirty )
+# define PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
+# define PGMSHWPTEPAE_IS_P_RW(Pte) ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
+# define PGMSHWPTEPAE_GET_LOG(Pte) ( (Pte).u )
+# define PGMSHWPTEPAE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PAE_PG_MASK )
+# define PGMSHWPTEPAE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
+# define PGMSHWPTEPAE_SET(Pte, uVal) do { (Pte).u = (uVal); } while (0)
+# define PGMSHWPTEPAE_SET2(Pte, Pte2) do { (Pte).u = (Pte2).u; } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET(Pte, uVal) do { ASMAtomicWriteU64(&(Pte).u, (uVal)); } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
+# define PGMSHWPTEPAE_SET_RO(Pte) do { (Pte).u &= ~(X86PGPAEUINT)X86_PTE_RW; } while (0)
+# define PGMSHWPTEPAE_SET_RW(Pte) do { (Pte).u |= X86_PTE_RW; } while (0)
+
+#endif
+
+/** Pointer to a shadow PAE PTE. */
+typedef PGMSHWPTEPAE *PPGMSHWPTEPAE;
+/** Pointer to a const shadow PAE PTE. */
+typedef PGMSHWPTEPAE const *PCPGMSHWPTEPAE;
+
+/** Pointer to a shadow PAE page table. */
+typedef PGMSHWPTPAE *PPGMSHWPTPAE;
+/** Pointer to a const shadow PAE page table. */
+typedef PGMSHWPTPAE const *PCPGMSHWPTPAE;
+/** @} */
+
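The "careful" view above only treats an entry as present when the P bit is set and all must-be-zero bits are clear, so invalid markers cannot masquerade as present entries. A tiny standalone sketch of that check (the MY_PTE_* masks are invented placeholders for the X86_PTE_* constants):

#include <stdint.h>
#include <stdbool.h>

#define MY_PTE_P        UINT64_C(0x0000000000000001) /* present bit */
#define MY_PTE_MBZ_MASK UINT64_C(0x7ff0000000000000) /* assumed must-be-zero bits for this example */

/* An entry only counts as present when P is set AND every reserved/MBZ bit
   is clear -- this is what keeps invalid markers from looking present. */
static bool PteIsReallyPresent(uint64_t uPte)
{
    return (uPte & (MY_PTE_P | MY_PTE_MBZ_MASK)) == MY_PTE_P;
}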
+
+/** The physical access handler type handle count (power of two). */
+#define PGMPHYSHANDLERTYPE_COUNT 0x20
+/** Mask for getting the array index from an access handler type handle.
+ * The other handle bits are random and non-zero to avoid mixups due to zero
+ * initialized fields. */
+#define PGMPHYSHANDLERTYPE_IDX_MASK 0x1f
+
+/**
+ * Physical page access handler type registration, ring-0 part.
+ */
+typedef struct PGMPHYSHANDLERTYPEINTR0
+{
+ /** The handle value for verification. */
+ PGMPHYSHANDLERTYPE hType;
+ /** The kind of accesses we're handling. */
+ PGMPHYSHANDLERKIND enmKind;
+ /** The PGM_PAGE_HNDL_PHYS_STATE_XXX value corresponding to enmKind. */
+ uint8_t uState;
+ /** Whether to keep the PGM lock when calling the handler.
+ * @sa PGMPHYSHANDLER_F_KEEP_PGM_LOCK */
+ bool fKeepPgmLock;
+ /** Set if this is registered by a device instance and uUser should be
+ * translated from a device instance ID to a pointer.
+ * @sa PGMPHYSHANDLER_F_R0_DEVINS_IDX */
+ bool fRing0DevInsIdx;
+ /** See PGMPHYSHANDLER_F_NOT_IN_HM. */
+ bool fNotInHm : 1;
+ /** Pointer to the ring-0 callback function. */
+ R0PTRTYPE(PFNPGMPHYSHANDLER) pfnHandler;
+ /** Pointer to the ring-0 callback function for \#PFs, can be NULL. */
+ R0PTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandler;
+ /** Description / Name. For easing debugging. */
+ R0PTRTYPE(const char *) pszDesc;
+} PGMPHYSHANDLERTYPEINTR0;
+/** Pointer to a physical access handler type registration. */
+typedef PGMPHYSHANDLERTYPEINTR0 *PPGMPHYSHANDLERTYPEINTR0;
+
+/**
+ * Physical page access handler type registration, shared/ring-3 part.
+ */
+typedef struct PGMPHYSHANDLERTYPEINTR3
+{
+ /** The handle value for verification. */
+ PGMPHYSHANDLERTYPE hType;
+ /** The kind of accesses we're handling. */
+ PGMPHYSHANDLERKIND enmKind;
+ /** The PGM_PAGE_HNDL_PHYS_STATE_XXX value corresponding to enmKind. */
+ uint8_t uState;
+ /** Whether to keep the PGM lock when calling the handler.
+ * @sa PGMPHYSHANDLER_F_KEEP_PGM_LOCK */
+ bool fKeepPgmLock;
+ /** Set if this is registered by a device instance and uUser should be
+ * translated from a device instance ID to a pointer.
+ * @sa PGMPHYSHANDLER_F_R0_DEVINS_IDX */
+ bool fRing0DevInsIdx;
+ /** Set by ring-0 if the handler is ring-0 enabled (for debug). */
+ bool fRing0Enabled : 1;
+ /** See PGMPHYSHANDLER_F_NOT_IN_HM. */
+ bool fNotInHm : 1;
+ /** Pointer to the ring-3 callback function. */
+ R3PTRTYPE(PFNPGMPHYSHANDLER) pfnHandler;
+ /** Description / Name. For easing debugging. */
+ R3PTRTYPE(const char *) pszDesc;
+} PGMPHYSHANDLERTYPEINTR3;
+/** Pointer to a physical access handler type registration. */
+typedef PGMPHYSHANDLERTYPEINTR3 *PPGMPHYSHANDLERTYPEINTR3;
+
+/** Pointer to a physical access handler type record for the current context. */
+typedef CTX_SUFF(PPGMPHYSHANDLERTYPEINT) PPGMPHYSHANDLERTYPEINT;
+/** Pointer to a const physical access handler type record for the current context. */
+typedef CTX_SUFF(PGMPHYSHANDLERTYPEINT) const *PCPGMPHYSHANDLERTYPEINT;
+/** Dummy physical access handler type record. */
+extern CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType;
+
+
+/**
+ * Physical page access handler structure.
+ *
+ * This is used to keep track of physical address ranges
+ * which are being monitored in some kind of way.
+ */
+typedef struct PGMPHYSHANDLER
+{
+ /** @name Tree stuff.
+ * @{ */
+ /** First address. */
+ RTGCPHYS Key;
+ /** Last address. */
+ RTGCPHYS KeyLast;
+ uint32_t idxLeft;
+ uint32_t idxRight;
+ uint8_t cHeight;
+ /** @} */
+ uint8_t abPadding[3];
+ /** Number of pages to update. */
+ uint32_t cPages;
+ /** Set if we have pages that have been aliased. */
+ uint32_t cAliasedPages;
+ /** Set if we have pages that have temporarily been disabled. */
+ uint32_t cTmpOffPages;
+ /** Registered handler type handle.
+ * @note Marked volatile to prevent re-reading after validation. */
+ PGMPHYSHANDLERTYPE volatile hType;
+ /** User argument for the handlers. */
+ uint64_t uUser;
+ /** Description / Name. For easing debugging. */
+ R3PTRTYPE(const char *) pszDesc;
+ /** Profiling of this handler.
+ * @note VBOX_WITH_STATISTICS only, but included to keep structure stable. */
+ STAMPROFILE Stat;
+} PGMPHYSHANDLER;
+AssertCompileSize(PGMPHYSHANDLER, 12*8);
+/** Pointer to a physical page access handler structure. */
+typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
+
+/**
+ * Gets the type record for a physical handler (no reference added).
+ * @returns PCPGMPHYSHANDLERTYPEINT, can be NULL
+ * @param a_pVM The cross context VM structure.
+ * @param a_pPhysHandler Pointer to the physical handler structure
+ * (PGMPHYSHANDLER).
+ */
+#define PGMPHYSHANDLER_GET_TYPE(a_pVM, a_pPhysHandler) \
+ pgmHandlerPhysicalTypeHandleToPtr(a_pVM, (a_pPhysHandler) ? (a_pPhysHandler)->hType : NIL_PGMPHYSHANDLERTYPE)
+
+/**
+ * Gets the type record for a physical handler, never returns NULL.
+ *
+ * @returns PCPGMPHYSHANDLERTYPEINT, never NULL.
+ * @param a_pVM The cross context VM structure.
+ * @param a_pPhysHandler Pointer to the physical handler structure
+ * (PGMPHYSHANDLER).
+ */
+#define PGMPHYSHANDLER_GET_TYPE_NO_NULL(a_pVM, a_pPhysHandler) \
+ pgmHandlerPhysicalTypeHandleToPtr2(a_pVM, (a_pPhysHandler) ? (a_pPhysHandler)->hType : NIL_PGMPHYSHANDLERTYPE)
+
+/** Physical access handler allocator. */
+typedef RTCHardAvlTreeSlabAllocator<PGMPHYSHANDLER> PGMPHYSHANDLERALLOCATOR;
+
+/** Physical access handler tree. */
+typedef RTCHardAvlRangeTree<PGMPHYSHANDLER, RTGCPHYS> PGMPHYSHANDLERTREE;
+/** Pointer to a physical access handler tree. */
+typedef PGMPHYSHANDLERTREE *PPGMPHYSHANDLERTREE;
+
+
+/**
+ * A Physical Guest Page tracking structure.
+ *
+ * The format of this structure is complicated because we have to fit a lot
+ * of information into as few bits as possible. The format is also subject
+ * to change (there is one coming up soon), which means that we'll be
+ * using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
+ * accesses to the structure.
+ */
+typedef union PGMPAGE
+{
+ /** Structured view. */
+ struct
+ {
+ /** 1:0 - The physical handler state (PGM_PAGE_HNDL_PHYS_STATE_*). */
+ uint64_t u2HandlerPhysStateY : 2;
+ /** 2 - Don't apply the physical handler in HM mode (nested APIC hack). */
+ uint64_t fHandlerPhysNotInHm : 1;
+ /** 3 - Flag indicating that a write monitored page was written to
+ * when set. */
+ uint64_t fWrittenToY : 1;
+ /** 7:4 - Unused. */
+ uint64_t u2Unused0 : 4;
+ /** 9:8 - Paging structure needed to map the page
+ * (PGM_PAGE_PDE_TYPE_*). */
+ uint64_t u2PDETypeY : 2;
+ /** 11:10 - NEM state bits. */
+ uint64_t u2NemStateY : 2;
+ /** 47:12 - The host physical frame number (shift left to get the
+ * address). */
+ uint64_t HCPhysFN : 36;
+ /** 50:48 - The page state. */
+ uint64_t uStateY : 3;
+ /** 53:51 - The page type (PGMPAGETYPE). */
+ uint64_t uTypeY : 3;
+ /** 63:54 - PTE index for usage tracking (page pool). */
+ uint64_t u10PteIdx : 10;
+
+ /** The GMM page ID.
+ * @remarks In the current implementation, MMIO2 pages and pages aliased to
+ * MMIO2 pages will be exploiting this field to calculate the
+ * ring-3 mapping address corresponding to the page.
+ * Later we may consider including MMIO2 management into GMM. */
+ uint32_t idPage;
+ /** Usage tracking (page pool). */
+ uint16_t u16TrackingY;
+ /** The number of read locks on this page. */
+ uint8_t cReadLocksY;
+ /** The number of write locks on this page. */
+ uint8_t cWriteLocksY;
+ } s;
+
+ /** 64-bit integer view. */
+ uint64_t au64[2];
+ /** 32-bit view. */
+ uint32_t au32[4];
+ /** 16-bit view. */
+ uint16_t au16[8];
+ /** 8-bit view. */
+ uint8_t au8[16];
+} PGMPAGE;
+AssertCompileSize(PGMPAGE, 16);
+/** Pointer to a physical guest page. */
+typedef PGMPAGE *PPGMPAGE;
+/** Pointer to a const physical guest page. */
+typedef const PGMPAGE *PCPGMPAGE;
+/** Pointer to a physical guest page pointer. */
+typedef PPGMPAGE *PPPGMPAGE;
+
+
+/**
+ * Clears the page structure.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_CLEAR(a_pPage) \
+ do { \
+ (a_pPage)->au64[0] = 0; \
+ (a_pPage)->au64[1] = 0; \
+ } while (0)
+
+/**
+ * Initializes the page structure.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_HCPhys The host physical address of the page.
+ * @param a_idPage The (GMM) page ID of the page.
+ * @param a_uType The page type (PGMPAGETYPE).
+ * @param a_uState The page state (PGM_PAGE_STATE_XXX).
+ */
+#define PGM_PAGE_INIT(a_pPage, a_HCPhys, a_idPage, a_uType, a_uState) \
+ do { \
+ RTHCPHYS SetHCPhysTmp = (a_HCPhys); \
+ AssertFatalMsg(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000)), ("%RHp\n", SetHCPhysTmp)); \
+ (a_pPage)->au64[0] = SetHCPhysTmp; \
+ (a_pPage)->au64[1] = 0; \
+ (a_pPage)->s.idPage = (a_idPage); \
+ (a_pPage)->s.uStateY = (a_uState); \
+ (a_pPage)->s.uTypeY = (a_uType); \
+ } while (0)
+
+/**
+ * Initializes the page structure of a ZERO page.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_pVM The VM handle (for getting the zero page address).
+ * @param a_uType The page type (PGMPAGETYPE).
+ */
+#define PGM_PAGE_INIT_ZERO(a_pPage, a_pVM, a_uType) \
+ PGM_PAGE_INIT((a_pPage), (a_pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (a_uType), PGM_PAGE_STATE_ZERO)
+
+
+/** @name The Page state, PGMPAGE::uStateY.
+ * @{ */
+/** The zero page.
+ * This is a per-VM page that's never ever mapped writable. */
+#define PGM_PAGE_STATE_ZERO 0U
+/** An allocated page.
+ * This is a per-VM page allocated from the page pool (or wherever
+ * we get MMIO2 pages from if the type is MMIO2).
+ */
+#define PGM_PAGE_STATE_ALLOCATED 1U
+/** An allocated page that's being monitored for writes.
+ * The shadow page table mappings are read-only. When a write occurs, the
+ * fWrittenTo member is set, the page is remapped as read-write and the state
+ * moved back to allocated. */
+#define PGM_PAGE_STATE_WRITE_MONITORED 2U
+/** The page is shared, aka. copy-on-write.
+ * This is a page that's shared with other VMs. */
+#define PGM_PAGE_STATE_SHARED 3U
+/** The page is ballooned, so no longer available for this VM. */
+#define PGM_PAGE_STATE_BALLOONED 4U
+/** @} */
+
+
+/** Asserts lock ownership in some of the PGM_PAGE_XXX macros. */
+#if defined(VBOX_STRICT) && 0 /** @todo triggers in pgmRZDynMapGCPageV2Inlined */
+# define PGM_PAGE_ASSERT_LOCK(a_pVM) PGM_LOCK_ASSERT_OWNER(a_pVM)
+#else
+# define PGM_PAGE_ASSERT_LOCK(a_pVM) do { } while (0)
+#endif
+
+/**
+ * Gets the page state.
+ * @returns page state (PGM_PAGE_STATE_*).
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ *
+ * @remarks See PGM_PAGE_GET_HCPHYS_NA for remarks about GCC and strict
+ * builds.
+ */
+#define PGM_PAGE_GET_STATE_NA(a_pPage) ( (a_pPage)->s.uStateY )
+#if defined(__GNUC__) && defined(VBOX_STRICT)
+# define PGM_PAGE_GET_STATE(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_STATE_NA(a_pPage); })
+#else
+# define PGM_PAGE_GET_STATE PGM_PAGE_GET_STATE_NA
+#endif
+
+/**
+ * Sets the page state.
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_uState The new page state.
+ */
+#define PGM_PAGE_SET_STATE(a_pVM, a_pPage, a_uState) \
+ do { (a_pPage)->s.uStateY = (a_uState); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+
+/**
+ * Gets the host physical address of the guest page.
+ * @returns host physical address (RTHCPHYS).
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ *
+ * @remarks In strict builds on gcc platforms, this macro will make some ugly
+ * assumption about a valid pVM variable/parameter being in the
+ * current context. It will use this pVM variable to assert that the
+ * PGM lock is held. Use the PGM_PAGE_GET_HCPHYS_NA in contexts where
+ * pVM is not around.
+ */
+#if 0
+# define PGM_PAGE_GET_HCPHYS_NA(a_pPage) ( (a_pPage)->s.HCPhysFN << 12 )
+# define PGM_PAGE_GET_HCPHYS PGM_PAGE_GET_HCPHYS_NA
+#else
+# define PGM_PAGE_GET_HCPHYS_NA(a_pPage) ( (a_pPage)->au64[0] & UINT64_C(0x0000fffffffff000) )
+# if defined(__GNUC__) && defined(VBOX_STRICT)
+# define PGM_PAGE_GET_HCPHYS(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_HCPHYS_NA(a_pPage); })
+# else
+# define PGM_PAGE_GET_HCPHYS PGM_PAGE_GET_HCPHYS_NA
+# endif
+#endif
+
+/**
+ * Sets the host physical address of the guest page.
+ *
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_HCPhys The new host physical address.
+ */
+#define PGM_PAGE_SET_HCPHYS(a_pVM, a_pPage, a_HCPhys) \
+ do { \
+ RTHCPHYS const SetHCPhysTmp = (a_HCPhys); \
+ AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
+ (a_pPage)->s.HCPhysFN = SetHCPhysTmp >> 12; \
+ PGM_PAGE_ASSERT_LOCK(a_pVM); \
+ } while (0)
+
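The getter/setter pair above packs a 4KiB-aligned host physical address into bits 47:12 of the first 64-bit word. A minimal sketch of that pack/unpack step (invented names, same mask value):

#include <stdint.h>
#include <assert.h>

#define MY_HCPHYS_MASK UINT64_C(0x0000fffffffff000) /* bits 47:12, same mask as above */

/* Pack a 4KiB-aligned host physical address into bits 47:12 of a tracking
   word, leaving the other bits untouched, and read it back again. */
static uint64_t PackHCPhys(uint64_t uWord, uint64_t HCPhys)
{
    assert(!(HCPhys & ~MY_HCPHYS_MASK)); /* must be page aligned and below 2^48 */
    return (uWord & ~MY_HCPHYS_MASK) | HCPhys;
}

static uint64_t UnpackHCPhys(uint64_t uWord)
{
    return uWord & MY_HCPHYS_MASK;
}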
+/**
+ * Get the Page ID.
+ * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_PAGEID(a_pPage) ( (uint32_t)(a_pPage)->s.idPage )
+
+/**
+ * Sets the Page ID.
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_idPage The new page ID.
+ */
+#define PGM_PAGE_SET_PAGEID(a_pVM, a_pPage, a_idPage) \
+ do { \
+ (a_pPage)->s.idPage = (a_idPage); \
+ PGM_PAGE_ASSERT_LOCK(a_pVM); \
+ } while (0)
+
+/**
+ * Get the Chunk ID.
+ * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_CHUNKID(a_pPage) ( PGM_PAGE_GET_PAGEID(a_pPage) >> GMM_CHUNKID_SHIFT )
+
+/**
+ * Get the index of the page within the allocation chunk.
+ * @returns The page index.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_PAGE_IN_CHUNK(a_pPage) ( PGM_PAGE_GET_PAGEID(a_pPage) & GMM_PAGEID_IDX_MASK )
+
+/**
+ * Gets the page type.
+ * @returns The page type.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ *
+ * @remarks See PGM_PAGE_GET_HCPHYS_NA for remarks about GCC and strict
+ * builds.
+ */
+#define PGM_PAGE_GET_TYPE_NA(a_pPage) ( (a_pPage)->s.uTypeY )
+#if defined(__GNUC__) && defined(VBOX_STRICT)
+# define PGM_PAGE_GET_TYPE(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_TYPE_NA(a_pPage); })
+#else
+# define PGM_PAGE_GET_TYPE PGM_PAGE_GET_TYPE_NA
+#endif
+
+/**
+ * Sets the page type.
+ *
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_enmType The new page type (PGMPAGETYPE).
+ */
+#define PGM_PAGE_SET_TYPE(a_pVM, a_pPage, a_enmType) \
+ do { (a_pPage)->s.uTypeY = (a_enmType); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/**
+ * Gets the page table index
+ * @returns The page table index.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_PTE_INDEX(a_pPage) ( (a_pPage)->s.u10PteIdx )
+
+/**
+ * Sets the page table index.
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_iPte New page table index.
+ */
+#define PGM_PAGE_SET_PTE_INDEX(a_pVM, a_pPage, a_iPte) \
+ do { (a_pPage)->s.u10PteIdx = (a_iPte); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/**
+ * Checks if the page is marked for MMIO, no MMIO2 aliasing.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_MMIO(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO )
+
+/**
+ * Checks if the page is marked for MMIO, including both aliases.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_MMIO_OR_ALIAS(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO2_ALIAS_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO \
+ )
+
+/**
+ * Checks if the page is marked for MMIO, including special aliases.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO )
+
+/**
+ * Checks if the page is a special aliased MMIO page.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO )
+
+/**
+ * Checks if the page is backed by the ZERO page.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_ZERO(a_pPage) ( (a_pPage)->s.uStateY == PGM_PAGE_STATE_ZERO )
+
+/**
+ * Checks if the page is backed by a SHARED page.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_SHARED(a_pPage) ( (a_pPage)->s.uStateY == PGM_PAGE_STATE_SHARED )
+
+/**
+ * Checks if the page is ballooned.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_BALLOONED(a_pPage) ( (a_pPage)->s.uStateY == PGM_PAGE_STATE_BALLOONED )
+
+/**
+ * Checks if the page is allocated.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_ALLOCATED(a_pPage) ( (a_pPage)->s.uStateY == PGM_PAGE_STATE_ALLOCATED )
+
+/**
+ * Marks the page as written to (for GMM change monitoring).
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_SET_WRITTEN_TO(a_pVM, a_pPage) \
+ do { (a_pPage)->s.fWrittenToY = 1; PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/**
+ * Clears the written-to indicator.
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_CLEAR_WRITTEN_TO(a_pVM, a_pPage) \
+ do { (a_pPage)->s.fWrittenToY = 0; PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/**
+ * Checks if the page was marked as written-to.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_WRITTEN_TO(a_pPage) ( (a_pPage)->s.fWrittenToY )
+
+
+/** @name PT usage values (PGMPAGE::u2PDETypeY).
+ *
+ * @{ */
+/** Either as a PT or PDE. */
+#define PGM_PAGE_PDE_TYPE_DONTCARE 0
+/** Must use a page table to map the range. */
+#define PGM_PAGE_PDE_TYPE_PT 1
+/** Can use a page directory entry to map the continuous range. */
+#define PGM_PAGE_PDE_TYPE_PDE 2
+/** Can use a page directory entry to map the continuous range - temporarily disabled (by page monitoring). */
+#define PGM_PAGE_PDE_TYPE_PDE_DISABLED 3
+/** @} */
+
+/**
+ * Set the PDE type of the page
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_uType PGM_PAGE_PDE_TYPE_*.
+ */
+#define PGM_PAGE_SET_PDE_TYPE(a_pVM, a_pPage, a_uType) \
+ do { (a_pPage)->s.u2PDETypeY = (a_uType); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/**
+ * Gets the PDE type of the page.
+ * @returns PGM_PAGE_PDE_TYPE_* value.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_PDE_TYPE(a_pPage) ( (a_pPage)->s.u2PDETypeY )
+
+/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
+ *
+ * @remarks The values are assigned in order of priority, so we can calculate
+ * the correct state for a page with different handlers installed.
+ * @{ */
+/** No handler installed. */
+#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
+/** Monitoring is temporarily disabled. */
+#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
+/** Write access is monitored. */
+#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
+/** All access is monitored. */
+#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
+/** @} */
+
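Because the state values above are ordered by priority, the effective state of a page with several handlers is simply the maximum of the individual states. A trivial sketch of that combination rule (invented enum names):

#include <stdint.h>

/* Handler state values ordered by priority, mirroring the defines above. */
enum { MY_HNDL_NONE = 0, MY_HNDL_DISABLED = 1, MY_HNDL_WRITE = 2, MY_HNDL_ALL = 3 };

/* The priority ordering means combining two handlers' states is just max(). */
static uint8_t CombineHandlerStates(uint8_t uState1, uint8_t uState2)
{
    return uState1 >= uState2 ? uState1 : uState2;
}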
+/**
+ * Gets the physical access handler state of a page.
+ * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) ( (a_pPage)->s.u2HandlerPhysStateY )
+
+/**
+ * Sets the physical access handler state of a page.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_uState The new state value.
+ * @param a_fNotInHm The PGMPHYSHANDLER_F_NOT_IN_HM bit.
+ */
+#define PGM_PAGE_SET_HNDL_PHYS_STATE(a_pPage, a_uState, a_fNotInHm) \
+ do { (a_pPage)->s.u2HandlerPhysStateY = (a_uState); (a_pPage)->s.fHandlerPhysNotInHm = (a_fNotInHm); } while (0)
+
+/**
+ * Sets the physical access handler state of a page.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_uState The new state value.
+ */
+#define PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(a_pPage, a_uState) \
+ do { (a_pPage)->s.u2HandlerPhysStateY = (a_uState); } while (0)
+
+/**
+ * Checks if the page has any physical access handlers, including temporarily disabled ones.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(a_pPage) \
+ ( PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )
+
+/**
+ * Checks if the page has any active physical access handlers.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(a_pPage) \
+ ( PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
+
+/**
+ * Checks if the page has any access handlers, including temporarily disabled ones.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_HAS_ANY_HANDLERS(a_pPage) \
+ ( PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )
+
+/**
+ * Checks if the page has any active access handlers.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_HAS_ACTIVE_HANDLERS(a_pPage) \
+ (PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
+
+/**
+ * Checks if the page has any active access handlers catching all accesses.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(a_pPage) \
+ ( PGM_PAGE_GET_HNDL_PHYS_STATE(a_pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL )
+
+/** @def PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM
+ * Checks if the physical handlers of the page should be ignored in shadow page
+ * tables and such.
+ * @returns true/false
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(a_pPage) ((a_pPage)->s.fHandlerPhysNotInHm)
+
+/** @def PGM_PAGE_GET_TRACKING
+ * Gets the packed shadow page pool tracking data associated with a guest page.
+ * @returns uint16_t containing the data.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_TRACKING_NA(a_pPage) ( (a_pPage)->s.u16TrackingY )
+#if defined(__GNUC__) && defined(VBOX_STRICT)
+# define PGM_PAGE_GET_TRACKING(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_TRACKING_NA(a_pPage); })
+#else
+# define PGM_PAGE_GET_TRACKING PGM_PAGE_GET_TRACKING_NA
+#endif
+
+/** @def PGM_PAGE_SET_TRACKING
+ * Sets the packed shadow page pool tracking data associated with a guest page.
+ * @param a_pVM The VM handle, only used for lock ownership assertions.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_u16TrackingData The tracking data to store.
+ */
+#define PGM_PAGE_SET_TRACKING(a_pVM, a_pPage, a_u16TrackingData) \
+ do { (a_pPage)->s.u16TrackingY = (a_u16TrackingData); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
+
+/** @def PGM_PAGE_GET_TD_CREFS
+ * Gets the @a cRefs tracking data member.
+ * @returns cRefs.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_TD_CREFS(a_pPage) \
+ ((PGM_PAGE_GET_TRACKING(a_pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)
+#define PGM_PAGE_GET_TD_CREFS_NA(a_pPage) \
+ ((PGM_PAGE_GET_TRACKING_NA(a_pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)
+
+/** @def PGM_PAGE_GET_TD_IDX
+ * Gets the @a idx tracking data member.
+ * @returns idx.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_TD_IDX(a_pPage) \
+ ((PGM_PAGE_GET_TRACKING(a_pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
+#define PGM_PAGE_GET_TD_IDX_NA(a_pPage) \
+ ((PGM_PAGE_GET_TRACKING_NA(a_pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
+
+
+/** Max number of locks on a page. */
+#define PGM_PAGE_MAX_LOCKS UINT8_C(254)
+
+/** Get the read lock count.
+ * @returns count.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_READ_LOCKS(a_pPage) ( (a_pPage)->s.cReadLocksY )
+
+/** Get the write lock count.
+ * @returns count.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_WRITE_LOCKS(a_pPage) ( (a_pPage)->s.cWriteLocksY )
+
+/** Decrement the read lock counter.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_DEC_READ_LOCKS(a_pPage) do { --(a_pPage)->s.cReadLocksY; } while (0)
+
+/** Decrement the write lock counter.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_DEC_WRITE_LOCKS(a_pPage) do { --(a_pPage)->s.cWriteLocksY; } while (0)
+
+/** Increment the read lock counter.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_INC_READ_LOCKS(a_pPage) do { ++(a_pPage)->s.cReadLocksY; } while (0)
+
+/** Increment the write lock counter.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_INC_WRITE_LOCKS(a_pPage) do { ++(a_pPage)->s.cWriteLocksY; } while (0)
+
+
+/** Gets the NEM state.
+ * @returns NEM state value (two bits).
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_GET_NEM_STATE(a_pPage) ((a_pPage)->s.u2NemStateY)
+
+/** Sets the NEM state.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ * @param a_u2State The NEM state value (specific to NEM impl.).
+ */
+#define PGM_PAGE_SET_NEM_STATE(a_pPage, a_u2State) \
+ do { Assert((a_u2State) < 4); (a_pPage)->s.u2NemStateY = (a_u2State); } while (0)
+
+
+#if 0
+/** Enables sanity checking of write monitoring using CRC-32. */
+# define PGMLIVESAVERAMPAGE_WITH_CRC32
+#endif
+
+/**
+ * Per page live save tracking data.
+ */
+typedef struct PGMLIVESAVERAMPAGE
+{
+ /** Number of times it has been dirtied. */
+ uint32_t cDirtied : 24;
+ /** Whether it is currently dirty. */
+ uint32_t fDirty : 1;
+ /** Ignore the page.
+ * This is used for pages that have been MMIO, MMIO2 or ROM pages once. We will
+ * deal with these after pausing the VM and DevPCI has said its bit about
+ * remappings. */
+ uint32_t fIgnore : 1;
+ /** Was a ZERO page last time around. */
+ uint32_t fZero : 1;
+ /** Was a SHARED page last time around. */
+ uint32_t fShared : 1;
+ /** Whether the page is/was write monitored in a previous pass. */
+ uint32_t fWriteMonitored : 1;
+ /** Whether the page is/was write monitored earlier in this pass. */
+ uint32_t fWriteMonitoredJustNow : 1;
+ /** Bits reserved for future use. */
+ uint32_t u2Reserved : 2;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+ /** CRC-32 for the page. This is for internal consistency checks. */
+ uint32_t u32Crc;
+#endif
+} PGMLIVESAVERAMPAGE;
+#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
+AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
+#else
+AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
+#endif
+/** Pointer to the per page live save tracking data. */
+typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;
+
+/** The max value of PGMLIVESAVERAMPAGE::cDirtied. */
+#define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
+
+
+/**
+ * RAM range for GC Phys to HC Phys conversion.
+ *
+ * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
+ * conversions too, but we'll let MM handle that for now.
+ *
+ * This structure is used by linked lists in both GC and HC.
+ */
+typedef struct PGMRAMRANGE
+{
+ /** Start of the range. Page aligned. */
+ RTGCPHYS GCPhys;
+ /** Size of the range. (Page aligned of course). */
+ RTGCPHYS cb;
+ /** Pointer to the next RAM range - for R3. */
+ R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;
+ /** Pointer to the next RAM range - for R0. */
+ R0PTRTYPE(struct PGMRAMRANGE *) pNextR0;
+ /** PGM_RAM_RANGE_FLAGS_* flags. */
+ uint32_t fFlags;
+ /** NEM specific info, UINT32_MAX if not used. */
+ uint32_t uNemRange;
+ /** Last address in the range (inclusive). Page aligned (-1). */
+ RTGCPHYS GCPhysLast;
+ /** Start of the HC mapping of the range. This is only used for MMIO2 and in NEM mode. */
+ R3PTRTYPE(void *) pvR3;
+ /** Live save per page tracking data. */
+ R3PTRTYPE(PPGMLIVESAVERAMPAGE) paLSPages;
+ /** The range description. */
+ R3PTRTYPE(const char *) pszDesc;
+ /** Pointer to self - R0 pointer. */
+ R0PTRTYPE(struct PGMRAMRANGE *) pSelfR0;
+
+ /** Pointer to the left search tree node - ring-3 context. */
+ R3PTRTYPE(struct PGMRAMRANGE *) pLeftR3;
+ /** Pointer to the right search tree node - ring-3 context. */
+ R3PTRTYPE(struct PGMRAMRANGE *) pRightR3;
+ /** Pointer to the left search tree node - ring-0 context. */
+ R0PTRTYPE(struct PGMRAMRANGE *) pLeftR0;
+ /** Pointer to the right search tree node - ring-0 context. */
+ R0PTRTYPE(struct PGMRAMRANGE *) pRightR0;
+
+ /** Padding to make aPages aligned on sizeof(PGMPAGE). */
+#if HC_ARCH_BITS == 32
+ uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 2 : 0];
+#endif
+ /** Array of physical guest page tracking structures.
+ * @note Number of entries is PGMRAMRANGE::cb / GUEST_PAGE_SIZE. */
+ PGMPAGE aPages[1];
+} PGMRAMRANGE;
+/** Pointer to RAM range for GC Phys to HC Phys conversion. */
+typedef PGMRAMRANGE *PPGMRAMRANGE;
+
+/** @name PGMRAMRANGE::fFlags
+ * @{ */
+/** The RAM range is floating around as an independent guest mapping. */
+#define PGM_RAM_RANGE_FLAGS_FLOATING RT_BIT(20)
+/** Ad hoc RAM range for a ROM mapping. */
+#define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM RT_BIT(21)
+/** Ad hoc RAM range for an MMIO mapping. */
+#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO RT_BIT(22)
+/** Ad hoc RAM range for an MMIO2 or pre-registered MMIO mapping. */
+#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX RT_BIT(23)
+/** @} */
+
+/** Tests if a RAM range is an ad hoc one or not.
+ * @returns true/false.
+ * @param pRam The RAM range.
+ */
+#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
+ (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX) ) )
+
+/** The number of entries in the RAM range TLBs (there is one for each
+ * context). Must be a power of two. */
+#define PGM_RAMRANGE_TLB_ENTRIES 8
+
+/**
+ * Calculates the RAM range TLB index for the physical address.
+ *
+ * @returns RAM range TLB index.
+ * @param a_GCPhys The guest physical address.
+ */
+#define PGM_RAMRANGE_TLB_IDX(a_GCPhys) ( ((a_GCPhys) >> 20) & (PGM_RAMRANGE_TLB_ENTRIES - 1) )
+
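The RAM-range TLB is a small direct-mapped cache indexed by bits 20 and up of the physical address, exactly as PGM_RAMRANGE_TLB_IDX computes. A rough sketch of the lookup side (invented MYRANGE/TlbLookup names; the miss path that walks the full range set is omitted):

#include <stdint.h>
#include <stddef.h>

#define MY_TLB_ENTRIES 8u   /* must be a power of two, same as PGM_RAMRANGE_TLB_ENTRIES */

typedef struct MYRANGE { uint64_t GCPhysFirst, GCPhysLast; } MYRANGE;

/* Direct-mapped lookup: hash bits 20+ of the address into a small table and
   check that the cached range really covers the address (last is inclusive). */
static MYRANGE *TlbLookup(MYRANGE *apTlb[MY_TLB_ENTRIES], uint64_t GCPhys)
{
    unsigned const idx = (unsigned)((GCPhys >> 20) & (MY_TLB_ENTRIES - 1));
    MYRANGE *pRange = apTlb[idx];
    if (pRange && GCPhys >= pRange->GCPhysFirst && GCPhys <= pRange->GCPhysLast)
        return pRange;
    return NULL; /* miss: the caller walks the range list/tree and refills apTlb[idx] */
}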
+/**
+ * Calculates the ring-3 address for a_GCPhysPage if the RAM range has a
+ * mapping address.
+ */
+#define PGM_RAMRANGE_CALC_PAGE_R3PTR(a_pRam, a_GCPhysPage) \
+ ( (a_pRam)->pvR3 ? (R3PTRTYPE(uint8_t *))(a_pRam)->pvR3 + (a_GCPhysPage) - (a_pRam)->GCPhys : NULL )
+
+
+/**
+ * Per page tracking structure for ROM image.
+ *
+ * A ROM image may have a shadow page, in which case we may have two pages
+ * backing it. This structure contains the PGMPAGE for both while
+ * PGMRAMRANGE has a copy of the active one. It is important that these
+ * aren't out of sync in any regard other than page pool tracking data.
+ */
+typedef struct PGMROMPAGE
+{
+ /** The page structure for the virgin ROM page. */
+ PGMPAGE Virgin;
+ /** The page structure for the shadow RAM page. */
+ PGMPAGE Shadow;
+ /** The current protection setting. */
+ PGMROMPROT enmProt;
+ /** Live save status information. Makes use of unused alignment space. */
+ struct
+ {
+ /** The previous protection value. */
+ uint8_t u8Prot;
+ /** Written to flag set by the handler. */
+ bool fWrittenTo;
+ /** Whether the shadow page is dirty or not. */
+ bool fDirty;
+ /** Whether it was dirtied recently. */
+ bool fDirtiedRecently;
+ } LiveSave;
+} PGMROMPAGE;
+AssertCompileSizeAlignment(PGMROMPAGE, 8);
+/** Pointer to a ROM page tracking structure. */
+typedef PGMROMPAGE *PPGMROMPAGE;
+
+
+/**
+ * A registered ROM image.
+ *
+ * This is needed to keep track of ROM images since they generally intrude
+ * into a PGMRAMRANGE. It also keeps track of additional info like the
+ * two page sets (read-only virgin and read-write shadow) and the current
+ * state of each page.
+ *
+ * Because access handlers cannot easily be executed in a different
+ * context, the ROM ranges need to be accessible in all contexts.
+ */
+typedef struct PGMROMRANGE
+{
+ /** Pointer to the next range - R3. */
+ R3PTRTYPE(struct PGMROMRANGE *) pNextR3;
+ /** Pointer to the next range - R0. */
+ R0PTRTYPE(struct PGMROMRANGE *) pNextR0;
+ /** Pointer to this range - R0. */
+ R0PTRTYPE(struct PGMROMRANGE *) pSelfR0;
+ /** Address of the range. */
+ RTGCPHYS GCPhys;
+ /** Address of the last byte in the range. */
+ RTGCPHYS GCPhysLast;
+ /** Size of the range. */
+ RTGCPHYS cb;
+ /** The flags (PGMPHYS_ROM_FLAGS_*). */
+ uint8_t fFlags;
+ /** The saved state range ID. */
+ uint8_t idSavedState;
+ /** Alignment padding. */
+ uint8_t au8Alignment[2];
+ /** The size of the bits pvOriginal points to. */
+ uint32_t cbOriginal;
+ /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
+ * This is used for strictness checks. */
+ R3PTRTYPE(const void *) pvOriginal;
+ /** The ROM description. */
+ R3PTRTYPE(const char *) pszDesc;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /** In simplified memory mode this provides alternate backing for shadowed ROMs.
+ * - PGMROMPROT_READ_ROM_WRITE_IGNORE: Shadow
+ * - PGMROMPROT_READ_ROM_WRITE_RAM: Shadow
+ * - PGMROMPROT_READ_RAM_WRITE_IGNORE: ROM
+ * - PGMROMPROT_READ_RAM_WRITE_RAM: ROM */
+ R3PTRTYPE(uint8_t *) pbR3Alternate;
+ RTR3PTR pvAlignment2;
+#endif
+ /** The per page tracking structures. */
+ PGMROMPAGE aPages[1];
+} PGMROMRANGE;
+/** Pointer to a ROM range. */
+typedef PGMROMRANGE *PPGMROMRANGE;
+
+
+/**
+ * Live save per page data for an MMIO2 page.
+ *
+ * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
+ * of MMIO2 pages. The current approach is using some optimistic SHA-1 +
+ * CRC-32 for detecting changes as well as special handling of zero pages. This
+ * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
+ * for speeding things up. (We're using SHA-1 rather than SHA-256 or SHA-512
+ * because of speed; those are roughly 2.5x and 6x slower, respectively.)
+ *
+ * @todo Implement dirty MMIO2 page reporting that can be enabled during live
+ * save but normally is disabled. Since we can write monitor guest
+ * accesses on our own, we only need this for host accesses. Shouldn't be
+ * too difficult for DevVGA, VMMDev might be doable, the planned
+ * networking fun will be fun since it involves ring-0.
+ */
+typedef struct PGMLIVESAVEMMIO2PAGE
+{
+ /** Set if the page is considered dirty. */
+ bool fDirty;
+ /** The number of scans this page has remained unchanged for.
+ * Only updated for dirty pages. */
+ uint8_t cUnchangedScans;
+ /** Whether this page was zero at the last scan. */
+ bool fZero;
+ /** Alignment padding. */
+ bool fReserved;
+ /** CRC-32 for the first half of the page.
+ * This is used together with u32CrcH2 to quickly detect changes in the page
+ * during the non-final passes. */
+ uint32_t u32CrcH1;
+ /** CRC-32 for the second half of the page. */
+ uint32_t u32CrcH2;
+ /** SHA-1 for the saved page.
+ * This is used in the final pass to skip pages without changes. */
+ uint8_t abSha1Saved[RTSHA1_HASH_SIZE];
+} PGMLIVESAVEMMIO2PAGE;
+/** Pointer to a live save status data for an MMIO2 page. */
+typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
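+
+/* Illustrative sketch (not the actual saved state code): how the two CRC-32
+ * halves above can be used to decide whether an MMIO2 page must be resent in a
+ * non-final live save pass.  RTCrc32() (iprt/crc.h) and GUEST_PAGE_SIZE are
+ * real; the helper itself is an assumption.
+ *
+ *      static bool pgmExampleIsMmio2PageDirty(PPGMLIVESAVEMMIO2PAGE pLSPage, uint8_t const *pbPage)
+ *      {
+ *          uint32_t const u32CrcH1 = RTCrc32(pbPage,                       GUEST_PAGE_SIZE / 2);
+ *          uint32_t const u32CrcH2 = RTCrc32(&pbPage[GUEST_PAGE_SIZE / 2], GUEST_PAGE_SIZE / 2);
+ *          bool const     fChanged = u32CrcH1 != pLSPage->u32CrcH1 || u32CrcH2 != pLSPage->u32CrcH2;
+ *          pLSPage->u32CrcH1 = u32CrcH1;
+ *          pLSPage->u32CrcH2 = u32CrcH2;
+ *          return fChanged;
+ *      }
+ */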
+
+/**
+ * A registered MMIO2 (= Device RAM) range.
+ *
+ * There are a few reasons why we need to keep track of these registrations. One
+ * of them is the deregistration & cleanup stuff, while another is that the
+ * PGMRAMRANGE associated with such a region may have to be removed from the ram
+ * range list.
+ *
+ * Overlapping with a RAM range has to be 100% or none at all. The pages in the
+ * existing RAM range must not be ROM nor MMIO. A guru meditation will be
+ * raised if a partial overlap or an overlap of ROM pages is encountered. On an
+ * overlap we will free all the existing RAM pages and put in the ram range
+ * pages instead.
+ */
+typedef struct PGMREGMMIO2RANGE
+{
+ /** The owner of the range. (a device) */
+ PPDMDEVINSR3 pDevInsR3;
+ /** Pointer to the ring-3 mapping of the allocation. */
+ RTR3PTR pvR3;
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+ /** Pointer to the ring-0 mapping of the allocation. */
+ RTR0PTR pvR0;
+#endif
+ /** Pointer to the next range - R3. */
+ R3PTRTYPE(struct PGMREGMMIO2RANGE *) pNextR3;
+ /** Flags (PGMREGMMIO2RANGE_F_XXX). */
+ uint16_t fFlags;
+ /** The sub device number (internal PCI config (CFGM) number). */
+ uint8_t iSubDev;
+ /** The PCI region number. */
+ uint8_t iRegion;
+ /** The saved state range ID. */
+ uint8_t idSavedState;
+ /** MMIO2 range identifier, for page IDs (PGMPAGE::s.idPage). */
+ uint8_t idMmio2;
+ /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+ uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 4 : 2];
+#else
+ uint8_t abAlignment[HC_ARCH_BITS == 32 ? 6 + 8 : 2 + 8];
+#endif
+ /** The real size.
+ * This may be larger than indicated by RamRange.cb if the range has been
+ * reduced during saved state loading. */
+ RTGCPHYS cbReal;
+ /** Pointer to the physical handler for MMIO.
+ * If NEM is responsible for tracking dirty pages in simple memory mode, this
+ * will be NULL. */
+ R3PTRTYPE(PPGMPHYSHANDLER) pPhysHandlerR3;
+ /** Live save per page tracking data for MMIO2. */
+ R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages;
+ /** The associated RAM range. */
+ PGMRAMRANGE RamRange;
+} PGMREGMMIO2RANGE;
+AssertCompileMemberAlignment(PGMREGMMIO2RANGE, RamRange, 16);
+/** Pointer to a MMIO2 or pre-registered MMIO range. */
+typedef PGMREGMMIO2RANGE *PPGMREGMMIO2RANGE;
+
+/** @name PGMREGMMIO2RANGE_F_XXX - Registered MMIO2 range flags.
+ * @{ */
+/** Set if this is the first chunk in the MMIO2 range. */
+#define PGMREGMMIO2RANGE_F_FIRST_CHUNK UINT16_C(0x0001)
+/** Set if this is the last chunk in the MMIO2 range. */
+#define PGMREGMMIO2RANGE_F_LAST_CHUNK UINT16_C(0x0002)
+/** Set if the whole range is mapped. */
+#define PGMREGMMIO2RANGE_F_MAPPED UINT16_C(0x0004)
+/** Set if it's overlapping, clear if not. */
+#define PGMREGMMIO2RANGE_F_OVERLAPPING UINT16_C(0x0008)
+/** This mirrors the PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES creation flag.*/
+#define PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES UINT16_C(0x0010)
+/** Set if the access handler is registered. */
+#define PGMREGMMIO2RANGE_F_IS_TRACKING UINT16_C(0x0020)
+/** Set if dirty page tracking is currently enabled. */
+#define PGMREGMMIO2RANGE_F_TRACKING_ENABLED UINT16_C(0x0040)
+/** Set if there are dirty pages in the range. */
+#define PGMREGMMIO2RANGE_F_IS_DIRTY UINT16_C(0x0080)
+/** @} */
+
+
+/** @name Internal MMIO2 constants.
+ * @{ */
+/** The maximum number of MMIO2 ranges. */
+#define PGM_MMIO2_MAX_RANGES 32
+/** The maximum number of pages in a MMIO2 range. */
+#define PGM_MMIO2_MAX_PAGE_COUNT UINT32_C(0x01000000)
+/** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */
+#define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage) ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) )
+/** Gets the MMIO2 range ID from an MMIO2 page ID. */
+#define PGM_MMIO2_PAGEID_GET_MMIO2_ID(a_idPage) ( (uint8_t)((a_idPage) >> 24) )
+/** Gets the MMIO2 page index from an MMIO2 page ID. */
+#define PGM_MMIO2_PAGEID_GET_IDX(a_idPage) ( ((a_idPage) & UINT32_C(0x00ffffff)) )
+/** @} */
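+
+/* Example (illustrative values): the page ID macros above pack an 8-bit range
+ * ID and a 24-bit page index into a single 32-bit word.
+ *
+ *      uint32_t const idPage = PGM_MMIO2_PAGEID_MAKE(3, 0x1234);   // == UINT32_C(0x03001234)
+ *      Assert(PGM_MMIO2_PAGEID_GET_MMIO2_ID(idPage) == 3);
+ *      Assert(PGM_MMIO2_PAGEID_GET_IDX(idPage)      == 0x1234);
+ */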
+
+
+
+/**
+ * PGMPhysRead/Write cache entry
+ */
+typedef struct PGMPHYSCACHEENTRY
+{
+ /** R3 pointer to physical page. */
+ R3PTRTYPE(uint8_t *) pbR3;
+ /** GC Physical address for cache entry */
+ RTGCPHYS GCPhys;
+#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
+ RTGCPHYS u32Padding0; /**< alignment padding. */
+#endif
+} PGMPHYSCACHEENTRY;
+
+/**
+ * PGMPhysRead/Write cache to reduce REM memory access overhead
+ */
+typedef struct PGMPHYSCACHE
+{
+ /** Bitmap of valid cache entries */
+ uint64_t aEntries;
+ /** Cache entries */
+ PGMPHYSCACHEENTRY Entry[PGM_MAX_PHYSCACHE_ENTRIES];
+} PGMPHYSCACHE;
+
+
+/** @name Ring-3 page mapping TLBs
+ * @{ */
+
+/** Pointer to an allocation chunk ring-3 mapping. */
+typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
+/** Pointer to an allocation chunk ring-3 mapping pointer. */
+typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;
+
+/**
+ * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
+ *
+ * The primary tree (Core) uses the chunk id as key.
+ */
+typedef struct PGMCHUNKR3MAP
+{
+ /** The key is the chunk id. */
+ AVLU32NODECORE Core;
+ /** The time (ChunkR3Map.iNow) this chunk was last used. Used for unmap
+ * selection. */
+ uint32_t iLastUsed;
+ /** The current reference count. */
+ uint32_t volatile cRefs;
+ /** The current permanent reference count. */
+ uint32_t volatile cPermRefs;
+ /** The mapping address. */
+ void *pv;
+} PGMCHUNKR3MAP;
+
+/**
+ * Allocation chunk ring-3 mapping TLB entry.
+ */
+typedef struct PGMCHUNKR3MAPTLBE
+{
+ /** The chunk id. */
+ uint32_t volatile idChunk;
+#if HC_ARCH_BITS == 64
+ uint32_t u32Padding; /**< alignment padding. */
+#endif
+ /** The chunk map. */
+ R3PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
+} PGMCHUNKR3MAPTLBE;
+/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
+typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;
+
+/** The number of TLB entries in PGMCHUNKR3MAPTLB.
+ * @remark Must be a power of two value. */
+#define PGM_CHUNKR3MAPTLB_ENTRIES 64
+
+/**
+ * Allocation chunk ring-3 mapping TLB.
+ *
+ * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
+ * At first glance this might look kinda odd since AVL trees are
+ * supposed to give the most optimal lookup times of all trees
+ * due to their balancing. However, take a tree with 1023 nodes
+ * in it, that's 10 levels, meaning that most searches have to go
+ * down 9 levels before they find what they want. This isn't fast
+ * compared to a TLB hit. There is the factor of cache misses,
+ * and of course the problem with trees and branch prediction.
+ * This is why we use TLBs in front of most of the trees.
+ *
+ * @todo Generalize this TLB + AVL stuff, shouldn't be all that
+ * difficult when we switch to the new inlined AVL trees (from kStuff).
+ */
+typedef struct PGMCHUNKR3MAPTLB
+{
+ /** The TLB entries. */
+ PGMCHUNKR3MAPTLBE aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
+} PGMCHUNKR3MAPTLB;
+
+/**
+ * Calculates the index of a guest page in the Ring-3 Chunk TLB.
+ * @returns Chunk TLB index.
+ * @param idChunk The Chunk ID.
+ */
+#define PGM_CHUNKR3MAPTLB_IDX(idChunk) ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
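+
+/* Illustrative lookup sketch (not the original code): a probe checks the TLB
+ * slot selected by the macro above and only walks the AVL tree on a miss.  The
+ * TLB instance variable is hypothetical.
+ *
+ *      PPGMCHUNKR3MAPTLBE pTlbe = &pTlb->aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
+ *      PPGMCHUNKR3MAP     pMap  = pTlbe->idChunk == idChunk ? pTlbe->pChunk : NULL;  // NULL => fall back to the tree
+ */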
+
+
+/**
+ * Ring-3 guest page mapping TLB entry.
+ * @remarks used in ring-0 as well at the moment.
+ */
+typedef struct PGMPAGER3MAPTLBE
+{
+ /** Address of the page. */
+ RTGCPHYS volatile GCPhys;
+ /** The guest page. */
+ R3PTRTYPE(PPGMPAGE) volatile pPage;
+ /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
+ R3PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
+ /** The address */
+ R3PTRTYPE(void *) volatile pv;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Padding; /**< alignment padding. */
+#endif
+} PGMPAGER3MAPTLBE;
+/** Pointer to an entry in the HC physical TLB. */
+typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;
+
+
+/** The number of entries in the ring-3 guest page mapping TLB.
+ * @remarks The value must be a power of two. */
+#define PGM_PAGER3MAPTLB_ENTRIES 256
+
+/**
+ * Ring-3 guest page mapping TLB.
+ * @remarks used in ring-0 as well at the moment.
+ */
+typedef struct PGMPAGER3MAPTLB
+{
+ /** The TLB entries. */
+ PGMPAGER3MAPTLBE aEntries[PGM_PAGER3MAPTLB_ENTRIES];
+} PGMPAGER3MAPTLB;
+/** Pointer to the ring-3 guest page mapping TLB. */
+typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;
+
+/**
+ * Calculates the index of the TLB entry for the specified guest page.
+ * @returns Physical TLB index.
+ * @param GCPhys The guest physical address.
+ */
+#define PGM_PAGER3MAPTLB_IDX(GCPhys) ( ((GCPhys) >> GUEST_PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
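+
+/* Example (illustrative address, GUEST_PAGE_SHIFT is 12 for x86 guests):
+ * consecutive guest pages map to consecutive TLB slots, wrapping after 256.
+ *
+ *      unsigned const idx = PGM_PAGER3MAPTLB_IDX(UINT64_C(0x00101000));
+ *      // (0x00101000 >> 12) & 0xff = 0x101 & 0xff = 0x01
+ */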
+
+/** @} */
+
+
+/** @name Ring-0 page mapping TLB
+ * @{ */
+/**
+ * Ring-0 guest page mapping TLB entry.
+ */
+typedef struct PGMPAGER0MAPTLBE
+{
+ /** Address of the page. */
+ RTGCPHYS volatile GCPhys;
+ /** The guest page. */
+ R0PTRTYPE(PPGMPAGE) volatile pPage;
+ /** The address */
+ R0PTRTYPE(void *) volatile pv;
+} PGMPAGER0MAPTLBE;
+/** Pointer to an entry in the HC physical TLB. */
+typedef PGMPAGER0MAPTLBE *PPGMPAGER0MAPTLBE;
+
+
+/** The number of entries in the ring-0 guest page mapping TLB.
+ * @remarks The value must be a power of two. */
+#define PGM_PAGER0MAPTLB_ENTRIES 256
+
+/**
+ * Ring-0 guest page mapping TLB.
+ */
+typedef struct PGMPAGER0MAPTLB
+{
+ /** The TLB entries. */
+ PGMPAGER0MAPTLBE aEntries[PGM_PAGER0MAPTLB_ENTRIES];
+} PGMPAGER0MAPTLB;
+/** Pointer to the ring-0 guest page mapping TLB. */
+typedef PGMPAGER0MAPTLB *PPGMPAGER0MAPTLB;
+
+/**
+ * Calculates the index of the TLB entry for the specified guest page.
+ * @returns Physical TLB index.
+ * @param GCPhys The guest physical address.
+ */
+#define PGM_PAGER0MAPTLB_IDX(GCPhys) ( ((GCPhys) >> GUEST_PAGE_SHIFT) & (PGM_PAGER0MAPTLB_ENTRIES - 1) )
+/** @} */
+
+
+/** @name Context neutral page mapper TLB.
+ *
+ * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
+ * code are written in a kind of context neutral way. Time will show whether
+ * this actually makes sense or not...
+ *
+ * @todo this needs to be reconsidered and dropped/redone since the ring-0
+ * context ends up using a global mapping cache on some platforms
+ * (darwin).
+ *
+ * @{ */
+/** @typedef PPGMPAGEMAPTLB
+ * The page mapper TLB pointer type for the current context. */
+/** @typedef PPGMPAGEMAPTLBE
+ * The page mapper TLB entry pointer type for the current context. */
+/** @typedef PPPGMPAGEMAPTLBE
+ * The page mapper TLB entry pointer pointer type for the current context. */
+/** @def PGM_PAGEMAPTLB_ENTRIES
+ * The number of TLB entries in the page mapper TLB for the current context. */
+/** @def PGM_PAGEMAPTLB_IDX
+ * Calculate the TLB index for a guest physical address.
+ * @returns The TLB index.
+ * @param GCPhys The guest physical address. */
+/** @typedef PPGMPAGEMAP
+ * Pointer to a page mapper unit for current context. */
+/** @typedef PPPGMPAGEMAP
+ * Pointer to a page mapper unit pointer for current context. */
+#if defined(IN_RING0)
+typedef PPGMPAGER0MAPTLB PPGMPAGEMAPTLB;
+typedef PPGMPAGER0MAPTLBE PPGMPAGEMAPTLBE;
+typedef PPGMPAGER0MAPTLBE *PPPGMPAGEMAPTLBE;
+# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER0MAPTLB_ENTRIES
+# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)
+typedef struct PGMCHUNKR0MAP *PPGMPAGEMAP;
+typedef struct PGMCHUNKR0MAP **PPPGMPAGEMAP;
+#else
+typedef PPGMPAGER3MAPTLB PPGMPAGEMAPTLB;
+typedef PPGMPAGER3MAPTLBE PPGMPAGEMAPTLBE;
+typedef PPGMPAGER3MAPTLBE *PPPGMPAGEMAPTLBE;
+# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER3MAPTLB_ENTRIES
+# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)
+typedef PPGMCHUNKR3MAP PPGMPAGEMAP;
+typedef PPPGMCHUNKR3MAP PPPGMPAGEMAP;
+#endif
+/** @} */
+
+
+/** @name PGM Pool Indexes.
+ * Aka. the unique shadow page identifier.
+ * @{ */
+/** NIL page pool IDX. */
+#define NIL_PGMPOOL_IDX 0
+/** The first normal index. There used to be 5 fictive pages up front, now
+ * there is only the NIL page. */
+#define PGMPOOL_IDX_FIRST 1
+/** The last valid index. (inclusive, 14 bits) */
+#define PGMPOOL_IDX_LAST 0x3fff
+/** @} */
+
+/** The NIL index for the parent chain. */
+#define NIL_PGMPOOL_USER_INDEX ((uint16_t)0xffff)
+/** The NIL index for present entry tracking (see PGMPOOLPAGE::iFirstPresent). */
+#define NIL_PGMPOOL_PRESENT_INDEX ((uint16_t)0xffff)
+
+/**
+ * Node in the chain linking a shadowed page to its parent (user).
+ */
+#pragma pack(1)
+typedef struct PGMPOOLUSER
+{
+ /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
+ uint16_t iNext;
+ /** The user page index. */
+ uint16_t iUser;
+ /** Index into the user table. */
+ uint32_t iUserTable;
+} PGMPOOLUSER, *PPGMPOOLUSER;
+typedef const PGMPOOLUSER *PCPGMPOOLUSER;
+#pragma pack()
+
+
+/** The NIL index for the phys ext chain. */
+#define NIL_PGMPOOL_PHYSEXT_INDEX ((uint16_t)0xffff)
+/** The NIL pte index for a phys ext chain slot. */
+#define NIL_PGMPOOL_PHYSEXT_IDX_PTE ((uint16_t)0xffff)
+
+/**
+ * Node in the chain of physical cross reference extents.
+ * @todo Calling this an 'extent' is not quite right, find a better name.
+ * @todo find out the optimal size of the aidx array
+ */
+#pragma pack(1)
+typedef struct PGMPOOLPHYSEXT
+{
+ /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
+ uint16_t iNext;
+ /** Alignment. */
+ uint16_t u16Align;
+ /** The user page index. */
+ uint16_t aidx[3];
+ /** The page table index or NIL_PGMPOOL_PHYSEXT_IDX_PTE if unknown. */
+ uint16_t apte[3];
+} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
+typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
+#pragma pack()
+
+
+/**
+ * The kind of page that's being shadowed.
+ */
+typedef enum PGMPOOLKIND
+{
+ /** The virtual invalid 0 entry. */
+ PGMPOOLKIND_INVALID = 0,
+ /** The entry is free (=unused). */
+ PGMPOOLKIND_FREE,
+
+ /** Shw: 32-bit page table; Gst: no paging. */
+ PGMPOOLKIND_32BIT_PT_FOR_PHYS,
+ /** Shw: 32-bit page table; Gst: 32-bit page table. */
+ PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
+ /** Shw: 32-bit page table; Gst: 4MB page. */
+ PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
+ /** Shw: PAE page table; Gst: no paging. */
+ PGMPOOLKIND_PAE_PT_FOR_PHYS,
+ /** Shw: PAE page table; Gst: 32-bit page table. */
+ PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
+ /** Shw: PAE page table; Gst: Half of a 4MB page. */
+ PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
+ /** Shw: PAE page table; Gst: PAE page table. */
+ PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
+ /** Shw: PAE page table; Gst: 2MB page. */
+ PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,
+
+ /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
+ PGMPOOLKIND_32BIT_PD,
+ /** Shw: 32-bit page directory. Gst: no paging. */
+ PGMPOOLKIND_32BIT_PD_PHYS,
+ /** Shw: PAE page directory 0; Gst: 32-bit page directory. */
+ PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
+ /** Shw: PAE page directory 1; Gst: 32-bit page directory. */
+ PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
+ /** Shw: PAE page directory 2; Gst: 32-bit page directory. */
+ PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
+ /** Shw: PAE page directory 3; Gst: 32-bit page directory. */
+ PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
+ /** Shw: PAE page directory; Gst: PAE page directory. */
+ PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
+ /** Shw: PAE page directory; Gst: no paging. Note: +NP. */
+ PGMPOOLKIND_PAE_PD_PHYS,
+
+ /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst 32 bits paging. */
+ PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
+ /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst PAE PDPT. */
+ PGMPOOLKIND_PAE_PDPT,
+ /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: no paging. */
+ PGMPOOLKIND_PAE_PDPT_PHYS,
+
+ /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
+ PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
+ /** Shw: 64-bit page directory pointer table; Gst: no paging. */
+ PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
+ /** Shw: 64-bit page directory table; Gst: 64-bit page directory table. */
+ PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
+ /** Shw: 64-bit page directory table; Gst: no paging. */
+ PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 24 */
+
+ /** Shw: 64-bit PML4; Gst: 64-bit PML4. */
+ PGMPOOLKIND_64BIT_PML4,
+
+ /** Shw: EPT page directory pointer table; Gst: no paging. */
+ PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
+ /** Shw: EPT page directory table; Gst: no paging. */
+ PGMPOOLKIND_EPT_PD_FOR_PHYS,
+ /** Shw: EPT page table; Gst: no paging. */
+ PGMPOOLKIND_EPT_PT_FOR_PHYS,
+
+ /** Shw: Root Nested paging table. */
+ PGMPOOLKIND_ROOT_NESTED,
+
+ /** Shw: EPT page table; Gst: EPT page table. */
+ PGMPOOLKIND_EPT_PT_FOR_EPT_PT,
+ /** Shw: EPT page table; Gst: 2MB page. */
+ PGMPOOLKIND_EPT_PT_FOR_EPT_2MB,
+ /** Shw: EPT page directory table; Gst: EPT page directory. */
+ PGMPOOLKIND_EPT_PD_FOR_EPT_PD,
+ /** Shw: EPT page directory pointer table; Gst: EPT page directory pointer table. */
+ PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT,
+ /** Shw: EPT PML4; Gst: EPT PML4. */
+ PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4,
+
+ /** The last valid entry. */
+ PGMPOOLKIND_LAST = PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4
+} PGMPOOLKIND;
+
+/**
+ * The access attributes of the page; only applies to big pages.
+ */
+typedef enum
+{
+ PGMPOOLACCESS_DONTCARE = 0,
+ PGMPOOLACCESS_USER_RW,
+ PGMPOOLACCESS_USER_R,
+ PGMPOOLACCESS_USER_RW_NX,
+ PGMPOOLACCESS_USER_R_NX,
+ PGMPOOLACCESS_SUPERVISOR_RW,
+ PGMPOOLACCESS_SUPERVISOR_R,
+ PGMPOOLACCESS_SUPERVISOR_RW_NX,
+ PGMPOOLACCESS_SUPERVISOR_R_NX
+} PGMPOOLACCESS;
+
+/**
+ * The tracking data for a page in the pool.
+ */
+typedef struct PGMPOOLPAGE
+{
+ /** AVL node code with the (HC) physical address of this page. */
+ AVLOHCPHYSNODECORE Core;
+ /** Pointer to the R3 mapping of the page. */
+ R3PTRTYPE(void *) pvPageR3;
+ /** Pointer to the R0 mapping of the page. */
+ R0PTRTYPE(void *) pvPageR0;
+ /** The guest physical address. */
+ RTGCPHYS GCPhys;
+ /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
+ uint8_t enmKind;
+ /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
+ uint8_t enmAccess;
+ /** This supplements enmKind and enmAccess */
+ bool fA20Enabled : 1;
+
+ /** Used to indicate that the page is zeroed. */
+ bool fZeroed : 1;
+ /** Used to indicate that a PT has non-global entries. */
+ bool fSeenNonGlobal : 1;
+ /** Used to indicate that we're monitoring writes to the guest page. */
+ bool fMonitored : 1;
+ /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
+ * (All pages are in the age list.) */
+ bool fCached : 1;
+ /** This is used by the R3 access handlers when invoked by an async thread.
+ * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
+ bool volatile fReusedFlushPending : 1;
+ /** Used to mark the page as dirty (write monitoring is temporarily
+ * off). */
+ bool fDirty : 1;
+ bool fPadding1 : 1;
+ bool fPadding2;
+
+ /** The index of this page. */
+ uint16_t idx;
+ /** The next entry in the list this page currently resides in.
+ * It's either in the free list or in the GCPhys hash. */
+ uint16_t iNext;
+ /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
+ uint16_t iUserHead;
+ /** The number of present entries. */
+ uint16_t cPresent;
+ /** The first entry in the table which is present. */
+ uint16_t iFirstPresent;
+ /** The number of modifications to the monitored page. */
+ uint16_t cModifications;
+ /** The next modified page. NIL_PGMPOOL_IDX if tail. */
+ uint16_t iModifiedNext;
+ /** The previous modified page. NIL_PGMPOOL_IDX if head. */
+ uint16_t iModifiedPrev;
+ /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
+ uint16_t iMonitoredNext;
+ /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
+ uint16_t iMonitoredPrev;
+ /** The next page in the age list. */
+ uint16_t iAgeNext;
+ /** The previous page in the age list. */
+ uint16_t iAgePrev;
+ /** Index into PGMPOOL::aDirtyPages if fDirty is set. */
+ uint8_t idxDirtyEntry;
+
+ /** @name Access handler statistics to determine whether the guest is
+ * (re)initializing a page table.
+ * @{ */
+ RTGCPTR GCPtrLastAccessHandlerRip;
+ RTGCPTR GCPtrLastAccessHandlerFault;
+ uint64_t cLastAccessHandler;
+ /** @} */
+ /** Used to indicate that this page can't be flushed. Important for cr3 root pages or shadow pae pd pages. */
+ uint32_t volatile cLocked;
+#if GC_ARCH_BITS == 64
+ uint32_t u32Alignment3;
+#endif
+# ifdef VBOX_STRICT
+ RTGCPTR GCPtrDirtyFault;
+# endif
+} PGMPOOLPAGE;
+/** Pointer to a pool page. */
+typedef PGMPOOLPAGE *PPGMPOOLPAGE;
+/** Pointer to a const pool page. */
+typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
+/** Pointer to a pool page pointer. */
+typedef PGMPOOLPAGE **PPPGMPOOLPAGE;
+
+
+/** The hash table size. */
+# define PGMPOOL_HASH_SIZE 0x8000
+/** The hash function. */
+# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> GUEST_PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
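+
+/* Example (illustrative address): the hash is simply the guest page frame
+ * number folded into the 0x8000 entry table.
+ *
+ *      unsigned const iHash = PGMPOOL_HASH(UINT64_C(0x12345000));
+ *      // (0x12345000 >> 12) & 0x7fff = 0x12345 & 0x7fff = 0x2345
+ */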
+
+
+/**
+ * The shadow page pool instance data.
+ *
+ * It's all one big allocation made at init time, except for the
+ * pages, that is. The user nodes follow immediately after the
+ * page structures.
+ */
+typedef struct PGMPOOL
+{
+ /** The VM handle - R3 Ptr. */
+ PVMR3 pVMR3;
+ /** The VM handle - R0 Ptr. */
+ R0PTRTYPE(PVMCC) pVMR0;
+ /** The ring-3 pointer to this structure. */
+ R3PTRTYPE(struct PGMPOOL *) pPoolR3;
+ /** The ring-0 pointer to this structure. */
+ R0PTRTYPE(struct PGMPOOL *) pPoolR0;
+ /** The max pool size. This includes the special IDs. */
+ uint16_t cMaxPages;
+ /** The current pool size. */
+ uint16_t cCurPages;
+ /** The head of the free page list. */
+ uint16_t iFreeHead;
+ /* Padding. */
+ uint16_t u16Padding;
+ /** Head of the chain of free user nodes. */
+ uint16_t iUserFreeHead;
+ /** The number of user nodes we've allocated. */
+ uint16_t cMaxUsers;
+ /** The number of present page table entries in the entire pool. */
+ uint32_t cPresent;
+ /** Pointer to the array of user nodes - R3 pointer. */
+ R3PTRTYPE(PPGMPOOLUSER) paUsersR3;
+ /** Pointer to the array of user nodes - R0 pointer. */
+ R0PTRTYPE(PPGMPOOLUSER) paUsersR0;
+ /** Head of the chain of free phys ext nodes. */
+ uint16_t iPhysExtFreeHead;
+ /** The number of user nodes we've allocated. */
+ uint16_t cMaxPhysExts;
+ uint32_t u32Padding0b;
+ /** Pointer to the array of physical xref extent nodes - R3 pointer. */
+ R3PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR3;
+ /** Pointer to the array of physical xref extent nodes - R0 pointer. */
+ R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR0;
+ /** Hash table for GCPhys addresses. */
+ uint16_t aiHash[PGMPOOL_HASH_SIZE];
+ /** The head of the age list. */
+ uint16_t iAgeHead;
+ /** The tail of the age list. */
+ uint16_t iAgeTail;
+ /** Set if the cache is enabled. */
+ bool fCacheEnabled;
+ /** Alignment padding. */
+ bool afPadding1[3];
+ /** Head of the list of modified pages. */
+ uint16_t iModifiedHead;
+ /** The current number of modified pages. */
+ uint16_t cModifiedPages;
+ /** Alignment padding. */
+ uint32_t u32Padding2;
+ /** Physical access handler type registration handle. */
+ PGMPHYSHANDLERTYPE hAccessHandlerType;
+ /** Next available slot (in aDirtyPages). */
+ uint32_t idxFreeDirtyPage;
+ /** Number of active dirty pages. */
+ uint32_t cDirtyPages;
+ /** Array of current dirty pgm pool page indices. */
+ uint16_t aidxDirtyPages[16];
+ /** Array running in parallel to aidxDirtyPages with the page data. */
+ struct
+ {
+ uint64_t aPage[512];
+ } aDirtyPages[16];
+
+ /** The number of pages currently in use. */
+ uint16_t cUsedPages;
+#ifdef VBOX_WITH_STATISTICS
+ /** The high water mark for cUsedPages. */
+ uint16_t cUsedPagesHigh;
+ uint32_t Alignment1; /**< Align the next member on a 64-bit boundary. */
+ /** Profiling pgmPoolAlloc(). */
+ STAMPROFILEADV StatAlloc;
+ /** Profiling pgmR3PoolClearDoIt(). */
+ STAMPROFILE StatClearAll;
+ /** Profiling pgmR3PoolReset(). */
+ STAMPROFILE StatR3Reset;
+ /** Profiling pgmPoolFlushPage(). */
+ STAMPROFILE StatFlushPage;
+ /** Profiling pgmPoolFree(). */
+ STAMPROFILE StatFree;
+ /** Counting explicit flushes by PGMPoolFlushPage(). */
+ STAMCOUNTER StatForceFlushPage;
+ /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
+ STAMCOUNTER StatForceFlushDirtyPage;
+ /** Counting flushes for reused pages. */
+ STAMCOUNTER StatForceFlushReused;
+ /** Profiling time spent zeroing pages. */
+ STAMPROFILE StatZeroPage;
+ /** Profiling of pgmPoolTrackDeref. */
+ STAMPROFILE StatTrackDeref;
+ /** Profiling pgmTrackFlushGCPhysPT. */
+ STAMPROFILE StatTrackFlushGCPhysPT;
+ /** Profiling pgmTrackFlushGCPhysPTs. */
+ STAMPROFILE StatTrackFlushGCPhysPTs;
+ /** Profiling pgmTrackFlushGCPhysPTsSlow. */
+ STAMPROFILE StatTrackFlushGCPhysPTsSlow;
+ /** Number of times we've been out of user records. */
+ STAMCOUNTER StatTrackFreeUpOneUser;
+ /** Nr of flushed entries. */
+ STAMCOUNTER StatTrackFlushEntry;
+ /** Nr of updated entries. */
+ STAMCOUNTER StatTrackFlushEntryKeep;
+ /** Profiling deref activity related tracking GC physical pages. */
+ STAMPROFILE StatTrackDerefGCPhys;
+ /** Number of linear searches for a HCPhys in the ram ranges. */
+ STAMCOUNTER StatTrackLinearRamSearches;
+ /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
+ STAMCOUNTER StamTrackPhysExtAllocFailures;
+
+ /** Profiling the RC/R0 \#PF access handler. */
+ STAMPROFILE StatMonitorPfRZ;
+ /** Profiling the RC/R0 access we've handled (except REP STOSD). */
+ STAMPROFILE StatMonitorPfRZHandled;
+ /** Times we've failed interpreting the instruction. */
+ STAMCOUNTER StatMonitorPfRZEmulateInstr;
+ /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
+ STAMPROFILE StatMonitorPfRZFlushPage;
+ /** Times we've detected a page table reinit. */
+ STAMCOUNTER StatMonitorPfRZFlushReinit;
+ /** Counting flushes for pages that are modified too often. */
+ STAMCOUNTER StatMonitorPfRZFlushModOverflow;
+ /** Times we've detected fork(). */
+ STAMCOUNTER StatMonitorPfRZFork;
+ /** Times we've failed interpreting a patch code instruction. */
+ STAMCOUNTER StatMonitorPfRZIntrFailPatch1;
+ /** Times we've failed interpreting a patch code instruction during flushing. */
+ STAMCOUNTER StatMonitorPfRZIntrFailPatch2;
+ /** The number of times we've seen rep prefixes we can't handle. */
+ STAMCOUNTER StatMonitorPfRZRepPrefix;
+ /** Profiling the REP STOSD cases we've handled. */
+ STAMPROFILE StatMonitorPfRZRepStosd;
+
+ /** Profiling the R0/RC regular access handler. */
+ STAMPROFILE StatMonitorRZ;
+ /** Profiling the pgmPoolFlushPage calls made from the regular access handler in R0/RC. */
+ STAMPROFILE StatMonitorRZFlushPage;
+ /** Per access size counts indexed by size minus 1, last for larger. */
+ STAMCOUNTER aStatMonitorRZSizes[16+3];
+ /** Misaligned access counts indexed by offset - 1. */
+ STAMCOUNTER aStatMonitorRZMisaligned[7];
+
+ /** Nr of handled PT faults. */
+ STAMCOUNTER StatMonitorRZFaultPT;
+ /** Nr of handled PD faults. */
+ STAMCOUNTER StatMonitorRZFaultPD;
+ /** Nr of handled PDPT faults. */
+ STAMCOUNTER StatMonitorRZFaultPDPT;
+ /** Nr of handled PML4 faults. */
+ STAMCOUNTER StatMonitorRZFaultPML4;
+
+ /** Profiling the R3 access handler. */
+ STAMPROFILE StatMonitorR3;
+ /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
+ STAMPROFILE StatMonitorR3FlushPage;
+ /** Per access size counts indexed by size minus 1, last for larger. */
+ STAMCOUNTER aStatMonitorR3Sizes[16+3];
+ /** Misaligned access counts indexed by offset - 1. */
+ STAMCOUNTER aStatMonitorR3Misaligned[7];
+ /** Nr of handled PT faults. */
+ STAMCOUNTER StatMonitorR3FaultPT;
+ /** Nr of handled PD faults. */
+ STAMCOUNTER StatMonitorR3FaultPD;
+ /** Nr of handled PDPT faults. */
+ STAMCOUNTER StatMonitorR3FaultPDPT;
+ /** Nr of handled PML4 faults. */
+ STAMCOUNTER StatMonitorR3FaultPML4;
+
+ /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
+ STAMCOUNTER StatResetDirtyPages;
+ /** Times we've called pgmPoolAddDirtyPage. */
+ STAMCOUNTER StatDirtyPage;
+ /** Times we've had to flush duplicates for dirty page management. */
+ STAMCOUNTER StatDirtyPageDupFlush;
+ /** Times we've had to flush because of overflow. */
+ STAMCOUNTER StatDirtyPageOverFlowFlush;
+
+ /** The high water mark for cModifiedPages. */
+ uint16_t cModifiedPagesHigh;
+ uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundary. */
+
+ /** The number of cache hits. */
+ STAMCOUNTER StatCacheHits;
+ /** The number of cache misses. */
+ STAMCOUNTER StatCacheMisses;
+ /** The number of times we've got a conflict of 'kind' in the cache. */
+ STAMCOUNTER StatCacheKindMismatches;
+ /** Number of times we've been out of pages. */
+ STAMCOUNTER StatCacheFreeUpOne;
+ /** The number of cacheable allocations. */
+ STAMCOUNTER StatCacheCacheable;
+ /** The number of uncacheable allocations. */
+ STAMCOUNTER StatCacheUncacheable;
+#else
+ uint32_t Alignment3; /**< Align the next member on a 64-bit boundary. */
+#endif
+ /** Profiling PGMR0PoolGrow(). */
+ STAMPROFILE StatGrow;
+ /** The AVL tree for looking up a page by its HC physical address. */
+ AVLOHCPHYSTREE HCPhysTree;
+ uint32_t Alignment4; /**< Align the next member on a 64-bit boundary. */
+ /** Array of pages. (cMaxPages in length)
+ * The Id is the index into this array.
+ */
+ PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
+} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
+AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
+AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
+AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
+#endif
+AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
+
+
+/** @def PGMPOOL_PAGE_2_PTR
+ * Maps a pool page pool into the current context.
+ *
+ * @returns VBox status code.
+ * @param a_pVM Pointer to the VM.
+ * @param a_pPage The pool page.
+ *
+ * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
+ * small page window employed by that function. Be careful.
+ * @remark There is no need to assert on the result.
+ */
+#if defined(VBOX_STRICT) || 1 /* temporarily going strict here */
+# define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage) pgmPoolMapPageStrict(a_pPage, __FUNCTION__)
+DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE a_pPage, const char *pszCaller)
+{
+ RT_NOREF(pszCaller);
+ AssertPtr(a_pPage);
+ AssertMsg(RT_VALID_PTR(a_pPage->CTX_SUFF(pvPage)),
+ ("enmKind=%d idx=%#x HCPhys=%RHp GCPhys=%RGp pvPageR3=%p pvPageR0=%p caller=%s\n",
+ a_pPage->enmKind, a_pPage->idx, a_pPage->Core.Key, a_pPage->GCPhys, a_pPage->pvPageR3, a_pPage->pvPageR0, pszCaller));
+ return a_pPage->CTX_SUFF(pvPage);
+}
+#else
+# define PGMPOOL_PAGE_2_PTR(pVM, a_pPage) ((a_pPage)->CTX_SUFF(pvPage))
+#endif
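+
+/* Illustrative use (not from the original source): mapping a shadow PAE page
+ * table for inspection; the variable names are hypothetical.
+ *
+ *      PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);
+ *      X86PTEPAE Pte    = pShwPT->a[iPte];
+ */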
+
+
+/** @def PGMPOOL_PAGE_2_PTR_V2
+ * Maps a pool page pool into the current context, taking both VM and VMCPU.
+ *
+ * @returns VBox status code.
+ * @param a_pVM Pointer to the VM.
+ * @param a_pVCpu The current CPU.
+ * @param a_pPage The pool page.
+ *
+ * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
+ * small page window employed by that function. Be careful.
+ * @remark There is no need to assert on the result.
+ */
+#define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage) PGMPOOL_PAGE_2_PTR((a_pVM), (a_pPage))
+
+
+/** @def PGMPOOL_PAGE_IS_NESTED
+ * Checks whether the given pool page is a nested-guest pool page.
+ *
+ * @returns @c true if a nested-guest pool page, @c false otherwise.
+ * @param a_pPage The pool page.
+ * @todo We can optimize the conditionals later.
+ */
+#define PGMPOOL_PAGE_IS_NESTED(a_pPage) PGMPOOL_PAGE_IS_KIND_NESTED((a_pPage)->enmKind)
+#define PGMPOOL_PAGE_IS_KIND_NESTED(a_enmKind) ( (a_enmKind) == PGMPOOLKIND_EPT_PT_FOR_EPT_PT \
+ || (a_enmKind) == PGMPOOLKIND_EPT_PT_FOR_EPT_2MB \
+ || (a_enmKind) == PGMPOOLKIND_EPT_PD_FOR_EPT_PD \
+ || (a_enmKind) == PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT \
+ || (a_enmKind) == PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4)
+
+/** @name Per guest page tracking data.
+ * This is currently kept as a 16-bit word in the PGMPAGE structure; the idea though
+ * is to use more bits for it and split it up later on. But for now we'll play
+ * safe and change as little as possible.
+ *
+ * The 16-bit word has two parts:
+ *
+ * The first 14 bits form the @a idx field. It is either the index of a page in
+ * the shadow page pool, or an index into the extent list.
+ *
+ * The 2 topmost bits make up the @a cRefs field, which counts the number of
+ * shadow page pool references to the page. If cRefs equals
+ * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
+ * (misnomer) table and not the shadow page pool.
+ *
+ * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
+ * the 16-bit word.
+ *
+ * @{ */
+/** The shift count for getting to the cRefs part. */
+#define PGMPOOL_TD_CREFS_SHIFT 14
+/** The mask applied after shifting the tracking data down by
+ * PGMPOOL_TD_CREFS_SHIFT. */
+#define PGMPOOL_TD_CREFS_MASK 0x3
+/** The cRefs value used to indicate that the idx is the head of a
+ * physical cross reference list. */
+#define PGMPOOL_TD_CREFS_PHYSEXT PGMPOOL_TD_CREFS_MASK
+/** The shift used to get idx. */
+#define PGMPOOL_TD_IDX_SHIFT 0
+/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
+#define PGMPOOL_TD_IDX_MASK 0x3fff
+/** The idx value when we're out of PGMPOOLPHYSEXT entries and/or there are
+ * simply too many mappings of this page. */
+#define PGMPOOL_TD_IDX_OVERFLOWED PGMPOOL_TD_IDX_MASK
+
+/** @def PGMPOOL_TD_MAKE
+ * Makes a 16-bit tracking data word.
+ *
+ * @returns tracking data.
+ * @param cRefs The @a cRefs field. Must be within bounds!
+ * @param idx The @a idx field. Must also be within bounds! */
+#define PGMPOOL_TD_MAKE(cRefs, idx) ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )
+
+/** @def PGMPOOL_TD_GET_CREFS
+ * Get the @a cRefs field from a tracking data word.
+ *
+ * @returns The @a cRefs field
+ * @param u16 The tracking data word.
+ * @remarks This will only return 1 or PGMPOOL_TD_CREFS_PHYSEXT for a
+ * non-zero @a u16. */
+#define PGMPOOL_TD_GET_CREFS(u16) ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )
+
+/** @def PGMPOOL_TD_GET_IDX
+ * Get the @a idx field from a tracking data word.
+ *
+ * @returns The @a idx field
+ * @param u16 The tracking data word. */
+#define PGMPOOL_TD_GET_IDX(u16) ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
+/** @} */
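+
+/* Example (illustrative values): packing and unpacking a tracking word with
+ * the macros above.
+ *
+ *      uint16_t const u16 = PGMPOOL_TD_MAKE(1, 0x123);  // one shadow reference, pool page index 0x123
+ *      Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
+ *      Assert(PGMPOOL_TD_GET_IDX(u16)   == 0x123);
+ *      // PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt) marks idx as a PGMPOOLPHYSEXT list head instead.
+ */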
+
+
+
+/** @name A20 gate macros
+ * @{ */
+#define PGM_WITH_A20
+#ifdef PGM_WITH_A20
+# define PGM_A20_IS_ENABLED(a_pVCpu) ((a_pVCpu)->pgm.s.fA20Enabled)
+# define PGM_A20_APPLY(a_pVCpu, a_GCPhys) ((a_GCPhys) & (a_pVCpu)->pgm.s.GCPhysA20Mask)
+# define PGM_A20_APPLY_TO_VAR(a_pVCpu, a_GCPhysVar) \
+ do { a_GCPhysVar &= (a_pVCpu)->pgm.s.GCPhysA20Mask; } while (0)
+# define PGM_A20_ASSERT_MASKED(pVCpu, a_GCPhys) Assert(PGM_A20_APPLY(pVCpu, a_GCPhys) == (a_GCPhys))
+#else
+# define PGM_A20_IS_ENABLED(a_pVCpu) (true)
+# define PGM_A20_APPLY(a_pVCpu, a_GCPhys) (a_GCPhys)
+# define PGM_A20_APPLY_TO_VAR(a_pVCpu, a_GCPhysVar) do { } while (0)
+# define PGM_A20_ASSERT_MASKED(pVCpu, a_GCPhys) do { } while (0)
+#endif
+/** @} */
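+
+/* Example (illustrative, assuming GCPhysA20Mask clears bit 20 while the A20
+ * gate is disabled): an access just above 1MB wraps down, mimicking the 8086.
+ *
+ *      RTGCPHYS GCPhys = UINT32_C(0x00100010);
+ *      PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);   // GCPhys becomes 0x00000010 when A20 is disabled
+ */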
+
+
+/**
+ * Guest page table walk for the AMD64 mode.
+ */
+typedef struct PGMPTWALKGSTAMD64
+{
+ PX86PML4 pPml4;
+ PX86PML4E pPml4e;
+ X86PML4E Pml4e;
+
+ PX86PDPT pPdpt;
+ PX86PDPE pPdpe;
+ X86PDPE Pdpe;
+
+ PX86PDPAE pPd;
+ PX86PDEPAE pPde;
+ X86PDEPAE Pde;
+
+ PX86PTPAE pPt;
+ PX86PTEPAE pPte;
+ X86PTEPAE Pte;
+} PGMPTWALKGSTAMD64;
+/** Pointer to a AMD64 guest page table walk. */
+typedef PGMPTWALKGSTAMD64 *PPGMPTWALKGSTAMD64;
+/** Pointer to a const AMD64 guest page table walk. */
+typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64;
+
+/**
+ * Guest page table walk for the EPT mode.
+ */
+typedef struct PGMPTWALKGSTEPT
+{
+ PEPTPML4 pPml4;
+ PEPTPML4E pPml4e;
+ EPTPML4E Pml4e;
+
+ PEPTPDPT pPdpt;
+ PEPTPDPTE pPdpte;
+ EPTPDPTE Pdpte;
+
+ PEPTPD pPd;
+ PEPTPDE pPde;
+ EPTPDE Pde;
+
+ PEPTPT pPt;
+ PEPTPTE pPte;
+ EPTPTE Pte;
+} PGMPTWALKGSTEPT;
+/** Pointer to an EPT guest page table walk. */
+typedef PGMPTWALKGSTEPT *PPGMPTWALKGSTEPT;
+/** Pointer to a const EPT guest page table walk. */
+typedef PGMPTWALKGSTEPT const *PCPGMPTWALKGSTEPT;
+
+/**
+ * Guest page table walk for the PAE mode.
+ */
+typedef struct PGMPTWALKGSTPAE
+{
+ PX86PDPT pPdpt;
+ PX86PDPE pPdpe;
+ X86PDPE Pdpe;
+
+ PX86PDPAE pPd;
+ PX86PDEPAE pPde;
+ X86PDEPAE Pde;
+
+ PX86PTPAE pPt;
+ PX86PTEPAE pPte;
+ X86PTEPAE Pte;
+} PGMPTWALKGSTPAE;
+/** Pointer to a PAE guest page table walk. */
+typedef PGMPTWALKGSTPAE *PPGMPTWALKGSTPAE;
+/** Pointer to a const AMD64 guest page table walk. */
+typedef PGMPTWALKGSTPAE const *PCPGMPTWALKGSTPAE;
+
+/**
+ * Guest page table walk for the 32-bit mode.
+ */
+typedef struct PGMPTWALKGST32BIT
+{
+ PX86PD pPd;
+ PX86PDE pPde;
+ X86PDE Pde;
+
+ PX86PT pPt;
+ PX86PTE pPte;
+ X86PTE Pte;
+} PGMPTWALKGST32BIT;
+/** Pointer to a 32-bit guest page table walk. */
+typedef PGMPTWALKGST32BIT *PPGMPTWALKGST32BIT;
+/** Pointer to a const 32-bit guest page table walk. */
+typedef PGMPTWALKGST32BIT const *PCPGMPTWALKGST32BIT;
+
+/**
+ * Which part of PGMPTWALKGST that is valid.
+ */
+typedef enum PGMPTWALKGSTTYPE
+{
+ /** Customary invalid 0 value. */
+ PGMPTWALKGSTTYPE_INVALID = 0,
+ /** PGMPTWALKGST::u.Amd64 is valid. */
+ PGMPTWALKGSTTYPE_AMD64,
+ /** PGMPTWALKGST::u.Pae is valid. */
+ PGMPTWALKGSTTYPE_PAE,
+ /** PGMPTWALKGST::u.Legacy is valid. */
+ PGMPTWALKGSTTYPE_32BIT,
+ /** PGMPTWALKGST::u.Ept is valid. */
+ PGMPTWALKGSTTYPE_EPT,
+ /** Customary 32-bit type hack. */
+ PGMPTWALKGSTTYPE_32BIT_HACK = 0x7fff0000
+} PGMPTWALKGSTTYPE;
+
+/**
+ * Combined guest page table walk result.
+ */
+typedef struct PGMPTWALKGST
+{
+ union
+ {
+ /** The page walker for AMD64. */
+ PGMPTWALKGSTAMD64 Amd64;
+ /** The page walker for PAE (32-bit). */
+ PGMPTWALKGSTPAE Pae;
+ /** The page walker for 32-bit paging (called legacy due to C naming
+ * convention). */
+ PGMPTWALKGST32BIT Legacy;
+ /** The page walker for EPT (SLAT). */
+ PGMPTWALKGSTEPT Ept;
+ } u;
+ /** Indicates which part of the union is valid. */
+ PGMPTWALKGSTTYPE enmType;
+} PGMPTWALKGST;
+/** Pointer to a combined guest page table walk result. */
+typedef PGMPTWALKGST *PPGMPTWALKGST;
+/** Pointer to a read-only combined guest page table walk result. */
+typedef PGMPTWALKGST const *PCPGMPTWALKGST;
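+
+/* Illustrative consumer sketch (not the original code): enmType tells which
+ * union arm is valid, e.g. when picking out the PDE of a walk result.
+ *
+ *      static uint64_t pgmExampleGetPdeValue(PCPGMPTWALKGST pWalkGst)
+ *      {
+ *          switch (pWalkGst->enmType)
+ *          {
+ *              case PGMPTWALKGSTTYPE_AMD64: return pWalkGst->u.Amd64.Pde.u;
+ *              case PGMPTWALKGSTTYPE_PAE:   return pWalkGst->u.Pae.Pde.u;
+ *              case PGMPTWALKGSTTYPE_32BIT: return pWalkGst->u.Legacy.Pde.u;
+ *              case PGMPTWALKGSTTYPE_EPT:   return pWalkGst->u.Ept.Pde.u;
+ *              default:                     return 0;
+ *          }
+ *      }
+ */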
+
+
+/** @name Paging mode macros
+ * @{
+ */
+#ifdef IN_RING3
+# define PGM_CTX(a,b) a##R3##b
+# define PGM_CTX_STR(a,b) a "R3" b
+# define PGM_CTX_DECL(type) DECLCALLBACK(type)
+#elif defined(IN_RING0)
+# define PGM_CTX(a,b) a##R0##b
+# define PGM_CTX_STR(a,b) a "R0" b
+# define PGM_CTX_DECL(type) VMMDECL(type)
+#else
+# error "Not IN_RING3 or IN_RING0!"
+#endif
+
+#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
+#define PGM_GST_NAME_RC_REAL_STR(name) "pgmRCGstReal" #name
+#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
+#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
+#define PGM_GST_NAME_RC_PROT_STR(name) "pgmRCGstProt" #name
+#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
+#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
+#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
+#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
+#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
+#define PGM_GST_NAME_RC_PAE_STR(name) "pgmRCGstPAE" #name
+#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
+#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
+#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
+#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
+#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)
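+
+/* Expansion example: in a ring-3 build PGM_GST_NAME_32BIT(GetPage) expands via
+ * PGM_CTX(pgm,Gst32BitGetPage) to pgmR3Gst32BitGetPage, and
+ * PGM_GST_NAME_R0_32BIT_STR(GetPage) yields the string "pgmR0Gst32BitGetPage".
+ * The unsuffixed PGM_GST_NAME()/PGM_GST_DECL() wrappers are not defined here;
+ * they are expected to be defined by the per-mode template instantiations
+ * before the guest paging templates are included. */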
+
+#define PGM_GST_SLAT_NAME_EPT(name) PGM_CTX(pgm,GstSlatEpt##name)
+#define PGM_GST_SLAT_NAME_RC_EPT_STR(name) "pgmRCGstSlatEpt" #name
+#define PGM_GST_SLAT_NAME_R0_EPT_STR(name) "pgmR0GstSlatEpt" #name
+#define PGM_GST_SLAT_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_SLAT_NAME(name)
+
+#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
+#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
+#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
+#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
+#define PGM_SHW_NAME_RC_PAE_STR(name) "pgmRCShwPAE" #name
+#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
+#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
+#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
+#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
+#define PGM_SHW_NAME_NESTED_32BIT(name) PGM_CTX(pgm,ShwNested32Bit##name)
+#define PGM_SHW_NAME_RC_NESTED_32BIT_STR(name) "pgmRCShwNested32Bit" #name
+#define PGM_SHW_NAME_R0_NESTED_32BIT_STR(name) "pgmR0ShwNested32Bit" #name
+#define PGM_SHW_NAME_NESTED_PAE(name) PGM_CTX(pgm,ShwNestedPAE##name)
+#define PGM_SHW_NAME_RC_NESTED_PAE_STR(name) "pgmRCShwNestedPAE" #name
+#define PGM_SHW_NAME_R0_NESTED_PAE_STR(name) "pgmR0ShwNestedPAE" #name
+#define PGM_SHW_NAME_NESTED_AMD64(name) PGM_CTX(pgm,ShwNestedAMD64##name)
+#define PGM_SHW_NAME_RC_NESTED_AMD64_STR(name) "pgmRCShwNestedAMD64" #name
+#define PGM_SHW_NAME_R0_NESTED_AMD64_STR(name) "pgmR0ShwNestedAMD64" #name
+#define PGM_SHW_NAME_EPT(name) PGM_CTX(pgm,ShwEPT##name)
+#define PGM_SHW_NAME_RC_EPT_STR(name) "pgmRCShwEPT" #name
+#define PGM_SHW_NAME_R0_EPT_STR(name) "pgmR0ShwEPT" #name
+#define PGM_SHW_NAME_NONE(name) PGM_CTX(pgm,ShwNone##name)
+#define PGM_SHW_NAME_RC_NONE_STR(name) "pgmRCShwNone" #name
+#define PGM_SHW_NAME_R0_NONE_STR(name) "pgmR0ShwNone" #name
+#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
+
+/* Shw_Gst */
+#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
+#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
+#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
+#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
+#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
+#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
+#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
+#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
+#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
+#define PGM_BTH_NAME_NESTED_32BIT_REAL(name) PGM_CTX(pgm,BthNested32BitReal##name)
+#define PGM_BTH_NAME_NESTED_32BIT_PROT(name) PGM_CTX(pgm,BthNested32BitProt##name)
+#define PGM_BTH_NAME_NESTED_32BIT_32BIT(name) PGM_CTX(pgm,BthNested32Bit32Bit##name)
+#define PGM_BTH_NAME_NESTED_32BIT_PAE(name) PGM_CTX(pgm,BthNested32BitPAE##name)
+#define PGM_BTH_NAME_NESTED_32BIT_AMD64(name) PGM_CTX(pgm,BthNested32BitAMD64##name)
+#define PGM_BTH_NAME_NESTED_PAE_REAL(name) PGM_CTX(pgm,BthNestedPAEReal##name)
+#define PGM_BTH_NAME_NESTED_PAE_PROT(name) PGM_CTX(pgm,BthNestedPAEProt##name)
+#define PGM_BTH_NAME_NESTED_PAE_32BIT(name) PGM_CTX(pgm,BthNestedPAE32Bit##name)
+#define PGM_BTH_NAME_NESTED_PAE_PAE(name) PGM_CTX(pgm,BthNestedPAEPAE##name)
+#define PGM_BTH_NAME_NESTED_PAE_AMD64(name) PGM_CTX(pgm,BthNestedPAEAMD64##name)
+#define PGM_BTH_NAME_NESTED_AMD64_REAL(name) PGM_CTX(pgm,BthNestedAMD64Real##name)
+#define PGM_BTH_NAME_NESTED_AMD64_PROT(name) PGM_CTX(pgm,BthNestedAMD64Prot##name)
+#define PGM_BTH_NAME_NESTED_AMD64_32BIT(name) PGM_CTX(pgm,BthNestedAMD6432Bit##name)
+#define PGM_BTH_NAME_NESTED_AMD64_PAE(name) PGM_CTX(pgm,BthNestedAMD64PAE##name)
+#define PGM_BTH_NAME_NESTED_AMD64_AMD64(name) PGM_CTX(pgm,BthNestedAMD64AMD64##name)
+#define PGM_BTH_NAME_EPT_REAL(name) PGM_CTX(pgm,BthEPTReal##name)
+#define PGM_BTH_NAME_EPT_PROT(name) PGM_CTX(pgm,BthEPTProt##name)
+#define PGM_BTH_NAME_EPT_32BIT(name) PGM_CTX(pgm,BthEPT32Bit##name)
+#define PGM_BTH_NAME_EPT_PAE(name) PGM_CTX(pgm,BthEPTPAE##name)
+#define PGM_BTH_NAME_EPT_AMD64(name) PGM_CTX(pgm,BthEPTAMD64##name)
+#define PGM_BTH_NAME_NONE_REAL(name) PGM_CTX(pgm,BthNoneReal##name)
+#define PGM_BTH_NAME_NONE_PROT(name) PGM_CTX(pgm,BthNoneProt##name)
+#define PGM_BTH_NAME_NONE_32BIT(name) PGM_CTX(pgm,BthNone32Bit##name)
+#define PGM_BTH_NAME_NONE_PAE(name) PGM_CTX(pgm,BthNonePAE##name)
+#define PGM_BTH_NAME_NONE_AMD64(name) PGM_CTX(pgm,BthNoneAMD64##name)
+
+#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name) "pgmRCBth32BitReal" #name
+#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name) "pgmRCBth32BitProt" #name
+#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name) "pgmRCBth32Bit32Bit" #name
+#define PGM_BTH_NAME_RC_PAE_REAL_STR(name) "pgmRCBthPAEReal" #name
+#define PGM_BTH_NAME_RC_PAE_PROT_STR(name) "pgmRCBthPAEProt" #name
+#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name) "pgmRCBthPAE32Bit" #name
+#define PGM_BTH_NAME_RC_PAE_PAE_STR(name) "pgmRCBthPAEPAE" #name
+#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name) "pgmRCBthAMD64AMD64" #name
+#define PGM_BTH_NAME_RC_NESTED_32BIT_REAL_STR(name) "pgmRCBthNested32BitReal" #name
+#define PGM_BTH_NAME_RC_NESTED_32BIT_PROT_STR(name) "pgmRCBthNested32BitProt" #name
+#define PGM_BTH_NAME_RC_NESTED_32BIT_32BIT_STR(name) "pgmRCBthNested32Bit32Bit" #name
+#define PGM_BTH_NAME_RC_NESTED_32BIT_PAE_STR(name) "pgmRCBthNested32BitPAE" #name
+#define PGM_BTH_NAME_RC_NESTED_32BIT_AMD64_STR(name) "pgmRCBthNested32BitAMD64" #name
+#define PGM_BTH_NAME_RC_NESTED_PAE_REAL_STR(name) "pgmRCBthNestedPAEReal" #name
+#define PGM_BTH_NAME_RC_NESTED_PAE_PROT_STR(name) "pgmRCBthNestedPAEProt" #name
+#define PGM_BTH_NAME_RC_NESTED_PAE_32BIT_STR(name) "pgmRCBthNestedPAE32Bit" #name
+#define PGM_BTH_NAME_RC_NESTED_PAE_PAE_STR(name) "pgmRCBthNestedPAEPAE" #name
+#define PGM_BTH_NAME_RC_NESTED_PAE_AMD64_STR(name) "pgmRCBthNestedPAEAMD64" #name
+#define PGM_BTH_NAME_RC_NESTED_AMD64_REAL_STR(name) "pgmRCBthNestedAMD64Real" #name
+#define PGM_BTH_NAME_RC_NESTED_AMD64_PROT_STR(name) "pgmRCBthNestedAMD64Prot" #name
+#define PGM_BTH_NAME_RC_NESTED_AMD64_32BIT_STR(name) "pgmRCBthNestedAMD6432Bit" #name
+#define PGM_BTH_NAME_RC_NESTED_AMD64_PAE_STR(name) "pgmRCBthNestedAMD64PAE" #name
+#define PGM_BTH_NAME_RC_NESTED_AMD64_AMD64_STR(name) "pgmRCBthNestedAMD64AMD64" #name
+#define PGM_BTH_NAME_RC_EPT_REAL_STR(name) "pgmRCBthEPTReal" #name
+#define PGM_BTH_NAME_RC_EPT_PROT_STR(name) "pgmRCBthEPTProt" #name
+#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name) "pgmRCBthEPT32Bit" #name
+#define PGM_BTH_NAME_RC_EPT_PAE_STR(name) "pgmRCBthEPTPAE" #name
+#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name) "pgmRCBthEPTAMD64" #name
+
+#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
+#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
+#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
+#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
+#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
+#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
+#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
+#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
+#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
+#define PGM_BTH_NAME_R0_NESTED_32BIT_REAL_STR(name) "pgmR0BthNested32BitReal" #name
+#define PGM_BTH_NAME_R0_NESTED_32BIT_PROT_STR(name) "pgmR0BthNested32BitProt" #name
+#define PGM_BTH_NAME_R0_NESTED_32BIT_32BIT_STR(name) "pgmR0BthNested32Bit32Bit" #name
+#define PGM_BTH_NAME_R0_NESTED_32BIT_PAE_STR(name) "pgmR0BthNested32BitPAE" #name
+#define PGM_BTH_NAME_R0_NESTED_32BIT_AMD64_STR(name) "pgmR0BthNested32BitAMD64" #name
+#define PGM_BTH_NAME_R0_NESTED_PAE_REAL_STR(name) "pgmR0BthNestedPAEReal" #name
+#define PGM_BTH_NAME_R0_NESTED_PAE_PROT_STR(name) "pgmR0BthNestedPAEProt" #name
+#define PGM_BTH_NAME_R0_NESTED_PAE_32BIT_STR(name) "pgmR0BthNestedPAE32Bit" #name
+#define PGM_BTH_NAME_R0_NESTED_PAE_PAE_STR(name) "pgmR0BthNestedPAEPAE" #name
+#define PGM_BTH_NAME_R0_NESTED_PAE_AMD64_STR(name) "pgmR0BthNestedPAEAMD64" #name
+#define PGM_BTH_NAME_R0_NESTED_AMD64_REAL_STR(name) "pgmR0BthNestedAMD64Real" #name
+#define PGM_BTH_NAME_R0_NESTED_AMD64_PROT_STR(name) "pgmR0BthNestedAMD64Prot" #name
+#define PGM_BTH_NAME_R0_NESTED_AMD64_32BIT_STR(name) "pgmR0BthNestedAMD6432Bit" #name
+#define PGM_BTH_NAME_R0_NESTED_AMD64_PAE_STR(name) "pgmR0BthNestedAMD64PAE" #name
+#define PGM_BTH_NAME_R0_NESTED_AMD64_AMD64_STR(name) "pgmR0BthNestedAMD64AMD64" #name
+#define PGM_BTH_NAME_R0_EPT_REAL_STR(name) "pgmR0BthEPTReal" #name
+#define PGM_BTH_NAME_R0_EPT_PROT_STR(name) "pgmR0BthEPTProt" #name
+#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name) "pgmR0BthEPT32Bit" #name
+#define PGM_BTH_NAME_R0_EPT_PAE_STR(name) "pgmR0BthEPTPAE" #name
+#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name) "pgmR0BthEPTAMD64" #name
+
+#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
+/** @} */
+
+
+/**
+ * Function pointers for guest paging.
+ */
+typedef struct PGMMODEDATAGST
+{
+ /** The guest mode type. */
+ uint32_t uType;
+ DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk));
+ DECLCALLBACKMEMBER(int, pfnModifyPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
+ DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
+ DECLCALLBACKMEMBER(int, pfnExit,(PVMCPUCC pVCpu));
+#ifdef IN_RING3
+ DECLCALLBACKMEMBER(int, pfnRelocate,(PVMCPUCC pVCpu, RTGCPTR offDelta)); /**< Only in ring-3. */
+#endif
+} PGMMODEDATAGST;
+
+/** The length of g_aPgmGuestModeData. */
+#if VBOX_WITH_64_BITS_GUESTS
+# define PGM_GUEST_MODE_DATA_ARRAY_SIZE (PGM_TYPE_AMD64 + 1)
+#else
+# define PGM_GUEST_MODE_DATA_ARRAY_SIZE (PGM_TYPE_PAE + 1)
+#endif
+/** The guest mode data array. */
+extern PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE];
+
+
+/**
+ * Function pointers for shadow paging.
+ */
+typedef struct PGMMODEDATASHW
+{
+ /** The shadow mode type. */
+ uint32_t uType;
+ DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
+ DECLCALLBACKMEMBER(int, pfnModifyPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags,
+ uint64_t fMask, uint32_t fOpFlags));
+ DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu));
+ DECLCALLBACKMEMBER(int, pfnExit,(PVMCPUCC pVCpu));
+#ifdef IN_RING3
+ DECLCALLBACKMEMBER(int, pfnRelocate,(PVMCPUCC pVCpu, RTGCPTR offDelta)); /**< Only in ring-3. */
+#endif
+} PGMMODEDATASHW;
+
+/** The length of g_aPgmShadowModeData. */
+#define PGM_SHADOW_MODE_DATA_ARRAY_SIZE PGM_TYPE_END
+/** The shadow mode data array. */
+extern PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE];
+
+
+/**
+ * Function pointers for guest+shadow paging.
+ */
+typedef struct PGMMODEDATABTH
+{
+ /** The shadow mode type. */
+ uint32_t uShwType;
+ /** The guest mode type. */
+ uint32_t uGstType;
+
+ DECLCALLBACKMEMBER(int, pfnInvalidatePage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage));
+ DECLCALLBACKMEMBER(int, pfnSyncCR3,(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
+ DECLCALLBACKMEMBER(int, pfnPrefetchPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage));
+ DECLCALLBACKMEMBER(int, pfnVerifyAccessSyncPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
+ DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
+ DECLCALLBACKMEMBER(int, pfnUnmapCR3,(PVMCPUCC pVCpu));
+ DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
+#ifndef IN_RING3
+ DECLCALLBACKMEMBER(int, pfnTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault, bool *pfLockTaken));
+ DECLCALLBACKMEMBER(int, pfnNestedTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPHYS GCPhysNested,
+ bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
+ bool *pfLockTaken));
+#endif
+#ifdef VBOX_STRICT
+ DECLCALLBACKMEMBER(unsigned, pfnAssertCR3,(PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
+#endif
+} PGMMODEDATABTH;
+
+/** The length of g_aPgmBothModeData. */
+#define PGM_BOTH_MODE_DATA_ARRAY_SIZE ((PGM_TYPE_END - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END)
+/** The guest+shadow mode data array. */
+extern PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE];
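+
+/* Indexing sketch (implied by the array size above; the variable names are
+ * hypothetical): an entry is looked up by combining the shadow and guest mode
+ * types.
+ *
+ *      uintptr_t const idxBth = (uintptr_t)(uShwType - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + uGstType;
+ *      PGMMODEDATABTH const *pModeData = &g_aPgmBothModeData[idxBth];
+ *      Assert(pModeData->uShwType == uShwType && pModeData->uGstType == uGstType);
+ */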
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * PGM statistics.
+ */
+typedef struct PGMSTATS
+{
+ /* R3 only: */
+ STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
+ STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
+
+ /* R3+RZ */
+ STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
+ STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
+ STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
+ STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
+ STAMCOUNTER StatPageMapTlbFlushes; /**< ALL: Ring-3/0 page mapper TLB flushes. */
+ STAMCOUNTER StatPageMapTlbFlushEntry; /**< ALL: Ring-3/0 page mapper TLB single entry flushes. */
+ STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
+ STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
+ STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
+ STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
+ STAMCOUNTER StatRZRamRangeTlbHits; /**< RC/R0: RAM range TLB hits. */
+ STAMCOUNTER StatRZRamRangeTlbMisses; /**< RC/R0: RAM range TLB misses. */
+ STAMCOUNTER StatR3RamRangeTlbHits; /**< R3: RAM range TLB hits. */
+ STAMCOUNTER StatR3RamRangeTlbMisses; /**< R3: RAM range TLB misses. */
+ STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
+ STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
+ STAMCOUNTER StatR3PhysHandlerLookupHits; /**< R3: Number of cache hits when looking up physical handlers. */
+ STAMCOUNTER StatR3PhysHandlerLookupMisses; /**< R3: Number of cache misses when looking up physical handlers. */
+    STAMCOUNTER StatRZPhysHandlerLookupHits;        /**< RC/R0: Number of cache hits when looking up physical handlers. */
+    STAMCOUNTER StatRZPhysHandlerLookupMisses;      /**< RC/R0: Number of cache misses when looking up physical handlers. */
+ STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
+ STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
+/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
+ STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
+ STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
+/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
+
+ /* RC only: */
+ STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
+ STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
+
+ STAMCOUNTER StatRZPhysRead;
+ STAMCOUNTER StatRZPhysReadBytes;
+ STAMCOUNTER StatRZPhysWrite;
+ STAMCOUNTER StatRZPhysWriteBytes;
+ STAMCOUNTER StatR3PhysRead;
+ STAMCOUNTER StatR3PhysReadBytes;
+ STAMCOUNTER StatR3PhysWrite;
+ STAMCOUNTER StatR3PhysWriteBytes;
+ STAMCOUNTER StatRCPhysRead;
+ STAMCOUNTER StatRCPhysReadBytes;
+ STAMCOUNTER StatRCPhysWrite;
+ STAMCOUNTER StatRCPhysWriteBytes;
+
+ STAMCOUNTER StatRZPhysSimpleRead;
+ STAMCOUNTER StatRZPhysSimpleReadBytes;
+ STAMCOUNTER StatRZPhysSimpleWrite;
+ STAMCOUNTER StatRZPhysSimpleWriteBytes;
+ STAMCOUNTER StatR3PhysSimpleRead;
+ STAMCOUNTER StatR3PhysSimpleReadBytes;
+ STAMCOUNTER StatR3PhysSimpleWrite;
+ STAMCOUNTER StatR3PhysSimpleWriteBytes;
+ STAMCOUNTER StatRCPhysSimpleRead;
+ STAMCOUNTER StatRCPhysSimpleReadBytes;
+ STAMCOUNTER StatRCPhysSimpleWrite;
+ STAMCOUNTER StatRCPhysSimpleWriteBytes;
+
+ STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
+ STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
+ STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
+    STAMCOUNTER StatTrackAliasedLots;               /**< The number of times we're hitting pages which have overflowed cRef2. */
+ STAMCOUNTER StatTrackNoExtentsLeft; /**< The number of times the extent list was exhausted. */
+    STAMCOUNTER StatTrackOverflows;                 /**< The number of times the extent list grows too long. */
+ STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
+
+    STAMPROFILE StatLargePageAlloc2;                /**< Time spent allocating large pages. */
+ STAMPROFILE StatLargePageSetup; /**< Time spent setting up newly allocated large pages. */
+ /** pgmPhysIsValidLargePage profiling - R3 */
+ STAMPROFILE StatR3IsValidLargePage;
+ /** pgmPhysIsValidLargePage profiling - RZ*/
+ STAMPROFILE StatRZIsValidLargePage;
+
+ STAMPROFILE StatChunkAging;
+ STAMPROFILE StatChunkFindCandidate;
+ STAMPROFILE StatChunkUnmap;
+ STAMPROFILE StatChunkMap;
+} PGMSTATS;
+#endif /* VBOX_WITH_STATISTICS */
+
+
+/**
+ * PGM Data (part of VM)
+ */
+typedef struct PGM
+{
+ /** The zero page. */
+ uint8_t abZeroPg[RT_MAX(HOST_PAGE_SIZE, GUEST_PAGE_SIZE)];
+ /** The MMIO placeholder page. */
+ uint8_t abMmioPg[RT_MAX(HOST_PAGE_SIZE, GUEST_PAGE_SIZE)];
+
+    /** @name The zero page (abZeroPg).
+ * @{ */
+ /** The host physical address of the zero page. */
+ RTHCPHYS HCPhysZeroPg;
+ /** @}*/
+
+ /** @name The Invalid MMIO page (abMmioPg).
+ * This page is filled with 0xfeedface.
+ * @{ */
+ /** The host physical address of the invalid MMIO page. */
+ RTHCPHYS HCPhysMmioPg;
+    /** The host physical address of the invalid MMIO page plus all invalid
+ * physical address bits set. This is used to trigger X86_TRAP_PF_RSVD.
+ * @remarks Check fLessThan52PhysicalAddressBits before use. */
+ RTHCPHYS HCPhysInvMmioPg;
+ /** @} */
+
+ /** @cfgm{/RamPreAlloc, boolean, false}
+ * Indicates whether the base RAM should all be allocated before starting
+ * the VM (default), or if it should be allocated when first written to.
+ */
+ bool fRamPreAlloc;
+#ifdef VBOX_WITH_PGM_NEM_MODE
+ /** Set if we're operating in NEM memory mode.
+ *
+ * NEM mode implies that memory is allocated in big chunks for each RAM range
+     * rather than on demand page by page.  Memory is also not locked, so PGM has
+     * no physical addresses for the pages.  Page sharing is out of the question.
+     * Ballooning depends on the native execution engine, but is probably pointless
+     * as well. */
+ bool fNemMode;
+# define PGM_IS_IN_NEM_MODE(a_pVM) ((a_pVM)->pgm.s.fNemMode)
+#else
+# define PGM_IS_IN_NEM_MODE(a_pVM) (false)
+#endif
+ /** Indicates whether write monitoring is currently in use.
+ * This is used to prevent conflicts between live saving and page sharing
+ * detection. */
+ bool fPhysWriteMonitoringEngaged;
+    /** Set if the CPU has less than 52-bit physical address width.
+     * This is used when deciding whether HCPhysInvMmioPg can be relied upon to
+     * trigger X86_TRAP_PF_RSVD. */
+ bool fLessThan52PhysicalAddressBits;
+ /** Set when nested paging is active.
+ * This is meant to save calls to HMIsNestedPagingActive and let the
+ * compilers optimize the code better. Whether we use nested paging or
+ * not is something we find out during VMM initialization and we won't
+ * change this later on. */
+ bool fNestedPaging;
+ /** We're not in a state which permits writes to guest memory.
+ * (Only used in strict builds.) */
+ bool fNoMorePhysWrites;
+ /** @cfgm{/PageFusionAllowed, boolean, false}
+ * Whether page fusion is allowed. */
+ bool fPageFusionAllowed;
+ /** @cfgm{/PGM/PciPassThrough, boolean, false}
+ * Whether PCI passthrough is enabled. */
+ bool fPciPassthrough;
+ /** The number of MMIO2 regions (serves as the next MMIO2 ID). */
+ uint8_t cMmio2Regions;
+ /** Restore original ROM page content when resetting after loading state.
+ * The flag is set by pgmR3LoadRomRanges and cleared at reset. This
+     * enables the VM to start using an updated ROM without having to power the
+     * VM down; rebooting or resetting it is sufficient. */
+ bool fRestoreRomPagesOnReset;
+ /** Whether to automatically clear all RAM pages on reset. */
+ bool fZeroRamPagesOnReset;
+ /** Large page enabled flag. */
+ bool fUseLargePages;
+ /** Alignment padding. */
+#ifndef VBOX_WITH_PGM_NEM_MODE
+ bool afAlignment3[1];
+#endif
+ /** The host paging mode. (This is what SUPLib reports.) */
+ SUPPAGINGMODE enmHostMode;
+ bool afAlignment3b[2];
+
+    /** Generation ID for the RAM ranges. This member is incremented every time
+ * a RAM range is linked or unlinked. */
+ uint32_t volatile idRamRangesGen;
+
+ /** Physical access handler type for ROM protection. */
+ PGMPHYSHANDLERTYPE hRomPhysHandlerType;
+ /** Physical access handler type for MMIO2 dirty page tracing. */
+ PGMPHYSHANDLERTYPE hMmio2DirtyPhysHandlerType;
+
+ /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
+ RTGCPHYS GCPhys4MBPSEMask;
+ /** Mask containing the invalid bits of a guest physical address.
+ * @remarks this does not stop at bit 52. */
+ RTGCPHYS GCPhysInvAddrMask;
+
+
+ /** RAM range TLB for R3. */
+ R3PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR3[PGM_RAMRANGE_TLB_ENTRIES];
+ /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
+ * This is sorted by physical address and contains no overlapping ranges. */
+ R3PTRTYPE(PPGMRAMRANGE) pRamRangesXR3;
+ /** Root of the RAM range search tree for ring-3. */
+ R3PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR3;
+ /** Shadow Page Pool - R3 Ptr. */
+ R3PTRTYPE(PPGMPOOL) pPoolR3;
+ /** Pointer to the list of ROM ranges - for R3.
+ * This is sorted by physical address and contains no overlapping ranges. */
+ R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
+ /** Pointer to the list of MMIO2 ranges - for R3.
+ * Registration order. */
+ R3PTRTYPE(PPGMREGMMIO2RANGE) pRegMmioRangesR3;
+ /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */
+ R3PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];
+
+ /** RAM range TLB for R0. */
+ R0PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR0[PGM_RAMRANGE_TLB_ENTRIES];
+ /** R0 pointer corresponding to PGM::pRamRangesXR3. */
+ R0PTRTYPE(PPGMRAMRANGE) pRamRangesXR0;
+ /** Root of the RAM range search tree for ring-0. */
+ R0PTRTYPE(PPGMRAMRANGE) pRamRangeTreeR0;
+ /** Shadow Page Pool - R0 Ptr. */
+ R0PTRTYPE(PPGMPOOL) pPoolR0;
+ /** R0 pointer corresponding to PGM::pRomRangesR3. */
+ R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
+ /** MMIO2 lookup array for ring-0. Indexed by idMmio2 minus 1. */
+ R0PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];
+
+ /** Hack: Number of deprecated page mapping locks taken by the current lock
+ * owner via pgmPhysGCPhys2CCPtrInternalDepr. */
+ uint32_t cDeprecatedPageLocks;
+
+ /** Registered physical access handler types. */
+ uint32_t cPhysHandlerTypes;
+ /** Physical access handler types.
+ * Initialized to callback causing guru meditations and invalid enmKind. */
+ PGMPHYSHANDLERTYPEINTR3 aPhysHandlerTypes[PGMPHYSHANDLERTYPE_COUNT];
+ /** Physical handler allocator, ring-3 edition. */
+#ifdef IN_RING3
+ PGMPHYSHANDLERALLOCATOR PhysHandlerAllocator;
+#else
+ RTCHardAvlTreeSlabAllocatorR3_T PhysHandlerAllocator;
+#endif
+ /** The pointer to the ring-3 mapping of the physical access handler tree. */
+ R3PTRTYPE(PPGMPHYSHANDLERTREE) pPhysHandlerTree;
+    /** Caching the last physical handler we looked up. */
+ uint32_t idxLastPhysHandler;
+
+ uint32_t au64Padding3[5];
+
+ /** PGM critical section.
+     * This protects the physical handlers, the RAM ranges, and the page flag
+     * updating (some of it, anyway).
+ */
+ PDMCRITSECT CritSectX;
+
+ /**
+ * Data associated with managing the ring-3 mappings of the allocation chunks.
+ */
+ struct
+ {
+ /** The chunk mapping TLB. */
+ PGMCHUNKR3MAPTLB Tlb;
+ /** The chunk tree, ordered by chunk id. */
+ R3PTRTYPE(PAVLU32NODECORE) pTree;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Alignment0;
+#endif
+ /** The number of mapped chunks. */
+ uint32_t c;
+ /** @cfgm{/PGM/MaxRing3Chunks, uint32_t, host dependent}
+ * The maximum number of mapped chunks. On 64-bit this is unlimited by default,
+ * on 32-bit it defaults to 1 or 3 GB depending on the host. */
+ uint32_t cMax;
+ /** The current time. This is incremented whenever a chunk is inserted. */
+ uint32_t iNow;
+ /** Alignment padding. */
+ uint32_t au32Alignment1[3];
+ } ChunkR3Map;
+
+ /** The page mapping TLB for ring-3. */
+ PGMPAGER3MAPTLB PhysTlbR3;
+ /** The page mapping TLB for ring-0. */
+ PGMPAGER0MAPTLB PhysTlbR0;
+
+ /** The number of handy pages. */
+ uint32_t cHandyPages;
+
+ /** The number of large handy pages. */
+ uint32_t cLargeHandyPages;
+
+ /**
+ * Array of handy pages.
+ *
+     * This array is used in a two-way communication between pgmPhysAllocPage
+ * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
+ * an intermediary.
+ *
+ * The size of this array is important, see pgmPhysEnsureHandyPage for details.
+     * (The current size of 32 pages means 128 KB of handy memory.)
+ */
+ GMMPAGEDESC aHandyPages[PGM_HANDY_PAGES];
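+    /* Illustrative sketch (an assumption, not lifted from the PGM implementation).
+     * A consumer such as pgmPhysAllocPage() pops descriptors off the end of this
+     * array while holding the PGM lock, roughly along these lines (the GMMPAGEDESC
+     * field usage below is an assumption):
+     *
+     *      if (pVM->pgm.s.cHandyPages > 0)
+     *      {
+     *          uint32_t const iHandy = --pVM->pgm.s.cHandyPages;
+     *          GMMPAGEDESC const *pDesc = &pVM->pgm.s.aHandyPages[iHandy];
+     *          // use pDesc->HCPhysGCPhys / pDesc->idPage to back the guest page
+     *      }
+     *      else
+     *          // replenish via PGMR3PhysAllocateHandyPages / ring-0 before retrying
+     */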
+
+ /**
+ * Array of large handy pages. (currently size 1)
+ *
+     * This array is used in a two-way communication between pgmPhysAllocLargePage
+ * and GMMR0AllocateLargePage, with PGMR3PhysAllocateLargePage serving as
+ * an intermediary.
+ */
+ GMMPAGEDESC aLargeHandyPage[1];
+ /** When to try allocate large pages again after a failure. */
+ uint64_t nsLargePageRetry;
+ /** Number of repeated long allocation times. */
+ uint32_t cLargePageLongAllocRepeats;
+ uint32_t uPadding5;
+
+ /**
+ * Live save data.
+ */
+ struct
+ {
+ /** Per type statistics. */
+ struct
+ {
+ /** The number of ready pages. */
+ uint32_t cReadyPages;
+ /** The number of dirty pages. */
+ uint32_t cDirtyPages;
+ /** The number of ready zero pages. */
+ uint32_t cZeroPages;
+ /** The number of write monitored pages. */
+ uint32_t cMonitoredPages;
+ } Rom,
+ Mmio2,
+ Ram;
+ /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
+ uint32_t cIgnoredPages;
+ /** Indicates that a live save operation is active. */
+ bool fActive;
+ /** Padding. */
+ bool afReserved[2];
+ /** The next history index. */
+ uint8_t iDirtyPagesHistory;
+ /** History of the total amount of dirty pages. */
+ uint32_t acDirtyPagesHistory[64];
+ /** Short term dirty page average. */
+ uint32_t cDirtyPagesShort;
+ /** Long term dirty page average. */
+ uint32_t cDirtyPagesLong;
+ /** The number of saved pages. This is used to get some kind of estimate of the
+ * link speed so we can decide when we're done. It is reset after the first
+ * 7 passes so the speed estimate doesn't get inflated by the initial set of
+ * zero pages. */
+ uint64_t cSavedPages;
+ /** The nanosecond timestamp when cSavedPages was 0. */
+ uint64_t uSaveStartNS;
+ /** Pages per second (for statistics). */
+ uint32_t cPagesPerSecond;
+ uint32_t cAlignment;
+ } LiveSave;
+
+ /** @name Error injection.
+ * @{ */
+ /** Inject handy page allocation errors pretending we're completely out of
+ * memory. */
+ bool volatile fErrInjHandyPages;
+ /** Padding. */
+ bool afReserved[3];
+ /** @} */
+
+ /** @name Release Statistics
+ * @{ */
+ uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
+ uint32_t cPrivatePages; /**< The number of private pages. */
+ uint32_t cSharedPages; /**< The number of shared pages. */
+ uint32_t cReusedSharedPages; /**< The number of reused shared pages. */
+ uint32_t cZeroPages; /**< The number of zero backed pages. */
+ uint32_t cPureMmioPages; /**< The number of pure MMIO pages. */
+ uint32_t cMonitoredPages; /**< The number of write monitored pages. */
+ uint32_t cWrittenToPages; /**< The number of previously write monitored pages. */
+ uint32_t cWriteLockedPages; /**< The number of write locked pages. */
+ uint32_t cReadLockedPages; /**< The number of read locked pages. */
+ uint32_t cBalloonedPages; /**< The number of ballooned pages. */
+ uint32_t cMappedChunks; /**< Number of times we mapped a chunk. */
+ uint32_t cUnmappedChunks; /**< Number of times we unmapped a chunk. */
+ uint32_t cLargePages; /**< The number of large pages. */
+ uint32_t cLargePagesDisabled; /**< The number of disabled large pages. */
+/* uint32_t aAlignment4[1]; */
+
+ STAMPROFILE StatLargePageAlloc; /**< Time spent by the host OS for large page allocation. */
+ STAMCOUNTER StatLargePageAllocFailed; /**< Count allocation failures. */
+    STAMCOUNTER StatLargePageOverflow;              /**< The number of times allocating a large page takes more than the allowed period. */
+ STAMCOUNTER StatLargePageReused; /**< The number of large pages we've reused.*/
+ STAMCOUNTER StatLargePageRefused; /**< The number of times we couldn't use a large page.*/
+ STAMCOUNTER StatLargePageRecheck; /**< The number of times we rechecked a disabled large page.*/
+    STAMCOUNTER StatLargePageTlbFlush;              /**< The number of times a full VCPU TLB flush was required after allocation. */
+ STAMCOUNTER StatLargePageZeroEvict; /**< The number of zero page mappings we had to evict when allocating a large page. */
+
+ STAMPROFILE StatShModCheck; /**< Profiles shared module checks. */
+
+ STAMPROFILE StatMmio2QueryAndResetDirtyBitmap; /**< Profiling PGMR3PhysMmio2QueryAndResetDirtyBitmap. */
+ /** @} */
+
+#ifdef VBOX_WITH_STATISTICS
+ /** These are optional statistics that used to be on the hyper heap. */
+ PGMSTATS Stats;
+#endif
+} PGM;
+#ifndef IN_TSTVMSTRUCTGC /* HACK */
+AssertCompileMemberAlignment(PGM, CritSectX, 8);
+AssertCompileMemberAlignment(PGM, ChunkR3Map, 16);
+AssertCompileMemberAlignment(PGM, PhysTlbR3, 32); /** @todo 32 byte alignment! */
+AssertCompileMemberAlignment(PGM, PhysTlbR0, 32);
+AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
+AssertCompileMemberAlignment(PGM, aHandyPages, 8);
+#endif /* !IN_TSTVMSTRUCTGC */
+/** Pointer to the PGM instance data. */
+typedef PGM *PPGM;
+
+
+#ifdef VBOX_WITH_STATISTICS
+/**
+ * Per-CPU statistics for PGM (used to be on the heap).
+ */
+typedef struct PGMCPUSTATS
+{
+ /* Common */
+ STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
+ STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
+
+ /* R0 only: */
+ STAMPROFILE StatR0NpMiscfg; /**< R0: PGMR0Trap0eHandlerNPMisconfig() profiling. */
+ STAMCOUNTER StatR0NpMiscfgSyncPage; /**< R0: SyncPage calls from PGMR0Trap0eHandlerNPMisconfig(). */
+
+ /* RZ only: */
+ STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
+ STAMPROFILE StatRZTrap0eTime2Ballooned; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is read access to a ballooned page. */
+ STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
+ STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
+ STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
+ STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
+ STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
+ STAMPROFILE StatRZTrap0eTime2InvalidPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address. */
+ STAMPROFILE StatRZTrap0eTime2MakeWritable; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a page that needed to be made writable. */
+ STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
+ STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
+ STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
+ STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
+ STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
+ STAMPROFILE StatRZTrap0eTime2WPEmulation; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP emulation. */
+ STAMPROFILE StatRZTrap0eTime2Wp0RoUsHack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be enabled. */
+ STAMPROFILE StatRZTrap0eTime2Wp0RoUsUnhack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled. */
+ STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
+ STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
+ STAMCOUNTER StatRZTrap0eHandlersPhysAll; /**< RC/R0: Number of traps due to physical all-access handlers. */
+ STAMCOUNTER StatRZTrap0eHandlersPhysAllOpt; /**< RC/R0: Number of the physical all-access handler traps using the optimization. */
+ STAMCOUNTER StatRZTrap0eHandlersPhysWrite; /**< RC/R0: Number of traps due to write-physical access handlers. */
+ STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
+ STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
+ STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */
+ STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */
+ STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
+ STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
+ STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the \#PFs. */
+ STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
+ STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
+ STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
+ STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
+ STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
+ STAMCOUNTER StatRZDynMapMigrateInvlPg; /**< RZ: invlpg in PGMR0DynMapMigrateAutoSet. */
+ STAMPROFILE StatRZDynMapGCPageInl; /**< RZ: Calls to pgmRZDynMapGCPageInlined. */
+ STAMCOUNTER StatRZDynMapGCPageInlHits; /**< RZ: Hash table lookup hits. */
+    STAMCOUNTER StatRZDynMapGCPageInlMisses;        /**< RZ: Misses that fall back to the common code. */
+ STAMCOUNTER StatRZDynMapGCPageInlRamHits; /**< RZ: 1st ram range hits. */
+ STAMCOUNTER StatRZDynMapGCPageInlRamMisses; /**< RZ: 1st ram range misses, takes slow path. */
+ STAMPROFILE StatRZDynMapHCPageInl; /**< RZ: Calls to pgmRZDynMapHCPageInlined. */
+ STAMCOUNTER StatRZDynMapHCPageInlHits; /**< RZ: Hash table lookup hits. */
+    STAMCOUNTER StatRZDynMapHCPageInlMisses;        /**< RZ: Misses that fall back to the common code. */
+ STAMPROFILE StatRZDynMapHCPage; /**< RZ: Calls to pgmRZDynMapHCPageCommon. */
+ STAMCOUNTER StatRZDynMapSetOptimize; /**< RZ: Calls to pgmRZDynMapOptimizeAutoSet. */
+ STAMCOUNTER StatRZDynMapSetSearchFlushes; /**< RZ: Set search restoring to subset flushes. */
+ STAMCOUNTER StatRZDynMapSetSearchHits; /**< RZ: Set search hits. */
+ STAMCOUNTER StatRZDynMapSetSearchMisses; /**< RZ: Set search misses. */
+ STAMCOUNTER StatRZDynMapPage; /**< RZ: Calls to pgmR0DynMapPage. */
+ STAMCOUNTER StatRZDynMapPageHits0; /**< RZ: Hits at iPage+0. */
+ STAMCOUNTER StatRZDynMapPageHits1; /**< RZ: Hits at iPage+1. */
+ STAMCOUNTER StatRZDynMapPageHits2; /**< RZ: Hits at iPage+2. */
+ STAMCOUNTER StatRZDynMapPageInvlPg; /**< RZ: invlpg. */
+ STAMCOUNTER StatRZDynMapPageSlow; /**< RZ: Calls to pgmR0DynMapPageSlow. */
+ STAMCOUNTER StatRZDynMapPageSlowLoopHits; /**< RZ: Hits in the pgmR0DynMapPageSlow search loop. */
+ STAMCOUNTER StatRZDynMapPageSlowLoopMisses; /**< RZ: Misses in the pgmR0DynMapPageSlow search loop. */
+ //STAMCOUNTER StatRZDynMapPageSlowLostHits; /**< RZ: Lost hits. */
+ STAMCOUNTER StatRZDynMapSubsets; /**< RZ: Times PGMDynMapPushAutoSubset was called. */
+ STAMCOUNTER StatRZDynMapPopFlushes; /**< RZ: Times PGMDynMapPopAutoSubset flushes the subset. */
+ STAMCOUNTER aStatRZDynMapSetFilledPct[11]; /**< RZ: Set fill distribution, percent. */
+
+ /* HC - R3 and (maybe) R0: */
+
+ /* RZ & R3: */
+ STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
+ STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
+ STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
+ STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
+ STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
+ STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
+ STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
+ STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
+ STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
+ STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
+ STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
+ STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
+ STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
+ STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
+    STAMCOUNTER StatRZSyncPagePDNAs;                /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
+    STAMCOUNTER StatRZSyncPagePDOutOfSync;          /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
+ STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
+ STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
+ STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
+ STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
+ STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
+ STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
+    STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking (stale TLB entries). */
+ STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
+ STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
+ STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
+ STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
+ STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
+ STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
+ STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
+ STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
+ STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
+ STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
+    STAMCOUNTER StatRZInvalidatePageSizeChanges;    /**< RC/R0: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */
+    STAMCOUNTER StatRZInvalidatePageSkipped;        /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to a not-present shadow page table or a pending SyncCR3. */
+    STAMCOUNTER StatRZPageOutOfSyncUser;            /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
+    STAMCOUNTER StatRZPageOutOfSyncSupervisor;      /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
+    STAMCOUNTER StatRZPageOutOfSyncUserWrite;       /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF. */
+    STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF. */
+    STAMCOUNTER StatRZPageOutOfSyncBallloon;        /**< RC/R0: The number of times a ballooned page was accessed (read). */
+ STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
+ STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
+ STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
+ STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
+ STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
+ STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
+ STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
+
+ STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
+ STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
+ STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
+ STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
+ STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
+ STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
+ STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
+ STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
+ STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
+ STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
+ STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
+ STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
+ STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
+ STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
+    STAMCOUNTER StatR3SyncPagePDNAs;                /**< R3: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
+    STAMCOUNTER StatR3SyncPagePDOutOfSync;          /**< R3: The number of times we've encountered an out-of-sync PD in SyncPage. */
+ STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
+ STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
+ STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
+ STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
+ STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
+ STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
+ STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
+ STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
+ STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
+ STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
+ STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
+ STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
+ STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
+ STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
+ STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
+ STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
+    STAMCOUNTER StatR3InvalidatePageSizeChanges;    /**< R3: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */
+    STAMCOUNTER StatR3InvalidatePageSkipped;        /**< R3: The number of times PGMInvalidatePage() was skipped due to a not-present shadow page table or a pending SyncCR3. */
+    STAMCOUNTER StatR3PageOutOfSyncUser;            /**< R3: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
+    STAMCOUNTER StatR3PageOutOfSyncSupervisor;      /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
+    STAMCOUNTER StatR3PageOutOfSyncUserWrite;       /**< R3: The number of times an out-of-sync user page was detected in \#PF. */
+    STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF. */
+    STAMCOUNTER StatR3PageOutOfSyncBallloon;        /**< R3: The number of times a ballooned page was accessed (read). */
+ STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
+ STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
+ STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
+ STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
+ STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
+ STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
+ STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
+} PGMCPUSTATS;
+#endif /* VBOX_WITH_STATISTICS */
+
+
+/**
+ * PGMCPU Data (part of VMCPU).
+ */
+typedef struct PGMCPU
+{
+ /** A20 gate mask.
+     * Our current approach to A20 emulation is to let REM do it and not bother
+     * anywhere else. The interesting guests will be operating with it enabled anyway.
+     * But should the need arise, we'll subject physical addresses to this mask. */
+ RTGCPHYS GCPhysA20Mask;
+ /** A20 gate state - boolean! */
+ bool fA20Enabled;
+ /** Mirror of the EFER.NXE bit. Managed by PGMNotifyNxeChanged. */
+ bool fNoExecuteEnabled;
+ /** Whether the guest CR3 and PAE PDPEs have been mapped when guest PAE mode is
+ * active. */
+ bool fPaePdpesAndCr3MappedR3;
+ bool fPaePdpesAndCr3MappedR0;
+
+ /** What needs syncing (PGM_SYNC_*).
+ * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
+ * PGMFlushTLB, and PGMR3Load. */
+ uint32_t fSyncFlags;
+
+ /** The shadow paging mode. */
+ PGMMODE enmShadowMode;
+ /** The guest paging mode. */
+ PGMMODE enmGuestMode;
+ /** The guest second level address translation mode. */
+ PGMSLAT enmGuestSlatMode;
+ /** Guest mode data table index (PGM_TYPE_XXX). */
+ uint8_t volatile idxGuestModeData;
+ /** Shadow mode data table index (PGM_TYPE_XXX). */
+ uint8_t volatile idxShadowModeData;
+ /** Both mode data table index (complicated). */
+ uint8_t volatile idxBothModeData;
+ /** Alignment padding. */
+ uint8_t abPadding[1];
+
+ /** The guest CR3.
+ * When SLAT is active, this is the translated physical address.
+ * When SLAT is inactive, this is the physical address in CR3. */
+ RTGCPHYS GCPhysCR3;
+
+ /** The nested-guest CR3.
+ * When SLAT is active, this is CR3 prior to translation.
+ * When SLAT is inactive, this is unused (and NIL_RTGCPHYS). */
+ RTGCPHYS GCPhysNstGstCR3;
+
+ /** The cached guest CR3 when it has been mapped in PAE mode.
+ * This allows us to skip remapping the CR3 and PAE PDPEs
+ * (in PGMFlushTLB or similar) when it was already done as
+ * part of MOV CRx instruction emulation.
+ */
+ RTGCPHYS GCPhysPaeCR3;
+
+ /** @name 32-bit Guest Paging.
+ * @{ */
+ /** The guest's page directory, R3 pointer. */
+ R3PTRTYPE(PX86PD) pGst32BitPdR3;
+ /** The guest's page directory, R0 pointer. */
+ R0PTRTYPE(PX86PD) pGst32BitPdR0;
+ /** Mask containing the MBZ bits of a big page PDE. */
+ uint32_t fGst32BitMbzBigPdeMask;
+ /** Set if the page size extension (PSE) is enabled. */
+ bool fGst32BitPageSizeExtension;
+ /** Alignment padding. */
+ bool afAlignment2[3];
+ /** @} */
+
+ /** @name PAE Guest Paging.
+ * @{ */
+ /** The guest's page directory pointer table, R3 pointer. */
+ R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
+ /** The guest's page directory pointer table, R0 pointer. */
+ R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
+
+ /** The guest's page directories, R3 pointers.
+ * These are individual pointers and don't have to be adjacent.
+ * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
+ R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
+ /** The guest's page directories, R0 pointers.
+ * Same restrictions as apGstPaePDsR3. */
+ R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
+    /** The physical addresses of the guest page directories (PAE) pointed to by apGstPaePDsR3/R0. */
+ RTGCPHYS aGCPhysGstPaePDs[4];
+ /** Mask containing the MBZ PTE bits. */
+ uint64_t fGstPaeMbzPteMask;
+ /** Mask containing the MBZ PDE bits. */
+ uint64_t fGstPaeMbzPdeMask;
+ /** Mask containing the MBZ big page PDE bits. */
+ uint64_t fGstPaeMbzBigPdeMask;
+ /** Mask containing the MBZ PDPE bits. */
+ uint64_t fGstPaeMbzPdpeMask;
+ /** @} */
+
+ /** @name AMD64 Guest Paging.
+ * @{ */
+ /** The guest's page directory pointer table, R3 pointer. */
+ R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
+ /** The guest's page directory pointer table, R0 pointer. */
+ R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
+ /** Mask containing the MBZ PTE bits. */
+ uint64_t fGstAmd64MbzPteMask;
+ /** Mask containing the MBZ PDE bits. */
+ uint64_t fGstAmd64MbzPdeMask;
+ /** Mask containing the MBZ big page PDE bits. */
+ uint64_t fGstAmd64MbzBigPdeMask;
+ /** Mask containing the MBZ PDPE bits. */
+ uint64_t fGstAmd64MbzPdpeMask;
+ /** Mask containing the MBZ big page PDPE bits. */
+ uint64_t fGstAmd64MbzBigPdpeMask;
+ /** Mask containing the MBZ PML4E bits. */
+ uint64_t fGstAmd64MbzPml4eMask;
+ /** Mask containing the PDPE bits that we shadow. */
+ uint64_t fGstAmd64ShadowedPdpeMask;
+ /** Mask containing the PML4E bits that we shadow. */
+ uint64_t fGstAmd64ShadowedPml4eMask;
+ /** @} */
+
+ /** @name PAE and AMD64 Guest Paging.
+ * @{ */
+ /** Mask containing the PTE bits that we shadow. */
+ uint64_t fGst64ShadowedPteMask;
+ /** Mask containing the PDE bits that we shadow. */
+ uint64_t fGst64ShadowedPdeMask;
+ /** Mask containing the big page PDE bits that we shadow in the PDE. */
+ uint64_t fGst64ShadowedBigPdeMask;
+ /** Mask containing the big page PDE bits that we shadow in the PTE. */
+ uint64_t fGst64ShadowedBigPde4PteMask;
+ /** @} */
+
+ /** @name EPT Guest Paging.
+ * @{ */
+ /** The guest's EPT PML4 table, R3 pointer. */
+ R3PTRTYPE(PEPTPML4) pGstEptPml4R3;
+ /** The guest's EPT PML4 table, R0 pointer. */
+ R0PTRTYPE(PEPTPML4) pGstEptPml4R0;
+ /** The guest's EPT pointer (copy of virtual VMCS). */
+ uint64_t uEptPtr;
+ /** Copy of the VM's IA32_VMX_EPT_VPID_CAP VPID MSR for faster access. Doesn't
+ * change through the lifetime of the VM. */
+ uint64_t uEptVpidCapMsr;
+ /** Mask containing the MBZ PTE bits. */
+ uint64_t fGstEptMbzPteMask;
+ /** Mask containing the MBZ PDE bits. */
+ uint64_t fGstEptMbzPdeMask;
+ /** Mask containing the MBZ big page (2M) PDE bits. */
+ uint64_t fGstEptMbzBigPdeMask;
+ /** Mask containing the MBZ PDPTE bits. */
+ uint64_t fGstEptMbzPdpteMask;
+ /** Mask containing the MBZ big page (1G) PDPTE bits. */
+ uint64_t fGstEptMbzBigPdpteMask;
+ /** Mask containing the MBZ PML4E bits. */
+ uint64_t fGstEptMbzPml4eMask;
+ /** Mask to determine whether an entry is present. */
+ uint64_t fGstEptPresentMask;
+
+ /** Mask containing the EPT PTE bits we shadow. */
+ uint64_t fGstEptShadowedPteMask;
+ /** Mask containing the EPT PDE bits we shadow. */
+ uint64_t fGstEptShadowedPdeMask;
+ /** Mask containing the EPT PDE (2M) bits we shadow. */
+ uint64_t fGstEptShadowedBigPdeMask;
+ /** Mask containing the EPT PDPTE bits we shadow. */
+ uint64_t fGstEptShadowedPdpteMask;
+ /** Mask containing the EPT PML4E bits we shadow. */
+ uint64_t fGstEptShadowedPml4eMask;
+ /** @} */
+
+ /** Pointer to the page of the current active CR3 - R3 Ptr. */
+ R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
+ /** Pointer to the page of the current active CR3 - R0 Ptr. */
+ R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
+
+ /** For saving stack space, the disassembler state is allocated here instead of
+ * on the stack. */
+ DISCPUSTATE DisState;
+
+ /** Counts the number of times the netware WP0+RO+US hack has been applied. */
+ uint64_t cNetwareWp0Hacks;
+
+    /** Counts the number of PGM pool access handler calls. */
+ uint64_t cPoolAccessHandler;
+
+ /** @name Release Statistics
+ * @{ */
+ /** The number of times the guest has switched mode since last reset or statistics reset. */
+ STAMCOUNTER cGuestModeChanges;
+    /** The number of times the A20 gate state has changed since last reset or statistics reset. */
+ STAMCOUNTER cA20Changes;
+ /** @} */
+
+#ifdef VBOX_WITH_STATISTICS
+ /** These are statistics that used to be on the hyper heap. */
+ PGMCPUSTATS Stats;
+#endif
+} PGMCPU;
+/** Pointer to the per-cpu PGM data. */
+typedef PGMCPU *PPGMCPU;
+
+
+/** @name PGM::fSyncFlags Flags
+ * @note Was part of saved state a long time ago.
+ * @{
+ */
+/* 0 used to be PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL */
+/** Always sync CR3. */
+#define PGM_SYNC_ALWAYS RT_BIT(1)
+/** Check guest mapping in SyncCR3. */
+#define PGM_SYNC_MAP_CR3 RT_BIT(3)
+/** Clear the page pool (a lightweight flush). */
+#define PGM_SYNC_CLEAR_PGM_POOL_BIT 8
+#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
+/** @} */
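+/* Illustrative sketch (an assumption, not lifted from the PGM implementation): the
+ * typical pattern for queueing work via these flags is to set the flag and force a
+ * CR3 sync on the next round (the VMCPU_FF_PGM_SYNC_CR3 usage is assumed here):
+ *
+ *      pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+ *      VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+ */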
+
+
+#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
+
+/**
+ * PGM GVMCPU instance data.
+ */
+typedef struct PGMR0PERVCPU
+{
+# ifdef VBOX_WITH_STATISTICS
+ /** R0: Which statistic this \#PF should be attributed to. */
+ R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
+# endif
+ uint64_t u64Dummy;
+} PGMR0PERVCPU;
+
+
+/**
+ * PGM GVM instance data.
+ */
+typedef struct PGMR0PERVM
+{
+ /** @name PGM Pool related stuff.
+ * @{ */
+ /** Critical section for serializing pool growth. */
+ RTCRITSECT PoolGrowCritSect;
+ /** The memory objects for the pool pages. */
+ RTR0MEMOBJ ahPoolMemObjs[(PGMPOOL_IDX_LAST + PGMPOOL_CFG_MAX_GROW - 1) / PGMPOOL_CFG_MAX_GROW];
+ /** The ring-3 mapping objects for the pool pages. */
+ RTR0MEMOBJ ahPoolMapObjs[(PGMPOOL_IDX_LAST + PGMPOOL_CFG_MAX_GROW - 1) / PGMPOOL_CFG_MAX_GROW];
+ /** @} */
+
+ /** Physical access handler types for ring-0.
+ * Initialized to callback causing return to ring-3 and invalid enmKind. */
+ PGMPHYSHANDLERTYPEINTR0 aPhysHandlerTypes[PGMPHYSHANDLERTYPE_COUNT];
+    /** Physical handler allocator, ring-0 edition. */
+ PGMPHYSHANDLERALLOCATOR PhysHandlerAllocator;
+    /** The pointer to the ring-0 mapping of the physical access handler tree. */
+ PPGMPHYSHANDLERTREE pPhysHandlerTree;
+ /** The allocation object for the physical access handler tree. */
+ RTR0MEMOBJ hPhysHandlerMemObj;
+    /** The ring-3 mapping object for the physical access handler tree. */
+ RTR0MEMOBJ hPhysHandlerMapObj;
+} PGMR0PERVM;
+
+#endif /* IN_RING0 || DOXYGEN_RUNNING */
+
+RT_C_DECLS_BEGIN
+
+#if defined(VBOX_STRICT)
+int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL);
+# define PGM_LOCK_VOID(a_pVM) pgmLockDebug((a_pVM), true, RT_SRC_POS)
+# define PGM_LOCK(a_pVM) pgmLockDebug((a_pVM), false, RT_SRC_POS)
+#else
+int pgmLock(PVMCC pVM, bool fVoid);
+# define PGM_LOCK_VOID(a_pVM) pgmLock((a_pVM), true)
+# define PGM_LOCK(a_pVM) pgmLock((a_pVM), false)
+#endif
+void pgmUnlock(PVMCC pVM);
+# define PGM_UNLOCK(a_pVM) pgmUnlock((a_pVM))
+/**
+ * Asserts that the caller owns the PDM lock.
+ * This is the internal variant of PGMIsLockOwner.
+ * @param a_pVM Pointer to the VM.
+ */
+#define PGM_LOCK_ASSERT_OWNER(a_pVM) Assert(PDMCritSectIsOwner((a_pVM), &(a_pVM)->pgm.s.CritSectX))
+/**
+ * Asserts that the caller owns the PDM lock.
+ * This is the internal variant of PGMIsLockOwner.
+ * @param a_pVM Pointer to the VM.
+ * @param a_pVCpu The current CPU handle.
+ */
+#define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu) Assert(PDMCritSectIsOwnerEx((a_pVCpu), &(a_pVM)->pgm.s.CritSectX))
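+/* Illustrative sketch (an assumption, not lifted from the PGM implementation) of the
+ * common lock/unlock pattern around code that touches RAM ranges, pages or physical
+ * handlers:
+ *
+ *      int rc = PGM_LOCK(pVM);
+ *      AssertRCReturn(rc, rc);
+ *      ...
+ *      PGM_UNLOCK(pVM);
+ */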
+
+uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap);
+int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
+ R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler);
+int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler);
+int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast);
+int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler);
+int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler);
+void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
+bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys);
+void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
+ bool fDoAccounting, bool fFlushIemTlbs);
+DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap);
+DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+DECLCALLBACK(FNPGMPHYSHANDLER) pgmR3HandlerPhysicalHandlerInvalid;
+#ifndef IN_RING3
+DECLCALLBACK(FNPGMPHYSHANDLER) pgmR0HandlerPhysicalHandlerToRing3;
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER) pgmR0HandlerPhysicalPfHandlerToRing3;
+#endif
+
+int pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
+
+int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
+int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys);
+#ifdef IN_RING0
+int pgmR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu, bool fRing3);
+int pgmR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys);
+#endif
+int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
+int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys);
+int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
+void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
+int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
+int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
+int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
+int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
+int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
+int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
+int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
+int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
+int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
+int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock);
+void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock);
+DECLCALLBACK(FNPGMPHYSHANDLER) pgmPhysRomWriteHandler;
+DECLCALLBACK(FNPGMPHYSHANDLER) pgmPhysMmio2WriteHandler;
+#ifndef IN_RING3
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER) pgmPhysRomWritePfHandler;
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER) pgmPhysMmio2WritePfHandler;
+#endif
+int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
+ PGMPAGETYPE enmNewType);
+void pgmPhysInvalidRamRangeTlbs(PVMCC pVM);
+void pgmPhysInvalidatePageMapTLB(PVMCC pVM);
+void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys);
+PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys);
+PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys);
+PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys);
+int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage);
+int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam);
+#ifdef VBOX_WITH_NATIVE_NEM
+void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State);
+#endif
+
+#ifdef IN_RING3
+void pgmR3PhysRelinkRamRanges(PVM pVM);
+int pgmR3PhysRamPreAllocate(PVM pVM);
+int pgmR3PhysRamReset(PVM pVM);
+int pgmR3PhysRomReset(PVM pVM);
+int pgmR3PhysRamZeroAll(PVM pVM);
+int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+int pgmR3PhysRamTerm(PVM pVM);
+void pgmR3PhysRomTerm(PVM pVM);
+void pgmR3PhysAssertSharedPageChecksums(PVM pVM);
+
+int pgmR3PoolInit(PVM pVM);
+void pgmR3PoolRelocate(PVM pVM);
+void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
+void pgmR3PoolReset(PVM pVM);
+void pgmR3PoolClearAll(PVM pVM, bool fFlushRemTlb);
+DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *fpvFlushRemTbl);
+void pgmR3PoolWriteProtectPages(PVM pVM);
+
+#endif /* IN_RING3 */
+#ifdef IN_RING0
+int pgmR0PoolInitVM(PGVM pGVM);
+#endif
+int pgmPoolAlloc(PVMCC pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
+ uint16_t iUser, uint32_t iUserTable, bool fLockPage, PPPGMPOOLPAGE ppPage);
+void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
+void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
+int             pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOW WHAT YOU'RE DOING!! */);
+void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
+PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
+PPGMPOOLPAGE pgmPoolQueryPageForDbg(PPGMPOOL pPool, RTHCPHYS HCPhys);
+int pgmPoolHCPhys2Ptr(PVM pVM, RTHCPHYS HCPhys, void **ppv);
+int pgmPoolSyncCR3(PVMCPUCC pVCpu);
+bool pgmPoolIsDirtyPageSlow(PVMCC pVM, RTGCPHYS GCPhys);
+void pgmPoolInvalidateDirtyPage(PVMCC pVM, RTGCPHYS GCPhysPT);
+int pgmPoolTrackUpdateGCPhys(PVMCC pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
+void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte);
+uint16_t pgmPoolTrackPhysExtAddref(PVMCC pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte);
+void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte);
+void pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
+void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
+FNPGMPHYSHANDLER pgmPoolAccessHandler;
+#ifndef IN_RING3
+FNPGMRZPHYSPFHANDLER pgmRZPoolAccessPfHandler;
+#endif
+
+void pgmPoolAddDirtyPage(PVMCC pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
+void pgmPoolResetDirtyPages(PVMCC pVM);
+void pgmPoolResetDirtyPage(PVMCC pVM, RTGCPTR GCPtrPage);
+
+/** Gets the ring-0 pointer for the given pool page. */
+DECLINLINE(R0PTRTYPE(PPGMPOOLPAGE)) pgmPoolConvertPageToR0(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+#ifdef IN_RING3
+ size_t offPage = (uintptr_t)pPage - (uintptr_t)pPool;
+# ifdef VBOX_STRICT
+ size_t iPage = (offPage - RT_UOFFSETOF(PGMPOOL, aPages)) / sizeof(*pPage);
+ AssertReturn(iPage < pPool->cMaxPages, NIL_RTR0PTR);
+ AssertReturn(iPage * sizeof(*pPage) + RT_UOFFSETOF(PGMPOOL, aPages) == offPage, NIL_RTR0PTR);
+# endif
+ return pPool->pPoolR0 + offPage;
+#else
+ RT_NOREF(pPool);
+ return pPage;
+#endif
+}
+
+/** Gets the ring-3 pointer for the given pool page. */
+DECLINLINE(R3PTRTYPE(PPGMPOOLPAGE)) pgmPoolConvertPageToR3(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+#ifdef IN_RING3
+ RT_NOREF(pPool);
+ return pPage;
+#else
+ size_t offPage = (uintptr_t)pPage - (uintptr_t)pPool;
+# ifdef VBOX_STRICT
+ size_t iPage = (offPage - RT_UOFFSETOF(PGMPOOL, aPages)) / sizeof(*pPage);
+ AssertReturn(iPage < pPool->cMaxPages, NIL_RTR3PTR);
+ AssertReturn(iPage * sizeof(*pPage) + RT_UOFFSETOF(PGMPOOL, aPages) == offPage, NIL_RTR3PTR);
+# endif
+ return pPool->pPoolR3 + offPage;
+#endif
+}
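+/* Illustrative usage sketch (an assumption, not lifted from the PGM implementation):
+ *
+ *      R0PTRTYPE(PPGMPOOLPAGE) pPageR0 = pgmPoolConvertPageToR0(pPool, pPage);
+ *      R3PTRTYPE(PPGMPOOLPAGE) pPageR3 = pgmPoolConvertPageToR3(pPool, pPage);
+ *
+ * Both helpers rely on the pool pages living in the pool's aPages array, so the byte
+ * offset of a page within the pool is the same in ring-0 and ring-3.
+ */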
+
+int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu);
+int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);
+void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu);
+
+int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags);
+int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
+int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode);
+
+int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd);
+int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt);
+int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd);
+int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppPml4);
+#endif
+int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
+int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
+
+# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 && defined(IN_RING3)
+FNDBGCCMD pgmR3CmdCheckDuplicatePages;
+FNDBGCCMD pgmR3CmdShowSharedModules;
+# endif
+
+void pgmLogState(PVM pVM);
+
+RT_C_DECLS_END
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_PGMInternal_h */
+
diff --git a/src/VBox/VMM/include/PGMSlatDefs.h b/src/VBox/VMM/include/PGMSlatDefs.h
new file mode 100644
index 00000000..a81fa35b
--- /dev/null
+++ b/src/VBox/VMM/include/PGMSlatDefs.h
@@ -0,0 +1,141 @@
+/* $Id: PGMSlatDefs.h $ */
+/** @file
+ * VBox - Page Manager, SLAT Paging Template - All context code.
+ */
+
+/*
+ * Copyright (C) 2022-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+
+#undef SLAT_IS_PGENTRY_PRESENT
+#undef SLAT_IS_PML4E_VALID
+#undef SLAT_IS_PDPE_VALID
+#undef SLAT_IS_BIG_PDPE_VALID
+#undef SLAT_IS_PDE_VALID
+#undef SLAT_IS_BIG_PDE_VALID
+#undef SLAT_IS_PTE_VALID
+#undef SLAT_GET_PDPE1G_GCPHYS
+#undef SLAT_GET_PDE2M_GCPHYS
+#undef SLAT_GET_PTE_GCPHYS
+#undef SLAT_PAGE_1G_OFFSET_MASK
+#undef SLAT_PAGE_2M_OFFSET_MASK
+#undef SLAT_PML4_SHIFT
+#undef SLAT_PML4_MASK
+#undef SLAT_PDPT_SHIFT
+#undef SLAT_PDPT_MASK
+#undef SLAT_PD_SHIFT
+#undef SLAT_PD_MASK
+#undef SLAT_PT_SHIFT
+#undef SLAT_PT_MASK
+#undef SLATPDE
+#undef PSLATPDE
+#undef SLATPTE
+#undef PSLATPTE
+#undef PSLATPTWALK
+
+#define SLAT_IS_PGENTRY_PRESENT(a_pVCpu, a_Pge) ((a_Pge.u) & (a_pVCpu)->pgm.s.fGstEptPresentMask)
+#define SLAT_IS_PML4E_VALID(a_pVCpu, a_Pml4e) (!( (a_Pml4e).u & (a_pVCpu)->pgm.s.fGstEptMbzPml4eMask ))
+#define SLAT_IS_PDPE_VALID(a_pVCpu, a_Pdpte) (!( (a_Pdpte).u & (a_pVCpu)->pgm.s.fGstEptMbzPdpteMask ))
+#define SLAT_IS_BIG_PDPE_VALID(a_pVCpu, a_Pdpe) (!( (a_Pdpe).u & (a_pVCpu)->pgm.s.fGstEptMbzBigPdpteMask ))
+#define SLAT_IS_PDE_VALID(a_pVCpu, a_Pde) (!( (a_Pde).u & (a_pVCpu)->pgm.s.fGstEptMbzPdeMask ))
+#define SLAT_IS_BIG_PDE_VALID(a_pVCpu, a_Pde) (!( (a_Pde).u & (a_pVCpu)->pgm.s.fGstEptMbzBigPdeMask ))
+#define SLAT_IS_PTE_VALID(a_pVCpu, a_Pte) (!( (a_Pte).u & (a_pVCpu)->pgm.s.fGstEptMbzPteMask ))
+#define SLAT_GET_PDPE1G_GCPHYS(a_pVCpu, a_Pdpte) PGM_A20_APPLY(a_pVCpu, ((a_Pdpte).u & EPT_PDPTE1G_PG_MASK))
+#define SLAT_GET_PDE2M_GCPHYS(a_pVCpu, a_Pde) PGM_A20_APPLY(a_pVCpu, ((a_Pde).u & EPT_PDE2M_PG_MASK))
+#define SLAT_GET_PTE_GCPHYS(a_pVCpu, a_Pte) PGM_A20_APPLY(a_pVCpu, ((a_Pte).u & EPT_E_PG_MASK))
+#define SLAT_PAGE_1G_OFFSET_MASK X86_PAGE_1G_OFFSET_MASK
+#define SLAT_PAGE_2M_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
+#define SLAT_PML4_SHIFT EPT_PML4_SHIFT
+#define SLAT_PML4_MASK EPT_PML4_MASK
+#define SLAT_PDPT_SHIFT EPT_PDPT_SHIFT
+#define SLAT_PDPT_MASK EPT_PDPT_MASK
+#define SLAT_PD_SHIFT EPT_PD_SHIFT
+#define SLAT_PD_MASK EPT_PD_MASK
+#define SLAT_PT_SHIFT EPT_PT_SHIFT
+#define SLAT_PT_MASK EPT_PT_MASK
+#define SLATPDE EPTPDE
+#define PSLATPDE PEPTPDE
+#define SLATPTE EPTPTE
+#define PSLATPTE PEPTPTE
+#define PSLATPTWALK PPGMPTWALKGSTEPT
+
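+/*
+ * Illustrative sketch, not part of the original patch: how a template
+ * instantiation might combine the SLAT_* macros above to validate a leaf
+ * PTE and extract its guest-physical address.  The helper name and shape
+ * are made up for the example.
+ */
+#if 0 /* example only */
+DECLINLINE(bool) slatExampleResolvePte(PVMCPUCC pVCpu, SLATPTE Pte, PRTGCPHYS pGCPhys)
+{
+    if (!SLAT_IS_PGENTRY_PRESENT(pVCpu, Pte))   /* any of the EPT read/write/execute bits set? */
+        return false;
+    if (!SLAT_IS_PTE_VALID(pVCpu, Pte))         /* must-be-zero bits violated? */
+        return false;
+    *pGCPhys = SLAT_GET_PTE_GCPHYS(pVCpu, Pte); /* 4K page frame address (A20 applied) */
+    return true;
+}
+#endif
+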
+#if 0
+# if PGM_SHW_TYPE != PGM_TYPE_EPT
+# error "Only SLAT type of EPT is supported "
+# endif
+# if PGM_GST_TYPE != PGM_TYPE_EPT
+# error "Guest type for SLAT EPT "
+# endif
+
+# define GST_ATOMIC_OR(a_pu, a_fFlags) ASMAtomicOrU64((a_pu), (a_fFlags))
+# define GSTPT EPTPT
+# define PGSTPT PEPTPT
+# define GSTPTE EPTPTE
+# define PGSTPTE PEPTPTE
+# define GSTPD EPTPD
+# define PGSTPD PEPTPD
+# define GSTPDE EPTPDE
+# define PGSTPDE PEPTPDE
+# define GST_GIGANT_PAGE_SIZE X86_PAGE_1G_SIZE
+# define GST_GIGANT_PAGE_OFFSET_MASK X86_PAGE_1G_OFFSET_MASK
+# define GST_PDPE_BIG_PG_MASK X86_PDPE1G_PG_MASK
+# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
+# define GST_PDE_PG_MASK EPT_PDE_PG_MASK
+# define GST_PDE_BIG_PG_MASK EPT_PDE2M_PG_MASK
+# define GST_PD_SHIFT EPT_PD_SHIFT
+# define GST_PD_MASK EPT_PD_MASK
+# define GSTPTWALK PGMPTWALKGSTEPT
+# define PGSTPTWALK PPGMPTWALKGSTEPT
+# define PCGSTPTWALK PCPGMPTWALKGSTEPT
+# define GST_PDPE_ENTRIES EPT_PG_ENTRIES
+# define GST_PDPT_SHIFT EPT_PDPT_SHIFT
+# define GST_PDPE_PG_MASK EPT_PDPTE_PG_MASK
+# define GST_PDPT_MASK EPT_PDPT_MASK
+# define GST_PTE_PG_MASK EPT_E_PG_MASK
+# define GST_CR3_PAGE_MASK X86_CR3_EPT_PAGE_MASK
+# define GST_PT_SHIFT EPT_PT_SHIFT
+# define GST_PT_MASK EPT_PT_MASK
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(a_pVCpu, ((Pte).u & GST_PTE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) PGM_A20_APPLY(a_pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) PGM_A20_APPLY(a_pVCpu, ((Pde).u & GST_PDE_BIG_PG_MASK))
+# define GST_GET_BIG_PDPE_GCPHYS(pVM, Pde) PGM_A20_APPLY(a_pVCpu, ((Pde).u & GST_PDPE_BIG_PG_MASK))
+# define GST_GET_PTE_SHW_FLAGS(a_pVCpu, Pte) (true && This_should_perhaps_not_be_used_in_this_context)
+# define GST_GET_PDE_SHW_FLAGS(a_pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context)
+# define GST_GET_BIG_PDE_SHW_FLAGS(a_pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context)
+# define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(a_pVCpu, Pde) (true && This_should_perhaps_not_be_used_in_this_context)
+# define GST_IS_PTE_VALID(a_pVCpu, Pte) (!( (Pte).u & (a_pVCpu)->pgm.s.fGstEptMbzPteMask ))
+# define GST_IS_PDE_VALID(a_pVCpu, Pde) (!( (Pde).u & (a_pVCpu)->pgm.s.fGstEptMbzPdeMask ))
+# define GST_IS_BIG_PDE_VALID(a_pVCpu, Pde) (!( (Pde).u & (a_pVCpu)->pgm.s.fGstEptMbzBigPdeMask ))
+# define GST_IS_PDPE_VALID(a_pVCpu, Pdpe) (!( (Pdpe).u & (a_pVCpu)->pgm.s.fGstEptMbzPdpteMask ))
+# define GST_IS_BIG_PDPE_VALID(a_pVCpu, Pdpe) (!( (Pdpe).u & (a_pVCpu)->pgm.s.fGstEptMbzBigPdpteMask ))
+# define GST_IS_PML4E_VALID(a_pVCpu, Pml4e) (!( (Pml4e).u & (a_pVCpu)->pgm.s.fGstEptMbzPml4eMask ))
+# define GST_IS_PGENTRY_PRESENT(a_pVCpu, Pge) ((Pge).u & EPT_PRESENT_MASK)
+# define GST_IS_PSE_ACTIVE(a_pVCpu) (!((a_pVCpu)->pgm.s.fGstEptMbzBigPdeMask & EPT_E_BIT_LEAF))
+# define GST_IS_NX_ACTIVE(a_pVCpu) (pgmGstIsNoExecuteActive(a_pVCpu))
+# define BTH_IS_NP_ACTIVE(pVM) (false)
+#endif
+
diff --git a/src/VBox/VMM/include/SELMInternal.h b/src/VBox/VMM/include/SELMInternal.h
new file mode 100644
index 00000000..ab5defc7
--- /dev/null
+++ b/src/VBox/VMM/include/SELMInternal.h
@@ -0,0 +1,72 @@
+/* $Id: SELMInternal.h $ */
+/** @file
+ * SELM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_SELMInternal_h
+#define VMM_INCLUDED_SRC_include_SELMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/log.h>
+#include <iprt/x86.h>
+
+
+
+/** @defgroup grp_selm_int Internals
+ * @ingroup grp_selm
+ * @internal
+ * @{
+ */
+
+/** The number of GDT entries allocated for our GDT (full size). */
+#define SELM_GDT_ELEMENTS 8192
+
+
+/**
+ * SELM Data (part of VM)
+ *
+ * @note This is a very marginal component now that raw-mode has been removed.
+ */
+typedef struct SELM
+{
+#ifdef VBOX_WITH_STATISTICS
+ STAMCOUNTER StatLoadHidSelGst;
+ STAMCOUNTER StatLoadHidSelShw;
+#endif
+ STAMCOUNTER StatLoadHidSelReadErrors;
+ STAMCOUNTER StatLoadHidSelGstNoGood;
+} SELM, *PSELM;
+
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_SELMInternal_h */
diff --git a/src/VBox/VMM/include/SSMInternal.h b/src/VBox/VMM/include/SSMInternal.h
new file mode 100644
index 00000000..b6e0a6cc
--- /dev/null
+++ b/src/VBox/VMM/include/SSMInternal.h
@@ -0,0 +1,341 @@
+/* $Id: SSMInternal.h $ */
+/** @file
+ * SSM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_SSMInternal_h
+#define VMM_INCLUDED_SRC_include_SSMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/ssm.h>
+#include <iprt/critsect.h>
+
+RT_C_DECLS_BEGIN
+
+/** @defgroup grp_ssm_int Internals
+ * @ingroup grp_ssm
+ * @internal
+ * @{
+ */
+
+
+/**
+ * Data unit callback type.
+ */
+typedef enum SSMUNITTYPE
+{
+    /** PDM Device. */
+ SSMUNITTYPE_DEV = 1,
+ /** PDM Driver. */
+ SSMUNITTYPE_DRV,
+ /** PDM USB device. */
+ SSMUNITTYPE_USB,
+ /** VM Internal. */
+ SSMUNITTYPE_INTERNAL,
+ /** External Wrapper. */
+ SSMUNITTYPE_EXTERNAL
+} SSMUNITTYPE;
+
+/** Pointer to a data unit descriptor. */
+typedef struct SSMUNIT *PSSMUNIT;
+
+/**
+ * Data unit descriptor.
+ */
+typedef struct SSMUNIT
+{
+    /** Pointer to the next one in the list. */
+ PSSMUNIT pNext;
+
+ /** Called in this save/load operation.
+     * The flag is used to determine whether there is a need for a call to
+     * the done callback or not. */
+ bool fCalled;
+ /** Finished its live part.
+ * This is used to handle VERR_SSM_VOTE_FOR_GIVING_UP. */
+ bool fDoneLive;
+ /** Callback interface type. */
+ SSMUNITTYPE enmType;
+ /** Type specific data. */
+ union
+ {
+ /** SSMUNITTYPE_DEV. */
+ struct
+ {
+ /** Prepare live save. */
+ PFNSSMDEVLIVEPREP pfnLivePrep;
+ /** Execute live save. */
+ PFNSSMDEVLIVEEXEC pfnLiveExec;
+ /** Vote live save complete. */
+ PFNSSMDEVLIVEVOTE pfnLiveVote;
+ /** Prepare save. */
+ PFNSSMDEVSAVEPREP pfnSavePrep;
+ /** Execute save. */
+ PFNSSMDEVSAVEEXEC pfnSaveExec;
+ /** Done save. */
+ PFNSSMDEVSAVEDONE pfnSaveDone;
+ /** Prepare load. */
+ PFNSSMDEVLOADPREP pfnLoadPrep;
+ /** Execute load. */
+ PFNSSMDEVLOADEXEC pfnLoadExec;
+ /** Done load. */
+ PFNSSMDEVLOADDONE pfnLoadDone;
+ /** Device instance. */
+ PPDMDEVINS pDevIns;
+ } Dev;
+
+ /** SSMUNITTYPE_DRV. */
+ struct
+ {
+ /** Prepare live save. */
+ PFNSSMDRVLIVEPREP pfnLivePrep;
+ /** Execute live save. */
+ PFNSSMDRVLIVEEXEC pfnLiveExec;
+ /** Vote live save complete. */
+ PFNSSMDRVLIVEVOTE pfnLiveVote;
+ /** Prepare save. */
+ PFNSSMDRVSAVEPREP pfnSavePrep;
+ /** Execute save. */
+ PFNSSMDRVSAVEEXEC pfnSaveExec;
+ /** Done save. */
+ PFNSSMDRVSAVEDONE pfnSaveDone;
+ /** Prepare load. */
+ PFNSSMDRVLOADPREP pfnLoadPrep;
+ /** Execute load. */
+ PFNSSMDRVLOADEXEC pfnLoadExec;
+ /** Done load. */
+ PFNSSMDRVLOADDONE pfnLoadDone;
+ /** Driver instance. */
+ PPDMDRVINS pDrvIns;
+ } Drv;
+
+ /** SSMUNITTYPE_USB. */
+ struct
+ {
+ /** Prepare live save. */
+ PFNSSMUSBLIVEPREP pfnLivePrep;
+ /** Execute live save. */
+ PFNSSMUSBLIVEEXEC pfnLiveExec;
+ /** Vote live save complete. */
+ PFNSSMUSBLIVEVOTE pfnLiveVote;
+ /** Prepare save. */
+ PFNSSMUSBSAVEPREP pfnSavePrep;
+ /** Execute save. */
+ PFNSSMUSBSAVEEXEC pfnSaveExec;
+ /** Done save. */
+ PFNSSMUSBSAVEDONE pfnSaveDone;
+ /** Prepare load. */
+ PFNSSMUSBLOADPREP pfnLoadPrep;
+ /** Execute load. */
+ PFNSSMUSBLOADEXEC pfnLoadExec;
+ /** Done load. */
+ PFNSSMUSBLOADDONE pfnLoadDone;
+ /** USB instance. */
+ PPDMUSBINS pUsbIns;
+ } Usb;
+
+ /** SSMUNITTYPE_INTERNAL. */
+ struct
+ {
+ /** Prepare live save. */
+ PFNSSMINTLIVEPREP pfnLivePrep;
+ /** Execute live save. */
+ PFNSSMINTLIVEEXEC pfnLiveExec;
+ /** Vote live save complete. */
+ PFNSSMINTLIVEVOTE pfnLiveVote;
+ /** Prepare save. */
+ PFNSSMINTSAVEPREP pfnSavePrep;
+ /** Execute save. */
+ PFNSSMINTSAVEEXEC pfnSaveExec;
+ /** Done save. */
+ PFNSSMINTSAVEDONE pfnSaveDone;
+ /** Prepare load. */
+ PFNSSMINTLOADPREP pfnLoadPrep;
+ /** Execute load. */
+ PFNSSMINTLOADEXEC pfnLoadExec;
+ /** Done load. */
+ PFNSSMINTLOADDONE pfnLoadDone;
+ } Internal;
+
+ /** SSMUNITTYPE_EXTERNAL. */
+ struct
+ {
+ /** Prepare live save. */
+ PFNSSMEXTLIVEPREP pfnLivePrep;
+ /** Execute live save. */
+ PFNSSMEXTLIVEEXEC pfnLiveExec;
+ /** Vote live save complete. */
+ PFNSSMEXTLIVEVOTE pfnLiveVote;
+ /** Prepare save. */
+ PFNSSMEXTSAVEPREP pfnSavePrep;
+ /** Execute save. */
+ PFNSSMEXTSAVEEXEC pfnSaveExec;
+ /** Done save. */
+ PFNSSMEXTSAVEDONE pfnSaveDone;
+ /** Prepare load. */
+ PFNSSMEXTLOADPREP pfnLoadPrep;
+ /** Execute load. */
+ PFNSSMEXTLOADEXEC pfnLoadExec;
+ /** Done load. */
+ PFNSSMEXTLOADDONE pfnLoadDone;
+ /** User data. */
+ void *pvUser;
+ } External;
+
+ struct
+ {
+ /** Prepare live save. */
+ PFNRT pfnLivePrep;
+ /** Execute live save. */
+ PFNRT pfnLiveExec;
+ /** Vote live save complete. */
+ PFNRT pfnLiveVote;
+ /** Prepare save. */
+ PFNRT pfnSavePrep;
+ /** Execute save. */
+ PFNRT pfnSaveExec;
+ /** Done save. */
+ PFNRT pfnSaveDone;
+ /** Prepare load. */
+ PFNRT pfnLoadPrep;
+ /** Execute load. */
+ PFNRT pfnLoadExec;
+ /** Done load. */
+ PFNRT pfnLoadDone;
+ /** User data. */
+ void *pvKey;
+ } Common;
+ } u;
+ /** Data layout version. */
+ uint32_t u32Version;
+ /** Instance number. */
+ uint32_t u32Instance;
+ /** The offset of the final data unit.
+ * This is used for constructing the directory. */
+ RTFOFF offStream;
+    /** Critical section to be taken before calling any of the callbacks. */
+ PPDMCRITSECT pCritSect;
+ /** The guessed size of the data unit - used only for progress indication. */
+ size_t cbGuess;
+ /** Name size. (bytes) */
+ size_t cchName;
+ /** Name of this unit. (extends beyond the defined size) */
+ char szName[1];
+} SSMUNIT;
+
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLivePrep, u.Dev.pfnLivePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveExec, u.Dev.pfnLiveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveVote, u.Dev.pfnLiveVote);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSavePrep, u.Dev.pfnSavePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveExec, u.Dev.pfnSaveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveDone, u.Dev.pfnSaveDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadPrep, u.Dev.pfnLoadPrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadExec, u.Dev.pfnLoadExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadDone, u.Dev.pfnLoadDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pvKey, u.Dev.pDevIns);
+
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLivePrep, u.Drv.pfnLivePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveExec, u.Drv.pfnLiveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveVote, u.Drv.pfnLiveVote);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSavePrep, u.Drv.pfnSavePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveExec, u.Drv.pfnSaveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveDone, u.Drv.pfnSaveDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadPrep, u.Drv.pfnLoadPrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadExec, u.Drv.pfnLoadExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadDone, u.Drv.pfnLoadDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pvKey, u.Drv.pDrvIns);
+
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLivePrep, u.Usb.pfnLivePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveExec, u.Usb.pfnLiveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveVote, u.Usb.pfnLiveVote);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSavePrep, u.Usb.pfnSavePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveExec, u.Usb.pfnSaveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveDone, u.Usb.pfnSaveDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadPrep, u.Usb.pfnLoadPrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadExec, u.Usb.pfnLoadExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadDone, u.Usb.pfnLoadDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pvKey, u.Usb.pUsbIns);
+
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLivePrep, u.Internal.pfnLivePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveExec, u.Internal.pfnLiveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveVote, u.Internal.pfnLiveVote);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSavePrep, u.Internal.pfnSavePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveExec, u.Internal.pfnSaveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveDone, u.Internal.pfnSaveDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadPrep, u.Internal.pfnLoadPrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadExec, u.Internal.pfnLoadExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadDone, u.Internal.pfnLoadDone);
+
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLivePrep, u.External.pfnLivePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveExec, u.External.pfnLiveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLiveVote, u.External.pfnLiveVote);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSavePrep, u.External.pfnSavePrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveExec, u.External.pfnSaveExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnSaveDone, u.External.pfnSaveDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadPrep, u.External.pfnLoadPrep);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadExec, u.External.pfnLoadExec);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pfnLoadDone, u.External.pfnLoadDone);
+AssertCompile2MemberOffsets(SSMUNIT, u.Common.pvKey, u.External.pvUser);
+
+
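+/*
+ * Illustrative sketch, not part of the original patch: the Common alias in
+ * the union above (guaranteed by the offset asserts) lets generic SSM code
+ * match a unit on its instance key without switching on enmType.  The
+ * helper name is made up for the example.
+ */
+#if 0 /* example only */
+static PSSMUNIT ssmExampleFindUnit(PSSMUNIT pHead, void *pvKey, uint32_t uInstance)
+{
+    for (PSSMUNIT pUnit = pHead; pUnit; pUnit = pUnit->pNext)
+        if (   pUnit->u.Common.pvKey == pvKey   /* aliases pDevIns / pDrvIns / pUsbIns / pvUser */
+            && pUnit->u32Instance    == uInstance)
+            return pUnit;
+    return NULL;
+}
+#endif
+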
+/**
+ * SSM VM Instance data.
+ * Changes to this must be checked against the padding of the cfgm union in VM!
+ *
+ * @todo Move this to UVM.
+ */
+typedef struct SSM
+{
+ /** Critical section for serializing cancellation (pSSM). */
+ RTCRITSECT CancelCritSect;
+ /** The handle of the current save or load operation.
+ * This is used by SSMR3Cancel. */
+ PSSMHANDLE volatile pSSM;
+
+ /** FIFO of data entity descriptors. */
+ R3PTRTYPE(PSSMUNIT) pHead;
+    /** The number of registered units. */
+ uint32_t cUnits;
+ /** For lazy init. */
+ bool fInitialized;
+ /** Current pass (for STAM). */
+ uint32_t uPass;
+ uint32_t u32Alignment;
+} SSM;
+/** Pointer to SSM VM instance data. */
+typedef SSM *PSSM;
+
+
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_SSMInternal_h */
+
diff --git a/src/VBox/VMM/include/STAMInternal.h b/src/VBox/VMM/include/STAMInternal.h
new file mode 100644
index 00000000..6f2cfdbd
--- /dev/null
+++ b/src/VBox/VMM/include/STAMInternal.h
@@ -0,0 +1,187 @@
+/* $Id: STAMInternal.h $ */
+/** @file
+ * STAM Internal Header.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_STAMInternal_h
+#define VMM_INCLUDED_SRC_include_STAMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/gvmm.h>
+#include <VBox/vmm/gmm.h>
+#include <iprt/list.h>
+#include <iprt/semaphore.h>
+
+
+
+RT_C_DECLS_BEGIN
+
+/** @defgroup grp_stam_int Internals
+ * @ingroup grp_stam
+ * @internal
+ * @{
+ */
+
+/** Pointer to sample descriptor. */
+typedef struct STAMDESC *PSTAMDESC;
+/** Pointer to a sample lookup node. */
+typedef struct STAMLOOKUP *PSTAMLOOKUP;
+
+/**
+ * Sample lookup node.
+ */
+typedef struct STAMLOOKUP
+{
+ /** The parent lookup record. This is NULL for the root node. */
+ PSTAMLOOKUP pParent;
+ /** Array of children (using array for binary searching). */
+ PSTAMLOOKUP *papChildren;
+ /** Pointer to the description node, if any. */
+ PSTAMDESC pDesc;
+    /** Number of descendants with descriptors. (Used for freeing up sub-trees.) */
+ uint32_t cDescsInTree;
+ /** The number of children. */
+ uint16_t cChildren;
+    /** The index in the parent's papChildren array. UINT16_MAX for the root node. */
+ uint16_t iParent;
+ /** The path offset. */
+ uint16_t off;
+ /** The size of the path component. */
+ uint16_t cch;
+ /** The name (variable size). */
+ char szName[1];
+} STAMLOOKUP;
+
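+/*
+ * Illustrative sketch, not part of the original patch: how the papChildren
+ * array above supports binary searching for a path component, assuming the
+ * children are kept in strcmp order and the component is NUL terminated.
+ * The helper name is made up; the real lookup code lives in STAM.cpp.
+ */
+#if 0 /* example only */
+static PSTAMLOOKUP stamExampleFindChild(PSTAMLOOKUP pParent, const char *pszComp)
+{
+    uint32_t iLo = 0;
+    uint32_t iHi = pParent->cChildren;
+    while (iLo < iHi)
+    {
+        uint32_t const    i      = iLo + (iHi - iLo) / 2;
+        PSTAMLOOKUP const pChild = pParent->papChildren[i];
+        int const         iDiff  = strcmp(pszComp, pChild->szName);
+        if (iDiff < 0)
+            iHi = i;            /* continue in the lower half */
+        else if (iDiff > 0)
+            iLo = i + 1;        /* continue in the upper half */
+        else
+            return pChild;
+    }
+    return NULL;
+}
+#endif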
+
+/**
+ * Sample descriptor.
+ */
+typedef struct STAMDESC
+{
+ /** Our entry in the big linear list. */
+ RTLISTNODE ListEntry;
+ /** Pointer to our lookup node. */
+ PSTAMLOOKUP pLookup;
+ /** Sample name. */
+ const char *pszName;
+ /** Sample type. */
+ STAMTYPE enmType;
+ /** Visibility type. */
+ STAMVISIBILITY enmVisibility;
+ /** Pointer to the sample data. */
+ union STAMDESCSAMPLEDATA
+ {
+ /** Counter. */
+ PSTAMCOUNTER pCounter;
+ /** Profile. */
+ PSTAMPROFILE pProfile;
+ /** Advanced profile. */
+ PSTAMPROFILEADV pProfileAdv;
+ /** Ratio, unsigned 32-bit. */
+ PSTAMRATIOU32 pRatioU32;
+ /** unsigned 8-bit. */
+ uint8_t *pu8;
+ /** unsigned 16-bit. */
+ uint16_t *pu16;
+ /** unsigned 32-bit. */
+ uint32_t *pu32;
+ /** unsigned 64-bit. */
+ uint64_t *pu64;
+ /** Simple void pointer. */
+ void *pv;
+ /** Boolean. */
+ bool *pf;
+        /** Callback. */
+ struct STAMDESCSAMPLEDATACALLBACKS
+ {
+            /** The sample pointer. */
+ void *pvSample;
+ /** Pointer to the reset callback. */
+ PFNSTAMR3CALLBACKRESET pfnReset;
+ /** Pointer to the print callback. */
+ PFNSTAMR3CALLBACKPRINT pfnPrint;
+ } Callback;
+ } u;
+ /** Unit. */
+ STAMUNIT enmUnit;
+ /** The refresh group number (STAM_REFRESH_GRP_XXX). */
+ uint8_t iRefreshGroup;
+ /** Description. */
+ const char *pszDesc;
+} STAMDESC;
+
+
+/**
+ * STAM data kept in the UVM.
+ */
+typedef struct STAMUSERPERVM
+{
+ /** List of samples. */
+ RTLISTANCHOR List;
+ /** Root of the lookup tree. */
+ PSTAMLOOKUP pRoot;
+
+ /** RW Lock for the list and tree. */
+ RTSEMRW RWSem;
+
+ /** The copy of the GVMM statistics. */
+ GVMMSTATS GVMMStats;
+ /** The number of registered host CPU leaves. */
+ uint32_t cRegisteredHostCpus;
+
+ /** Explicit alignment padding. */
+ uint32_t uAlignment;
+ /** The copy of the GMM statistics. */
+ GMMSTATS GMMStats;
+} STAMUSERPERVM;
+#ifdef IN_RING3
+AssertCompileMemberAlignment(STAMUSERPERVM, GMMStats, 8);
+#endif
+
+/** Pointer to the STAM data kept in the UVM. */
+typedef STAMUSERPERVM *PSTAMUSERPERVM;
+
+
+/** Locks the sample descriptors for reading. */
+#define STAM_LOCK_RD(pUVM) do { int rcSem = RTSemRWRequestRead(pUVM->stam.s.RWSem, RT_INDEFINITE_WAIT); AssertRC(rcSem); } while (0)
+/** Locks the sample descriptors for writing. */
+#define STAM_LOCK_WR(pUVM) do { int rcSem = RTSemRWRequestWrite(pUVM->stam.s.RWSem, RT_INDEFINITE_WAIT); AssertRC(rcSem); } while (0)
+/** Unlocks the sample descriptors after reading. */
+#define STAM_UNLOCK_RD(pUVM) do { int rcSem = RTSemRWReleaseRead(pUVM->stam.s.RWSem); AssertRC(rcSem); } while (0)
+/** Unlocks the sample descriptors after writing. */
+#define STAM_UNLOCK_WR(pUVM) do { int rcSem = RTSemRWReleaseWrite(pUVM->stam.s.RWSem); AssertRC(rcSem); } while (0)
+/** Lazy initialization */
+#define STAM_LAZY_INIT(pUVM) do { } while (0)
+
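+/*
+ * Illustrative sketch, not part of the original patch: typical read-side use
+ * of the lock macros above when walking the sample list.  The helper name is
+ * made up and the pUVM->stam.s field layout is assumed from the macros; real
+ * enumeration code lives in STAM.cpp.
+ */
+#if 0 /* example only */
+static uint32_t stamExampleCountSamples(PUVM pUVM)
+{
+    uint32_t cSamples = 0;
+    STAM_LOCK_RD(pUVM);                 /* shared access to List and pRoot */
+    PSTAMDESC pCur;
+    RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+        cSamples++;
+    STAM_UNLOCK_RD(pUVM);
+    return cSamples;
+}
+#endif
+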
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_STAMInternal_h */
diff --git a/src/VBox/VMM/include/SVMInternal.h b/src/VBox/VMM/include/SVMInternal.h
new file mode 100644
index 00000000..dea27c9b
--- /dev/null
+++ b/src/VBox/VMM/include/SVMInternal.h
@@ -0,0 +1,89 @@
+/* $Id: SVMInternal.h $ */
+/** @file
+ * SVM - Internal header file for the SVM code.
+ */
+
+/*
+ * Copyright (C) 2022-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_SVMInternal_h
+#define VMM_INCLUDED_SRC_include_SVMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/** @name SVM transient.
+ *
+ * A state structure for holding miscellaneous information across an AMD-V
+ * VMRUN/\#VMEXIT operation, restored after the transition.
+ *
+ * @{ */
+typedef struct SVMTRANSIENT
+{
+ /** The host's rflags/eflags. */
+ RTCCUINTREG fEFlags;
+ /** The \#VMEXIT exit code (the EXITCODE field in the VMCB). */
+ uint64_t u64ExitCode;
+
+ /** The guest's TPR value used for TPR shadowing. */
+ uint8_t u8GuestTpr;
+ /** Alignment. */
+ uint8_t abAlignment0[7];
+
+ /** Pointer to the currently executing VMCB. */
+ PSVMVMCB pVmcb;
+
+ /** Whether we are currently executing a nested-guest. */
+ bool fIsNestedGuest;
+ /** Whether the guest debug state was active at the time of \#VMEXIT. */
+ bool fWasGuestDebugStateActive;
+ /** Whether the hyper debug state was active at the time of \#VMEXIT. */
+ bool fWasHyperDebugStateActive;
+ /** Whether the TSC offset mode needs to be updated. */
+ bool fUpdateTscOffsetting;
+ /** Whether the TSC_AUX MSR needs restoring on \#VMEXIT. */
+ bool fRestoreTscAuxMsr;
+ /** Whether the \#VMEXIT was caused by a page-fault during delivery of a
+     * contributory exception or a page-fault. */
+ bool fVectoringDoublePF;
+ /** Whether the \#VMEXIT was caused by a page-fault during delivery of an
+ * external interrupt or NMI. */
+ bool fVectoringPF;
+ /** Padding. */
+ bool afPadding0;
+} SVMTRANSIENT;
+/** Pointer to SVM transient state. */
+typedef SVMTRANSIENT *PSVMTRANSIENT;
+/** Pointer to a const SVM transient state. */
+typedef const SVMTRANSIENT *PCSVMTRANSIENT;
+
+AssertCompileSizeAlignment(SVMTRANSIENT, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb, sizeof(uint64_t));
+/** @} */
+
+RT_C_DECLS_BEGIN
+/* Nothing for now. */
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_SVMInternal_h */
+
diff --git a/src/VBox/VMM/include/TMInline.h b/src/VBox/VMM/include/TMInline.h
new file mode 100644
index 00000000..ebc46229
--- /dev/null
+++ b/src/VBox/VMM/include/TMInline.h
@@ -0,0 +1,289 @@
+/* $Id: TMInline.h $ */
+/** @file
+ * TM - Common Inlined functions.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_TMInline_h
+#define VMM_INCLUDED_SRC_include_TMInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
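+/**
+ * Gets the head of the active timer list - translates TMTIMERQUEUE::idxActive.
+ */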
+DECLINLINE(PTMTIMER) tmTimerQueueGetHead(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueueShared)
+{
+#ifdef IN_RING3
+ RT_NOREF(pQueueShared);
+ uint32_t const idx = pQueueCC->idxActive;
+#else
+ uint32_t const idx = pQueueShared->idxActive;
+#endif
+ if (idx < pQueueCC->cTimersAlloc)
+ return &pQueueCC->paTimers[idx];
+ return NULL;
+}
+
+
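+/**
+ * Sets the head of the active timer list - updates TMTIMERQUEUE::idxActive.
+ */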
+DECLINLINE(void) tmTimerQueueSetHead(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueueShared, PTMTIMER pHead)
+{
+ uint32_t idx;
+ if (pHead)
+ {
+ idx = (uint32_t)(pHead - &pQueueCC->paTimers[0]);
+ AssertMsgStmt(idx < pQueueCC->cTimersAlloc,
+ ("idx=%u (%s) cTimersAlloc=%u\n", idx, pHead->szName, pQueueCC->cTimersAlloc),
+ idx = UINT32_MAX);
+ }
+ else
+ idx = UINT32_MAX;
+#ifndef IN_RING3
+ pQueueShared->idxActive = idx;
+#else
+ pQueueCC->idxActive = idx;
+ RT_NOREF(pQueueShared);
+#endif
+}
+
+
+/**
+ * Get the previous timer - translates TMTIMER::idxPrev.
+ */
+DECLINLINE(PTMTIMER) tmTimerGetPrev(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer)
+{
+ uint32_t const idxPrev = pTimer->idxPrev;
+ Assert(idxPrev);
+ if (idxPrev < pQueueCC->cTimersAlloc)
+ return &pQueueCC->paTimers[idxPrev];
+ Assert(idxPrev == UINT32_MAX);
+ return NULL;
+}
+
+
+/**
+ * Get the next timer - translates TMTIMER::idxNext.
+ */
+DECLINLINE(PTMTIMER) tmTimerGetNext(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer)
+{
+ uint32_t const idxNext = pTimer->idxNext;
+ Assert(idxNext);
+ if (idxNext < pQueueCC->cTimersAlloc)
+ return &pQueueCC->paTimers[idxNext];
+ Assert(idxNext == UINT32_MAX);
+ return NULL;
+}
+
+
+/**
+ * Set the previous timer link (TMTIMER::idxPrev).
+ */
+DECLINLINE(void) tmTimerSetPrev(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer, PTMTIMER pPrev)
+{
+ uint32_t idxPrev;
+ if (pPrev)
+ {
+ idxPrev = (uint32_t)(pPrev - &pQueueCC->paTimers[0]);
+ Assert(idxPrev);
+ AssertMsgStmt(idxPrev < pQueueCC->cTimersAlloc,
+ ("idxPrev=%u (%s) cTimersAlloc=%u\n", idxPrev, pPrev->szName, pQueueCC->cTimersAlloc),
+ idxPrev = UINT32_MAX);
+ }
+ else
+ idxPrev = UINT32_MAX;
+ pTimer->idxPrev = idxPrev;
+}
+
+
+/**
+ * Set the next timer link (TMTIMER::idxNext).
+ */
+DECLINLINE(void) tmTimerSetNext(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer, PTMTIMER pNext)
+{
+ uint32_t idxNext;
+ if (pNext)
+ {
+ idxNext = (uint32_t)(pNext - &pQueueCC->paTimers[0]);
+ Assert(idxNext);
+ AssertMsgStmt(idxNext < pQueueCC->cTimersAlloc,
+ ("idxNext=%u (%s) cTimersAlloc=%u\n", idxNext, pNext->szName, pQueueCC->cTimersAlloc),
+ idxNext = UINT32_MAX);
+ }
+ else
+ idxNext = UINT32_MAX;
+ pTimer->idxNext = idxNext;
+}
+
+
+/**
+ * Used to unlink a timer from the active list.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pQueueCC The context specific queue data (same as @a pQueue for
+ * ring-3).
+ * @param pQueue The shared timer queue data.
+ * @param   pTimer      The timer that needs unlinking.
+ *
+ * @remarks Called while owning the relevant queue lock.
+ */
+DECL_FORCE_INLINE(void) tmTimerQueueUnlinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
+{
+#ifdef VBOX_STRICT
+ TMTIMERSTATE const enmState = pTimer->enmState;
+ Assert( pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC
+ ? enmState == TMTIMERSTATE_ACTIVE
+ : enmState == TMTIMERSTATE_PENDING_SCHEDULE || enmState == TMTIMERSTATE_PENDING_STOP_SCHEDULE);
+#endif
+ RT_NOREF(pVM);
+
+ const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pTimer);
+ const PTMTIMER pNext = tmTimerGetNext(pQueueCC, pTimer);
+ if (pPrev)
+ tmTimerSetNext(pQueueCC, pPrev, pNext);
+ else
+ {
+ tmTimerQueueSetHead(pQueueCC, pQueue, pNext);
+ pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
+ DBGFTRACE_U64_TAG(pVM, pQueue->u64Expire, "tmTimerQueueUnlinkActive");
+ }
+ if (pNext)
+ tmTimerSetPrev(pQueueCC, pNext, pPrev);
+ pTimer->idxNext = UINT32_MAX;
+ pTimer->idxPrev = UINT32_MAX;
+}
+
+/** @def TMTIMER_HANDLE_TO_VARS_RETURN_EX
+ * Converts a timer handle to a timer pointer, returning @a a_rcRet if the
+ * handle is invalid.
+ *
+ * This defines the following variables:
+ * - idxQueue: The queue index.
+ * - pQueueCC: Pointer to the context specific queue data.
+ * - pTimer: The timer pointer.
+ * - idxTimer: The timer index.
+ *
+ * @param a_pVM The cross context VM structure.
+ * @param a_hTimer The timer handle to translate.
+ * @param a_rcRet What to return on failure.
+ *
+ * @note This macro has no scoping, so careful when using it around
+ * conditional statements!
+ */
+#ifdef IN_RING3
+# define TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, a_rcRet) \
+ uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \
+ & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \
+ AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \
+ PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; \
+ PTMTIMERQUEUE const pQueueCC = pQueue; RT_NOREF(pQueueCC); \
+ \
+ uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \
+    AssertReturn(idxTimer < pQueue->cTimersAlloc, a_rcRet); \
+ \
+ PTMTIMER const pTimer = &pQueue->paTimers[idxTimer]; \
+ AssertReturn(pTimer->hSelf == a_hTimer, a_rcRet)
+#else
+# define TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, a_rcRet) \
+ uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \
+ & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \
+ AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \
+ AssertCompile(RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues) == RT_ELEMENTS((a_pVM)->tmr0.s.aTimerQueues)); \
+ PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; RT_NOREF(pQueue); \
+ PTMTIMERQUEUER0 const pQueueCC = &(a_pVM)->tmr0.s.aTimerQueues[idxQueue]; \
+ \
+ uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \
+    AssertReturn(idxTimer < pQueueCC->cTimersAlloc, a_rcRet); \
+ \
+ PTMTIMER const pTimer = &pQueueCC->paTimers[idxTimer]; \
+ AssertReturn(pTimer->hSelf == a_hTimer, a_rcRet); \
+ Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0); \
+ Assert(VM_IS_EMT(pVM))
+#endif
+
+/** @def TMTIMER_HANDLE_TO_VARS_RETURN
+ * Converts a timer handle to a timer pointer, returning VERR_INVALID_HANDLE if
+ * the handle is invalid.
+ *
+ * This defines the following variables:
+ * - idxQueue: The queue index.
+ * - pQueueCC: Pointer to the context specific queue data.
+ * - pTimer: The timer pointer.
+ * - idxTimer: The timer index.
+ *
+ * @param a_pVM The cross context VM structure.
+ * @param a_hTimer The timer handle to translate.
+ *
+ * @note This macro has no scoping, so careful when using it around
+ * conditional statements!
+ */
+#define TMTIMER_HANDLE_TO_VARS_RETURN(a_pVM, a_hTimer) TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, VERR_INVALID_HANDLE)
+
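+/*
+ * Illustrative sketch, not part of the original patch: how an API typically
+ * uses the macro above before touching a timer.  The function name is made
+ * up; the real callers live in TMAll.cpp and friends.
+ */
+#if 0 /* example only */
+static uint64_t tmExampleTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
+{
+    /* Defines idxQueue, pQueue, pQueueCC, idxTimer and pTimer, or bails out. */
+    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX);
+    return pTimer->u64Expire;
+}
+#endif
+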
+/** @def TMTIMER_HANDLE_TO_VARS_RETURN_VOID
+ * Converts a timer handle to a timer pointer, returning void if the
+ * handle is invalid.
+ *
+ * This defines the following variables:
+ * - idxQueue: The queue index.
+ * - pQueueCC: Pointer to the context specific queue data.
+ * - pTimer: The timer pointer.
+ * - idxTimer: The timer index.
+ *
+ * @param a_pVM The cross context VM structure.
+ * @param a_hTimer The timer handle to translate.
+ *
+ * @note This macro has no scoping, so careful when using it around
+ * conditional statements!
+ */
+#ifdef IN_RING3
+# define TMTIMER_HANDLE_TO_VARS_RETURN_VOID(a_pVM, a_hTimer) \
+ uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \
+ & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \
+ AssertReturnVoid(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues)); \
+ PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; \
+ PTMTIMERQUEUE const pQueueCC = pQueue; RT_NOREF(pQueueCC); \
+ \
+ uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \
+    AssertReturnVoid(idxTimer < pQueue->cTimersAlloc); \
+ \
+ PTMTIMER const pTimer = &pQueue->paTimers[idxTimer]; \
+ AssertReturnVoid(pTimer->hSelf == a_hTimer)
+#else
+# define TMTIMER_HANDLE_TO_VARS_RETURN_VOID(a_pVM, a_hTimer) \
+ uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \
+ & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \
+ AssertReturnVoid(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues)); \
+ AssertCompile(RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues) == RT_ELEMENTS((a_pVM)->tmr0.s.aTimerQueues)); \
+ PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; RT_NOREF(pQueue); \
+ PTMTIMERQUEUER0 const pQueueCC = &(a_pVM)->tmr0.s.aTimerQueues[idxQueue]; \
+ \
+ uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \
+    AssertReturnVoid(idxTimer < pQueueCC->cTimersAlloc); \
+ \
+ PTMTIMER const pTimer = &pQueueCC->paTimers[idxTimer]; \
+ AssertReturnVoid(pTimer->hSelf == a_hTimer); \
+ Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0); \
+ Assert(VM_IS_EMT(pVM))
+#endif
+
+#endif /* !VMM_INCLUDED_SRC_include_TMInline_h */
+
diff --git a/src/VBox/VMM/include/TMInternal.h b/src/VBox/VMM/include/TMInternal.h
new file mode 100644
index 00000000..a012f8c6
--- /dev/null
+++ b/src/VBox/VMM/include/TMInternal.h
@@ -0,0 +1,886 @@
+/* $Id: TMInternal.h $ */
+/** @file
+ * TM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_TMInternal_h
+#define VMM_INCLUDED_SRC_include_TMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <iprt/time.h>
+#include <iprt/timer.h>
+#include <iprt/assert.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_tm_int Internal
+ * @ingroup grp_tm
+ * @internal
+ * @{
+ */
+
+/** Frequency of the real clock. */
+#define TMCLOCK_FREQ_REAL UINT32_C(1000)
+/** Frequency of the virtual clock. */
+#define TMCLOCK_FREQ_VIRTUAL UINT32_C(1000000000)
+
+
+/**
+ * Timer type.
+ */
+typedef enum TMTIMERTYPE
+{
+ /** Invalid zero value. */
+ TMTIMERTYPE_INVALID = 0,
+ /** Device timer. */
+ TMTIMERTYPE_DEV,
+ /** USB device timer. */
+ TMTIMERTYPE_USB,
+ /** Driver timer. */
+ TMTIMERTYPE_DRV,
+    /** Internal timer. */
+ TMTIMERTYPE_INTERNAL
+} TMTIMERTYPE;
+
+/**
+ * Timer state
+ */
+typedef enum TMTIMERSTATE
+{
+ /** Invalid zero entry (used for table entry zero). */
+ TMTIMERSTATE_INVALID = 0,
+ /** Timer is stopped. */
+ TMTIMERSTATE_STOPPED,
+ /** Timer is active. */
+ TMTIMERSTATE_ACTIVE,
+ /** Timer is expired, getting expire and unlinking. */
+ TMTIMERSTATE_EXPIRED_GET_UNLINK,
+ /** Timer is expired and is being delivered. */
+ TMTIMERSTATE_EXPIRED_DELIVER,
+
+ /** Timer is stopped but still in the active list.
+ * Currently in the ScheduleTimers list. */
+ TMTIMERSTATE_PENDING_STOP,
+ /** Timer is stopped but needs unlinking from the ScheduleTimers list.
+ * Currently in the ScheduleTimers list. */
+ TMTIMERSTATE_PENDING_STOP_SCHEDULE,
+ /** Timer is being modified and will soon be pending scheduling.
+ * Currently in the ScheduleTimers list. */
+ TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE,
+ /** Timer is pending scheduling.
+ * Currently in the ScheduleTimers list. */
+ TMTIMERSTATE_PENDING_SCHEDULE,
+ /** Timer is being modified and will soon be pending rescheduling.
+ * Currently in the ScheduleTimers list and the active list. */
+ TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE,
+ /** Timer is modified and is now pending rescheduling.
+ * Currently in the ScheduleTimers list and the active list. */
+ TMTIMERSTATE_PENDING_RESCHEDULE,
+ /** Timer is being destroyed. */
+ TMTIMERSTATE_DESTROY,
+ /** Timer is free. */
+ TMTIMERSTATE_FREE
+} TMTIMERSTATE;
+
+/** Predicate that returns true if the given state is pending scheduling or
+ * rescheduling of any kind. Will reference the argument more than once! */
+#define TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState) \
+ ( (enmState) <= TMTIMERSTATE_PENDING_RESCHEDULE \
+ && (enmState) >= TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE)
+
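+/*
+ * Illustrative note, not part of the original patch: the predicate above
+ * depends on the four PENDING scheduling states being declared contiguously
+ * in TMTIMERSTATE; compile-time checks along these lines would document
+ * that assumption.
+ */
+#if 0 /* example only */
+AssertCompile(TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE   + 1 == TMTIMERSTATE_PENDING_SCHEDULE);
+AssertCompile(TMTIMERSTATE_PENDING_SCHEDULE              + 1 == TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE);
+AssertCompile(TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE + 1 == TMTIMERSTATE_PENDING_RESCHEDULE);
+#endif
+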
+/** @name Timer handle value elements
+ * @{ */
+#define TMTIMERHANDLE_RANDOM_MASK UINT64_C(0xffffffffff000000)
+#define TMTIMERHANDLE_QUEUE_IDX_SHIFT 16
+#define TMTIMERHANDLE_QUEUE_IDX_MASK UINT64_C(0x0000000000ff0000)
+#define TMTIMERHANDLE_QUEUE_IDX_SMASK UINT64_C(0x00000000000000ff)
+#define TMTIMERHANDLE_TIMER_IDX_MASK UINT64_C(0x000000000000ffff)
+/** @} */
+
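+/*
+ * Illustrative sketch, not part of the original patch: how the masks above
+ * combine into a timer handle value.  The helper name is made up; real
+ * handle creation happens at timer allocation time in the TM ring-3/ring-0
+ * code.
+ */
+#if 0 /* example only */
+DECLINLINE(TMTIMERHANDLE) tmExampleComposeHandle(uint64_t uRandom, uintptr_t idxQueue, uintptr_t idxTimer)
+{
+    return (uRandom & TMTIMERHANDLE_RANDOM_MASK)                                                  /* bits 24..63 */
+         | (((uint64_t)idxQueue << TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_MASK) /* bits 16..23 */
+         | ((uint64_t)idxTimer & TMTIMERHANDLE_TIMER_IDX_MASK);                                   /* bits  0..15 */
+}
+#endif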
+
+/**
+ * Internal representation of a timer.
+ *
+ * For correct serialization (without the use of semaphores and
+ * other blocking/slow constructs) certain rules apply to updating
+ * this structure:
+ *      - For threads other than EMT only u64Expire, enmState and idxScheduleNext
+ *        are changeable. Everything else is out of bounds.
+ *      - Updating of the u64Expire member can only happen in the TMTIMERSTATE_STOPPED
+ *        and TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE states.
+ * - Timers in the TMTIMERSTATE_EXPIRED state are only accessible from EMT.
+ * - Actual destruction of a timer can only be done at scheduling time.
+ */
+typedef struct TMTIMER
+{
+ /** Expire time. */
+ volatile uint64_t u64Expire;
+
+ /** Timer state. */
+ volatile TMTIMERSTATE enmState;
+    /** The index of the next timer in the schedule list. */
+ uint32_t volatile idxScheduleNext;
+
+ /** The index of the next timer in the chain. */
+ uint32_t idxNext;
+ /** The index of the previous timer in the chain. */
+ uint32_t idxPrev;
+
+    /** The timer frequency hint. This is 0 if no hint was given. */
+ uint32_t volatile uHzHint;
+ /** Timer callback type. */
+ TMTIMERTYPE enmType;
+
+    /** Its own handle value. */
+ TMTIMERHANDLE hSelf;
+ /** TMTIMER_FLAGS_XXX. */
+ uint32_t fFlags;
+ /** Explicit alignment padding. */
+ uint32_t u32Alignment;
+
+ /** User argument. */
+ RTR3PTR pvUser;
+ /** The critical section associated with the lock. */
+ R3PTRTYPE(PPDMCRITSECT) pCritSect;
+
+ /* --- new cache line (64-bit / 64 bytes) --- */
+
+ /** Type specific data. */
+ union
+ {
+ /** TMTIMERTYPE_DEV. */
+ struct
+ {
+ /** Callback. */
+ R3PTRTYPE(PFNTMTIMERDEV) pfnTimer;
+ /** Device instance. */
+ PPDMDEVINSR3 pDevIns;
+ } Dev;
+
+        /** TMTIMERTYPE_USB. */
+ struct
+ {
+ /** Callback. */
+ R3PTRTYPE(PFNTMTIMERUSB) pfnTimer;
+ /** USB device instance. */
+ PPDMUSBINS pUsbIns;
+ } Usb;
+
+ /** TMTIMERTYPE_DRV. */
+ struct
+ {
+ /** Callback. */
+ R3PTRTYPE(PFNTMTIMERDRV) pfnTimer;
+            /** Driver instance. */
+ R3PTRTYPE(PPDMDRVINS) pDrvIns;
+ } Drv;
+
+ /** TMTIMERTYPE_INTERNAL. */
+ struct
+ {
+ /** Callback. */
+ R3PTRTYPE(PFNTMTIMERINT) pfnTimer;
+ } Internal;
+ } u;
+
+ /** The timer name. */
+ char szName[32];
+
+ /** @todo think of two useful release statistics counters here to fill up the
+ * cache line. */
+#ifndef VBOX_WITH_STATISTICS
+ uint64_t auAlignment2[2];
+#else
+ STAMPROFILE StatTimer;
+ STAMPROFILE StatCritSectEnter;
+ STAMCOUNTER StatGet;
+ STAMCOUNTER StatSetAbsolute;
+ STAMCOUNTER StatSetRelative;
+ STAMCOUNTER StatStop;
+ uint64_t auAlignment2[6];
+#endif
+} TMTIMER;
+AssertCompileMemberSize(TMTIMER, u64Expire, sizeof(uint64_t));
+AssertCompileMemberSize(TMTIMER, enmState, sizeof(uint32_t));
+AssertCompileSizeAlignment(TMTIMER, 64);
+
+
+/**
+ * Updates a timer state in the correct atomic manner.
+ */
+#if 1
+# define TM_SET_STATE(pTimer, state) \
+ ASMAtomicWriteU32((uint32_t volatile *)&(pTimer)->enmState, state)
+#else
+# define TM_SET_STATE(pTimer, state) \
+ do { \
+ uint32_t uOld1 = (pTimer)->enmState; \
+ Log(("%s: %p: %d -> %d\n", __FUNCTION__, (pTimer), (pTimer)->enmState, state)); \
+ uint32_t uOld2 = ASMAtomicXchgU32((uint32_t volatile *)&(pTimer)->enmState, state); \
+ Assert(uOld1 == uOld2); \
+ } while (0)
+#endif
+
+/**
+ * Tries to updates a timer state in the correct atomic manner.
+ */
+#if 1
+# define TM_TRY_SET_STATE(pTimer, StateNew, StateOld, fRc) \
+ (fRc) = ASMAtomicCmpXchgU32((uint32_t volatile *)&(pTimer)->enmState, StateNew, StateOld)
+#else
+# define TM_TRY_SET_STATE(pTimer, StateNew, StateOld, fRc) \
+ do { (fRc) = ASMAtomicCmpXchgU32((uint32_t volatile *)&(pTimer)->enmState, StateNew, StateOld); \
+ Log(("%s: %p: %d -> %d %RTbool\n", __FUNCTION__, (pTimer), StateOld, StateNew, fRc)); \
+ } while (0)
+#endif
+
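+/*
+ * Illustrative sketch, not part of the original patch: the optimistic
+ * compare-and-exchange pattern the TM_TRY_SET_STATE macro above is meant
+ * for.  The helper name and the specific transition are made up; the real
+ * state machines live in TMAll.cpp.
+ */
+#if 0 /* example only */
+static bool tmExampleTryMarkPendingStop(PTMTIMER pTimer)
+{
+    for (;;)
+    {
+        TMTIMERSTATE const enmState = pTimer->enmState;
+        if (enmState != TMTIMERSTATE_ACTIVE)
+            return false;                   /* not ours to transition */
+        bool fRc;
+        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_PENDING_STOP, TMTIMERSTATE_ACTIVE, fRc);
+        if (fRc)
+            return true;                    /* transition succeeded */
+        /* Lost a race with another thread; re-read the state and retry. */
+    }
+}
+#endif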
+
+/**
+ * A timer queue, shared.
+ */
+typedef struct TMTIMERQUEUE
+{
+    /** The ring-3 mapping of the timer table. */
+ R3PTRTYPE(PTMTIMER) paTimers;
+
+ /** The cached expire time for this queue.
+ * Updated by EMT when scheduling the queue or modifying the head timer.
+ * Assigned UINT64_MAX when there is no head timer. */
+ uint64_t u64Expire;
+ /** Doubly linked list of active timers.
+ *
+     * When no scheduling is pending, this list will be ordered by expire time (ascending).
+ * Access is serialized by only letting the emulation thread (EMT) do changes.
+ */
+ uint32_t idxActive;
+ /** List of timers pending scheduling of some kind.
+ *
+     * Timer states allowed in the list are the TMTIMERSTATE_PENDING_* ones
+     * (see TMTIMERSTATE above), e.g. TMTIMERSTATE_PENDING_STOP,
+     * TMTIMERSTATE_PENDING_SCHEDULE and TMTIMERSTATE_PENDING_RESCHEDULE.
+ */
+ uint32_t volatile idxSchedule;
+ /** The clock for this queue. */
+ TMCLOCK enmClock; /**< @todo consider duplicating this in TMTIMERQUEUER0 for better cache locality (paTimers). */
+
+ /** The size of the paTimers allocation (in entries). */
+ uint32_t cTimersAlloc;
+ /** Number of free timer entries. */
+ uint32_t cTimersFree;
+ /** Where to start looking for free timers. */
+ uint32_t idxFreeHint;
+ /** The queue name. */
+ char szName[16];
+ /** Set when a thread is doing scheduling and callback. */
+ bool volatile fBeingProcessed;
+ /** Set if we've disabled growing. */
+ bool fCannotGrow;
+    /** Align on a 64-byte boundary. */
+ bool afAlignment1[2];
+ /** The current max timer Hz hint. */
+ uint32_t volatile uMaxHzHint;
+
+ /* --- new cache line (64-bit / 64 bytes) --- */
+
+ /** Time spent doing scheduling and timer callbacks. */
+ STAMPROFILE StatDo;
+ /** The thread servicing this queue, NIL if none. */
+ R3PTRTYPE(RTTHREAD) hThread;
+ /** The handle to the event semaphore the worker thread sleeps on. */
+ SUPSEMEVENT hWorkerEvt;
+ /** Absolute sleep deadline for the worker (enmClock time). */
+ uint64_t volatile tsWorkerWakeup;
+ uint64_t u64Alignment2;
+
+ /** Lock serializing the active timer list and associated work. */
+ PDMCRITSECT TimerLock;
+ /** Lock serializing timer allocation and deallocation.
+ * @note This may be used in read-mode all over the place if we later
+ * implement runtime array growing. */
+ PDMCRITSECTRW AllocLock;
+} TMTIMERQUEUE;
+AssertCompileMemberAlignment(TMTIMERQUEUE, AllocLock, 64);
+AssertCompileSizeAlignment(TMTIMERQUEUE, 64);
+/** Pointer to a timer queue. */
+typedef TMTIMERQUEUE *PTMTIMERQUEUE;
+
+/**
+ * A timer queue, ring-0 only bits.
+ */
+typedef struct TMTIMERQUEUER0
+{
+ /** The size of the paTimers allocation (in entries). */
+ uint32_t cTimersAlloc;
+ uint32_t uAlignment;
+ /** The ring-0 mapping of the timer table. */
+ R0PTRTYPE(PTMTIMER) paTimers;
+ /** Handle to the timer table allocation. */
+ RTR0MEMOBJ hMemObj;
+ /** Handle to the ring-3 mapping of the timer table. */
+ RTR0MEMOBJ hMapObj;
+} TMTIMERQUEUER0;
+/** Pointer to the ring-0 timer queue data. */
+typedef TMTIMERQUEUER0 *PTMTIMERQUEUER0;
+
+/** Pointer to the current context data for a timer queue.
+ * @note In ring-3 this is the same as the shared data. */
+#ifdef IN_RING3
+typedef TMTIMERQUEUE *PTMTIMERQUEUECC;
+#else
+typedef TMTIMERQUEUER0 *PTMTIMERQUEUECC;
+#endif
+/** Helper macro for getting the current context queue pointer. */
+#ifdef IN_RING3
+# define TM_GET_TIMER_QUEUE_CC(a_pVM, a_idxQueue, a_pQueueShared) (a_pQueueShared)
+#else
+# define TM_GET_TIMER_QUEUE_CC(a_pVM, a_idxQueue, a_pQueueShared) (&(a_pVM)->tmr0.s.aTimerQueues[a_idxQueue])
+#endif
+
+
+/**
+ * CPU load data set.
+ * Mainly used by tmR3CpuLoadTimer.
+ */
+typedef struct TMCPULOADSTATE
+{
+ /** The percent of the period spent executing guest code. */
+ uint8_t cPctExecuting;
+ /** The percent of the period spent halted. */
+ uint8_t cPctHalted;
+ /** The percent of the period spent on other things. */
+ uint8_t cPctOther;
+ /** Explicit alignment padding */
+ uint8_t au8Alignment[1];
+ /** Index into aHistory of the current entry. */
+ uint16_t volatile idxHistory;
+ /** Number of valid history entries before idxHistory. */
+ uint16_t volatile cHistoryEntries;
+
+ /** Previous cNsTotal value. */
+ uint64_t cNsPrevTotal;
+ /** Previous cNsExecuting value. */
+ uint64_t cNsPrevExecuting;
+ /** Previous cNsHalted value. */
+ uint64_t cNsPrevHalted;
+ /** Data for the last 30 min (given an interval of 1 second). */
+ struct
+ {
+ uint8_t cPctExecuting;
+ /** The percent of the period spent halted. */
+ uint8_t cPctHalted;
+ /** The percent of the period spent on other things. */
+ uint8_t cPctOther;
+ } aHistory[30*60];
+} TMCPULOADSTATE;
+AssertCompileSizeAlignment(TMCPULOADSTATE, 8);
+AssertCompileMemberAlignment(TMCPULOADSTATE, cNsPrevTotal, 8);
+/** Pointer to a CPU load data set. */
+typedef TMCPULOADSTATE *PTMCPULOADSTATE;
+
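+/*
+ * Illustrative sketch, not part of the original patch: how the cNsPrev*
+ * members above are typically turned into the cPct* values for one sampling
+ * period.  The helper name and parameters are made up; the real code is in
+ * tmR3CpuLoadTimer and friends in TM.cpp.
+ */
+#if 0 /* example only */
+static void tmExampleCpuLoadSample(PTMCPULOADSTATE pState, uint64_t cNsTotal, uint64_t cNsExecuting, uint64_t cNsHalted)
+{
+    uint64_t const cNsPeriod = cNsTotal - pState->cNsPrevTotal;
+    if (cNsPeriod)
+    {
+        pState->cPctExecuting = (uint8_t)((cNsExecuting - pState->cNsPrevExecuting) * 100 / cNsPeriod);
+        pState->cPctHalted    = (uint8_t)((cNsHalted    - pState->cNsPrevHalted)    * 100 / cNsPeriod);
+        pState->cPctOther     = (uint8_t)(100 - pState->cPctExecuting - pState->cPctHalted);
+    }
+    pState->cNsPrevTotal     = cNsTotal;
+    pState->cNsPrevExecuting = cNsExecuting;
+    pState->cNsPrevHalted    = cNsHalted;
+}
+#endif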
+
+/**
+ * TSC mode.
+ *
+ * The main modes of how TM implements the TSC clock (TMCLOCK_TSC).
+ */
+typedef enum TMTSCMODE
+{
+ /** The guest TSC is an emulated, virtual TSC. */
+ TMTSCMODE_VIRT_TSC_EMULATED = 1,
+ /** The guest TSC is an offset of the real TSC. */
+ TMTSCMODE_REAL_TSC_OFFSET,
+ /** The guest TSC is dynamically derived through emulating or offsetting. */
+ TMTSCMODE_DYNAMIC,
+ /** The native API provides it. */
+ TMTSCMODE_NATIVE_API
+} TMTSCMODE;
+AssertCompileSize(TMTSCMODE, sizeof(uint32_t));
+
+
+/**
+ * TM VM Instance data.
+ * Changes to this must be checked against the padding of the cfgm union in VM!
+ */
+typedef struct TM
+{
+ /** Timer queues for the different clock types.
+     * @note This member is first in the structure to ensure cache-line alignment. */
+ TMTIMERQUEUE aTimerQueues[TMCLOCK_MAX];
+
+ /** The current TSC mode of the VM.
+ * Config variable: Mode (string). */
+ TMTSCMODE enmTSCMode;
+ /** The original TSC mode of the VM. */
+ TMTSCMODE enmOriginalTSCMode;
+ /** Whether the TSC is tied to the execution of code.
+ * Config variable: TSCTiedToExecution (bool) */
+ bool fTSCTiedToExecution;
+ /** Modifier for fTSCTiedToExecution which pauses the TSC while halting if true.
+ * Config variable: TSCNotTiedToHalt (bool) */
+ bool fTSCNotTiedToHalt;
+ /** Whether TM TSC mode switching is allowed at runtime. */
+ bool fTSCModeSwitchAllowed;
+ /** Whether the guest has enabled use of paravirtualized TSC. */
+ bool fParavirtTscEnabled;
+ /** The ID of the virtual CPU that normally runs the timers. */
+ VMCPUID idTimerCpu;
+
+    /** The number of CPU clock ticks per second of the host CPU. */
+ uint64_t cTSCTicksPerSecondHost;
+ /** The number of CPU clock ticks per second (TMCLOCK_TSC).
+ * Config variable: TSCTicksPerSecond (64-bit unsigned int)
+ * The config variable implies @c enmTSCMode would be
+ * TMTSCMODE_VIRT_TSC_EMULATED. */
+ uint64_t cTSCTicksPerSecond;
+ /** The TSC difference introduced by pausing the VM. */
+ uint64_t offTSCPause;
+ /** The TSC value when the last TSC was paused. */
+ uint64_t u64LastPausedTSC;
+ /** CPU TSCs ticking indicator (one for each VCPU). */
+ uint32_t volatile cTSCsTicking;
+
+ /** Virtual time ticking enabled indicator (counter for each VCPU). (TMCLOCK_VIRTUAL) */
+ uint32_t volatile cVirtualTicking;
+ /** Virtual time is not running at 100%. */
+ bool fVirtualWarpDrive;
+ /** Virtual timer synchronous time ticking enabled indicator (bool). (TMCLOCK_VIRTUAL_SYNC) */
+ bool volatile fVirtualSyncTicking;
+ /** Virtual timer synchronous time catch-up active. */
+ bool volatile fVirtualSyncCatchUp;
+ /** Alignment padding. */
+ bool afAlignment1[1];
+ /** WarpDrive percentage.
+     * 100% is normal (fVirtualWarpDrive == false). When other than 100% we apply
+ * this percentage to the raw time source for the period it's been valid in,
+ * i.e. since u64VirtualWarpDriveStart. */
+ uint32_t u32VirtualWarpDrivePercentage;
+
+    /** The offset of the virtual clock relative to its time source.
+ * Only valid if fVirtualTicking is set. */
+ uint64_t u64VirtualOffset;
+ /** The guest virtual time when fVirtualTicking is cleared. */
+ uint64_t u64Virtual;
+ /** When the Warp drive was started or last adjusted.
+ * Only valid when fVirtualWarpDrive is set. */
+ uint64_t u64VirtualWarpDriveStart;
+ /** The previously returned nano TS.
+     * This handles TSC drift on SMP systems and expired intervals.
+     * The valid range is u64NanoTS to u64NanoTS + 1000000000 (i.e. 1 sec). */
+ uint64_t volatile u64VirtualRawPrev;
+ /** The ring-3 data structure for the RTTimeNanoTS workers used by tmVirtualGetRawNanoTS. */
+ RTTIMENANOTSDATAR3 VirtualGetRawData;
+ /** Pointer to the ring-3 tmVirtualGetRawNanoTS worker function. */
+ R3PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRaw;
+ /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared.
+ * When fVirtualSyncTicking is set it holds the last time returned to
+ * the guest (while the lock was held). */
+ uint64_t volatile u64VirtualSync;
+ /** The offset of the timer synchronous virtual clock (TMCLOCK_VIRTUAL_SYNC) relative
+ * to the virtual clock (TMCLOCK_VIRTUAL).
+ * (This is accessed by the timer thread and must be updated atomically.) */
+ uint64_t volatile offVirtualSync;
+ /** The offset into offVirtualSync that's been irrevocably given up by failed catch-up attempts.
+ * Thus the current lag is offVirtualSync - offVirtualSyncGivenUp. */
+ uint64_t offVirtualSyncGivenUp;
+ /** The TMCLOCK_VIRTUAL at the previous TMVirtualGetSync call when catch-up is active. */
+ uint64_t volatile u64VirtualSyncCatchUpPrev;
+ /** The current catch-up percentage. */
+ uint32_t volatile u32VirtualSyncCatchUpPercentage;
+ /** How much slack when processing timers. */
+ uint32_t u32VirtualSyncScheduleSlack;
+ /** When to stop catch-up. */
+ uint64_t u64VirtualSyncCatchUpStopThreshold;
+ /** When to give up catch-up. */
+ uint64_t u64VirtualSyncCatchUpGiveUpThreshold;
+/** @def TM_MAX_CATCHUP_PERIODS
+ * The number of catchup rates. */
+#define TM_MAX_CATCHUP_PERIODS 10
+ /** The aggressiveness of the catch-up relative to how far we've lagged behind.
+ * The idea is to have increasing catch-up percentage as the lag increases. */
+ struct TMCATCHUPPERIOD
+ {
+ uint64_t u64Start; /**< When this period starts. (u64VirtualSyncOffset). */
+ uint32_t u32Percentage; /**< The catch-up percent to apply. */
+ uint32_t u32Alignment; /**< Structure alignment */
+ } aVirtualSyncCatchUpPeriods[TM_MAX_CATCHUP_PERIODS];
+
+ union
+ {
+ /** Combined value for updating. */
+ uint64_t volatile u64Combined;
+ struct
+ {
+ /** Bitmap indicating which timer queues needs their uMaxHzHint updated. */
+ uint32_t volatile bmNeedsUpdating;
+ /** The current max timer Hz hint. */
+ uint32_t volatile uMax;
+ } s;
+ } HzHint;
+ /** @cfgm{/TM/HostHzMax, uint32_t, Hz, 0, UINT32_MAX, 20000}
+ * The max host Hz frequency hint returned by TMCalcHostTimerFrequency. */
+ uint32_t cHostHzMax;
+ /** @cfgm{/TM/HostHzFudgeFactorTimerCpu, uint32_t, Hz, 0, UINT32_MAX, 111}
+ * The number of Hz TMCalcHostTimerFrequency adds for the timer CPU. */
+ uint32_t cPctHostHzFudgeFactorTimerCpu;
+ /** @cfgm{/TM/HostHzFudgeFactorOtherCpu, uint32_t, Hz, 0, UINT32_MAX, 110}
+ * The number of Hz TMCalcHostTimerFrequency adds for the other CPUs. */
+ uint32_t cPctHostHzFudgeFactorOtherCpu;
+ /** @cfgm{/TM/HostHzFudgeFactorCatchUp100, uint32_t, Hz, 0, UINT32_MAX, 300}
+ * The fudge factor (expressed in percent) that catch-up percentages below
+ * 100% is multiplied by. */
+ uint32_t cPctHostHzFudgeFactorCatchUp100;
+ /** @cfgm{/TM/HostHzFudgeFactorCatchUp200, uint32_t, Hz, 0, UINT32_MAX, 250}
+ * The fudge factor (expressed in percent) that catch-up percentages
+ * 100%-199% is multiplied by. */
+ uint32_t cPctHostHzFudgeFactorCatchUp200;
+ /** @cfgm{/TM/HostHzFudgeFactorCatchUp400, uint32_t, Hz, 0, UINT32_MAX, 200}
+ * The fudge factor (expressed in percent) that catch-up percentages
+ * 200%-399% is multiplied by. */
+ uint32_t cPctHostHzFudgeFactorCatchUp400;
+
+ /** The UTC offset in ns.
+ * This is *NOT* for converting UTC to local time. It is for converting real
+     * world UTC time to VM UTC time. This feature is intended for doing date
+ * testing of software and similar.
+ * @todo Implement warpdrive on UTC. */
+ int64_t offUTC;
+ /** The last value TMR3UtcNow returned. */
+ int64_t volatile nsLastUtcNow;
+ /** File to touch on UTC jump. */
+ R3PTRTYPE(char *) pszUtcTouchFileOnJump;
+
+ /** Pointer to our R3 mapping of the GIP. */
+ R3PTRTYPE(void *) pvGIPR3;
+
+    /** The schedule timer handle (runtime timer).
+     * This timer will do frequent checks on pending queue schedules and
+     * raise VM_FF_TIMER to draw the EMTs' attention to them.
+ */
+ R3PTRTYPE(PRTTIMER) pTimer;
+ /** Interval in milliseconds of the pTimer timer. */
+ uint32_t u32TimerMillies;
+
+ /** Indicates that queues are being run. */
+ bool volatile fRunningQueues;
+ /** Indicates that the virtual sync queue is being run. */
+ bool volatile fRunningVirtualSyncQueue;
+ /** Alignment */
+ bool afAlignment3[2];
+
+ /** Lock serializing access to the VirtualSync clock and the associated
+ * timer queue.
+ * @todo Consider merging this with the TMTIMERQUEUE::TimerLock for the
+ * virtual sync queue. */
+ PDMCRITSECT VirtualSyncLock;
+
+ /** CPU load state for all the virtual CPUs (tmR3CpuLoadTimer). */
+ TMCPULOADSTATE CpuLoad;
+
+ /** TMR3TimerQueuesDo
+ * @{ */
+ STAMPROFILE StatDoQueues;
+ /** @} */
+ /** tmSchedule
+ * @{ */
+ STAMPROFILE StatScheduleOneRZ;
+ STAMPROFILE StatScheduleOneR3;
+ STAMCOUNTER StatScheduleSetFF;
+ STAMCOUNTER StatPostponedR3;
+ STAMCOUNTER StatPostponedRZ;
+ /** @} */
+ /** Read the time
+ * @{ */
+ STAMCOUNTER StatVirtualGet;
+ STAMCOUNTER StatVirtualGetSetFF;
+ STAMCOUNTER StatVirtualSyncGet;
+ STAMCOUNTER StatVirtualSyncGetAdjLast;
+ STAMCOUNTER StatVirtualSyncGetELoop;
+ STAMCOUNTER StatVirtualSyncGetExpired;
+ STAMCOUNTER StatVirtualSyncGetLockless;
+ STAMCOUNTER StatVirtualSyncGetLocked;
+ STAMCOUNTER StatVirtualSyncGetSetFF;
+ STAMCOUNTER StatVirtualPause;
+ STAMCOUNTER StatVirtualResume;
+ /** @} */
+ /** TMTimerPoll
+ * @{ */
+ STAMCOUNTER StatPoll;
+ STAMCOUNTER StatPollAlreadySet;
+ STAMCOUNTER StatPollELoop;
+ STAMCOUNTER StatPollMiss;
+ STAMCOUNTER StatPollRunning;
+ STAMCOUNTER StatPollSimple;
+ STAMCOUNTER StatPollVirtual;
+ STAMCOUNTER StatPollVirtualSync;
+ /** @} */
+ /** TMTimerSet sans virtual sync timers.
+ * @{ */
+ STAMCOUNTER StatTimerSet;
+ STAMCOUNTER StatTimerSetOpt;
+ STAMPROFILE StatTimerSetRZ;
+ STAMPROFILE StatTimerSetR3;
+ STAMCOUNTER StatTimerSetStStopped;
+ STAMCOUNTER StatTimerSetStExpDeliver;
+ STAMCOUNTER StatTimerSetStActive;
+ STAMCOUNTER StatTimerSetStPendStop;
+ STAMCOUNTER StatTimerSetStPendStopSched;
+ STAMCOUNTER StatTimerSetStPendSched;
+ STAMCOUNTER StatTimerSetStPendResched;
+ STAMCOUNTER StatTimerSetStOther;
+ /** @} */
+ /** TMTimerSet on virtual sync timers.
+ * @{ */
+ STAMCOUNTER StatTimerSetVs;
+ STAMPROFILE StatTimerSetVsRZ;
+ STAMPROFILE StatTimerSetVsR3;
+ STAMCOUNTER StatTimerSetVsStStopped;
+ STAMCOUNTER StatTimerSetVsStExpDeliver;
+ STAMCOUNTER StatTimerSetVsStActive;
+ /** @} */
+ /** TMTimerSetRelative sans virtual sync timers
+ * @{ */
+ STAMCOUNTER StatTimerSetRelative;
+ STAMPROFILE StatTimerSetRelativeRZ;
+ STAMPROFILE StatTimerSetRelativeR3;
+ STAMCOUNTER StatTimerSetRelativeOpt;
+ STAMCOUNTER StatTimerSetRelativeStStopped;
+ STAMCOUNTER StatTimerSetRelativeStExpDeliver;
+ STAMCOUNTER StatTimerSetRelativeStActive;
+ STAMCOUNTER StatTimerSetRelativeStPendStop;
+ STAMCOUNTER StatTimerSetRelativeStPendStopSched;
+ STAMCOUNTER StatTimerSetRelativeStPendSched;
+ STAMCOUNTER StatTimerSetRelativeStPendResched;
+ STAMCOUNTER StatTimerSetRelativeStOther;
+ /** @} */
+ /** TMTimerSetRelative on virtual sync timers.
+ * @{ */
+ STAMCOUNTER StatTimerSetRelativeVs;
+ STAMPROFILE StatTimerSetRelativeVsRZ;
+ STAMPROFILE StatTimerSetRelativeVsR3;
+ STAMCOUNTER StatTimerSetRelativeVsStStopped;
+ STAMCOUNTER StatTimerSetRelativeVsStExpDeliver;
+ STAMCOUNTER StatTimerSetRelativeVsStActive;
+ /** @} */
+ /** TMTimerStop sans virtual sync.
+ * @{ */
+ STAMPROFILE StatTimerStopRZ;
+ STAMPROFILE StatTimerStopR3;
+ /** @} */
+ /** TMTimerStop on virtual sync timers.
+ * @{ */
+ STAMPROFILE StatTimerStopVsRZ;
+ STAMPROFILE StatTimerStopVsR3;
+ /** @} */
+ /** VirtualSync - Running and Catching Up
+ * @{ */
+ STAMCOUNTER StatVirtualSyncRun;
+ STAMCOUNTER StatVirtualSyncRunRestart;
+ STAMPROFILE StatVirtualSyncRunSlack;
+ STAMCOUNTER StatVirtualSyncRunStop;
+ STAMCOUNTER StatVirtualSyncRunStoppedAlready;
+ STAMCOUNTER StatVirtualSyncGiveUp;
+ STAMCOUNTER StatVirtualSyncGiveUpBeforeStarting;
+ STAMPROFILEADV StatVirtualSyncCatchup;
+ STAMCOUNTER aStatVirtualSyncCatchupInitial[TM_MAX_CATCHUP_PERIODS];
+ STAMCOUNTER aStatVirtualSyncCatchupAdjust[TM_MAX_CATCHUP_PERIODS];
+ /** @} */
+ /** TMR3VirtualSyncFF (non dedicated EMT). */
+ STAMPROFILE StatVirtualSyncFF;
+ /** The timer callback. */
+ STAMCOUNTER StatTimerCallbackSetFF;
+ STAMCOUNTER StatTimerCallback;
+
+ /** Calls to TMCpuTickSet. */
+ STAMCOUNTER StatTSCSet;
+
+ /** TSC starts and stops. */
+ STAMCOUNTER StatTSCPause;
+ STAMCOUNTER StatTSCResume;
+
+ /** @name Reasons for refusing TSC offsetting in TMCpuTickCanUseRealTSC.
+ * @{ */
+ STAMCOUNTER StatTSCNotFixed;
+ STAMCOUNTER StatTSCNotTicking;
+ STAMCOUNTER StatTSCCatchupLE010;
+ STAMCOUNTER StatTSCCatchupLE025;
+ STAMCOUNTER StatTSCCatchupLE100;
+ STAMCOUNTER StatTSCCatchupOther;
+ STAMCOUNTER StatTSCWarp;
+ STAMCOUNTER StatTSCUnderflow;
+ STAMCOUNTER StatTSCSyncNotTicking;
+ /** @} */
+} TM;
+/** Pointer to TM VM instance data. */
+typedef TM *PTM;
+
+
+/**
+ * TM VMCPU Instance data.
+ * Changes to this must be checked against the padding of the tm union in VM!
+ */
+typedef struct TMCPU
+{
+ /** The offset between the host tick (TSC/virtual depending on the TSC mode) and
+ * the guest tick. */
+ uint64_t offTSCRawSrc;
+ /** The guest TSC when fTicking is cleared. */
+ uint64_t u64TSC;
+ /** The last seen TSC by the guest. */
+ uint64_t u64TSCLastSeen;
+ /** CPU timestamp ticking enabled indicator (bool). (RDTSC) */
+ bool fTSCTicking;
+#ifdef VBOX_WITHOUT_NS_ACCOUNTING
+ bool afAlignment1[7]; /**< alignment padding */
+#else /* !VBOX_WITHOUT_NS_ACCOUNTING */
+
+ /** Set by the timer callback to trigger updating of statistics in
+ * TMNotifyEndOfExecution. */
+ bool volatile fUpdateStats;
+ bool afAlignment1[6];
+ /** The time not spent executing or halted.
+ * @note Only updated after halting and after the timer runs. */
+ uint64_t cNsOtherStat;
+ /** Reasonably up to date total run time value.
+ * @note Only updated after halting and after the timer runs. */
+ uint64_t cNsTotalStat;
+# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
+ /** Resettable copy of cNsOtherStat.
+ * @note Only updated after halting. */
+ STAMCOUNTER StatNsOther;
+ /** Resettable copy of cNsTotalStat.
+ * @note Only updated after halting. */
+ STAMCOUNTER StatNsTotal;
+# else
+ uint64_t auAlignment2[2];
+# endif
+
+ /** @name Core accounting data.
+ * @note Must be cache-line aligned and only written to by the EMT owning it.
+ * @{ */
+ /** The cNsXXX generation. */
+ uint32_t volatile uTimesGen;
+ /** Set if executing (between TMNotifyStartOfExecution and
+ * TMNotifyEndOfExecution). */
+ bool volatile fExecuting;
+ /** Set if halting (between TMNotifyStartOfHalt and TMNotifyEndOfHalt). */
+ bool volatile fHalting;
+ /** Set if we're suspended and nsStartTotal holds the accumulated cNsTotal value. */
+ bool volatile fSuspended;
+ bool afAlignment;
+ /** The nanosecond timestamp of the CPU start or resume.
+ * This is recalculated when the VM is started so that
+ * cNsTotal = RTTimeNanoTS() - nsStartTotal. */
+ uint64_t nsStartTotal;
+ /** The TSC of the last start-execute notification. */
+ uint64_t uTscStartExecuting;
+ /** The number of nanoseconds spent executing. */
+ uint64_t cNsExecuting;
+ /** The number of guest execution runs. */
+ uint64_t cPeriodsExecuting;
+ /** The nanosecond timestamp of the last start-halt notification. */
+ uint64_t nsStartHalting;
+ /** The number of nanoseconds being halted. */
+ uint64_t cNsHalted;
+ /** The number of halts. */
+ uint64_t cPeriodsHalted;
+ /** @} */
+
+# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
+ /** Resettable version of cNsExecuting. */
+ STAMPROFILE StatNsExecuting;
+ /** Long execution intervals. */
+ STAMPROFILE StatNsExecLong;
+ /** Short execution intervals. */
+ STAMPROFILE StatNsExecShort;
+ /** Tiny execution intervals. */
+ STAMPROFILE StatNsExecTiny;
+ /** Resettable version of cNsHalted. */
+ STAMPROFILE StatNsHalted;
+# endif
+
+ /** CPU load state for this virtual CPU (tmR3CpuLoadTimer). */
+ TMCPULOADSTATE CpuLoad;
+#endif
+} TMCPU;
+#ifndef VBOX_WITHOUT_NS_ACCOUNTING
+AssertCompileMemberAlignment(TMCPU, uTimesGen, 64);
+# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
+AssertCompileMemberAlignment(TMCPU, StatNsExecuting, 64);
+# else
+AssertCompileMemberAlignment(TMCPU, CpuLoad, 64);
+# endif
+#endif
+/** Pointer to TM VMCPU instance data. */
+typedef TMCPU *PTMCPU;
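+
+/* Illustrative sketch (assumed reader pattern, not necessarily the exact code):
+ * when ns accounting is compiled in, only the owning EMT writes the core
+ * accounting members, so another thread can take a consistent snapshot by
+ * re-checking uTimesGen, seqlock style:
+ *
+ *     uint32_t uGen;
+ *     uint64_t cNsExecuting, cNsHalted;
+ *     do
+ *     {
+ *         uGen         = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
+ *         cNsExecuting = pVCpu->tm.s.cNsExecuting;
+ *         cNsHalted    = pVCpu->tm.s.cNsHalted;
+ *     } while (uGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen));
+ */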
+
+
+/**
+ * TM data kept in the ring-0 GVM.
+ */
+typedef struct TMR0PERVM
+{
+ /** Timer queues for the different clock types. */
+ TMTIMERQUEUER0 aTimerQueues[TMCLOCK_MAX];
+
+ /** The ring-0 data structure for the RTTimeNanoTS workers used by tmVirtualGetRawNanoTS. */
+ RTTIMENANOTSDATAR0 VirtualGetRawData;
+ /** Pointer to the ring-0 tmVirtualGetRawNanoTS worker function. */
+ R0PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRaw;
+} TMR0PERVM;
+
+
+const char *tmTimerState(TMTIMERSTATE enmState);
+void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue);
+#ifdef VBOX_STRICT
+void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere);
+#endif
+void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers);
+
+uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM);
+int tmCpuTickPause(PVMCPUCC pVCpu);
+int tmCpuTickPauseLocked(PVMCC pVM, PVMCPUCC pVCpu);
+int tmCpuTickResume(PVMCC pVM, PVMCPUCC pVCpu);
+int tmCpuTickResumeLocked(PVMCC pVM, PVMCPUCC pVCpu);
+
+int tmVirtualPauseLocked(PVMCC pVM);
+int tmVirtualResumeLocked(PVMCC pVM);
+DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS,
+ uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS);
+DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra);
+DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
+ uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu);
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_TMInternal_h */
diff --git a/src/VBox/VMM/include/TRPMInternal.h b/src/VBox/VMM/include/TRPMInternal.h
new file mode 100644
index 00000000..de697f32
--- /dev/null
+++ b/src/VBox/VMM/include/TRPMInternal.h
@@ -0,0 +1,102 @@
+/* $Id: TRPMInternal.h $ */
+/** @file
+ * TRPM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_TRPMInternal_h
+#define VMM_INCLUDED_SRC_include_TRPMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pgm.h>
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_trpm_int Internals
+ * @ingroup grp_trpm
+ * @internal
+ * @{
+ */
+
+/**
+ * TRPM Data (part of VM)
+ *
+ * @note This used to be a big deal when we had raw-mode, now it's a dud. :-)
+ */
+typedef struct TRPM
+{
+ /** Statistics for interrupt handlers. */
+ STAMCOUNTER aStatForwardedIRQ[256];
+} TRPM;
+
+/** Pointer to TRPM Data. */
+typedef TRPM *PTRPM;
+
+
+/**
+ * Per CPU data for TRPM.
+ */
+typedef struct TRPMCPU
+{
+ /** Active Interrupt or trap vector number.
+ * If not UINT32_MAX this indicates that we're currently processing a
+ * interrupt, trap, fault, abort, whatever which have arrived at that
+ * vector number.
+ */
+ uint32_t uActiveVector;
+
+ /** Active trap type. */
+ TRPMEVENT enmActiveType;
+
+ /** Errorcode for the active interrupt/trap. */
+ uint32_t uActiveErrorCode;
+
+ /** Instruction length for software interrupts and software exceptions
+ * (\#BP, \#OF) */
+ uint8_t cbInstr;
+
+ /** Whether this \#DB trap is caused due to INT1/ICEBP. */
+ bool fIcebp;
+
+ /** CR2 at the time of the active exception. */
+ RTGCUINTPTR uActiveCR2;
+} TRPMCPU;
+
+/** Pointer to TRPMCPU Data. */
+typedef TRPMCPU *PTRPMCPU;
+/** Pointer to const TRPMCPU Data. */
+typedef const TRPMCPU *PCTRPMCPU;
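+
+/* Illustrative only (hypothetical helper, not part of the TRPM API): per the
+ * uActiveVector docs above, "no active event" is signalled by UINT32_MAX, so a
+ * pending-event check boils down to:
+ *
+ *     DECLINLINE(bool) trpmSketchHasActiveEvent(PCTRPMCPU pTrpmCpu)
+ *     {
+ *         return pTrpmCpu->uActiveVector != UINT32_MAX;
+ *     }
+ */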
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_TRPMInternal_h */
diff --git a/src/VBox/VMM/include/VMInternal.h b/src/VBox/VMM/include/VMInternal.h
new file mode 100644
index 00000000..d6bc9e68
--- /dev/null
+++ b/src/VBox/VMM/include/VMInternal.h
@@ -0,0 +1,495 @@
+/* $Id: VMInternal.h $ */
+/** @file
+ * VM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_VMInternal_h
+#define VMM_INCLUDED_SRC_include_VMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/vmm/vmapi.h>
+#include <iprt/assert.h>
+#include <iprt/critsect.h>
+#include <iprt/setjmp-without-sigmask.h>
+
+
+
+/** @defgroup grp_vm_int Internals
+ * @ingroup grp_vm
+ * @internal
+ * @{
+ */
+
+
+/**
+ * VM state change callback.
+ */
+typedef struct VMATSTATE
+{
+ /** Pointer to the next one. */
+ struct VMATSTATE *pNext;
+ /** Pointer to the callback. */
+ PFNVMATSTATE pfnAtState;
+ /** The user argument. */
+ void *pvUser;
+} VMATSTATE;
+/** Pointer to a VM state change callback. */
+typedef VMATSTATE *PVMATSTATE;
+
+
+/**
+ * VM error callback.
+ */
+typedef struct VMATERROR
+{
+ /** Pointer to the next one. */
+ struct VMATERROR *pNext;
+ /** Pointer to the callback. */
+ PFNVMATERROR pfnAtError;
+ /** The user argument. */
+ void *pvUser;
+} VMATERROR;
+/** Pointer to a VM error callback. */
+typedef VMATERROR *PVMATERROR;
+
+
+/**
+ * Chunk of memory allocated off the hypervisor heap in which
+ * we copy the error details.
+ */
+typedef struct VMERROR
+{
+ /** The size of the chunk. */
+ uint32_t cbAllocated;
+ /** The current offset into the chunk.
+ * We start by putting the filename and function immediately
+ * after the end of the buffer. */
+ uint32_t off;
+ /** Offset from the start of this structure to the file name. */
+ uint32_t offFile;
+ /** The line number. */
+ uint32_t iLine;
+ /** Offset from the start of this structure to the function name. */
+ uint32_t offFunction;
+ /** Offset from the start of this structure to the formatted message text. */
+ uint32_t offMessage;
+ /** The VBox status code. */
+ int32_t rc;
+} VMERROR, *PVMERROR;
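+
+/* Illustrative only: the strings live inside the same allocation and are
+ * addressed by byte offsets from the structure start, so (given a valid
+ * PVMERROR pErr) they can be recovered like this:
+ *
+ *     const char *pszFile     = pErr->offFile     ? (const char *)pErr + pErr->offFile     : NULL;
+ *     const char *pszFunction = pErr->offFunction ? (const char *)pErr + pErr->offFunction : NULL;
+ *     const char *pszMessage  = pErr->offMessage  ? (const char *)pErr + pErr->offMessage  : NULL;
+ */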
+
+
+/**
+ * VM runtime error callback.
+ */
+typedef struct VMATRUNTIMEERROR
+{
+ /** Pointer to the next one. */
+ struct VMATRUNTIMEERROR *pNext;
+ /** Pointer to the callback. */
+ PFNVMATRUNTIMEERROR pfnAtRuntimeError;
+ /** The user argument. */
+ void *pvUser;
+} VMATRUNTIMEERROR;
+/** Pointer to a VM error callback. */
+typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;
+
+
+/**
+ * Chunk of memory allocated off the hypervisor heap in which
+ * we copy the runtime error details.
+ */
+typedef struct VMRUNTIMEERROR
+{
+ /** The size of the chunk. */
+ uint32_t cbAllocated;
+ /** The current offset into the chunk.
+ * We start by putting the error ID immediately
+ * after the end of the buffer. */
+ uint32_t off;
+ /** Offset from the start of this structure to the error ID. */
+ uint32_t offErrorId;
+ /** Offset from the start of this structure to the formatted message text. */
+ uint32_t offMessage;
+ /** Error flags. */
+ uint32_t fFlags;
+} VMRUNTIMEERROR, *PVMRUNTIMEERROR;
+
+/** The halt method. */
+typedef enum
+{
+ /** The usual invalid value. */
+ VMHALTMETHOD_INVALID = 0,
+ /** Use the method used during bootstrapping. */
+ VMHALTMETHOD_BOOTSTRAP,
+ /** Use the default method. */
+ VMHALTMETHOD_DEFAULT,
+ /** The old spin/yield/block method. */
+ VMHALTMETHOD_OLD,
+ /** The first go at a block/spin method. */
+ VMHALTMETHOD_1,
+ /** The first go at a more global approach. */
+ VMHALTMETHOD_GLOBAL_1,
+ /** The end of valid methods. (not inclusive of course) */
+ VMHALTMETHOD_END,
+ /** The usual 32-bit max value. */
+ VMHALTMETHOD_32BIT_HACK = 0x7fffffff
+} VMHALTMETHOD;
+
+
+/**
+ * VM Internal Data (part of the VM structure).
+ *
+ * @todo Move this and all related things to VMM. The VM component was, to some
+ * extent at least, a bad ad hoc design which should all have been put in
+ * VMM. @see pg_vm.
+ */
+typedef struct VMINT
+{
+ /** VM Error Message. */
+ R3PTRTYPE(PVMERROR) pErrorR3;
+ /** VM Runtime Error Message. */
+ R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
+ /** The VM was/is-being teleported and has not yet been fully resumed. */
+ bool fTeleportedAndNotFullyResumedYet;
+ /** The VM should power off instead of reset. */
+ bool fPowerOffInsteadOfReset;
+ /** Reset counter (soft + hard). */
+ uint32_t cResets;
+ /** Hard reset counter. */
+ uint32_t cHardResets;
+ /** Soft reset counter. */
+ uint32_t cSoftResets;
+} VMINT;
+/** Pointer to the VM Internal Data (part of the VM structure). */
+typedef VMINT *PVMINT;
+
+
+#ifdef IN_RING3
+
+/**
+ * VM internal data kept in the UVM.
+ */
+typedef struct VMINTUSERPERVM
+{
+ /** Head of the standard request queue. Atomic. */
+ volatile PVMREQ pNormalReqs;
+ /** Head of the priority request queue. Atomic. */
+ volatile PVMREQ pPriorityReqs;
+ /** The last index used during alloc/free. */
+ volatile uint32_t iReqFree;
+ /** Number of free request packets. */
+ volatile uint32_t cReqFree;
+ /** Array of pointers to lists of free request packets. Atomic. */
+ volatile PVMREQ apReqFree[16 - (HC_ARCH_BITS == 32 ? 5 : 4)];
+
+ /** The reference count of the UVM handle. */
+ volatile uint32_t cUvmRefs;
+
+ /** Number of active EMTs. */
+ volatile uint32_t cActiveEmts;
+
+# ifdef VBOX_WITH_STATISTICS
+# if HC_ARCH_BITS == 32
+ uint32_t uPadding;
+# endif
+ /** Number of VMR3ReqAlloc returning a new packet. */
+ STAMCOUNTER StatReqAllocNew;
+ /** Number of VMR3ReqAlloc causing races. */
+ STAMCOUNTER StatReqAllocRaces;
+ /** Number of VMR3ReqAlloc returning a recycled packet. */
+ STAMCOUNTER StatReqAllocRecycled;
+ /** Number of VMR3ReqFree calls. */
+ STAMCOUNTER StatReqFree;
+ /** Number of times the free lists were full and the request was actually freed. */
+ STAMCOUNTER StatReqFreeOverflow;
+ /** Number of requests served. */
+ STAMCOUNTER StatReqProcessed;
+ /** Number of times there was more than one request and the others had to be
+ * pushed back onto the list. */
+ STAMCOUNTER StatReqMoreThan1;
+ /** Number of times we've raced someone when pushing the other requests back
+ * onto the list. */
+ STAMCOUNTER StatReqPushBackRaces;
+# endif
+
+ /** Pointer to the support library session.
+ * Mainly for creation and destruction. */
+ PSUPDRVSESSION pSession;
+
+ /** Force EMT to terminate. */
+ bool volatile fTerminateEMT;
+
+ /** Critical section for pAtState and enmPrevVMState. */
+ RTCRITSECT AtStateCritSect;
+ /** List of registered state change callbacks. */
+ PVMATSTATE pAtState;
+ /** Where to insert the next registered state change callback (list tail). */
+ PVMATSTATE *ppAtStateNext;
+ /** The previous VM state.
+ * This is mainly used for the 'Resetting' state, but may come in handy later
+ * and when debugging. */
+ VMSTATE enmPrevVMState;
+
+ /** Reason for the most recent suspend operation. */
+ VMSUSPENDREASON enmSuspendReason;
+ /** Reason for the most recent resume operation. */
+ VMRESUMEREASON enmResumeReason;
+
+ /** Critical section for pAtError and pAtRuntimeError. */
+ RTCRITSECT AtErrorCritSect;
+
+ /** List of registered error callbacks. */
+ PVMATERROR pAtError;
+ /** Where to insert the next registered error callback (list tail). */
+ PVMATERROR *ppAtErrorNext;
+ /** The error message count.
+ * This is incremented every time an error is raised. */
+ uint32_t volatile cErrors;
+
+ /** The runtime error message count.
+ * This is incremented every time a runtime error is raised. */
+ uint32_t volatile cRuntimeErrors;
+ /** List of registered runtime error callbacks. */
+ PVMATRUNTIMEERROR pAtRuntimeError;
+ /** Where to insert the next registered runtime error callback (list tail). */
+ PVMATRUNTIMEERROR *ppAtRuntimeErrorNext;
+
+ /** @name Generic Halt data
+ * @{
+ */
+ /** The current halt method.
+ * Can be selected by CFGM option 'VM/HaltMethod'. */
+ VMHALTMETHOD enmHaltMethod;
+ /** The index into g_aHaltMethods of the current halt method. */
+ uint32_t volatile iHaltMethod;
+ /** @} */
+
+ /** @todo Do NOT add new members here or reuse the current, we need to store the config for
+ * each halt method separately because we're racing on SMP guest rigs. */
+ union
+ {
+ /**
+ * Method 1 & 2 - Block whenever possible, and when lagging behind
+ * switch to spinning with regular blocking every 5-200ms (defaults)
+ * depending on the accumulated lag. The blocking interval is adjusted
+ * with the average oversleeping of the last 64 times.
+ *
+ * The difference between 1 and 2 is that we use native absolute
+ * time APIs for the blocking instead of the millisecond based IPRT
+ * interface.
+ */
+ struct
+ {
+ /** The minimum blocking interval (when spinning). */
+ uint32_t u32MinBlockIntervalCfg;
+ /** The maximum blocking interval (when spinning). */
+ uint32_t u32MaxBlockIntervalCfg;
+ /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
+ uint32_t u32LagBlockIntervalDivisorCfg;
+ /** When to start spinning (lag / nano secs). */
+ uint32_t u32StartSpinningCfg;
+ /** When to stop spinning (lag / nano secs). */
+ uint32_t u32StopSpinningCfg;
+ } Method12;
+
+ /**
+ * The GVMM manages halted and waiting EMTs.
+ */
+ struct
+ {
+ /** The threshold between spinning and blocking. */
+ uint32_t cNsSpinBlockThresholdCfg;
+ } Global1;
+ } Halt;
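+ /* Illustrative only, not the exact halt algorithm: for Method12 the raw
+  * blocking interval is derived from the current lag (cNsLag, a hypothetical
+  * local) and clamped to the configured bounds, roughly:
+  *
+  *     uint64_t cBlockInterval = cNsLag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg;
+  *     if (cBlockInterval < pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg)
+  *         cBlockInterval = pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg;
+  *     else if (cBlockInterval > pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg)
+  *         cBlockInterval = pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg;
+  */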
+
+ /** Pointer to the DBGC instance data. */
+ void *pvDBGC;
+
+ /** TLS index for the VMINTUSERPERVMCPU pointer. */
+ RTTLS idxTLS;
+
+ /** The VM name. (Set after the config constructor has been called.) */
+ char *pszName;
+ /** The VM UUID. (Set after the config constructor has been called.) */
+ RTUUID Uuid;
+} VMINTUSERPERVM;
+# ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(VMINTUSERPERVM, StatReqAllocNew, 8);
+# endif
+
+/** Pointer to the VM internal data kept in the UVM. */
+typedef VMINTUSERPERVM *PVMINTUSERPERVM;
+
+
+/**
+ * VMCPU internal data kept in the UVM.
+ *
+ * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
+ */
+typedef struct VMINTUSERPERVMCPU
+{
+ /** Head of the normal request queue. Atomic. */
+ volatile PVMREQ pNormalReqs;
+ /** Head of the priority request queue. Atomic. */
+ volatile PVMREQ pPriorityReqs;
+
+ /** The handle to the EMT thread. */
+ RTTHREAD ThreadEMT;
+ /** The native thread handle of the EMT thread. */
+ RTNATIVETHREAD NativeThreadEMT;
+ /** Wait event semaphore. */
+ RTSEMEVENT EventSemWait;
+ /** Wait/Idle indicator. */
+ bool volatile fWait;
+ /** Set if we've been thru vmR3Destroy and decremented the active EMT count
+ * already. */
+ bool volatile fBeenThruVmDestroy;
+ /** Align the next bit. */
+ bool afAlignment[HC_ARCH_BITS == 32 ? 2 : 6];
+
+ /** @name Generic Halt data
+ * @{
+ */
+ /** The average time (ns) between two halts in the last second. (updated once per second) */
+ uint32_t HaltInterval;
+ /** The average halt frequency for the last second. (updated once per second) */
+ uint32_t HaltFrequency;
+ /** The number of halts in the current period. */
+ uint32_t cHalts;
+ uint32_t padding; /**< alignment padding. */
+ /** When we started counting halts in cHalts (RTTimeNanoTS). */
+ uint64_t u64HaltsStartTS;
+ /** @} */
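+ /* Illustrative only (hypothetical once-per-second refresh of the statistics
+  * documented above; pUVCpu is the owning UVMCPU):
+  *
+  *     uint64_t const u64Now     = RTTimeNanoTS();
+  *     uint64_t const cNsElapsed = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
+  *     uint32_t const cHalts     = pUVCpu->vm.s.cHalts;
+  *     if (cHalts && cNsElapsed)
+  *     {
+  *         pUVCpu->vm.s.HaltInterval  = (uint32_t)(cNsElapsed / cHalts);
+  *         pUVCpu->vm.s.HaltFrequency = (uint32_t)((uint64_t)cHalts * RT_NS_1SEC_64 / cNsElapsed);
+  *     }
+  *     pUVCpu->vm.s.cHalts          = 0;
+  *     pUVCpu->vm.s.u64HaltsStartTS = u64Now;
+  */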
+
+ /** Union containing data and config for the different halt algorithms. */
+ union
+ {
+ /**
+ * Method 1 & 2 - Block whenever possible, and when lagging behind
+ * switch to spinning with regular blocking every 5-200ms (defaults)
+ * depending on the accumulated lag. The blocking interval is adjusted
+ * with the average oversleeping of the last 64 times.
+ *
+ * The difference between 1 and 2 is that we use native absolute
+ * time APIs for the blocking instead of the millisecond based IPRT
+ * interface.
+ */
+ struct
+ {
+ /** How many times we've blocked while cNSBlocked and cNSBlockedTooLong have been accumulating. */
+ uint32_t cBlocks;
+ /** Align the next member. */
+ uint32_t u32Alignment;
+ /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
+ uint64_t cNSBlockedTooLongAvg;
+ /** Total time spent oversleeping when blocking. */
+ uint64_t cNSBlockedTooLong;
+ /** Total time spent blocking. */
+ uint64_t cNSBlocked;
+ /** The timestamp (RTTimeNanoTS) of the last block. */
+ uint64_t u64LastBlockTS;
+
+ /** When we started spinning relentlessly in order to catch up some of the oversleeping.
+ * This is 0 when we're not spinning. */
+ uint64_t u64StartSpinTS;
+ } Method12;
+
+# if 0
+ /**
+ * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
+ * sprinkle it with yields.
+ */
+ struct
+ {
+ /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
+ uint32_t cBlocks;
+ /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
+ uint64_t cBlockedTooLongNSAvg;
+ /** Total time spent oversleeping when blocking. */
+ uint64_t cBlockedTooLongNS;
+ /** Total time spent blocking. */
+ uint64_t cBlockedNS;
+ /** The timestamp (RTTimeNanoTS) of the last block. */
+ uint64_t u64LastBlockTS;
+
+ /** How many times we've yielded while cYieldedNS and cYieldTooLongNS have been accumulating. */
+ uint32_t cYields;
+ /** Avg. time spent oversleeping when yielding. */
+ uint32_t cYieldTooLongNSAvg;
+ /** Total time spent oversleeping when yielding. */
+ uint64_t cYieldTooLongNS;
+ /** Total time spent yielding. */
+ uint64_t cYieldedNS;
+ /** The timestamp (RTTimeNanoTS) of the last block. */
+ uint64_t u64LastYieldTS;
+
+ /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
+ uint64_t u64StartSpinTS;
+ } Method34;
+# endif
+ } Halt;
+
+ /** Profiling the halted state; yielding vs blocking.
+ * @{ */
+ STAMPROFILE StatHaltYield;
+ STAMPROFILE StatHaltBlock;
+ STAMPROFILE StatHaltBlockOverslept;
+ STAMPROFILE StatHaltBlockInsomnia;
+ STAMPROFILE StatHaltBlockOnTime;
+ STAMPROFILE StatHaltTimers;
+ STAMPROFILE StatHaltPoll;
+ /** @} */
+} VMINTUSERPERVMCPU;
+AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
+AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
+AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);
+
+/** Pointer to the VM internal data kept in the UVM. */
+typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;
+
+#endif /* IN_RING3 */
+
+RT_C_DECLS_BEGIN
+
+DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
+int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
+DECLCALLBACK(int) vmR3Destroy(PVM pVM);
+DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
+void vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
+DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
+DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
+void vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
+void vmR3SetTerminated(PVM pVM);
+
+RT_C_DECLS_END
+
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_VMInternal_h */
+
diff --git a/src/VBox/VMM/include/VMMInternal.h b/src/VBox/VMM/include/VMMInternal.h
new file mode 100644
index 00000000..a7af9e58
--- /dev/null
+++ b/src/VBox/VMM/include/VMMInternal.h
@@ -0,0 +1,754 @@
+/* $Id: VMMInternal.h $ */
+/** @file
+ * VMM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
+#define VMM_INCLUDED_SRC_include_VMMInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+#include <iprt/critsect.h>
+
+#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
+# error "Not in VMM! This is an internal header!"
+#endif
+#if HC_ARCH_BITS == 32
+# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
+#endif
+
+
+
+/** @defgroup grp_vmm_int Internals
+ * @ingroup grp_vmm
+ * @internal
+ * @{
+ */
+
+/** @def VBOX_WITH_RC_RELEASE_LOGGING
+ * Enables RC release logging. */
+#define VBOX_WITH_RC_RELEASE_LOGGING
+
+/** @def VBOX_WITH_R0_LOGGING
+ * Enables Ring-0 logging (non-release).
+ *
+ * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
+ * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
+ * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
+ */
+#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
+# define VBOX_WITH_R0_LOGGING
+#endif
+
+/** @def VBOX_STRICT_VMM_STACK
+ * Enables VMM stack guard pages to catch stack over- and underruns. */
+#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
+# define VBOX_STRICT_VMM_STACK
+#endif
+
+
+/** Number of buffers per logger. */
+#define VMMLOGGER_BUFFER_COUNT 4
+
+/**
+ * R0 logger data (ring-0 only data).
+ */
+typedef struct VMMR0PERVCPULOGGER
+{
+ /** Pointer to the logger instance.
+ * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
+ * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
+ * RTLOGGER::u64UserValue3 is currently also set to the PGVMCPU value. */
+ R0PTRTYPE(PRTLOGGER) pLogger;
+ /** Log buffer descriptor.
+ * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
+ RTLOGBUFFERDESC aBufDescs[VMMLOGGER_BUFFER_COUNT];
+ /** Flag indicating whether we've registered the instance already. */
+ bool fRegistered;
+ /** Set if the EMT is waiting on hEventFlushWait. */
+ bool fEmtWaiting;
+ /** Set while we're inside vmmR0LoggerFlushCommon to prevent recursion. */
+ bool fFlushing;
+ /** Flush to parent VMM's debug log instead of ring-3. */
+ bool fFlushToParentVmmDbg : 1;
+ /** Flush to parent VMM's release log instead of ring-3. */
+ bool fFlushToParentVmmRel : 1;
+ /** Number of buffers currently queued for flushing. */
+ uint32_t volatile cFlushing;
+ /** The event semaphore the EMT waits on while the buffer is being flushed. */
+ RTSEMEVENT hEventFlushWait;
+} VMMR0PERVCPULOGGER;
+/** Pointer to the R0 logger data (ring-0 only). */
+typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
+
+
+/**
+ * R0 logger data shared with ring-3 (per CPU).
+ */
+typedef struct VMMR3CPULOGGER
+{
+ /** Buffer info. */
+ struct
+ {
+ /** Auxiliary buffer descriptor. */
+ RTLOGBUFFERAUXDESC AuxDesc;
+ /** Ring-3 mapping of the logging buffer. */
+ R3PTRTYPE(char *) pchBufR3;
+ } aBufs[VMMLOGGER_BUFFER_COUNT];
+ /** The current buffer. */
+ uint32_t idxBuf;
+ /** Number of buffers currently queued for flushing (copy of
+ * VMMR0PERVCPULOGGER::cFlushing). */
+ uint32_t volatile cFlushing;
+ /** The buffer size. */
+ uint32_t cbBuf;
+ /** Number of bytes dropped because the flush context didn't allow waiting. */
+ uint32_t cbDropped;
+ STAMCOUNTER StatFlushes;
+ STAMCOUNTER StatCannotBlock;
+ STAMPROFILE StatWait;
+ STAMPROFILE StatRaces;
+ STAMCOUNTER StatRacesToR0;
+} VMMR3CPULOGGER;
+/** Pointer to r0 logger data shared with ring-3. */
+typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;
+
+/** @name Logger indexes for VMMR0PERVCPU::u.aLoggers and VMMCPU::u.aLoggers.
+ * @{ */
+#define VMMLOGGER_IDX_REGULAR 0
+#define VMMLOGGER_IDX_RELEASE 1
+#define VMMLOGGER_IDX_MAX 2
+/** @} */
+
+
+/** Pointer to a ring-0 jump buffer. */
+typedef struct VMMR0JMPBUF *PVMMR0JMPBUF;
+/**
+ * Jump buffer for the setjmp/longjmp like constructs used to
+ * quickly 'call' back into Ring-3.
+ */
+typedef struct VMMR0JMPBUF
+{
+ /** Traditional jmp_buf stuff
+ * @{ */
+#if HC_ARCH_BITS == 32
+ uint32_t ebx;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebp;
+ uint32_t esp;
+ uint32_t eip;
+ uint32_t eflags;
+#endif
+#if HC_ARCH_BITS == 64
+ uint64_t rbx;
+# ifdef RT_OS_WINDOWS
+ uint64_t rsi;
+ uint64_t rdi;
+# endif
+ uint64_t rbp;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rsp;
+ uint64_t rip;
+# ifdef RT_OS_WINDOWS
+ uint128_t xmm6;
+ uint128_t xmm7;
+ uint128_t xmm8;
+ uint128_t xmm9;
+ uint128_t xmm10;
+ uint128_t xmm11;
+ uint128_t xmm12;
+ uint128_t xmm13;
+ uint128_t xmm14;
+ uint128_t xmm15;
+# endif
+ uint64_t rflags;
+#endif
+ /** @} */
+
+ /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */
+ RTHCUINTREG UnwindSp;
+ /** RSP/ESP at the time of the long jump call. */
+ RTHCUINTREG UnwindRetSp;
+ /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */
+ RTHCUINTREG UnwindBp;
+ /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */
+ RTHCUINTREG UnwindPc;
+ /** Unwind: The vmmR0CallRing3SetJmp return address value. */
+ RTHCUINTREG UnwindRetPcValue;
+ /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
+ RTHCUINTREG UnwindRetPcLocation;
+
+ /** The function last being executed here. */
+ RTHCUINTREG pfn;
+ /** The first argument to the function. */
+ RTHCUINTREG pvUser1;
+ /** The second argument to the function. */
+ RTHCUINTREG pvUser2;
+
+ /** Number of valid bytes in pvStackBuf. */
+ uint32_t cbStackValid;
+ /** Size of buffer pvStackBuf points to. */
+ uint32_t cbStackBuf;
+ /** Pointer to buffer for mirroring the stack. Optional. */
+ RTR0PTR pvStackBuf;
+ /** Pointer to a ring-3 accessible jump buffer structure for automatic
+ * mirroring on longjmp. Optional. */
+ R0PTRTYPE(PVMMR0JMPBUF) pMirrorBuf;
+} VMMR0JMPBUF;
+
+
+/**
+ * Log flusher job.
+ *
+ * There is a ring buffer of these in ring-0 (VMMR0PERVM::aLogFlushRing) and a
+ * copy of the current one in the shared VM structure (VMM::LogFlusherItem).
+ */
+typedef union VMMLOGFLUSHERENTRY
+{
+ struct
+ {
+ /** The virtual CPU ID. */
+ uint32_t idCpu : 16;
+ /** The logger: 0 for release, 1 for debug. */
+ uint32_t idxLogger : 8;
+ /** The buffer to be flushed. */
+ uint32_t idxBuffer : 7;
+ /** Set by the flusher thread once it fetched the entry and started
+ * processing it. */
+ uint32_t fProcessing : 1;
+ } s;
+ uint32_t u32;
+} VMMLOGFLUSHERENTRY;
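+
+/* Illustrative only: an entry pushed onto the flusher ring is packed from the
+ * bitfields above, e.g. (idxBuf being a hypothetical buffer index):
+ *
+ *     VMMLOGFLUSHERENTRY Entry;
+ *     Entry.s.idCpu       = pVCpu->idCpu;
+ *     Entry.s.idxLogger   = VMMLOGGER_IDX_RELEASE;
+ *     Entry.s.idxBuffer   = idxBuf;
+ *     Entry.s.fProcessing = 0;
+ *     pR0PerVm->LogFlusher.aRing[idxRingTail] = Entry; // pR0PerVm and idxRingTail are hypothetical here
+ */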
+
+
+/**
+ * VMM Data (part of VM)
+ */
+typedef struct VMM
+{
+ /** Whether we should use the periodic preemption timers. */
+ bool fUsePeriodicPreemptionTimers;
+ /** Alignment padding. */
+ bool afPadding0[7];
+
+#if 0 /* pointless when timers don't run on the EMT */
+ /** The EMT yield timer. */
+ TMTIMERHANDLE hYieldTimer;
+ /** The period to the next timeout when suspended or stopped.
+ * This is 0 when running. */
+ uint32_t cYieldResumeMillies;
+ /** The EMT yield timer interval (milliseconds). */
+ uint32_t cYieldEveryMillies;
+ /** The timestamp of the previous yield. (nano) */
+ uint64_t u64LastYield;
+#endif
+
+ /** @name EMT Rendezvous
+ * @{ */
+ /** Semaphore to wait on upon entering ordered execution. */
+ R3PTRTYPE(PRTSEMEVENT) pahEvtRendezvousEnterOrdered;
+ /** Semaphore to wait on upon entering for one-by-one execution. */
+ RTSEMEVENT hEvtRendezvousEnterOneByOne;
+ /** Semaphore to wait on upon entering for all-at-once execution. */
+ RTSEMEVENTMULTI hEvtMulRendezvousEnterAllAtOnce;
+ /** Semaphore to wait on when done. */
+ RTSEMEVENTMULTI hEvtMulRendezvousDone;
+ /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
+ RTSEMEVENT hEvtRendezvousDoneCaller;
+ /** Semaphore to wait on upon recursing. */
+ RTSEMEVENTMULTI hEvtMulRendezvousRecursionPush;
+ /** Semaphore to wait on after done with recursion (caller restoring state). */
+ RTSEMEVENTMULTI hEvtMulRendezvousRecursionPop;
+ /** Semaphore the initiator waits on while the EMTs are getting into position
+ * on hEvtMulRendezvousRecursionPush. */
+ RTSEMEVENT hEvtRendezvousRecursionPushCaller;
+ /** Semaphore the initiator waits on while the EMTs sitting on
+ * hEvtMulRendezvousRecursionPop wake up and leave. */
+ RTSEMEVENT hEvtRendezvousRecursionPopCaller;
+ /** Callback. */
+ R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
+ /** The user argument for the callback. */
+ RTR3PTR volatile pvRendezvousUser;
+ /** Flags. */
+ volatile uint32_t fRendezvousFlags;
+ /** The number of EMTs that have entered. */
+ volatile uint32_t cRendezvousEmtsEntered;
+ /** The number of EMTs that have done their job. */
+ volatile uint32_t cRendezvousEmtsDone;
+ /** The number of EMTs that have returned. */
+ volatile uint32_t cRendezvousEmtsReturned;
+ /** The status code. */
+ volatile int32_t i32RendezvousStatus;
+ /** Spin lock. */
+ volatile uint32_t u32RendezvousLock;
+ /** The recursion depth. */
+ volatile uint32_t cRendezvousRecursions;
+ /** The number of EMTs that have entered the recursion routine. */
+ volatile uint32_t cRendezvousEmtsRecursingPush;
+ /** The number of EMTs that have left the recursion routine. */
+ volatile uint32_t cRendezvousEmtsRecursingPop;
+ /** Triggers rendezvous recursion in the other threads. */
+ volatile bool fRendezvousRecursion;
+
+ /** @} */
+
+ /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
+ * release logging purposes. */
+ bool fIsPreemptPendingApiTrusty : 1;
+ /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
+ * release logging purposes. */
+ bool fIsPreemptPossible : 1;
+ /** Set if ring-0 uses context hooks. */
+ bool fIsUsingContextHooks : 1;
+
+ bool afAlignment2[2]; /**< Alignment padding. */
+
+ /** Buffer for storing the standard assertion message for a ring-0 assertion.
+ * Used for saving the assertion message text for the release log and guru
+ * meditation dump. */
+ char szRing0AssertMsg1[512];
+ /** Buffer for storing the custom message for a ring-0 assertion. */
+ char szRing0AssertMsg2[256];
+
+ /** @name Logging
+ * @{ */
+ /** Used when setting up ring-0 logger. */
+ uint64_t nsProgramStart;
+ /** Log flusher thread. */
+ RTTHREAD hLogFlusherThread;
+ /** Copy of the current work log flusher work item. */
+ VMMLOGFLUSHERENTRY volatile LogFlusherItem;
+ STAMCOUNTER StatLogFlusherFlushes;
+ STAMCOUNTER StatLogFlusherNoWakeUp;
+ /** @} */
+
+ /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
+ STAMCOUNTER StatRunGC;
+
+ /** Statistics for each of the RC/R0 return codes.
+ * @{ */
+ STAMCOUNTER StatRZRetNormal;
+ STAMCOUNTER StatRZRetInterrupt;
+ STAMCOUNTER StatRZRetInterruptHyper;
+ STAMCOUNTER StatRZRetGuestTrap;
+ STAMCOUNTER StatRZRetRingSwitch;
+ STAMCOUNTER StatRZRetRingSwitchInt;
+ STAMCOUNTER StatRZRetStaleSelector;
+ STAMCOUNTER StatRZRetIRETTrap;
+ STAMCOUNTER StatRZRetEmulate;
+ STAMCOUNTER StatRZRetPatchEmulate;
+ STAMCOUNTER StatRZRetIORead;
+ STAMCOUNTER StatRZRetIOWrite;
+ STAMCOUNTER StatRZRetIOCommitWrite;
+ STAMCOUNTER StatRZRetMMIORead;
+ STAMCOUNTER StatRZRetMMIOWrite;
+ STAMCOUNTER StatRZRetMMIOCommitWrite;
+ STAMCOUNTER StatRZRetMMIOPatchRead;
+ STAMCOUNTER StatRZRetMMIOPatchWrite;
+ STAMCOUNTER StatRZRetMMIOReadWrite;
+ STAMCOUNTER StatRZRetMSRRead;
+ STAMCOUNTER StatRZRetMSRWrite;
+ STAMCOUNTER StatRZRetLDTFault;
+ STAMCOUNTER StatRZRetGDTFault;
+ STAMCOUNTER StatRZRetIDTFault;
+ STAMCOUNTER StatRZRetTSSFault;
+ STAMCOUNTER StatRZRetCSAMTask;
+ STAMCOUNTER StatRZRetSyncCR3;
+ STAMCOUNTER StatRZRetMisc;
+ STAMCOUNTER StatRZRetPatchInt3;
+ STAMCOUNTER StatRZRetPatchPF;
+ STAMCOUNTER StatRZRetPatchGP;
+ STAMCOUNTER StatRZRetPatchIretIRQ;
+ STAMCOUNTER StatRZRetRescheduleREM;
+ STAMCOUNTER StatRZRetToR3Total;
+ STAMCOUNTER StatRZRetToR3FF;
+ STAMCOUNTER StatRZRetToR3Unknown;
+ STAMCOUNTER StatRZRetToR3TMVirt;
+ STAMCOUNTER StatRZRetToR3HandyPages;
+ STAMCOUNTER StatRZRetToR3PDMQueues;
+ STAMCOUNTER StatRZRetToR3Rendezvous;
+ STAMCOUNTER StatRZRetToR3Timer;
+ STAMCOUNTER StatRZRetToR3DMA;
+ STAMCOUNTER StatRZRetToR3CritSect;
+ STAMCOUNTER StatRZRetToR3Iem;
+ STAMCOUNTER StatRZRetToR3Iom;
+ STAMCOUNTER StatRZRetTimerPending;
+ STAMCOUNTER StatRZRetInterruptPending;
+ STAMCOUNTER StatRZRetPATMDuplicateFn;
+ STAMCOUNTER StatRZRetPendingRequest;
+ STAMCOUNTER StatRZRetPGMFlushPending;
+ STAMCOUNTER StatRZRetPatchTPR;
+ /** @} */
+} VMM;
+/** Pointer to VMM. */
+typedef VMM *PVMM;
+
+
+/**
+ * VMMCPU Data (part of VMCPU)
+ */
+typedef struct VMMCPU
+{
+ /** The last RC/R0 return code. */
+ int32_t iLastGZRc;
+ /** Alignment padding. */
+ uint32_t u32Padding0;
+
+ /** @name Rendezvous
+ * @{ */
+ /** Whether the EMT is executing a rendezvous right now. For detecting
+ * attempts at recursive rendezvous. */
+ bool volatile fInRendezvous;
+ bool afPadding1[2];
+ /** @} */
+
+ /** Whether we can HLT in VMMR0 rather than having to return to EM.
+ * Updated by vmR3SetHaltMethodU(). */
+ bool fMayHaltInRing0;
+ /** The minimum delta we can HLT in ring-0 for.
+ * The deadlines we can calculate come from TM, so if the deadline is too close
+ * we should just return to ring-3 and run the timer wheel; there is no point
+ * in spinning in ring-0.
+ * Updated by vmR3SetHaltMethodU(). */
+ uint32_t cNsSpinBlockThreshold;
+ /** Number of ring-0 halts (used for depreciating the following values). */
+ uint32_t cR0Halts;
+ /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
+ uint32_t cR0HaltsSucceeded;
+ /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
+ uint32_t cR0HaltsToRing3;
+ /** Padding */
+ uint32_t u32Padding2;
+
+ /** @name Raw-mode context tracing data.
+ * @{ */
+ SUPDRVTRACERUSRCTX TracerCtx;
+ /** @} */
+
+ /** @name Ring-0 assertion info for this EMT.
+ * @{ */
+ /** Copy of the ring-0 jmp buffer after an assertion. */
+ VMMR0JMPBUF AssertJmpBuf;
+ /** Copy of the assertion stack. */
+ uint8_t abAssertStack[8192];
+ /** @} */
+
+ /**
+ * Loggers.
+ */
+ union
+ {
+ struct
+ {
+ /** The R0 logger data shared with ring-3. */
+ VMMR3CPULOGGER Logger;
+ /** The R0 release logger data shared with ring-3. */
+ VMMR3CPULOGGER RelLogger;
+ } s;
+ /** Array view. */
+ VMMR3CPULOGGER aLoggers[VMMLOGGER_IDX_MAX];
+ } u;
+
+ STAMPROFILE StatR0HaltBlock;
+ STAMPROFILE StatR0HaltBlockOnTime;
+ STAMPROFILE StatR0HaltBlockOverslept;
+ STAMPROFILE StatR0HaltBlockInsomnia;
+ STAMCOUNTER StatR0HaltExec;
+ STAMCOUNTER StatR0HaltExecFromBlock;
+ STAMCOUNTER StatR0HaltExecFromSpin;
+ STAMCOUNTER StatR0HaltToR3;
+ STAMCOUNTER StatR0HaltToR3FromSpin;
+ STAMCOUNTER StatR0HaltToR3Other;
+ STAMCOUNTER StatR0HaltToR3PendingFF;
+ STAMCOUNTER StatR0HaltToR3SmallDelta;
+ STAMCOUNTER StatR0HaltToR3PostNoInt;
+ STAMCOUNTER StatR0HaltToR3PostPendingFF;
+} VMMCPU;
+AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
+AssertCompile( RTASSERT_OFFSET_OF(VMMCPU, u.s.Logger)
+ == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_REGULAR);
+AssertCompile(RTASSERT_OFFSET_OF(VMMCPU, u.s.RelLogger)
+ == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_RELEASE);
+
+/** Pointer to VMMCPU. */
+typedef VMMCPU *PVMMCPU;
+
+/**
+ * VMM per-VCpu ring-0 only instance data.
+ */
+typedef struct VMMR0PERVCPU
+{
+ /** The EMT hash table index. */
+ uint16_t idxEmtHash;
+ /** Flag indicating whether we've disabled flushing (world switch) or not. */
+ bool fLogFlushingDisabled;
+ bool afPadding1[5];
+ /** Pointer to the VMMR0EntryFast preemption state structure.
+ * This is used to temporarily restore preemption before blocking. */
+ R0PTRTYPE(PRTTHREADPREEMPTSTATE) pPreemptState;
+ /** Thread context switching hook (ring-0). */
+ RTTHREADCTXHOOK hCtxHook;
+
+ /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
+ * @note Cannot be put on the stack as the location may change and upset the
+ * validation of resume-after-ring-3-call logic.
+ * @todo This no longer needs to be here now that we don't call ring-3 and mess
+ * around with stack restoring/switching.
+ * @{ */
+ PGVM pGVM;
+ VMCPUID idCpu;
+ VMMR0OPERATION enmOperation;
+ PSUPVMMR0REQHDR pReq;
+ uint64_t u64Arg;
+ PSUPDRVSESSION pSession;
+ /** @} */
+
+ /** @name Ring-0 setjmp / assertion handling.
+ * @{ */
+ /** The ring-0 setjmp buffer. */
+ VMMR0JMPBUF AssertJmpBuf;
+ /** The disable counter. */
+ uint32_t cCallRing3Disabled;
+ uint32_t u32Padding3;
+ /** Ring-0 assertion notification callback. */
+ R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback;
+ /** Argument for pfnAssertCallback. */
+ R0PTRTYPE(void *) pvAssertCallbackUser;
+ /** @} */
+
+ /**
+ * Loggers
+ */
+ union
+ {
+ struct
+ {
+ /** The R0 logger data. */
+ VMMR0PERVCPULOGGER Logger;
+ /** The R0 release logger data. */
+ VMMR0PERVCPULOGGER RelLogger;
+ } s;
+ /** Array view. */
+ VMMR0PERVCPULOGGER aLoggers[VMMLOGGER_IDX_MAX];
+ } u;
+} VMMR0PERVCPU;
+AssertCompile( RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.Logger)
+ == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_REGULAR);
+AssertCompile(RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger)
+ == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE);
+AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);
+/** Pointer to VMM ring-0 VMCPU instance data. */
+typedef VMMR0PERVCPU *PVMMR0PERVCPU;
+
+/** @name RTLOGGER::u32UserValue1 Flags
+ * @{ */
+/** The magic value. */
+#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE UINT32_C(0x7d297f05)
+/** Part of the flags value used for the magic. */
+#define VMMR0_LOGGER_FLAGS_MAGIC_MASK UINT32_C(0xffffff0f)
+/** @} */
+
+
+/**
+ * VMM data kept in the ring-0 GVM.
+ */
+typedef struct VMMR0PERVM
+{
+ /** Set if vmmR0InitVM has been called. */
+ bool fCalledInitVm;
+ bool afPadding1[7];
+
+ /** @name Logging
+ * @{ */
+ /** Logger (debug) buffer allocation.
+ * This covers all CPUs. */
+ RTR0MEMOBJ hMemObjLogger;
+ /** The ring-3 mapping object for hMemObjLogger. */
+ RTR0MEMOBJ hMapObjLogger;
+
+ /** Release logger buffer allocation.
+ * This covers all CPUs. */
+ RTR0MEMOBJ hMemObjReleaseLogger;
+ /** The ring-3 mapping object for hMemObjReleaseLogger. */
+ RTR0MEMOBJ hMapObjReleaseLogger;
+
+ struct
+ {
+ /** Spinlock protecting the logger ring buffer and associated variables. */
+ R0PTRTYPE(RTSPINLOCK) hSpinlock;
+ /** The log flusher thread handle to make sure there is only one. */
+ RTNATIVETHREAD hThread;
+ /** The handle to the event semaphore the log flusher waits on. */
+ RTSEMEVENT hEvent;
+ /** The index of the log flusher queue head (flusher thread side). */
+ uint32_t volatile idxRingHead;
+ /** The index of the log flusher queue tail (EMT side). */
+ uint32_t volatile idxRingTail;
+ /** Set if the log flusher thread is waiting for work and needs poking. */
+ bool volatile fThreadWaiting;
+ /** Set when the log flusher thread should shut down. */
+ bool volatile fThreadShutdown;
+ /** Indicates that the log flusher thread is running. */
+ bool volatile fThreadRunning;
+ bool afPadding2[5];
+ STAMCOUNTER StatFlushes;
+ STAMCOUNTER StatNoWakeUp;
+ /** Logger ring buffer.
+ * This is for communicating with the log flusher thread. */
+ VMMLOGFLUSHERENTRY aRing[VMM_MAX_CPU_COUNT * 2 /*loggers*/ * 1 /*buffer*/ + 16 /*fudge*/];
+ } LogFlusher;
+ /** @} */
+} VMMR0PERVM;
+
+RT_C_DECLS_BEGIN
+
+int vmmInitFormatTypes(void);
+void vmmTermFormatTypes(void);
+uint32_t vmmGetBuildType(void);
+
+#ifdef IN_RING3
+int vmmR3SwitcherInit(PVM pVM);
+void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
+#endif /* IN_RING3 */
+
+#ifdef IN_RING0
+
+/**
+ * World switcher assembly routine.
+ * It will call VMMRCEntry().
+ *
+ * @returns return code from VMMRCEntry().
+ * @param pVM The cross context VM structure.
+ * @param uArg See VMMRCEntry().
+ * @internal
+ */
+DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);
+
+/**
+ * Callback function for vmmR0CallRing3SetJmp.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
+/** Pointer to FNVMMR0SETJMP(). */
+typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;
+
+/**
+ * The setjmp variant used for calling Ring-3.
+ *
+ * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
+ * in the middle of a ring-3 call. Another difference is the function pointer and
+ * argument. This has to do with resuming code and the stack frame of the caller.
+ *
+ * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
+ * @param pJmpBuf The jmp_buf to set.
+ * @param pfn The function to be called when not resuming.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
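+
+/*
+ * Illustrative use only (roughly how a ring-0 entry point could guard a worker;
+ * pfnWorker is a hypothetical FNVMMR0SETJMP implementation and pGVCpu->vmmr0.s
+ * is the per-VCPU ring-0 data declared above):
+ *
+ *     int rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, pfnWorker, pVM, pVCpu);
+ *     // rc is pfnWorker's return value, or whatever was handed to vmmR0CallRing3LongJmp().
+ */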
+
+
+/**
+ * Callback function for vmmR0CallRing3SetJmp2.
+ *
+ * @returns VBox status code.
+ * @param pGVM The ring-0 VM structure.
+ * @param idCpu The ID of the calling EMT.
+ */
+typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
+/** Pointer to FNVMMR0SETJMP2(). */
+typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;
+
+/**
+ * Same as vmmR0CallRing3SetJmp except for the function signature.
+ *
+ * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
+ * @param pJmpBuf The jmp_buf to set.
+ * @param pfn The function to be called when not resuming.
+ * @param pGVM The ring-0 VM structure.
+ * @param idCpu The ID of the calling EMT.
+ */
+DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);
+
+
+/**
+ * Callback function for vmmR0CallRing3SetJmpEx.
+ *
+ * @returns VBox status code.
+ * @param pvUser The user argument.
+ */
+typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
+/** Pointer to FNVMMR0SETJMPEX(). */
+typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;
+
+/**
+ * Same as vmmR0CallRing3SetJmp except for the function signature.
+ *
+ * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
+ * @param pJmpBuf The jmp_buf to set.
+ * @param pfn The function to be called when not resuming.
+ * @param pvUser The argument of that function.
+ * @param uCallKey Currently unused call parameter intended to help
+ * uniquely identify the call.
+ */
+DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);
+
+
+/**
+ * Worker for VMMRZCallRing3.
+ * This will save the stack and registers.
+ *
+ * @returns rc.
+ * @param pJmpBuf Pointer to the jump buffer.
+ * @param rc The return code.
+ */
+DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
+
+# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
+int vmmR0TripleFaultHackInit(void);
+void vmmR0TripleFaultHackTerm(void);
+# endif
+
+#endif /* IN_RING0 */
+
+RT_C_DECLS_END
+
+/** @} */
+
+#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
diff --git a/src/VBox/VMM/include/VMMInternal.mac b/src/VBox/VMM/include/VMMInternal.mac
new file mode 100644
index 00000000..09b7ab39
--- /dev/null
+++ b/src/VBox/VMM/include/VMMInternal.mac
@@ -0,0 +1,119 @@
+; $Id: VMMInternal.mac $
+;; @file
+; VMM - Internal header file.
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+%include "VBox/asmdefs.mac"
+%include "VBox/sup.mac"
+
+
+struc VMMR0JMPBUF
+ ;
+ ; traditional jmp_buf
+ ;
+%ifdef RT_ARCH_X86
+ .ebx resd 1
+ .esi resd 1
+ .edi resd 1
+ .ebp resd 1
+ .esp resd 1
+ .eip resd 1
+ .eflags resd 1
+%endif
+%ifdef RT_ARCH_AMD64
+ .rbx resq 1
+ %ifdef RT_OS_WINDOWS
+ .rsi resq 1
+ .rdi resq 1
+ %endif
+ .rbp resq 1
+ .r12 resq 1
+ .r13 resq 1
+ .r14 resq 1
+ .r15 resq 1
+ .rsp resq 1
+ .rip resq 1
+ %ifdef RT_OS_WINDOWS
+ .xmm6 resq 2
+ .xmm7 resq 2
+ .xmm8 resq 2
+ .xmm9 resq 2
+ .xmm10 resq 2
+ .xmm11 resq 2
+ .xmm12 resq 2
+ .xmm13 resq 2
+ .xmm14 resq 2
+ .xmm15 resq 2
+ %endif
+ .rflags resq 1
+%endif
+
+ ;
+ ; Additional state and stack info for unwinding.
+ ;
+ .UnwindSp RTR0PTR_RES 1
+ .UnwindRetSp RTR0PTR_RES 1
+ .UnwindBp RTR0PTR_RES 1
+ .UnwindPc RTR0PTR_RES 1
+ .UnwindRetPcValue RTR0PTR_RES 1
+ .UnwindRetPcLocation RTR0PTR_RES 1
+
+ ;
+ ; Info about what we were doing in case it's helpful.
+ ;
+ .pfn RTR0PTR_RES 1
+ .pvUser1 RTR0PTR_RES 1
+ .pvUser2 RTR0PTR_RES 1
+
+ ;
+ ; For mirroring the jump buffer and stack to ring-3 for unwinding and analysis.
+ ;
+ .cbStackValid resd 1
+ .cbStackBuf resd 1
+ .pvStackBuf RTR0PTR_RES 1
+ .pMirrorBuf RTR0PTR_RES 1
+endstruc
+
+
+struc VMMCPU
+
+ .iLastGZRc resd 1
+ alignb 8
+
+ .fInRendezvous resb 1
+ .afPadding1 resb 2
+ .fMayHaltInRing0 resb 1
+ .cNsSpinBlockThreshold resd 1
+ .cR0Halts resd 1
+ .cR0HaltsSucceeded resd 1
+ .cR0HaltsToRing3 resd 1
+
+ alignb 8
+ .TracerCtx resb SUPDRVTRACERUSRCTX64_size
+
+ alignb 8
+ .AssertJmpBuf resb 1
+endstruc
+
diff --git a/src/VBox/VMM/include/VMMTracing.h b/src/VBox/VMM/include/VMMTracing.h
new file mode 100644
index 00000000..67cb333e
--- /dev/null
+++ b/src/VBox/VMM/include/VMMTracing.h
@@ -0,0 +1,136 @@
+/* $Id: VMMTracing.h $ */
+/** @file
+ * VBoxVMM - Trace point macros for the VMM.
+ */
+
+/*
+ * Copyright (C) 2012-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_VMMTracing_h
+#define VMM_INCLUDED_SRC_include_VMMTracing_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#ifdef DOXYGEN_RUNNING
+# undef VBOX_WITH_DTRACE
+# undef VBOX_WITH_DTRACE_R3
+# undef VBOX_WITH_DTRACE_R0
+# undef VBOX_WITH_DTRACE_RC
+# define DBGFTRACE_ENABLED
+#endif
+#include <VBox/vmm/dbgftrace.h>
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+/** Gets the trace buffer handle from a VMCPU pointer. */
+#define VMCPU_TO_HTB(a_pVCpu) ((a_pVCpu)->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf))
+
+/** Gets the trace buffer handle from a VM pointer. */
+#define VM_TO_HTB(a_pVM) ((a_pVM)->CTX_SUFF(hTraceBuf))
+
+/** Macro wrapper for trace points that are disabled by default. */
+#define TP_COND_VMCPU(a_pVCpu, a_GrpSuff, a_TraceStmt) \
+ do { \
+ if (RT_UNLIKELY( (a_pVCpu)->fTraceGroups & VMMTPGROUP_##a_GrpSuff )) \
+ { \
+ RTTRACEBUF const hTB = (a_pVCpu)->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf); \
+ a_TraceStmt; \
+ } \
+ } while (0)
+
+/** @name VMM Trace Point Groups.
+ * @{ */
+#define VMMTPGROUP_EM RT_BIT(0)
+#define VMMTPGROUP_HM RT_BIT(1)
+#define VMMTPGROUP_TM RT_BIT(2)
+/** @} */
+
+
+
+/** @name Ring-3 trace points.
+ * @{
+ */
+#ifdef IN_RING3
+# ifdef VBOX_WITH_DTRACE_R3
+# include "dtrace/VBoxVMM.h"
+
+# elif defined(DBGFTRACE_ENABLED)
+# define VBOXVMM_EM_STATE_CHANGED(a_pVCpu, a_enmOldState, a_enmNewState, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-state-changed %d -> %d (rc=%d)", a_enmOldState, a_enmNewState, a_rc))
+# define VBOXVMM_EM_STATE_UNCHANGED(a_pVCpu, a_enmState, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-state-unchanged %d (rc=%d)", a_enmState, a_rc))
+# define VBOXVMM_EM_RAW_RUN_PRE(a_pVCpu, a_pCtx) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-raw-pre %04x:%08llx", (a_pCtx)->cs, (a_pCtx)->rip))
+# define VBOXVMM_EM_RAW_RUN_RET(a_pVCpu, a_pCtx, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-raw-ret %04x:%08llx rc=%d", (a_pCtx)->cs, (a_pCtx)->rip, (a_rc)))
+# define VBOXVMM_EM_FF_HIGH(a_pVCpu, a_fGlobal, a_fLocal, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-ff-high vm=%#x cpu=%#x rc=%d", (a_fGlobal), (a_fLocal), (a_rc)))
+# define VBOXVMM_EM_FF_ALL(a_pVCpu, a_fGlobal, a_fLocal, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-ff-all vm=%#x cpu=%#x rc=%d", (a_fGlobal), (a_fLocal), (a_rc)))
+# define VBOXVMM_EM_FF_ALL_RET(a_pVCpu, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-ff-all-ret %d", (a_rc)))
+# define VBOXVMM_EM_FF_RAW(a_pVCpu, a_fGlobal, a_fLocal) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-ff-raw vm=%#x cpu=%#x", (a_fGlobal), (a_fLocal)))
+# define VBOXVMM_EM_FF_RAW_RET(a_pVCpu, a_rc) \
+ TP_COND_VMCPU(a_pVCpu, EM, RTTraceBufAddMsgF(hTB, "em-ff-raw-ret %d", (a_rc)))
+
+# else
+# define VBOXVMM_EM_STATE_CHANGED(a_pVCpu, a_enmOldState, a_enmNewState, a_rc) do { } while (0)
+# define VBOXVMM_EM_STATE_UNCHANGED(a_pVCpu, a_enmState, a_rc) do { } while (0)
+# define VBOXVMM_EM_RAW_RUN_PRE(a_pVCpu, a_pCtx) do { } while (0)
+# define VBOXVMM_EM_RAW_RUN_RET(a_pVCpu, a_pCtx, a_rc) do { } while (0)
+# define VBOXVMM_EM_FF_HIGH(a_pVCpu, a_fGlobal, a_fLocal, a_rc) do { } while (0)
+# define VBOXVMM_EM_FF_ALL(a_pVCpu, a_fGlobal, a_fLocal, a_rc) do { } while (0)
+# define VBOXVMM_EM_FF_ALL_RET(a_pVCpu, a_rc) do { } while (0)
+# define VBOXVMM_EM_FF_RAW(a_pVCpu, a_fGlobal, a_fLocal) do { } while (0)
+# define VBOXVMM_EM_FF_RAW_RET(a_pVCpu, a_rc) do { } while (0)
+
+# endif
+#endif /* IN_RING3 */
+/** @} */
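Call sites use these trace points unconditionally; the build configuration decides whether an invocation becomes a DTrace probe, a trace-buffer write gated by the VCPU's trace-group mask, or an empty statement. A small illustrative caller, with the wrapper function and its parameters made up for the example:

    /* Illustrative only -- the VBOXVMM_EM_STATE_CHANGED() line is the real macro. */
    static void demoNoteEmStateChange(PVMCPU pVCpu, int enmOldState, int enmNewState, int rc)
    {
        /* Expands to a DTrace probe, an RTTraceBufAddMsgF() call guarded by
           VMMTPGROUP_EM, or nothing at all, depending on VBOX_WITH_DTRACE_R3
           and DBGFTRACE_ENABLED. */
        VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
    }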
+
+
+/** @name Ring-0 trace points.
+ * @{
+ */
+#ifdef IN_RING0
+# ifdef VBOX_WITH_DTRACE_R0
+# include "VBoxVMMR0-dtrace.h"
+
+# elif defined(DBGFTRACE_ENABLED)
+
+# else
+
+# endif
+#endif /* IN_RING0 */
+/** @} */
+
+
+#endif /* !VMM_INCLUDED_SRC_include_VMMTracing_h */
+
diff --git a/src/VBox/VMM/include/VMXInternal.h b/src/VBox/VMM/include/VMXInternal.h
new file mode 100644
index 00000000..03c85634
--- /dev/null
+++ b/src/VBox/VMM/include/VMXInternal.h
@@ -0,0 +1,335 @@
+/* $Id: VMXInternal.h $ */
+/** @file
+ * VMX - Internal header file for the VMX code template.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VMM_INCLUDED_SRC_include_VMXInternal_h
+#define VMM_INCLUDED_SRC_include_VMXInternal_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "HMVMXCommon.h"
+
+#if HC_ARCH_BITS == 32
+# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
+#endif
+
+/** @def HM_PROFILE_EXIT_DISPATCH
+ * Enables profiling of the VM exit handler dispatching. */
+#if 0 || defined(DOXYGEN_RUNNING)
+# define HM_PROFILE_EXIT_DISPATCH
+#endif
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_hm_int Internal
+ * @ingroup grp_hm
+ * @internal
+ * @{
+ */
+
+/** @addtogroup grp_hm_int_vmx VMX Internal
+ * @{ */
+/**
+ * VMX per-VCPU transient state.
+ *
+ * A state structure for holding miscellaneous information saved across
+ * VMX non-root operation and restored after the transition.
+ *
+ * Note: The members are ordered and aligned such that the most
+ * frequently used ones (in the guest execution loop) fall within
+ * the first cache line.
+ */
+typedef struct VMXTRANSIENT
+{
+ /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
+ uint32_t fVmcsFieldsRead;
+ /** The guest's TPR value used for TPR shadowing. */
+ uint8_t u8GuestTpr;
+ uint8_t abAlignment0[3];
+
+ /** Whether the VM-exit was caused by a page-fault during delivery of an
+ * external interrupt or NMI. */
+ bool fVectoringPF;
+ /** Whether the VM-exit was caused by a page-fault during delivery of a
+ * contributory exception or a page-fault. */
+ bool fVectoringDoublePF;
+ /** Whether the VM-entry failed or not. */
+ bool fVMEntryFailed;
+ /** Whether the TSC_AUX MSR needs to be removed from the auto-load/store MSR
+ * area after VM-exit. */
+ bool fRemoveTscAuxMsr;
+    /** Whether TSC-offsetting and the VMX-preemption timer were updated before VM-entry. */
+ bool fUpdatedTscOffsettingAndPreemptTimer;
+ /** Whether we are currently executing a nested-guest. */
+ bool fIsNestedGuest;
+ /** Whether the guest debug state was active at the time of VM-exit. */
+ bool fWasGuestDebugStateActive;
+ /** Whether the hyper debug state was active at the time of VM-exit. */
+ bool fWasHyperDebugStateActive;
+
+ /** The basic VM-exit reason. */
+ uint32_t uExitReason;
+ /** The VM-exit interruption error code. */
+ uint32_t uExitIntErrorCode;
+
+ /** The host's rflags/eflags. */
+ RTCCUINTREG fEFlags;
+
+    /** The VM-exit qualification. */
+ uint64_t uExitQual;
+
+ /** The VMCS info. object. */
+ PVMXVMCSINFO pVmcsInfo;
+
+ /** The VM-exit interruption-information field. */
+ uint32_t uExitIntInfo;
+ /** The VM-exit instruction-length field. */
+ uint32_t cbExitInstr;
+
+ /** The VM-exit instruction-information field. */
+ VMXEXITINSTRINFO ExitInstrInfo;
+ /** IDT-vectoring information field. */
+ uint32_t uIdtVectoringInfo;
+
+ /** IDT-vectoring error code. */
+ uint32_t uIdtVectoringErrorCode;
+ uint32_t u32Alignment0;
+
+ /** The Guest-linear address. */
+ uint64_t uGuestLinearAddr;
+
+ /** The Guest-physical address. */
+ uint64_t uGuestPhysicalAddr;
+
+ /** The Guest pending-debug exceptions. */
+ uint64_t uGuestPendingDbgXcpts;
+
+ /** The VM-entry interruption-information field. */
+ uint32_t uEntryIntInfo;
+ /** The VM-entry exception error code field. */
+ uint32_t uEntryXcptErrorCode;
+
+ /** The VM-entry instruction length field. */
+ uint32_t cbEntryInstr;
+} VMXTRANSIENT;
+AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
+AssertCompileMemberAlignment(VMXTRANSIENT, fVmcsFieldsRead, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, fVectoringPF, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, fEFlags, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uExitQual, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, ExitInstrInfo, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uIdtVectoringErrorCode, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uGuestLinearAddr, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uGuestPhysicalAddr, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, 8);
+AssertCompileMemberAlignment(VMXTRANSIENT, cbEntryInstr, 8);
+/** Pointer to VMX transient state. */
+typedef VMXTRANSIENT *PVMXTRANSIENT;
+/** Pointer to a const VMX transient state. */
+typedef const VMXTRANSIENT *PCVMXTRANSIENT;
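fVmcsFieldsRead tracks which VMCS fields have already been pulled into the transient structure (the HMVMX_READ_XXX bits), so a field is read once per exit and later consumers reuse the cached value. A minimal sketch of that pattern; the DEMO_READ_EXIT_QUAL flag, the ReadVmcsFieldU64() helper and the field encoding are illustrative stand-ins rather than the real HMVMXCommon.h / VMX definitions:

    #include <stdint.h>

    #define DEMO_READ_EXIT_QUAL     UINT32_C(0x00000001)    /* stand-in for an HMVMX_READ_XXX bit */

    uint64_t ReadVmcsFieldU64(uint32_t uFieldEnc);           /* hypothetical VMREAD wrapper */

    /* Read the exit qualification once and cache it in the transient state. */
    static uint64_t demoGetExitQual(PVMXTRANSIENT pVmxTransient)
    {
        if (!(pVmxTransient->fVmcsFieldsRead & DEMO_READ_EXIT_QUAL))
        {
            pVmxTransient->uExitQual        = ReadVmcsFieldU64(0x6400 /* exit qualification, illustrative */);
            pVmxTransient->fVmcsFieldsRead |= DEMO_READ_EXIT_QUAL;
        }
        return pVmxTransient->uExitQual;
    }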
+
+
+/**
+ * VMX statistics structure.
+ */
+typedef struct VMXSTATISTICS
+{
+    /* These come first because they are accessed from assembly and we don't
+       want to detail all the stats in the assembly version of this structure. */
+ STAMCOUNTER StatVmxWriteHostRip;
+ STAMCOUNTER StatVmxWriteHostRsp;
+ STAMCOUNTER StatVmxVmLaunch;
+ STAMCOUNTER StatVmxVmResume;
+
+ STAMPROFILEADV StatEntry;
+ STAMPROFILEADV StatPreExit;
+ STAMPROFILEADV StatExitHandling;
+ STAMPROFILEADV StatExitIO;
+ STAMPROFILEADV StatExitMovCRx;
+ STAMPROFILEADV StatExitXcptNmi;
+ STAMPROFILEADV StatExitVmentry;
+ STAMPROFILEADV StatImportGuestState;
+ STAMPROFILEADV StatExportGuestState;
+ STAMPROFILEADV StatLoadGuestFpuState;
+ STAMPROFILEADV StatInGC;
+ STAMPROFILEADV StatPoke;
+ STAMPROFILEADV StatSpinPoke;
+ STAMPROFILEADV StatSpinPokeFailed;
+
+ STAMCOUNTER StatImportGuestStateFallback;
+ STAMCOUNTER StatReadToTransientFallback;
+
+ STAMCOUNTER StatInjectInterrupt;
+ STAMCOUNTER StatInjectXcpt;
+ STAMCOUNTER StatInjectReflect;
+ STAMCOUNTER StatInjectConvertDF;
+ STAMCOUNTER StatInjectInterpret;
+ STAMCOUNTER StatInjectReflectNPF;
+
+ STAMCOUNTER StatExitAll;
+ STAMCOUNTER StatNestedExitAll;
+ STAMCOUNTER StatExitShadowNM;
+ STAMCOUNTER StatExitGuestNM;
+ STAMCOUNTER StatExitShadowPF; /**< Misleading, currently used for MMIO \#PFs as well. */
+ STAMCOUNTER StatExitShadowPFEM;
+ STAMCOUNTER StatExitGuestPF;
+ STAMCOUNTER StatExitGuestUD;
+ STAMCOUNTER StatExitGuestSS;
+ STAMCOUNTER StatExitGuestNP;
+ STAMCOUNTER StatExitGuestTS;
+ STAMCOUNTER StatExitGuestOF;
+ STAMCOUNTER StatExitGuestGP;
+ STAMCOUNTER StatExitGuestDE;
+ STAMCOUNTER StatExitGuestDF;
+ STAMCOUNTER StatExitGuestBR;
+ STAMCOUNTER StatExitGuestAC;
+ STAMCOUNTER StatExitGuestACSplitLock;
+ STAMCOUNTER StatExitGuestDB;
+ STAMCOUNTER StatExitGuestMF;
+ STAMCOUNTER StatExitGuestBP;
+ STAMCOUNTER StatExitGuestXF;
+ STAMCOUNTER StatExitGuestXcpUnk;
+ STAMCOUNTER StatExitDRxWrite;
+ STAMCOUNTER StatExitDRxRead;
+ STAMCOUNTER StatExitCR0Read;
+ STAMCOUNTER StatExitCR2Read;
+ STAMCOUNTER StatExitCR3Read;
+ STAMCOUNTER StatExitCR4Read;
+ STAMCOUNTER StatExitCR8Read;
+ STAMCOUNTER StatExitCR0Write;
+ STAMCOUNTER StatExitCR2Write;
+ STAMCOUNTER StatExitCR3Write;
+ STAMCOUNTER StatExitCR4Write;
+ STAMCOUNTER StatExitCR8Write;
+ STAMCOUNTER StatExitRdmsr;
+ STAMCOUNTER StatExitWrmsr;
+ STAMCOUNTER StatExitClts;
+ STAMCOUNTER StatExitXdtrAccess;
+ STAMCOUNTER StatExitLmsw;
+ STAMCOUNTER StatExitIOWrite;
+ STAMCOUNTER StatExitIORead;
+ STAMCOUNTER StatExitIOStringWrite;
+ STAMCOUNTER StatExitIOStringRead;
+ STAMCOUNTER StatExitIntWindow;
+ STAMCOUNTER StatExitExtInt;
+ STAMCOUNTER StatExitHostNmiInGC;
+ STAMCOUNTER StatExitHostNmiInGCIpi;
+ STAMCOUNTER StatExitPreemptTimer;
+ STAMCOUNTER StatExitTprBelowThreshold;
+ STAMCOUNTER StatExitTaskSwitch;
+ STAMCOUNTER StatExitApicAccess;
+ STAMCOUNTER StatExitReasonNpf;
+
+ STAMCOUNTER StatNestedExitReasonNpf;
+
+ STAMCOUNTER StatFlushPage;
+ STAMCOUNTER StatFlushPageManual;
+ STAMCOUNTER StatFlushPhysPageManual;
+ STAMCOUNTER StatFlushTlb;
+ STAMCOUNTER StatFlushTlbNstGst;
+ STAMCOUNTER StatFlushTlbManual;
+ STAMCOUNTER StatFlushTlbWorldSwitch;
+ STAMCOUNTER StatNoFlushTlbWorldSwitch;
+ STAMCOUNTER StatFlushEntire;
+ STAMCOUNTER StatFlushAsid;
+ STAMCOUNTER StatFlushNestedPaging;
+ STAMCOUNTER StatFlushTlbInvlpgVirt;
+ STAMCOUNTER StatFlushTlbInvlpgPhys;
+ STAMCOUNTER StatTlbShootdown;
+ STAMCOUNTER StatTlbShootdownFlush;
+
+ STAMCOUNTER StatSwitchPendingHostIrq;
+ STAMCOUNTER StatSwitchTprMaskedIrq;
+ STAMCOUNTER StatSwitchGuestIrq;
+ STAMCOUNTER StatSwitchHmToR3FF;
+ STAMCOUNTER StatSwitchVmReq;
+ STAMCOUNTER StatSwitchPgmPoolFlush;
+ STAMCOUNTER StatSwitchDma;
+ STAMCOUNTER StatSwitchExitToR3;
+ STAMCOUNTER StatSwitchLongJmpToR3;
+ STAMCOUNTER StatSwitchMaxResumeLoops;
+ STAMCOUNTER StatSwitchHltToR3;
+ STAMCOUNTER StatSwitchApicAccessToR3;
+ STAMCOUNTER StatSwitchPreempt;
+ STAMCOUNTER StatSwitchNstGstVmexit;
+
+ STAMCOUNTER StatTscParavirt;
+ STAMCOUNTER StatTscOffset;
+ STAMCOUNTER StatTscIntercept;
+
+ STAMCOUNTER StatDRxArmed;
+ STAMCOUNTER StatDRxContextSwitch;
+ STAMCOUNTER StatDRxIoCheck;
+
+ STAMCOUNTER StatExportMinimal;
+ STAMCOUNTER StatExportFull;
+ STAMCOUNTER StatLoadGuestFpu;
+ STAMCOUNTER StatExportHostState;
+
+ STAMCOUNTER StatVmxCheckBadRmSelBase;
+ STAMCOUNTER StatVmxCheckBadRmSelLimit;
+ STAMCOUNTER StatVmxCheckBadRmSelAttr;
+ STAMCOUNTER StatVmxCheckBadV86SelBase;
+ STAMCOUNTER StatVmxCheckBadV86SelLimit;
+ STAMCOUNTER StatVmxCheckBadV86SelAttr;
+ STAMCOUNTER StatVmxCheckRmOk;
+ STAMCOUNTER StatVmxCheckBadSel;
+ STAMCOUNTER StatVmxCheckBadRpl;
+ STAMCOUNTER StatVmxCheckPmOk;
+
+ STAMCOUNTER StatVmxPreemptionRecalcingDeadline;
+ STAMCOUNTER StatVmxPreemptionRecalcingDeadlineExpired;
+ STAMCOUNTER StatVmxPreemptionReusingDeadline;
+ STAMCOUNTER StatVmxPreemptionReusingDeadlineExpired;
+
+#ifdef VBOX_WITH_STATISTICS
+ STAMCOUNTER aStatExitReason[MAX_EXITREASON_STAT];
+ STAMCOUNTER aStatNestedExitReason[MAX_EXITREASON_STAT];
+ STAMCOUNTER aStatInjectedIrqs[256];
+ STAMCOUNTER aStatInjectedXcpts[X86_XCPT_LAST + 1];
+#endif
+#ifdef HM_PROFILE_EXIT_DISPATCH
+ STAMPROFILEADV StatExitDispatch;
+#endif
+} VMXSTATISTICS;
+/** Pointer to the VMX statistics. */
+typedef VMXSTATISTICS *PVMXSTATISTICS;
+/** Pointer to a const VMX statistics structure. */
+typedef const VMXSTATISTICS *PCVMXSTATISTICS;
+
+/** @} */
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_include_VMXInternal_h */
+