author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 03:01:46 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 03:01:46 +0000
commit    f8fe689a81f906d1b91bb3220acde2a4ecb14c5b (patch)
tree      26484e9d7e2c67806c2d1760196ff01aaa858e8c /src/VBox/VMM/VMMRC
parent    Initial commit. (diff)
Adding upstream version 6.0.4-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/VMM/VMMRC')
-rw-r--r--  src/VBox/VMM/VMMRC/CPUMRC.cpp           |  255
-rw-r--r--  src/VBox/VMM/VMMRC/CPUMRCA.asm          |  483
-rw-r--r--  src/VBox/VMM/VMMRC/CPUMRCPatchHlp.asm   |  236
-rw-r--r--  src/VBox/VMM/VMMRC/CSAMRC.cpp           |  137
-rw-r--r--  src/VBox/VMM/VMMRC/EMRCA.asm            |   26
-rw-r--r--  src/VBox/VMM/VMMRC/IOMRC.cpp            |  247
-rw-r--r--  src/VBox/VMM/VMMRC/MMRamRC.cpp          |  198
-rw-r--r--  src/VBox/VMM/VMMRC/MMRamRCA.asm         |  157
-rw-r--r--  src/VBox/VMM/VMMRC/Makefile.kup         |    0
-rw-r--r--  src/VBox/VMM/VMMRC/PATMRC.cpp           |  545
-rw-r--r--  src/VBox/VMM/VMMRC/PDMRCDevice.cpp      |  811
-rw-r--r--  src/VBox/VMM/VMMRC/PGMRC.cpp            |  166
-rw-r--r--  src/VBox/VMM/VMMRC/PGMRCBth.h           |   24
-rw-r--r--  src/VBox/VMM/VMMRC/PGMRCGst.h           |   76
-rw-r--r--  src/VBox/VMM/VMMRC/PGMRCShw.h           |   74
-rw-r--r--  src/VBox/VMM/VMMRC/SELMRC.cpp           |  587
-rw-r--r--  src/VBox/VMM/VMMRC/TRPMRC.cpp           |  180
-rw-r--r--  src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp   | 1559
-rw-r--r--  src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm  | 1483
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRC.cpp            |  464
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRC.def            |  106
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRC.mac            |  194
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRC0.asm           |   40
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRC99.asm          |   47
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRCA.asm           |  397
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRCBuiltin.def     |   32
-rw-r--r--  src/VBox/VMM/VMMRC/VMMRCDeps.cpp        |   41
27 files changed, 8565 insertions, 0 deletions
diff --git a/src/VBox/VMM/VMMRC/CPUMRC.cpp b/src/VBox/VMM/VMMRC/CPUMRC.cpp
new file mode 100644
index 00000000..a7b94447
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/CPUMRC.cpp
@@ -0,0 +1,255 @@
+/* $Id: CPUMRC.cpp $ */
+/** @file
+ * CPUM - Raw-mode Context Code.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/em.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/asm-amd64-x86.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN /* addressed from asm (not called so no DECLASM). */
+DECLCALLBACK(int) cpumRCHandleNPAndGP(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser);
+RT_C_DECLS_END
+
+
+/**
+ * Deal with traps occurring during segment loading and IRET when resuming guest
+ * context execution.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame The register frame.
+ * @param uUser User argument. In this case a combination of the
+ * CPUM_HANDLER_* \#defines.
+ */
+DECLCALLBACK(int) cpumRCHandleNPAndGP(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser)
+{
+ Log(("********************************************************\n"));
+ Log(("cpumRCHandleNPAndGP: eip=%RX32 uUser=%#x\n", pRegFrame->eip, uUser));
+ Log(("********************************************************\n"));
+
+ /*
+ * Take action based on what's happened.
+ */
+ switch (uUser & CPUM_HANDLER_TYPEMASK)
+ {
+ case CPUM_HANDLER_GS:
+ case CPUM_HANDLER_DS:
+ case CPUM_HANDLER_ES:
+ case CPUM_HANDLER_FS:
+ TRPMGCHyperReturnToHost(pVM, VINF_EM_RAW_STALE_SELECTOR);
+ break;
+
+ case CPUM_HANDLER_IRET:
+ TRPMGCHyperReturnToHost(pVM, VINF_EM_RAW_IRET_TRAP);
+ break;
+ }
+
+ AssertMsgFailed(("uUser=%#x eip=%#x\n", uUser, pRegFrame->eip)); RT_NOREF_PV(pRegFrame);
+ return VERR_TRPM_DONT_PANIC;
+}
+
+
+/**
+ * Called by TRPM and CPUM assembly code to make sure the guest state is
+ * ready for execution.
+ *
+ * @param pVM The cross context VM structure.
+ */
+DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM)
+{
+#ifdef VBOX_STRICT
+ /*
+ * Check some important assumptions before resuming guest execution.
+ */
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+ uint8_t const uRawCpl = CPUMGetGuestCPL(pVCpu);
+ uint32_t const u32EFlags = CPUMRawGetEFlags(pVCpu);
+ bool const fPatch = PATMIsPatchGCAddr(pVM, pCtx->eip);
+ AssertMsg(pCtx->eflags.Bits.u1IF, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ AssertMsg(pCtx->eflags.Bits.u2IOPL < RT_MAX(uRawCpl, 1U),
+ ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ if (!(u32EFlags & X86_EFL_VM))
+ {
+ AssertMsg((u32EFlags & X86_EFL_IF) || fPatch,("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ AssertMsg((pCtx->cs.Sel & X86_SEL_RPL) > 0, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) > 0, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ }
+ AssertMsg(CPUMIsGuestInRawMode(pVCpu), ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+ //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
+#else
+ RT_NOREF_PV(pVM);
+#endif
+}
+
+
+/**
+ * Get the current privilege level of the guest.
+ *
+ * @returns CPL
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pRegFrame Pointer to the register frame.
+ *
+ * @todo r=bird: This is very similar to CPUMGetGuestCPL and I cannot quite
+ * see why this variant of the code is necessary.
+ */
+VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
+{
+ /*
+ * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
+ *
+ * Note! We used to check CS.DPL here, assuming it was always equal to
+ * CPL even if a conforming segment was loaded. But this turned out to
+ * only apply to older AMD-V. With VT-x we had an ACP2 regression
+ * during install after a far call to ring 2. Then on newer
+ * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
+ * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
+ *
+ * So, forget CS.DPL, always use SS.DPL.
+ *
+ * Note! The SS RPL is always equal to the CPL, while the CS RPL
+ * isn't necessarily equal if the segment is conforming.
+ * See section 4.11.1 in the AMD manual.
+ */
+ uint32_t uCpl;
+ if (!pRegFrame->eflags.Bits.u1VM)
+ {
+ uCpl = (pRegFrame->ss.Sel & X86_SEL_RPL);
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+# ifdef VBOX_WITH_RAW_RING1
+ if (pVCpu->cpum.s.fRawEntered)
+ {
+ if ( uCpl == 2
+ && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)) )
+ uCpl = 1;
+ else if (uCpl == 1)
+ uCpl = 0;
+ }
+ Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
+# else
+ if (uCpl == 1)
+ uCpl = 0;
+# endif
+#endif
+ }
+ else
+ uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
+
+ return uCpl;
+}
+
+
+#ifdef VBOX_WITH_RAW_RING1
+/**
+ * Transforms the guest CPU state to raw-ring mode.
+ *
+ * This function will change any cs or ss register with DPL=0 to DPL=1.
+ *
+ * Used by emInterpretIret() after the new state has been loaded.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pCtxCore The context core (for trap usage).
+ * @see @ref pg_raw
+ * @remarks Will probably be obsoleted by #5653 (it will leave and reenter raw
+ * mode instead, I think).
+ */
+VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
+{
+ /*
+ * Are we in Ring-0?
+ */
+ if ( pCtxCore->ss.Sel
+ && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
+ && !pCtxCore->eflags.Bits.u1VM)
+ {
+ /*
+ * Set CPL to Ring-1.
+ */
+ pCtxCore->ss.Sel |= 1;
+ if ( pCtxCore->cs.Sel
+ && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
+ pCtxCore->cs.Sel |= 1;
+ }
+ else
+ {
+ if ( EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
+ && !pCtxCore->eflags.Bits.u1VM
+ && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
+ {
+ /* Set CPL to Ring-2. */
+ pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
+ if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
+ pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
+ }
+ }
+
+ /*
+ * Assert sanity.
+ */
+ AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
+ AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
+ ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
+
+ pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
+}
+#endif /* VBOX_WITH_RAW_RING1 */
+
+
+/**
+ * Called by trpmGCExitTrap when VMCPU_FF_CPUM is set (by CPUMRZ.cpp).
+ *
+ * We can be called unnecessarily here if we returned to ring-3 for some other
+ * reason before we tried to resume executing guest code. This is detected and
+ * ignored.
+ *
+ * @param pVCpu The cross context CPU structure for the calling EMT.
+ */
+VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu)
+{
+ /* Only modify CR0 if we're in the post IEM state (host state saved, guest no longer active). */
+ if ((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)) == CPUM_USED_FPU_HOST)
+ {
+ /*
+ * Doing the same CR0 calculation as in AMD64andLegacy.mac so that we'll
+ * catch guest FPU accesses and load the FPU/SSE/AVX register state as needed.
+ */
+ uint32_t cr0 = ASMGetCR0();
+ cr0 |= pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM;
+ cr0 |= X86_CR0_TS | X86_CR0_MP;
+ ASMSetCR0(cr0);
+ Log6(("CPUMRCProcessForceFlag: cr0=%#x\n", cr0));
+ }
+ else
+ Log6(("CPUMRCProcessForceFlag: no change - cr0=%#x\n", ASMGetCR0()));
+}
+
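The two routines above rest on small, fixed mappings: raw mode runs guest ring 0 in raw ring 1 (and guest ring 1 in raw ring 2 when raw ring-1 support is enabled), and lazy FPU loading is re-armed purely through CR0.TS/MP while preserving the guest's CR0.EM choice. The following is a minimal standalone C sketch of both calculations, not VirtualBox code; only the X86_CR0_* and X86_SEL_RPL bit values are architectural, and every function name is invented for illustration.

/* Sketch of the ring un-compression in CPUMRCGetGuestCPL and the CR0
 * arithmetic in CPUMRCProcessForceFlag above. Illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_MP  UINT32_C(0x00000002) /* monitor coprocessor */
#define X86_CR0_EM  UINT32_C(0x00000004) /* emulate FPU */
#define X86_CR0_TS  UINT32_C(0x00000008) /* task switched */
#define X86_SEL_RPL 3u                   /* requestor privilege level mask */

/* Map the raw-mode SS RPL back to the guest CPL once raw mode has been
 * entered: raw ring 1 hosts guest ring 0; with raw ring-1 support, raw
 * ring 2 hosts guest ring 1. */
static uint32_t rawSsRplToGuestCpl(uint16_t ssSel, int fRawRing1Enabled)
{
    uint32_t uCpl = ssSel & X86_SEL_RPL;
    if (uCpl == 2 && fRawRing1Enabled)
        uCpl = 1;
    else if (uCpl == 1)
        uCpl = 0;
    return uCpl;
}

/* Compute a host CR0 value that makes the next guest FPU access trap
 * (#NM via TS|MP) while keeping the guest's own EM setting visible. */
static uint32_t armLazyFpuTrap(uint32_t hostCr0, uint32_t guestCr0)
{
    uint32_t cr0 = hostCr0;
    cr0 |= guestCr0 & X86_CR0_EM;
    cr0 |= X86_CR0_TS | X86_CR0_MP;
    return cr0;
}

int main(void)
{
    printf("raw ss.rpl=1 -> guest cpl=%u\n", rawSsRplToGuestCpl(0x09, 0));
    printf("armed cr0=%#x\n", armLazyFpuTrap(0x80000011u, X86_CR0_EM));
    return 0;
}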
diff --git a/src/VBox/VMM/VMMRC/CPUMRCA.asm b/src/VBox/VMM/VMMRC/CPUMRCA.asm
new file mode 100644
index 00000000..a0b520c1
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/CPUMRCA.asm
@@ -0,0 +1,483 @@
+; $Id: CPUMRCA.asm $
+;; @file
+; CPUM - Raw-mode Context Assembly Routines.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VMMRC.mac"
+%include "VBox/vmm/vm.mac"
+%include "VBox/err.mac"
+%include "VBox/vmm/stam.mac"
+%include "CPUMInternal.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern IMPNAME(g_CPUM) ; VMM GC Builtin import
+extern IMPNAME(g_VM) ; VMM GC Builtin import
+extern NAME(cpumRCHandleNPAndGP) ; CPUMGC.cpp
+extern NAME(CPUMRCAssertPreExecutionSanity)
+
+
+;
+; Enables write protection of Hypervisor memory pages.
+; !note! Must be commented out for Trap8 debug handler.
+;
+%define ENABLE_WRITE_PROTECTION 1
+
+BEGINCODE
+
+
+;;
+; Handles lazy FPU saving and restoring.
+;
+; This handler implements lazy FPU (SSE/MMX/etc.) saving. Two actions
+; may be taken in this handler since the Guest OS may be doing lazy FPU
+; switching: we either generate the traps which the Guest CPU CTX shall
+; see according to its CR0 flags, or, if the Guest OS expects no trap,
+; we save the host context and restore the guest context.
+;
+; @returns 0 if caller should continue execution.
+; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
+; @param pCpumCpu [ebp+8] Pointer to the CPUMCPU.
+;
+align 16
+BEGINPROC cpumHandleLazyFPUAsm
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ mov ebx, [ebp + 8]
+%define pCpumCpu ebx
+%define pXState esi
+
+ ;
+ ; Figure out what to do.
+ ;
+ ; There are two basic actions:
+ ; 1. Save host fpu and restore guest fpu.
+ ; 2. Generate guest trap.
+ ;
+ ; When entering the hypervisor we'll always enable MP (for proper wait
+ ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
+ ; is taken from the guest OS in order to get proper SSE handling.
+ ;
+ ;
+ ; Actions taken depending on the guest CR0 flags:
+ ;
+ ; 3 2 1
+ ; TS | EM | MP | FPUInstr | WAIT :: VMM Action
+ ; ------------------------------------------------------------------------
+ ; 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
+ ; 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
+ ; 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC;
+ ; 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
+ ; 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
+ ; 1 | 0 | 1 | #NM | #NM :: Go to host taking trap there.
+ ; 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
+ ; 1 | 1 | 1 | #NM | #NM :: Go to host taking trap there.
+
+ ;
+ ; Before taking any of these actions we check whether we have already
+ ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
+ ;
+ test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
+ jz hlfpua_not_loaded
+ jmp hlfpua_guest_trap
+
+ ;
+ ; Take action.
+ ;
+align 16
+hlfpua_not_loaded:
+ mov eax, [pCpumCpu + CPUMCPU.Guest.cr0]
+ and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
+ jmp dword [eax*2 + hlfpuajmp1]
+align 16
+;; jump table using fpu related cr0 flags as index.
+hlfpuajmp1:
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_guest_trap
+ RTCCPTR_DEF hlfpua_switch_fpu_ctx
+ RTCCPTR_DEF hlfpua_guest_trap
+;; and mask for cr0.
+hlfpu_afFlags:
+ RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
+ RTCCPTR_DEF ~(X86_CR0_TS)
+ RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
+ RTCCPTR_DEF ~(X86_CR0_TS)
+ RTCCPTR_DEF ~(X86_CR0_MP)
+ RTCCPTR_DEF 0
+ RTCCPTR_DEF ~(X86_CR0_MP)
+ RTCCPTR_DEF 0
+
+ ;
+ ; Action - switch FPU context and change cr0 flags.
+ ;
+align 16
+hlfpua_switch_fpu_ctx:
+ mov ecx, cr0
+ mov edx, ecx
+ and ecx, [eax*2 + hlfpu_afFlags] ; Calc the new cr0 flags. Do NOT use ECX until we restore it!
+ and edx, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, edx ; Clear flags so we don't trap here.
+
+ test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
+ jnz hlfpua_host_done
+
+ mov eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
+ mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
+ or eax, eax
+ jz hlfpua_host_fxsave
+ mov edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
+ xsave [pXState]
+ jmp hlfpua_host_done
+hlfpua_host_fxsave:
+ fxsave [pXState]
+hlfpua_host_done:
+
+ mov eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
+ mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
+ or eax, eax
+ jz hlfpua_guest_fxrstor
+ mov edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+ xrstor [pXState]
+ jmp hlfpua_guest_done
+hlfpua_guest_fxrstor:
+ fxrstor [pXState]
+hlfpua_guest_done:
+
+hlfpua_finished_switch:
+ or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM)
+
+ ; Load new CR0 value.
+ mov cr0, ecx ; load the new cr0 flags.
+
+ ; return continue execution.
+ pop esi
+ pop ebx
+ xor eax, eax
+ leave
+ ret
+
+ ;
+ ; Action - Generate Guest trap.
+ ;
+hlfpua_action_4:
+hlfpua_guest_trap:
+ pop esi
+ pop ebx
+ mov eax, VINF_EM_RAW_GUEST_TRAP
+ leave
+ ret
+ENDPROC cpumHandleLazyFPUAsm
+
+
+;;
+; Calls a guest trap/interrupt handler directly
+; Assumes a trap stack frame has already been set up on the guest's stack!
+;
+; @param pRegFrame [esp + 4] Original trap/interrupt context
+; @param selCS [esp + 8] Code selector of handler
+; @param pHandler [esp + 12] GC virtual address of handler
+; @param eflags [esp + 16] Callee's EFLAGS
+; @param selSS [esp + 20] Stack selector for handler
+; @param pEsp [esp + 24] Stack address for handler
+;
+; @remark This call never returns!
+;
+; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
+align 16
+BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
+ mov ebp, esp
+
+ ; construct iret stack frame
+ push dword [ebp + 20] ; SS
+ push dword [ebp + 24] ; ESP
+ push dword [ebp + 16] ; EFLAGS
+ push dword [ebp + 8] ; CS
+ push dword [ebp + 12] ; EIP
+
+ ;
+ ; enable WP
+ ;
+%ifdef ENABLE_WRITE_PROTECTION
+ mov eax, cr0
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+%endif
+
+ ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
+ mov ebp, [ebp + 4] ; pRegFrame
+ mov ebx, [ebp + CPUMCTXCORE.ebx]
+ mov ecx, [ebp + CPUMCTXCORE.ecx]
+ mov edx, [ebp + CPUMCTXCORE.edx]
+ mov esi, [ebp + CPUMCTXCORE.esi]
+ mov edi, [ebp + CPUMCTXCORE.edi]
+
+ ;; @todo load segment registers *before* enabling WP.
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
+ mov gs, [ebp + CPUMCTXCORE.gs.Sel]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
+ mov fs, [ebp + CPUMCTXCORE.fs.Sel]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
+ mov es, [ebp + CPUMCTXCORE.es.Sel]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
+ mov ds, [ebp + CPUMCTXCORE.ds.Sel]
+
+ mov eax, [ebp + CPUMCTXCORE.eax]
+ mov ebp, [ebp + CPUMCTXCORE.ebp]
+
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+ iret
+ENDPROC CPUMGCCallGuestTrapHandler
+
+
+;;
+; Performs an iret to V86 code
+; Assumes a trap stack frame has already been set up on the guest's stack!
+;
+; @param pRegFrame Original trap/interrupt context
+;
+; This function does not return!
+;
+;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
+align 16
+BEGINPROC CPUMGCCallV86Code
+ push ebp
+ mov ebp, esp
+ mov ebx, [ebp + 8] ; pRegFrame
+
+ ; Construct iret stack frame.
+ push dword [ebx + CPUMCTXCORE.gs.Sel]
+ push dword [ebx + CPUMCTXCORE.fs.Sel]
+ push dword [ebx + CPUMCTXCORE.ds.Sel]
+ push dword [ebx + CPUMCTXCORE.es.Sel]
+ push dword [ebx + CPUMCTXCORE.ss.Sel]
+ push dword [ebx + CPUMCTXCORE.esp]
+ push dword [ebx + CPUMCTXCORE.eflags]
+ push dword [ebx + CPUMCTXCORE.cs.Sel]
+ push dword [ebx + CPUMCTXCORE.eip]
+
+ ; Invalidate all segment registers.
+ mov al, ~CPUMSELREG_FLAGS_VALID
+ and [ebx + CPUMCTXCORE.fs.fFlags], al
+ and [ebx + CPUMCTXCORE.ds.fFlags], al
+ and [ebx + CPUMCTXCORE.es.fFlags], al
+ and [ebx + CPUMCTXCORE.ss.fFlags], al
+ and [ebx + CPUMCTXCORE.gs.fFlags], al
+ and [ebx + CPUMCTXCORE.cs.fFlags], al
+
+ ;
+ ; enable WP
+ ;
+%ifdef ENABLE_WRITE_PROTECTION
+ mov eax, cr0
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+%endif
+
+ ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
+ mov eax, [ebx + CPUMCTXCORE.eax]
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ mov edx, [ebx + CPUMCTXCORE.edx]
+ mov esi, [ebx + CPUMCTXCORE.esi]
+ mov edi, [ebx + CPUMCTXCORE.edi]
+ mov ebp, [ebx + CPUMCTXCORE.ebp]
+ mov ebx, [ebx + CPUMCTXCORE.ebx]
+
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+ iret
+ENDPROC CPUMGCCallV86Code
+
+
+;;
+; This is a main entry point for resuming (or starting) guest
+; code execution.
+;
+; We get here directly from VMMSwitcher.asm (jmp at the end
+; of VMMSwitcher_HostToGuest).
+;
+; This call never returns!
+;
+; @param edx Pointer to CPUMCPU structure.
+;
+align 16
+BEGINPROC_EXPORTED CPUMGCResumeGuest
+%ifdef VBOX_STRICT
+ ; Call CPUM to check sanity.
+ push edx
+ mov edx, IMP(g_VM)
+ push edx
+ call NAME(CPUMRCAssertPreExecutionSanity)
+ add esp, 4
+ pop edx
+%endif
+
+ ;
+ ; Setup iretd
+ ;
+ push dword [edx + CPUMCPU.Guest.ss.Sel]
+ push dword [edx + CPUMCPU.Guest.esp]
+ push dword [edx + CPUMCPU.Guest.eflags]
+ push dword [edx + CPUMCPU.Guest.cs.Sel]
+ push dword [edx + CPUMCPU.Guest.eip]
+
+ ;
+ ; Restore registers.
+ ;
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
+ mov es, [edx + CPUMCPU.Guest.es.Sel]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
+ mov fs, [edx + CPUMCPU.Guest.fs.Sel]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
+ mov gs, [edx + CPUMCPU.Guest.gs.Sel]
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Statistics.
+ ;
+ push edx
+ mov edx, IMP(g_VM)
+ lea edx, [edx + VM.StatTotalQemuToGC]
+ STAM_PROFILE_ADV_STOP edx
+
+ mov edx, IMP(g_VM)
+ lea edx, [edx + VM.StatTotalInGC]
+ STAM_PROFILE_ADV_START edx
+ pop edx
+%endif
+
+ ;
+ ; enable WP
+ ;
+%ifdef ENABLE_WRITE_PROTECTION
+ mov eax, cr0
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+%endif
+
+ ;
+ ; Continue restore.
+ ;
+ mov esi, [edx + CPUMCPU.Guest.esi]
+ mov edi, [edx + CPUMCPU.Guest.edi]
+ mov ebp, [edx + CPUMCPU.Guest.ebp]
+ mov ebx, [edx + CPUMCPU.Guest.ebx]
+ mov ecx, [edx + CPUMCPU.Guest.ecx]
+ mov eax, [edx + CPUMCPU.Guest.eax]
+ push dword [edx + CPUMCPU.Guest.ds.Sel]
+ mov edx, [edx + CPUMCPU.Guest.edx]
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
+ pop ds
+
+ ; restart execution.
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+ iretd
+ENDPROC CPUMGCResumeGuest
+
+
+;;
+; This is a main entry point for resuming (or starting) guest
+; code execution for raw V86 mode
+;
+; We get here directly from VMMSwitcher.asm (jmp at the end
+; of VMMSwitcher_HostToGuest).
+;
+; This call never returns!
+;
+; @param edx Pointer to CPUMCPU structure.
+;
+align 16
+BEGINPROC_EXPORTED CPUMGCResumeGuestV86
+%ifdef VBOX_STRICT
+ ; Call CPUM to check sanity.
+ push edx
+ mov edx, IMP(g_VM)
+ push edx
+ call NAME(CPUMRCAssertPreExecutionSanity)
+ add esp, 4
+ pop edx
+%endif
+
+ ;
+ ; Setup iretd
+ ;
+ push dword [edx + CPUMCPU.Guest.gs.Sel]
+ push dword [edx + CPUMCPU.Guest.fs.Sel]
+ push dword [edx + CPUMCPU.Guest.ds.Sel]
+ push dword [edx + CPUMCPU.Guest.es.Sel]
+
+ push dword [edx + CPUMCPU.Guest.ss.Sel]
+ push dword [edx + CPUMCPU.Guest.esp]
+
+ push dword [edx + CPUMCPU.Guest.eflags]
+ push dword [edx + CPUMCPU.Guest.cs.Sel]
+ push dword [edx + CPUMCPU.Guest.eip]
+
+ ;
+ ; Restore registers.
+ ;
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Statistics.
+ ;
+ push edx
+ mov edx, IMP(g_VM)
+ lea edx, [edx + VM.StatTotalQemuToGC]
+ STAM_PROFILE_ADV_STOP edx
+
+ mov edx, IMP(g_VM)
+ lea edx, [edx + VM.StatTotalInGC]
+ STAM_PROFILE_ADV_START edx
+ pop edx
+%endif
+
+ ;
+ ; enable WP
+ ;
+%ifdef ENABLE_WRITE_PROTECTION
+ mov eax, cr0
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+%endif
+
+ ;
+ ; Continue restore.
+ ;
+ mov esi, [edx + CPUMCPU.Guest.esi]
+ mov edi, [edx + CPUMCPU.Guest.edi]
+ mov ebp, [edx + CPUMCPU.Guest.ebp]
+ mov ecx, [edx + CPUMCPU.Guest.ecx]
+ mov ebx, [edx + CPUMCPU.Guest.ebx]
+ mov eax, [edx + CPUMCPU.Guest.eax]
+ mov edx, [edx + CPUMCPU.Guest.edx]
+
+ ; restart execution.
+ TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+ iretd
+ENDPROC CPUMGCResumeGuestV86
+
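The hlfpuajmp1 jump table in cpumHandleLazyFPUAsm above encodes a truth table over the guest's CR0 TS/EM/MP bits: only the TS=1, MP=1 rows forward the #NM trap to the guest, while every other combination switches the FPU context. The following C rendering of that decision is an illustrative sketch with invented names, not VirtualBox code.

/* Decision logic equivalent to the hlfpuajmp1 jump table above. */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_MP 0x02u
#define X86_CR0_EM 0x04u
#define X86_CR0_TS 0x08u

typedef enum { SWITCH_FPU_CTX, GUEST_TRAP } LazyFpuAction;

/* Only TS=1 && MP=1 forwards the #NM trap to the guest (a WAIT
 * instruction would trap there as well); all other combinations load
 * the guest FPU state and continue execution. */
static LazyFpuAction lazyFpuAction(uint32_t guestCr0)
{
    int fTs = (guestCr0 & X86_CR0_TS) != 0;
    int fMp = (guestCr0 & X86_CR0_MP) != 0;
    return (fTs && fMp) ? GUEST_TRAP : SWITCH_FPU_CTX;
}

int main(void)
{
    /* Walk the same eight rows as the table in the assembly comments. */
    for (uint32_t cr0 = 0; cr0 <= (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP); cr0 += 2)
        printf("TS=%u EM=%u MP=%u -> %s\n",
               !!(cr0 & X86_CR0_TS), !!(cr0 & X86_CR0_EM), !!(cr0 & X86_CR0_MP),
               lazyFpuAction(cr0) == GUEST_TRAP ? "guest trap" : "switch fpu ctx");
    return 0;
}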
diff --git a/src/VBox/VMM/VMMRC/CPUMRCPatchHlp.asm b/src/VBox/VMM/VMMRC/CPUMRCPatchHlp.asm
new file mode 100644
index 00000000..a652e652
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/CPUMRCPatchHlp.asm
@@ -0,0 +1,236 @@
+; $Id: CPUMRCPatchHlp.asm $
+;; @file
+; CPUM - Patch Helpers.
+;
+
+;
+; Copyright (C) 2015-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/vmm/cpum.mac"
+%include "CPUMInternal.mac"
+%include "VBox/vmm/vm.mac"
+%include "VMMRC.mac"
+%include "iprt/x86.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern IMPNAME(g_VM)
+
+
+BEGIN_PATCH_HLP_SEG
+
+;;
+; Helper for PATMCpuidReplacement.
+;
+; We have at most 32 bytes of stack to play with.
+;
+; @input eax, ecx(, edx, ebx)
+; @output eax, ebx, ecx, edx
+;
+; @uses eflags (caller saves them)
+;
+BEGINPROC_EXPORTED CPUMPatchHlpCpuId
+ ; Save the registers we use for passthru and sub-leaf matching (eax is not used).
+ push edx
+ push ecx
+ push ebx
+
+ ; Use edi as VM pointer.
+ push edi
+ mov edi, IMP_SEG(ss, g_VM)
+
+%define CPUMCPUIDLEAF_SIZE_LOG2 5 ; ASSUMES CPUMCPUIDLEAF_size == 32
+
+ ;
+ ; Perform a binary search looking for leaf with the EAX value.
+ ;
+ mov edx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.cCpuIdLeaves]
+ mov ecx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.paCpuIdLeavesRC]
+ test edx, edx
+ jz cpuid_unknown
+ shl edx, CPUMCPUIDLEAF_SIZE_LOG2
+ add edx, ecx ; edx = end (exclusive); ecx = start.
+
+cpuid_lookup_leaf:
+ ; Find the middle element
+ mov ebx, edx
+cpuid_lookup_leaf_ebx_loaded:
+ sub ebx, ecx
+ shr ebx, 1 + CPUMCPUIDLEAF_SIZE_LOG2
+ shl ebx, CPUMCPUIDLEAF_SIZE_LOG2
+ add ebx, ecx ; ebx = middle element.
+
+ ; Compare.
+ cmp eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf]
+ jae cpuid_lookup_split_up
+
+ ; The leaf is before ebx.
+cpuid_lookup_split_down:
+ cmp ecx, ebx ; start == middle? if so, we failed.
+ mov edx, ebx ; end = middle;
+ jne cpuid_lookup_leaf_ebx_loaded
+ jmp cpuid_unknown
+
+ ; The leaf is at or after ebx.
+cpuid_lookup_split_up:
+ je cpuid_match_eax
+ lea ecx, [ebx + CPUMCPUIDLEAF_size] ; start = middle + 1
+ cmp ecx, edx ; middle + 1 == end? if so, we failed.
+ jne cpuid_lookup_leaf
+ jmp cpuid_unknown
+
+ ;
+ ; We've got a matching leaf; does the sub-leaf match too?
+ ;
+cpuid_match_eax:
+ mov ecx, [esp + 4]
+ and ecx, [ss:ebx + CPUMCPUIDLEAF.fSubLeafMask]
+ cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]
+ je cpuid_fetch
+ ja cpuid_lookup_subleaf_forwards
+
+ ;
+ ; Search backwards.
+ ;
+cpuid_lookup_subleaf_backwards:
+ mov edx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.paCpuIdLeavesRC] ; edx = first leaf
+
+cpuid_lookup_subleaf_backwards_loop:
+ cmp ebx, edx ; Is there a leaf before the current?
+ jbe cpuid_subleaf_not_found ; If not we're out of luck.
+ cmp eax, [ss:ebx - CPUMCPUIDLEAF_size + CPUMCPUIDLEAF.uLeaf]
+ jne cpuid_subleaf_not_found ; If the leaf before us does not have the same leaf number, we failed.
+ sub ebx, CPUMCPUIDLEAF_size
+ cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]
+ je cpuid_fetch ; If the subleaf matches, we're good!
+ jb cpuid_lookup_subleaf_backwards_loop ; Still hope if the subleaf we're seeking is smaller.
+ jmp cpuid_subleaf_not_found ; Too bad.
+
+ ;
+ ; Search forward until we've got a matching sub-leaf (or not).
+ ;
+cpuid_lookup_subleaf_forwards:
+ ; Calculate the last leaf address.
+ mov edx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.cCpuIdLeaves]
+ dec edx
+ shl edx, CPUMCPUIDLEAF_SIZE_LOG2
+ add edx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.paCpuIdLeavesRC] ; edx = last leaf (inclusive)
+
+cpuid_subleaf_lookup:
+ cmp ebx, edx
+ jae cpuid_subleaf_not_found
+ cmp eax, [ss:ebx + CPUMCPUIDLEAF_size + CPUMCPUIDLEAF.uLeaf]
+ jne cpuid_subleaf_not_found
+ add ebx, CPUMCPUIDLEAF_size
+ cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]
+ ja cpuid_subleaf_lookup
+ je cpuid_fetch
+
+ ;
+ ; Out-of-range sub-leaves aren't quite as easy and pretty to emulate as
+ ; in-range ones, but we do an adequate job here.
+ ;
+cpuid_subleaf_not_found:
+ xor ecx, ecx
+ test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES
+ jz cpuid_load_zeros_except_ecx
+ mov ecx, [esp + 4]
+ and ecx, 0ffh
+cpuid_load_zeros_except_ecx:
+ xor edx, edx
+ xor eax, eax
+ xor ebx, ebx
+ jmp cpuid_done
+
+ ;
+ ; Different CPUs have different ways of dealing with unknown CPUID leaves.
+ ;
+cpuid_unknown:
+ mov ebx, IMP_SEG(ss, g_VM)
+ cmp dword [ss:ebx + VM.cpum + CPUM.GuestInfo + CPUMINFO.enmUnknownCpuIdMethod], CPUMUNKNOWNCPUID_PASSTHRU
+ je cpuid_unknown_passthru
+ ; Load the default cpuid leaf.
+cpuid_unknown_def_leaf:
+ mov edx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.DefCpuId + CPUMCPUID.uEdx]
+ mov ecx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.DefCpuId + CPUMCPUID.uEcx]
+ mov eax, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.DefCpuId + CPUMCPUID.uEax]
+ mov ebx, [ss:edi + VM.cpum + CPUM.GuestInfo + CPUMINFO.DefCpuId + CPUMCPUID.uEbx]
+ jmp cpuid_done
+ ; Pass thru the input values unmodified (eax is still virgin).
+cpuid_unknown_passthru:
+ mov edx, [esp + 8]
+ mov ecx, [esp + 4]
+ mov ebx, [esp]
+ jmp cpuid_done
+
+ ;
+ ; Normal return path, unless the leaf has flags (we ignore APIC_ID as we only have a single CPU with ID 0).
+ ;
+cpuid_fetch:
+ test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_CONTAINS_APIC | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
+ jnz cpuid_fetch_with_flags
+ mov edx, [ss:ebx + CPUMCPUIDLEAF.uEdx]
+ mov ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx]
+ mov eax, [ss:ebx + CPUMCPUIDLEAF.uEax]
+ mov ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx]
+
+cpuid_done:
+ pop edi
+ add esp, 12
+ ret
+
+
+ ;
+ ; Need to adjust the result according to VCpu state.
+ ;
+ ; APIC: CPUID[0x00000001].EDX[9] &= pVCpu->cpum.s.fCpuIdApicFeatureVisible;
+ ; CPUID[0x80000001].EDX[9] &= pVCpu->cpum.s.fCpuIdApicFeatureVisible;
+ ;
+ ; OSXSAVE: CPUID[0x00000001].ECX[27] = CR4.OSXSAVE;
+ ;
+cpuid_fetch_with_flags:
+ mov edx, [ss:ebx + CPUMCPUIDLEAF.uEdx]
+ mov ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx]
+
+ mov eax, [ss:edi + VM.offVMCPU]
+
+ ; APIC
+ test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_CONTAINS_APIC
+ jz cpuid_fetch_with_flags_done_apic
+ test byte [ss:edi + eax + VMCPU.cpum + CPUMCPU.fCpuIdApicFeatureVisible], 0ffh
+ jnz cpuid_fetch_with_flags_done_apic
+ and edx, ~X86_CPUID_FEATURE_EDX_APIC
+cpuid_fetch_with_flags_done_apic:
+
+ ; OSXSAVE
+ test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
+ jz cpuid_fetch_with_flags_done_osxsave
+ and ecx, ~X86_CPUID_FEATURE_ECX_OSXSAVE
+ test dword [ss:edi + eax + VMCPU.cpum + CPUMCPU.Guest.cr4], X86_CR4_OSXSAVE
+ jz cpuid_fetch_with_flags_done_osxsave
+ or ecx, X86_CPUID_FEATURE_ECX_OSXSAVE
+cpuid_fetch_with_flags_done_osxsave:
+
+ ; Load the two remaining registers and jump to the common normal exit.
+ mov eax, [ss:ebx + CPUMCPUIDLEAF.uEax]
+ mov ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx]
+ jmp cpuid_done
+
+ENDPROC CPUMPatchHlpCpuId
+
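CPUMPatchHlpCpuId above performs a binary search over the sorted CPUID leaf array and then walks neighbouring entries to match the masked sub-leaf. The following compact C model of that lookup is a sketch, not the VirtualBox structures: the struct and names are invented, and the sub-leaf handling is simplified in that it applies the matched leaf's fSubLeafMask to the whole scan rather than per entry as the assembly's forward/backward walks do.

/* Simplified C model of the leaf lookup in CPUMPatchHlpCpuId above. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    uint32_t uLeaf, uSubLeaf, fSubLeafMask;
    uint32_t uEax, uEbx, uEcx, uEdx;
} CpuIdLeaf;

static const CpuIdLeaf *findLeaf(const CpuIdLeaf *pa, size_t c,
                                 uint32_t uLeaf, uint32_t uSubLeaf)
{
    size_t lo = 0, hi = c;
    while (lo < hi)                     /* binary search on the leaf number */
    {
        size_t mid = lo + (hi - lo) / 2;
        if (pa[mid].uLeaf < uLeaf)      lo = mid + 1;
        else if (pa[mid].uLeaf > uLeaf) hi = mid;
        else
        {   /* leaf hit: scan neighbours for the matching sub-leaf */
            uSubLeaf &= pa[mid].fSubLeafMask;
            size_t i = mid;
            while (i > 0 && pa[i - 1].uLeaf == uLeaf)
                i--;                    /* rewind to the first sub-leaf */
            for (; i < c && pa[i].uLeaf == uLeaf; i++)
                if (pa[i].uSubLeaf == uSubLeaf)
                    return &pa[i];
            return NULL;                /* unknown sub-leaf */
        }
    }
    return NULL;                        /* unknown leaf */
}

int main(void)
{
    static const CpuIdLeaf s_a[] = {
        { 0, 0, 0,   0x16, 0x756e6547, 0x6c65746e, 0x49656e69 },
        { 4, 0, ~0u, 1, 0, 0, 0 },
        { 4, 1, ~0u, 2, 0, 0, 0 },
    };
    const CpuIdLeaf *p = findLeaf(s_a, 3, 4, 1);
    printf(p ? "eax=%#x\n" : "not found\n", p ? p->uEax : 0);
    return 0;
}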
diff --git a/src/VBox/VMM/VMMRC/CSAMRC.cpp b/src/VBox/VMM/VMMRC/CSAMRC.cpp
new file mode 100644
index 00000000..28ebf685
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/CSAMRC.cpp
@@ -0,0 +1,137 @@
+/* $Id: CSAMRC.cpp $ */
+/** @file
+ * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CSAM
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/csam.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/sup.h>
+#include <VBox/vmm/mm.h>
+#ifdef VBOX_WITH_REM
+# include <VBox/vmm/rem.h>
+#endif
+#include <VBox/param.h>
+#include <iprt/avl.h>
+#include "CSAMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/dbg.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/string.h>
+
+
+
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Handler callback for virtual access handler ranges. (CSAM self-modifying
+ * code monitor)}
+ *
+ * Important to realize that a physical page in a range can have aliases, and
+ * for ALL and WRITE handlers these will also trigger.
+ */
+DECLEXPORT(VBOXSTRICTRC) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ PPATMGCSTATE pPATMGCState;
+ bool fPatchCode = PATMIsPatchGCAddr(pVM, pRegFrame->eip);
+ RT_NOREF_PV(uErrorCode);
+ RT_NOREF_PV(pvUser);
+
+
+ Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
+
+#ifdef VBOX_WITH_REM
+ /* Flush the recompiler's translation block cache as the guest seems to be modifying instructions. */
+ REMFlushTBs(pVM);
+#endif
+
+ pPATMGCState = PATMGetGCState(pVM);
+ Assert(pPATMGCState);
+
+ Assert(pPATMGCState->fPIF || fPatchCode);
+ /* When patch code is executing instructions that must complete, then we must *never* interrupt it. */
+ if (!pPATMGCState->fPIF && fPatchCode)
+ {
+ Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", pRegFrame->eip));
+ /** @note There are cases when pages previously used for code are now used for stack; patch-generated code will fault (pushf).
+ * Just make the page r/w and continue.
+ */
+ /*
+ * Make this particular page R/W.
+ */
+ int rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
+ AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
+ ASMInvalidatePage((uintptr_t)pvFault);
+ return VINF_SUCCESS;
+ }
+
+ uint32_t cpl;
+
+ if (pRegFrame->eflags.Bits.u1VM)
+ cpl = 3;
+ else
+ cpl = (pRegFrame->ss.Sel & X86_SEL_RPL);
+
+ Log(("csamRCCodePageWriteHandler: code page write at %RGv original address %RGv (cpl=%d)\n", pvFault, (RTGCUINTPTR)pvRange + offRange, cpl));
+
+ /* If user code is modifying one of our monitored pages, then we can safely make it r/w as it's no longer being used for supervisor code. */
+ if (cpl != 3)
+ {
+ VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, pRegFrame, (RTRCPTR)((RTRCUINTPTR)pvRange + offRange),
+ 4 /** @todo */);
+ if (rcStrict == VINF_SUCCESS)
+ return VBOXSTRICTRC_TODO(rcStrict);
+ if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
+ {
+ STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+ Assert(rcStrict == VERR_PATCH_NOT_FOUND);
+ }
+
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
+
+ /* Note that pvFault might be a different address in case of aliases. So use pvRange + offset instead! */
+ pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages] = (RTRCPTR)((RTRCUINTPTR)pvRange + offRange);
+ pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)pvFault;
+ if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
+ return VINF_CSAM_PENDING_ACTION;
+
+ /*
+ * Make this particular page R/W. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
+ */
+ Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv\n", pvFault));
+ int rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
+ AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
+ ASMInvalidatePage((uintptr_t)pvFault);
+
+ STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
+ return VINF_SUCCESS;
+}
+
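The tail of csamRCCodePageWritePfHandler above batches dirty code pages in two fixed-size arrays and only forces a trip to ring-3 once the batch fills up. The sketch below models just that bookkeeping; the types, names, and stand-in status codes are invented for illustration and are not the VirtualBox definitions.

/* Minimal model of the CSAM dirty-page batching above. */
#include <stdint.h>
#include <stdio.h>

#define CSAM_MAX_DIRTY_PAGES 32
#define MY_VINF_SUCCESS 0
#define MY_VINF_CSAM_PENDING_ACTION 1   /* stand-in status codes */

typedef struct {
    uint32_t cDirtyPages;
    uint32_t pvDirtyBasePage[CSAM_MAX_DIRTY_PAGES];
    uint32_t pvDirtyFaultPage[CSAM_MAX_DIRTY_PAGES];
} CsamState;

static int recordDirtyPage(CsamState *p, uint32_t GCPtrBase, uint32_t GCPtrFault)
{
    p->pvDirtyBasePage[p->cDirtyPages]  = GCPtrBase;  /* pvRange + offRange */
    p->pvDirtyFaultPage[p->cDirtyPages] = GCPtrFault; /* may be an alias */
    if (++p->cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return MY_VINF_CSAM_PENDING_ACTION;           /* batch is full */
    return MY_VINF_SUCCESS; /* caller makes the page r/w and continues */
}

int main(void)
{
    CsamState St = { 0 };
    printf("rc=%d cDirtyPages=%u\n",
           recordDirtyPage(&St, 0x80001000u, 0x80001000u), St.cDirtyPages);
    return 0;
}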
diff --git a/src/VBox/VMM/VMMRC/EMRCA.asm b/src/VBox/VMM/VMMRC/EMRCA.asm
new file mode 100644
index 00000000..6007cd93
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/EMRCA.asm
@@ -0,0 +1,26 @@
+; $Id: EMRCA.asm $
+;; @file
+; EM Assembly Routines.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/err.mac"
+%include "iprt/x86.mac"
+
+BEGINCODE
+
diff --git a/src/VBox/VMM/VMMRC/IOMRC.cpp b/src/VBox/VMM/VMMRC/IOMRC.cpp
new file mode 100644
index 00000000..e16f6fa8
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/IOMRC.cpp
@@ -0,0 +1,247 @@
+/* $Id: IOMRC.cpp $ */
+/** @file
+ * IOM - Input / Output Monitor - Raw-Mode Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_IOM
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/trpm.h>
+#include "IOMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+
+
+/**
+ * Converts disassembler mode to IEM mode.
+ * @return IEM CPU mode.
+ * @param enmDisMode Disassembler CPU mode.
+ */
+DECLINLINE(IEMMODE) iomDisModeToIemMode(DISCPUMODE enmDisMode)
+{
+ switch (enmDisMode)
+ {
+ case DISCPUMODE_16BIT: return IEMMODE_16BIT;
+ case DISCPUMODE_32BIT: return IEMMODE_32BIT;
+ case DISCPUMODE_64BIT: return IEMMODE_64BIT;
+ default:
+ AssertFailed();
+ return IEMMODE_32BIT;
+ }
+}
+
+
+/**
+ * IN <AL|AX|EAX>, <DX|imm16>
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval VINF_SUCCESS Success.
+ * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
+ * status code must be passed on to EM.
+ * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
+ * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
+ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
+ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
+ * @param pCpu Disassembler CPU state.
+ */
+static VBOXSTRICTRC iomRCInterpretIN(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
+{
+ STAM_COUNTER_INC(&pVM->iom.s.StatInstIn); RT_NOREF_PV(pVM);
+ Assert(pCpu->Param2.fUse & (DISUSE_IMMEDIATE8 | DISUSE_REG_GEN16));
+ bool const fUseReg = RT_BOOL(pCpu->Param2.fUse & DISUSE_REG_GEN16);
+ uint16_t const u16Port = fUseReg ? pRegFrame->dx : (uint16_t)pCpu->Param2.uValue;
+
+ Assert(pCpu->Param1.fUse & (DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8));
+ uint8_t cbValue = pCpu->Param1.fUse & DISUSE_REG_GEN32 ? 4 : pCpu->Param1.fUse & DISUSE_REG_GEN16 ? 2 : 1;
+
+ return IEMExecDecodedIn(pVCpu, pCpu->cbInstr, u16Port, !fUseReg, cbValue);
+}
+
+
+/**
+ * OUT <DX|imm16>, <AL|AX|EAX>
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval VINF_SUCCESS Success.
+ * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
+ * status code must be passed on to EM.
+ * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
+ * @retval VINF_IOM_R3_IOPORT_COMMIT_WRITE Defer the write to ring-3. (R0/GC only)
+ * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
+ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
+ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
+ * @param pCpu Disassembler CPU state.
+ */
+static VBOXSTRICTRC iomRCInterpretOUT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
+{
+ STAM_COUNTER_INC(&pVM->iom.s.StatInstOut); RT_NOREF_PV(pVM);
+ Assert(pCpu->Param1.fUse & (DISUSE_IMMEDIATE8 | DISUSE_REG_GEN16));
+ bool const fUseReg = RT_BOOL(pCpu->Param1.fUse & DISUSE_REG_GEN16);
+ uint16_t const u16Port = fUseReg ? pRegFrame->dx : (uint16_t)pCpu->Param1.uValue;
+
+ Assert(pCpu->Param2.fUse & (DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8));
+ uint8_t const cbValue = pCpu->Param2.fUse & DISUSE_REG_GEN32 ? 4 : pCpu->Param2.fUse & DISUSE_REG_GEN16 ? 2 : 1;
+
+ return IEMExecDecodedOut(pVCpu, pCpu->cbInstr, u16Port, !fUseReg, cbValue);
+}
+
+
+/**
+ * [REP*] INSB/INSW/INSD
+ * ES:EDI,DX[,ECX]
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval VINF_SUCCESS Success.
+ * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
+ * status code must be passed on to EM.
+ * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
+ * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
+ * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
+ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
+ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pCpu Disassembler CPU state.
+ */
+static VBOXSTRICTRC iomRCInterpretINS(PVMCPU pVCpu, PDISCPUSTATE pCpu)
+{
+ uint8_t cbValue = pCpu->pCurInstr->uOpcode == OP_INSB ? 1
+ : pCpu->uOpMode == DISCPUMODE_16BIT ? 2 : 4; /* dword in both 32 & 64 bits mode */
+ return IEMExecStringIoRead(pVCpu,
+ cbValue,
+ iomDisModeToIemMode((DISCPUMODE)pCpu->uCpuMode),
+ RT_BOOL(pCpu->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP)),
+ pCpu->cbInstr,
+ false /*fIoChecked*/);
+}
+
+
+/**
+ * [REP*] OUTSB/OUTSW/OUTSD
+ * DS:ESI,DX[,ECX]
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval VINF_SUCCESS Success.
+ * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
+ * status code must be passed on to EM.
+ * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
+ * @retval VINF_IOM_R3_IOPORT_COMMIT_WRITE Defer the write to ring-3. (R0/GC only)
+ * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
+ * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
+ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
+ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pCpu Disassembler CPU state.
+ */
+static VBOXSTRICTRC iomRCInterpretOUTS(PVMCPU pVCpu, PDISCPUSTATE pCpu)
+{
+ uint8_t cbValue = pCpu->pCurInstr->uOpcode == OP_OUTSB ? 1
+ : pCpu->uOpMode == DISCPUMODE_16BIT ? 2 : 4; /* dword in both 32 & 64 bits mode */
+ return IEMExecStringIoWrite(pVCpu,
+ cbValue,
+ iomDisModeToIemMode((DISCPUMODE)pCpu->uCpuMode),
+ RT_BOOL(pCpu->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP)),
+ pCpu->cbInstr,
+ pCpu->fPrefix & DISPREFIX_SEG ? pCpu->idxSegPrefix : X86_SREG_DS,
+ false /*fIoChecked*/);
+}
+
+
+
+/**
+ * Attempts to service an IN/OUT instruction.
+ *
+ * The \#GP trap handler in RC will call this function if the opcode causing
+ * the trap is a in or out type instruction. (Call it indirectly via EM that
+ * is.)
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval VINF_SUCCESS Success.
+ * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
+ * status code must be passed on to EM.
+ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
+ * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
+ * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3.
+ * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
+ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
+ * @param pCpu Disassembler CPU state.
+ */
+VMMRCDECL(VBOXSTRICTRC) IOMRCIOPortHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
+{
+ switch (pCpu->pCurInstr->uOpcode)
+ {
+ case OP_IN:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ));
+ return iomRCInterpretIN(pVM, pVCpu, pRegFrame, pCpu);
+
+ case OP_OUT:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE));
+ return iomRCInterpretOUT(pVM, pVCpu, pRegFrame, pCpu);
+
+ case OP_INSB:
+ case OP_INSWD:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ));
+ return iomRCInterpretINS(pVCpu, pCpu);
+
+ case OP_OUTSB:
+ case OP_OUTSWD:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE));
+ return iomRCInterpretOUTS(pVCpu, pCpu);
+
+ /*
+ * The opcode wasn't known to us; freak out.
+ */
+ default:
+ AssertMsgFailed(("Unknown I/O port access opcode %d.\n", pCpu->pCurInstr->uOpcode));
+ return VERR_IOM_IOPORT_UNKNOWN_OPCODE;
+ }
+}
+
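iomRCInterpretIN and iomRCInterpretOUT above derive the port number (DX versus an immediate byte) and the access width (1, 2, or 4 bytes) from the disassembler's operand-use flags before handing the access to IEM. Below is a simplified C sketch of that decode; the flag values are stand-ins, not the real DISUSE_* constants, and the function name is invented.

/* Sketch of the port/width decode performed by the IN/OUT interpreters. */
#include <stdint.h>
#include <stdio.h>

#define USE_IMMEDIATE8 0x01u   /* stand-in operand-use flags */
#define USE_REG_GEN8   0x02u
#define USE_REG_GEN16  0x04u
#define USE_REG_GEN32  0x08u

static void decodePortAccess(uint32_t fUsePort, uint32_t fUseData,
                             uint64_t uImm, uint16_t dx,
                             uint16_t *puPort, uint8_t *pcbValue)
{
    /* A 16-bit register operand means the DX form; otherwise imm8. */
    int fUseReg = (fUsePort & USE_REG_GEN16) != 0;
    *puPort = fUseReg ? dx : (uint16_t)uImm;
    /* Width follows the data operand's register size: EAX/AX/AL. */
    *pcbValue = (fUseData & USE_REG_GEN32) ? 4
              : (fUseData & USE_REG_GEN16) ? 2 : 1;
}

int main(void)
{
    uint16_t uPort; uint8_t cb;
    decodePortAccess(USE_IMMEDIATE8, USE_REG_GEN8, 0x60, 0x3f8, &uPort, &cb);
    printf("IN AL, 0x60 -> port=%#x cb=%u\n", uPort, cb);
    return 0;
}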
diff --git a/src/VBox/VMM/VMMRC/MMRamRC.cpp b/src/VBox/VMM/VMMRC/MMRamRC.cpp
new file mode 100644
index 00000000..cf733998
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/MMRamRC.cpp
@@ -0,0 +1,198 @@
+/* $Id: MMRamRC.cpp $ */
+/** @file
+ * MMRamGC - Guest Context Ram access Routines, pair for MMRamGCA.asm.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_MM
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/em.h>
+#include "MMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/pgm.h>
+
+#include <iprt/assert.h>
+#include <VBox/param.h>
+#include <VBox/err.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(int) mmGCRamTrap0eHandler(PVM pVM, PCPUMCTXCORE pRegFrame);
+
+DECLASM(void) MMGCRamReadNoTrapHandler_EndProc(void);
+DECLASM(void) MMGCRamWriteNoTrapHandler_EndProc(void);
+DECLASM(void) MMGCRamRead_Error(void);
+DECLASM(void) MMGCRamWrite_Error(void);
+
+
+/**
+ * Installs the MMGCRam hypervisor page fault handler needed for the normal
+ * operation of the MMGCRamRead and MMGCRamWrite calls.
+ * The handler is automatically removed when a page fault occurs;
+ * otherwise it must be removed by an MMGCRamDeregisterTrapHandler call.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMRCDECL(void) MMGCRamRegisterTrapHandler(PVM pVM)
+{
+ TRPMGCSetTempHandler(pVM, 0xe, mmGCRamTrap0eHandler);
+}
+
+
+/**
+ * Removes the MMGCRam hypervisor page fault handler.
+ * See the description of the MMGCRamRegisterTrapHandler call.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMRCDECL(void) MMGCRamDeregisterTrapHandler(PVM pVM)
+{
+ TRPMGCSetTempHandler(pVM, 0xe, NULL);
+}
+
+
+/**
+ * Read data in guest context with \#PF control.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDst Where to store the read data.
+ * @param pSrc Pointer to the data to read.
+ * @param cb Size of data to read.
+ */
+VMMRCDECL(int) MMGCRamRead(PVM pVM, void *pDst, void *pSrc, size_t cb)
+{
+ int rc;
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+
+ /*
+ * Save the current trap info, because it will get trashed if our access failed.
+ */
+ TRPMSaveTrap(pVCpu);
+
+ /*
+ * Need to serve the request in a silly loop because the assembly code wasn't
+ * written for arbitrary sizes, only 1/2/4/8.
+ */
+ MMGCRamRegisterTrapHandler(pVM);
+ for (;;)
+ {
+ size_t cbThisRead;
+ switch (cb)
+ {
+ case 1: cbThisRead = 1; break;
+ case 2: cbThisRead = 2; break;
+ case 3: cbThisRead = 2; break;
+ case 4: cbThisRead = 4; break;
+ case 5: cbThisRead = 4; break;
+ case 6: cbThisRead = 4; break;
+ case 7: cbThisRead = 4; break;
+ default:
+ case 8: cbThisRead = 8; break;
+ }
+ rc = MMGCRamReadNoTrapHandler(pDst, pSrc, cbThisRead);
+ if (RT_FAILURE(rc) || cbThisRead == cb)
+ break;
+
+ /* advance */
+ cb -= cbThisRead;
+ pDst = (uint8_t *)pDst + cbThisRead;
+ pSrc = (uint8_t *)pSrc + cbThisRead;
+ }
+ MMGCRamDeregisterTrapHandler(pVM);
+
+ if (RT_FAILURE(rc))
+ TRPMRestoreTrap(pVCpu);
+
+ return rc;
+}
+
+
+/**
+ * Write data in guest context with \#PF control.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pDst Where to write the data.
+ * @param pSrc Pointer to the data to write.
+ * @param cb Size of data to write, only 1/2/4 is valid.
+ *
+ * @deprecated Don't use this as it doesn't check the page state.
+ */
+VMMRCDECL(int) MMGCRamWrite(PVM pVM, void *pDst, void *pSrc, size_t cb)
+{
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ TRPMSaveTrap(pVCpu); /* save the current trap info, because it will get trashed if our access failed. */
+
+ MMGCRamRegisterTrapHandler(pVM);
+ int rc = MMGCRamWriteNoTrapHandler(pDst, pSrc, cb);
+ MMGCRamDeregisterTrapHandler(pVM);
+ if (RT_FAILURE(rc))
+ TRPMRestoreTrap(pVCpu);
+
+ /*
+ * And mark the relevant guest page as accessed and dirty.
+ */
+ PGMGstModifyPage(VMMGetCpu0(pVM), (RTGCPTR)(RTRCUINTPTR)pDst, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
+
+ return rc;
+}
+
+
+/**
+ * \#PF Handler for servicing traps inside MMGCRamReadNoTrapHandler and MMGCRamWriteNoTrapHandler functions.
+ *
+ * @internal
+ */
+DECLCALLBACK(int) mmGCRamTrap0eHandler(PVM pVM, PCPUMCTXCORE pRegFrame)
+{
+ /*
+ * Page fault inside MMGCRamRead()? Resume at *_Error.
+ */
+ if ( (uintptr_t)&MMGCRamReadNoTrapHandler < (uintptr_t)pRegFrame->eip
+ && (uintptr_t)pRegFrame->eip < (uintptr_t)&MMGCRamReadNoTrapHandler_EndProc)
+ {
+ /* Must be a read violation. */
+ AssertReturn(!(TRPMGetErrorCode(VMMGetCpu0(pVM)) & X86_TRAP_PF_RW), VERR_MM_BAD_TRAP_TYPE_IPE);
+ pRegFrame->eip = (uintptr_t)&MMGCRamRead_Error;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Page fault inside MMGCRamWrite()? Resume at _Error.
+ */
+ if ( (uintptr_t)&MMGCRamWriteNoTrapHandler < (uintptr_t)pRegFrame->eip
+ && (uintptr_t)pRegFrame->eip < (uintptr_t)&MMGCRamWriteNoTrapHandler_EndProc)
+ {
+ /* Must be a write violation. */
+ AssertReturn(TRPMGetErrorCode(VMMGetCpu0(pVM)) & X86_TRAP_PF_RW, VERR_MM_BAD_TRAP_TYPE_IPE);
+ pRegFrame->eip = (uintptr_t)&MMGCRamWrite_Error;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * #PF is not handled - cause guru meditation.
+ */
+ return VERR_INTERNAL_ERROR;
+}
+
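MMGCRamRead above serves arbitrary sizes by repeatedly reading the largest 1/2/4/8-byte chunk that fits the remaining byte count, because the assembly worker only handles those power-of-two sizes. The following standalone sketch reproduces the same chunking; memcpy merely stands in for the faultable MMGCRamReadNoTrapHandler, and all names are illustrative.

/* Chunked-read model matching the switch in MMGCRamRead above. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int readChunk(void *pDst, const void *pSrc, size_t cb)
{
    memcpy(pDst, pSrc, cb);   /* the real worker may fault and return an error */
    return 0;
}

static int chunkedRead(void *pDst, const void *pSrc, size_t cb)
{
    for (;;)
    {
        /* Largest supported chunk <= cb: 8, 4, 2, or 1 bytes,
         * exactly like the switch statement in the C original. */
        size_t cbThis = cb >= 8 ? 8 : cb >= 4 ? 4 : cb >= 2 ? 2 : 1;
        int rc = readChunk(pDst, pSrc, cbThis);
        if (rc != 0 || cbThis == cb)
            return rc;
        cb  -= cbThis;        /* advance to the next chunk */
        pDst = (uint8_t *)pDst + cbThis;
        pSrc = (const uint8_t *)pSrc + cbThis;
    }
}

int main(void)
{
    uint8_t abSrc[7] = {1, 2, 3, 4, 5, 6, 7}, abDst[7] = {0};
    printf("rc=%d last=%u\n", chunkedRead(abDst, abSrc, sizeof(abSrc)), abDst[6]);
    return 0;
}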
diff --git a/src/VBox/VMM/VMMRC/MMRamRCA.asm b/src/VBox/VMM/VMMRC/MMRamRCA.asm
new file mode 100644
index 00000000..c9e8446c
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/MMRamRCA.asm
@@ -0,0 +1,157 @@
+; $Id: MMRamRCA.asm $
+;; @file
+; MMRamGCA - Guest Context Ram access Assembly Routines.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/err.mac"
+%include "iprt/err.mac"
+%include "iprt/x86.mac"
+
+
+BEGINCODE
+
+
+;;
+; Read data in guest context, CDECL calling conv.
+; VMMRCDECL(int) MMGCRamRead(void *pDst, void *pSrc, size_t cb);
+; MMRamGC page fault handler must be installed prior to this call for safe operation.
+;
+; @returns eax=0 if the data was read; any other code means invalid access (a #PF was generated).
+; @param [esp + 04h] Param 1 - Pointer where to store the result data (pDst).
+; @param [esp + 08h] Param 2 - Pointer to the data to read (pSrc).
+; @param [esp + 0ch] Param 3 - Size of data to read; only 1/2/4/8 are valid.
+; @uses eax, ecx, edx
+;
+; @remark Data is saved to the destination (Param 1) even if a read error occurred!
+;
+align 16
+BEGINPROC MMGCRamReadNoTrapHandler
+ mov eax, [esp + 0ch] ; eax = size of data to read
+ cmp eax, byte 8 ; yes, it's slow, validate input
+ ja ramread_InvalidSize
+ mov edx, [esp + 04h] ; edx = result address
+ mov ecx, [esp + 08h] ; ecx = data address
+ jmp [ramread_table + eax*4]
+
+ramread_byte:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov cl, [ecx] ; read data
+ mov [edx], cl ; save data
+ ret
+
+ramread_word:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov cx, [ecx] ; read data
+ mov [edx], cx ; save data
+ ret
+
+ramread_dword:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov ecx, [ecx] ; read data
+ mov [edx], ecx ; save data
+ ret
+
+ramread_qword:
+ mov eax, [ecx] ; read data
+ mov [edx], eax ; save data
+ mov eax, [ecx+4] ; read data
+ mov [edx+4], eax ; save data
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ ret
+
+; Read error - we will be here after our page fault handler.
+GLOBALNAME MMGCRamRead_Error
+ mov eax, VERR_ACCESS_DENIED
+ ret
+
+; Invalid data size
+ramread_InvalidSize:
+ mov eax, VERR_INVALID_PARAMETER
+ ret
+
+; Jump table
+ramread_table:
+ DD ramread_InvalidSize
+ DD ramread_byte
+ DD ramread_word
+ DD ramread_InvalidSize
+ DD ramread_dword
+ DD ramread_InvalidSize
+ DD ramread_InvalidSize
+ DD ramread_InvalidSize
+ DD ramread_qword
+ENDPROC MMGCRamReadNoTrapHandler
+
+
+;;
+; Write data in guest context, CDECL calling conv.
+; VMMRCDECL(int) MMGCRamWrite(void *pDst, void *pSrc, size_t cb);
+;
+; @returns eax=0 if the data was written; any other code means invalid access (a #PF was generated).
+; @param [esp + 04h] Param 1 - Pointer where to write the data (pDst).
+; @param [esp + 08h] Param 2 - Pointer to the data to write (pSrc).
+; @param [esp + 0ch] Param 3 - Size of data to write; only 1/2/4 are valid.
+; @uses eax, ecx, edx
+;
+align 16
+BEGINPROC MMGCRamWriteNoTrapHandler
+ mov eax, [esp + 0ch] ; eax = size of data to write
+ cmp eax, byte 4 ; yes, it's slow, validate input
+ ja ramwrite_InvalidSize
+ mov edx, [esp + 04h] ; edx = write address
+ mov ecx, [esp + 08h] ; ecx = data address
+ jmp [ramwrite_table + eax*4]
+
+ramwrite_byte:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov cl, [ecx] ; read data
+ mov [edx], cl ; write data
+ ret
+
+ramwrite_word:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov cx, [ecx] ; read data
+ mov [edx], cx ; write data
+ ret
+
+ramwrite_dword:
+ xor eax, eax ; rc = VINF_SUCCESS by default
+ mov ecx, [ecx] ; read data
+ mov [edx], ecx ; write data
+ ret
+
+; Write error - we will be here after our page fault handler.
+GLOBALNAME MMGCRamWrite_Error
+ mov eax, VERR_ACCESS_DENIED
+ ret
+
+; Invalid data size
+ramwrite_InvalidSize:
+ mov eax, VERR_INVALID_PARAMETER
+ ret
+
+; Jump table
+ramwrite_table:
+ DD ramwrite_InvalidSize
+ DD ramwrite_byte
+ DD ramwrite_word
+ DD ramwrite_InvalidSize
+ DD ramwrite_dword
+ENDPROC MMGCRamWriteNoTrapHandler
+
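+;;
+; Illustrative only, not part of the build: the cdecl stack layout the
+; routines above rely on, demonstrated by a trivial (hypothetical) helper
+; that just returns its third argument.
+;
+align 16
+BEGINPROC MMGCRamSketchReturnCb
+    mov eax, [esp + 0ch] ; eax = Param 3 (cb); [esp] holds the return address
+    ret
+ENDPROC MMGCRamSketchReturnCb
+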
diff --git a/src/VBox/VMM/VMMRC/Makefile.kup b/src/VBox/VMM/VMMRC/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/Makefile.kup
diff --git a/src/VBox/VMM/VMMRC/PATMRC.cpp b/src/VBox/VMM/VMMRC/PATMRC.cpp
new file mode 100644
index 00000000..558d364f
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PATMRC.cpp
@@ -0,0 +1,545 @@
+/* $Id: PATMRC.cpp $ */
+/** @file
+ * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PATM
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include "PATMInternal.h"
+#include "PATMA.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/dbg.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/string.h>
+
+
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * PATM all access handler callback.}
+ *
+ * @remarks pvUser is NULL.
+ */
+DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ NOREF(pVCpu); NOREF(uErrorCode); NOREF(pCtxCore); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); RT_NOREF_PV(pvUser);
+
+ pVM->patm.s.pvFaultMonitor = (RTRCPTR)(pvRange + offRange);
+ return VINF_PATM_CHECK_PATCH_PAGE;
+}
+
+
+/**
+ * Checks if the write is located on a page which was patched before.
+ * (If so, then we are not allowed to turn on r/w.)
+ *
+ * @returns Strict VBox status code.
+ * @retval VINF_SUCCESS if access interpreted (@a pCtxCore != NULL).
+ * @retval VINF_PGM_HANDLER_DO_DEFAULT (@a pCtxCore == NULL).
+ * @retval VINF_EM_RAW_EMULATE_INSTR on needing to go to ring-3 to do this.
+ * @retval VERR_PATCH_NOT_FOUND if no patch was found.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCtxCore CPU context if \#PF, NULL if other write.
+ * @param GCPtr GC pointer to write address.
+ * @param cbWrite Number of bytes to write.
+ *
+ */
+VMMRC_INT_DECL(VBOXSTRICTRC) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR GCPtr, uint32_t cbWrite)
+{
+ Assert(cbWrite > 0);
+
+ /* Quick boundary check */
+ if ( PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
+ || PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest))
+ return VERR_PATCH_NOT_FOUND;
+
+ STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);
+
+ /*
+ * Lookup the patch page record for the write.
+ */
+ RTRCUINTPTR pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
+ RTRCUINTPTR pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
+
+ PPATMPATCHPAGE pPatchPage;
+ pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageStart);
+ if ( !pPatchPage
+ && pWritePageStart != pWritePageEnd)
+ pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageEnd);
+ if (pPatchPage)
+ {
+ Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n",
+ pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
+ if ( (RTRCUINTPTR)pPatchPage->pLowestAddrGC > (RTRCUINTPTR)GCPtr + cbWrite - 1U
+ || (RTRCUINTPTR)pPatchPage->pHighestAddrGC < (RTRCUINTPTR)GCPtr)
+ {
+ /* This part of the page was not patched; try to emulate the instruction / tell the caller to do so. */
+ if (!pCtxCore)
+ {
+ LogFlow(("PATMHandleWriteToPatchPage: Allow writing %RRv LB %#x\n", GCPtr, cbWrite));
+ STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
+ STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
+ return VINF_PGM_HANDLER_DO_DEFAULT;
+ }
+ LogFlow(("PATMHandleWriteToPatchPage: Interpret %#x accessing %RRv\n", pCtxCore->eip, GCPtr));
+ int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(VMMGetCpu0(pVM), pCtxCore, (RTGCPTR)(RTRCUINTPTR)GCPtr));
+ if (rc == VINF_SUCCESS)
+ {
+ STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
+ STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
+ return VINF_SUCCESS;
+ }
+ STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
+ }
+ R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);
+
+ /* Increase the invalid write counter for each patch that's registered for that page. */
+        for (uint32_t i = 0; i < pPatchPage->cCount; i++)
+ {
+ PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);
+
+ pPatch->cInvalidWrites++;
+ }
+
+ STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+
+ STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
+ return VERR_PATCH_NOT_FOUND;
+}
+
+
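+/*
+ * The range-overlap test from PATMRCHandleWriteToPatchPage above, reduced
+ * to a self-contained predicate (sketch only, names are hypothetical):
+ * true when the write [uWrite, uWrite + cbWrite) intersects the patched
+ * range [uLow, uHigh].
+ */
+DECLINLINE(bool) patmRCSketchWriteHitsPatch(RTRCUINTPTR uWrite, uint32_t cbWrite, RTRCUINTPTR uLow, RTRCUINTPTR uHigh)
+{
+    return uWrite + cbWrite - 1U >= uLow
+        && uWrite <= uHigh;
+}
+
+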
+/**
+ * Checks if the illegal instruction was caused by a patched instruction.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCtxCore The relevant core context.
+ */
+VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
+{
+ PPATMPATCHREC pRec;
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ int rc;
+
+ /* Very important check -> otherwise we have a security leak. */
+ AssertReturn(!pCtxCore->eflags.Bits.u1VM && (pCtxCore->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U),
+ VERR_ACCESS_DENIED);
+ Assert(PATMIsPatchGCAddr(pVM, pCtxCore->eip));
+
+ /* OP_ILLUD2 in PATM generated code? */
+ if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
+ {
+ LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pCtxCore->eip));
+
+ /* Private PATM interface (@todo hack due to lack of anything generic). */
+ /* Parameters:
+ * eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
+ * ecx = PATM_ACTION_MAGIC
+ */
+ if ( (pCtxCore->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
+ && pCtxCore->ecx == PATM_ACTION_MAGIC
+ )
+ {
+ CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
+
+ switch (pCtxCore->eax)
+ {
+ case PATM_ACTION_LOOKUP_ADDRESS:
+ {
+ /* Parameters:
+ * edx = GC address to find
+ * edi = PATCHJUMPTABLE ptr
+ */
+ AssertMsg(!pCtxCore->edi || PATMIsPatchGCAddr(pVM, pCtxCore->edi), ("edi = %x\n", pCtxCore->edi));
+
+ Log(("PATMRC: lookup %x jump table=%x\n", pCtxCore->edx, pCtxCore->edi));
+
+ pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pCtxCore->edx);
+ if (pRec)
+ {
+ if (pRec->patch.uState == PATCH_ENABLED)
+ {
+ RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset; /* make it relative */
+ rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pCtxCore->edi, (RTRCPTR)pCtxCore->edx, pRelAddr);
+ if (rc == VINF_SUCCESS)
+ {
+ Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
+ pRec->patch.flags |= PATMFL_CODE_REFERENCED;
+
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ pCtxCore->eax = pRelAddr;
+ STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
+ return VINF_SUCCESS;
+ }
+ AssertFailed();
+ return rc;
+ }
+ else
+ {
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ pCtxCore->eax = 0; /* make it fault */
+ STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ /* Check first before trying to generate a function/trampoline patch. */
+ if (pVM->patm.s.fOutOfMemory)
+ {
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ pCtxCore->eax = 0; /* make it fault */
+ STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
+ return VINF_SUCCESS;
+ }
+ STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
+ return VINF_PATM_DUPLICATE_FUNCTION;
+ }
+ }
+
+ case PATM_ACTION_DISPATCH_PENDING_IRQ:
+ /* Parameters:
+ * edi = GC address to jump to
+ */
+ Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));
+
+ /* Change EIP to the guest address the patch would normally jump to after setting IF. */
+ pCtxCore->eip = pCtxCore->edi;
+
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);
+
+ pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
+ pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
+ pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
+
+ pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;
+
+ /* We are no longer executing PATM code; set PIF again. */
+ pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
+
+ STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);
+
+ /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
+ /* Parameters:
+ * edi = GC address to jump to
+ */
+ Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);
+
+ /* Change EIP to the guest address of the iret. */
+ pCtxCore->eip = pCtxCore->edi;
+
+ pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
+ pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
+ pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
+ pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;
+
+ /* We are no longer executing PATM code; set PIF again. */
+ pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
+
+ return VINF_PATM_PENDING_IRQ_AFTER_IRET;
+
+ case PATM_ACTION_DO_V86_IRET:
+ {
+ Log(("PATMRC: Do iret to V86 code; eip=%x\n", pCtxCore->eip));
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
+ Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);
+
+ pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
+ pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
+ pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;
+
+ rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pCtxCore);
+ if (RT_SUCCESS(rc))
+ {
+ STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);
+
+ /* We are no longer executing PATM code; set PIF again. */
+ pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ CPUMGCCallV86Code(pCtxCore);
+ /* does not return */
+ }
+ else
+ STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
+ return rc;
+ }
+
+#ifdef DEBUG
+ case PATM_ACTION_LOG_CLI:
+ Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_STI:
+ Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_POPF_IF1:
+ Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_POPF_IF0:
+ Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_PUSHF:
+ Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_IF1:
+ Log(("PATMRC: IF=1 escape from %x\n", pCtxCore->eip));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_IRET:
+ {
+ char *pIretFrame = (char *)pCtxCore->edx;
+ uint32_t eip, selCS, uEFlags;
+
+ rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
+ rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
+ rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
+ if (rc == VINF_SUCCESS)
+ {
+ if ( (uEFlags & X86_EFL_VM)
+ || (selCS & X86_SEL_RPL) == 3)
+ {
+ uint32_t selSS, esp;
+
+ rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
+ rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);
+
+ if (uEFlags & X86_EFL_VM)
+ {
+ uint32_t selDS, selES, selFS, selGS;
+ rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
+ rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
+ rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
+ rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
+ if (rc == VINF_SUCCESS)
+ {
+ Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
+ Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
+ }
+ }
+ else
+ Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
+ }
+ else
+ Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
+ }
+ Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pCtxCore->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+ }
+
+ case PATM_ACTION_LOG_GATE_ENTRY:
+ {
+ char *pIretFrame = (char *)pCtxCore->edx;
+ uint32_t eip, selCS, uEFlags;
+
+ rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
+ rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
+ rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
+ if (rc == VINF_SUCCESS)
+ {
+ if ( (uEFlags & X86_EFL_VM)
+ || (selCS & X86_SEL_RPL) == 3)
+ {
+ uint32_t selSS, esp;
+
+ rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
+ rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);
+
+ if (uEFlags & X86_EFL_VM)
+ {
+ uint32_t selDS, selES, selFS, selGS;
+ rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
+ rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
+ rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
+ rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
+ if (rc == VINF_SUCCESS)
+ {
+ Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
+ Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
+ }
+ }
+ else
+ Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
+ }
+ else
+ Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
+ }
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+ }
+
+ case PATM_ACTION_LOG_RET:
+ Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pCtxCore->eip, pCtxCore->edx, pCtxCore->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+
+ case PATM_ACTION_LOG_CALL:
+ Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pCtxCore->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
+ pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
+ return VINF_SUCCESS;
+#endif
+ default:
+ AssertFailed();
+ break;
+ }
+ }
+ else
+ AssertFailed();
+ CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
+ }
+ AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
+ return VINF_EM_RAW_EMULATE_INSTR;
+}
+
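+/*
+ * The pending-action handshake checked by PATMRCHandleIllegalInstrTrap
+ * above, as a standalone predicate (sketch only): patch code loads the
+ * action into eax and the magic cookie into ecx before executing the
+ * illegal instruction.
+ */
+DECLINLINE(bool) patmRCSketchIsPendingAction(uint32_t uPendingAction, uint32_t uEax, uint32_t uEcx)
+{
+    return (uEax & uPendingAction) != 0
+        && uEcx == PATM_ACTION_MAGIC;
+}
+
+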
+/**
+ * Checks if the int 3 was caused by a patched instruction.
+ *
+ * @returns Strict VBox status, includes all statuses that
+ *          IEMExecOneBypassWithPrefetchedByPC can return.
+ * @retval VINF_SUCCESS
+ * @retval VINF_PATM_PATCH_INT3
+ * @retval VINF_EM_RAW_EMULATE_INSTR
+ *
+ * @param pVM The cross context VM structure.
+ * @param pCtxCore The relevant core context.
+ */
+VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
+{
+ PPATMPATCHREC pRec;
+
+ AssertReturn(!pCtxCore->eflags.Bits.u1VM
+ && ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
+ || (EMIsRawRing1Enabled(pVM) && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);
+
+ /* Int 3 in PATM generated code? (most common case) */
+ if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
+ {
+ /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
+ pCtxCore->eip--;
+ return VINF_PATM_PATCH_INT3;
+ }
+
+ /** @todo could use simple caching here to speed things up. */
+ pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pCtxCore->eip - 1)); /* eip is pointing to the instruction *after* 'int 3' already */
+ if (pRec && pRec->patch.uState == PATCH_ENABLED)
+ {
+ if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
+ {
+ Assert(pRec->patch.opcode == OP_CLI);
+ /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
+ pCtxCore->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
+ STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
+ return VINF_SUCCESS;
+ }
+ if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
+ {
+ /* eip is pointing to the instruction *after* 'int 3' already */
+ pCtxCore->eip = pCtxCore->eip - 1;
+
+ PATM_STAT_RUN_INC(&pRec->patch);
+
+ Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pCtxCore->eip));
+
+ switch(pRec->patch.opcode)
+ {
+ case OP_CPUID:
+ case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+ case OP_SMSW:
+ case OP_MOV: /* mov xx, CS */
+#endif
+ break;
+
+ case OP_STR:
+ case OP_SGDT:
+ case OP_SLDT:
+ case OP_SIDT:
+ case OP_LSL:
+ case OP_LAR:
+#ifndef VBOX_WITH_RAW_RING1
+ case OP_SMSW:
+#endif
+ case OP_VERW:
+ case OP_VERR:
+ default:
+ PATM_STAT_FAULT_INC(&pRec->patch);
+ pRec->patch.cTraps++;
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
+ if (enmCpuMode != DISCPUMODE_32BIT)
+ {
+ AssertFailed();
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+
+ VBOXSTRICTRC rcStrict;
+ rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pCtxCore, pCtxCore->rip,
+ pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
+ if (RT_SUCCESS(rcStrict))
+ {
+ if (rcStrict != VINF_SUCCESS)
+ Log(("PATMRCHandleInt3PatchTrap: returns %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
+ return VBOXSTRICTRC_TODO(rcStrict);
+ }
+
+ Log(("IEMExecOneBypassWithPrefetchedByPC failed with %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
+ PATM_STAT_FAULT_INC(&pRec->patch);
+ pRec->patch.cTraps++;
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+ }
+ return VERR_PATCH_NOT_FOUND;
+}
+
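+
+/*
+ * Sketch of the int 3 reporting quirk relied on above (hypothetical
+ * helper): the CPU pushes the EIP of the instruction *after* the
+ * single-byte int 3, so patch records are looked up at eip - 1.
+ */
+DECLINLINE(uint32_t) patmRCSketchInt3Key(uint32_t uEipAfterInt3)
+{
+    return uEipAfterInt3 - 1; /* single-byte 0xCC assumed */
+}
+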
diff --git a/src/VBox/VMM/VMMRC/PDMRCDevice.cpp b/src/VBox/VMM/VMMRC/PDMRCDevice.cpp
new file mode 100644
index 00000000..c0a29e3d
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PDMRCDevice.cpp
@@ -0,0 +1,811 @@
+/* $Id: PDMRCDevice.cpp $ */
+/** @file
+ * PDM - Pluggable Device and Driver Manager, RC Device parts.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_DEVICE
+#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/apic.h>
+
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+#include "dtrace/VBoxVMM.h"
+#include "PDMInline.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+extern DECLEXPORT(const PDMDEVHLPRC) g_pdmRCDevHlp;
+extern DECLEXPORT(const PDMPICHLPRC) g_pdmRCPicHlp;
+extern DECLEXPORT(const PDMIOAPICHLPRC) g_pdmRCIoApicHlp;
+extern DECLEXPORT(const PDMPCIHLPRC) g_pdmRCPciHlp;
+extern DECLEXPORT(const PDMHPETHLPRC) g_pdmRCHpetHlp;
+extern DECLEXPORT(const PDMDRVHLPRC) g_pdmRCDrvHlp;
+/** @todo missing PDMPCIRAWHLPRC */
+RT_C_DECLS_END
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static bool pdmRCIsaSetIrq(PVM pVM, int iIrq, int iLevel, uint32_t uTagSrc);
+
+
+/** @name Raw-Mode Context Device Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPCIPhysRead} */
+static DECLCALLBACK(int) pdmRCDevHlp_PCIPhysRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys,
+ void *pvBuf, size_t cbRead)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->Internal.s.pHeadPciDevRC;
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic read helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+ { /* likely */ }
+ else
+ {
+ Log(("pdmRCDevHlp_PCIPhysRead: caller=%p/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbRead=%#zx\n",
+ pDevIns, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbRead));
+ memset(pvBuf, 0xff, cbRead);
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+ return pDevIns->pHlpRC->pfnPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPCIPhysWrite} */
+static DECLCALLBACK(int) pdmRCDevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys,
+ const void *pvBuf, size_t cbWrite)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->Internal.s.pHeadPciDevRC;
+ AssertReturn(pPciDev, VERR_PDM_NOT_PCI_DEVICE);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+     * Just check the busmaster setting here and forward the request to the generic write helper.
+ */
+ if (PCIDevIsBusmaster(pPciDev))
+    { /* likely */ }
+ else
+ {
+ Log(("pdmRCDevHlp_PCIPhysWrite: caller=%p/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n",
+ pDevIns, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+ return pDevIns->pHlpRC->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPCISetIrq} */
+static DECLCALLBACK(void) pdmRCDevHlp_PCISetIrq(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ if (!pPciDev) /* NULL is an alias for the default PCI device. */
+ pPciDev = pDevIns->Internal.s.pHeadPciDevRC;
+ AssertReturnVoid(pPciDev);
+ LogFlow(("pdmRCDevHlp_PCISetIrq: caller=%p/%d: pPciDev=%p:{%#x} iIrq=%d iLevel=%d\n",
+ pDevIns, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iIrq, iLevel));
+
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+ PPDMPCIBUS pPciBus = pPciDev->Int.s.pPdmBusRC;
+
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ if ( pPciDev
+ && pPciBus
+ && pPciBus->pDevInsRC)
+ {
+ pPciBus->pfnSetIrqRC(pPciBus->pDevInsRC, pPciDev, iIrq, iLevel, uTagSrc);
+
+ pdmUnlock(pVM);
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ {
+ pdmUnlock(pVM);
+
+ /* queue for ring-3 execution. */
+ PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueRC);
+ AssertReturnVoid(pTask);
+
+ pTask->enmOp = PDMDEVHLPTASKOP_PCI_SET_IRQ;
+ pTask->pDevInsR3 = PDMDEVINS_2_R3PTR(pDevIns);
+ pTask->u.PciSetIRQ.iIrq = iIrq;
+ pTask->u.PciSetIRQ.iLevel = iLevel;
+ pTask->u.PciSetIRQ.uTagSrc = uTagSrc;
+ pTask->u.PciSetIRQ.pPciDevR3 = MMHyperRCToR3(pVM, pPciDev);
+
+ PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueRC, &pTask->Core, 0);
+ }
+
+ LogFlow(("pdmRCDevHlp_PCISetIrq: caller=%p/%d: returns void; uTagSrc=%#x\n", pDevIns, pDevIns->iInstance, uTagSrc));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnISASetIrq} */
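+/*
+ * The tagging rule shared by the set-IRQ helpers in this file, isolated
+ * as a sketch (hypothetical helper): a fresh tag is minted only on a
+ * raising edge, while lowering reuses the last tag so ring-3 can
+ * correlate both halves of the transaction.
+ */
+DECLINLINE(uint32_t) pdmRCSketchIrqTag(PVM pVM, PPDMDEVINS pDevIns, int iLevel)
+{
+    if (iLevel & PDM_IRQ_LEVEL_HIGH)
+        return pDevIns->Internal.s.uLastIrqTag = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+    return pDevIns->Internal.s.uLastIrqTag;
+}
+
+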
+static DECLCALLBACK(void) pdmRCDevHlp_ISASetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_ISASetIrq: caller=%p/%d: iIrq=%d iLevel=%d\n", pDevIns, pDevIns->iInstance, iIrq, iLevel));
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+
+ pdmLock(pVM);
+ uint32_t uTagSrc;
+ if (iLevel & PDM_IRQ_LEVEL_HIGH)
+ {
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ if (iLevel == PDM_IRQ_LEVEL_HIGH)
+ VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ else
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ }
+ else
+ uTagSrc = pDevIns->Internal.s.uLastIrqTag;
+
+ bool fRc = pdmRCIsaSetIrq(pVM, iIrq, iLevel, uTagSrc);
+
+ if (iLevel == PDM_IRQ_LEVEL_LOW && fRc)
+ VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+ pdmUnlock(pVM);
+ LogFlow(("pdmRCDevHlp_ISASetIrq: caller=%p/%d: returns void; uTagSrc=%#x\n", pDevIns, pDevIns->iInstance, uTagSrc));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnIoApicSendMsi} */
+static DECLCALLBACK(void) pdmRCDevHlp_IoApicSendMsi(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint32_t uValue)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_IoApicSendMsi: caller=%p/%d: GCPhys=%RGp uValue=%#x\n", pDevIns, pDevIns->iInstance, GCPhys, uValue));
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+
+ uint32_t uTagSrc;
+ pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+ VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+
+ if (pVM->pdm.s.IoApic.pDevInsRC)
+ pVM->pdm.s.IoApic.pfnSendMsiRC(pVM->pdm.s.IoApic.pDevInsRC, GCPhys, uValue, uTagSrc);
+ else
+ AssertFatalMsgFailed(("Lazy bastards!"));
+
+ LogFlow(("pdmRCDevHlp_IoApicSendMsi: caller=%p/%d: returns void; uTagSrc=%#x\n", pDevIns, pDevIns->iInstance, uTagSrc));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPhysRead} */
+static DECLCALLBACK(int) pdmRCDevHlp_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_PhysRead: caller=%p/%d: GCPhys=%RGp pvBuf=%p cbRead=%#x\n",
+ pDevIns, pDevIns->iInstance, GCPhys, pvBuf, cbRead));
+
+ VBOXSTRICTRC rcStrict = PGMPhysRead(pDevIns->Internal.s.pVMRC, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ Log(("pdmRCDevHlp_PhysRead: caller=%p/%d: returns %Rrc\n", pDevIns, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPhysWrite} */
+static DECLCALLBACK(int) pdmRCDevHlp_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_PhysWrite: caller=%p/%d: GCPhys=%RGp pvBuf=%p cbWrite=%#x\n",
+ pDevIns, pDevIns->iInstance, GCPhys, pvBuf, cbWrite));
+
+ VBOXSTRICTRC rcStrict = PGMPhysWrite(pDevIns->Internal.s.pVMRC, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE);
+ AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); /** @todo track down the users for this bugger. */
+
+ Log(("pdmRCDevHlp_PhysWrite: caller=%p/%d: returns %Rrc\n", pDevIns, pDevIns->iInstance, VBOXSTRICTRC_VAL(rcStrict) ));
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnA20IsEnabled} */
+static DECLCALLBACK(bool) pdmRCDevHlp_A20IsEnabled(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_A20IsEnabled: caller=%p/%d:\n", pDevIns, pDevIns->iInstance));
+
+ bool fEnabled = PGMPhysIsA20Enabled(VMMGetCpu0(pDevIns->Internal.s.pVMRC));
+
+ Log(("pdmRCDevHlp_A20IsEnabled: caller=%p/%d: returns %RTbool\n", pDevIns, pDevIns->iInstance, fEnabled));
+ return fEnabled;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnVMState} */
+static DECLCALLBACK(VMSTATE) pdmRCDevHlp_VMState(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+ VMSTATE enmVMState = pDevIns->Internal.s.pVMRC->enmVMState;
+
+ LogFlow(("pdmRCDevHlp_VMState: caller=%p/%d: returns %d\n", pDevIns, pDevIns->iInstance, enmVMState));
+ return enmVMState;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnVMSetError} */
+static DECLCALLBACK(int) pdmRCDevHlp_VMSetError(PPDMDEVINS pDevIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ va_list args;
+ va_start(args, pszFormat);
+ int rc2 = VMSetErrorV(pDevIns->Internal.s.pVMRC, rc, RT_SRC_POS_ARGS, pszFormat, args); Assert(rc2 == rc); NOREF(rc2);
+ va_end(args);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnVMSetErrorV} */
+static DECLCALLBACK(int) pdmRCDevHlp_VMSetErrorV(PPDMDEVINS pDevIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ int rc2 = VMSetErrorV(pDevIns->Internal.s.pVMRC, rc, RT_SRC_POS_ARGS, pszFormat, va); Assert(rc2 == rc); NOREF(rc2);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnVMSetRuntimeError} */
+static DECLCALLBACK(int) pdmRCDevHlp_VMSetRuntimeError(PPDMDEVINS pDevIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = VMSetRuntimeErrorV(pDevIns->Internal.s.pVMRC, fFlags, pszErrorId, pszFormat, va);
+ va_end(va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnVMSetRuntimeErrorV} */
+static DECLCALLBACK(int) pdmRCDevHlp_VMSetRuntimeErrorV(PPDMDEVINS pDevIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ int rc = VMSetRuntimeErrorV(pDevIns->Internal.s.pVMRC, fFlags, pszErrorId, pszFormat, va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnPATMSetMMIOPatchInfo} */
+static DECLCALLBACK(int) pdmRCDevHlp_PATMSetMMIOPatchInfo(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPTR pCachedData)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_PATMSetMMIOPatchInfo: caller=%p/%d:\n", pDevIns, pDevIns->iInstance));
+
+ return PATMSetMMIOPatchInfo(pDevIns->Internal.s.pVMRC, GCPhys, (RTRCPTR)(uintptr_t)pCachedData);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnGetVM} */
+static DECLCALLBACK(PVM) pdmRCDevHlp_GetVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+ return pDevIns->Internal.s.pVMRC;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnGetVMCPU} */
+static DECLCALLBACK(PVMCPU) pdmRCDevHlp_GetVMCPU(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_GetVMCPU: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+ return VMMGetCpu(pDevIns->Internal.s.pVMRC);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnGetCurrentCpuId} */
+static DECLCALLBACK(VMCPUID) pdmRCDevHlp_GetCurrentCpuId(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ VMCPUID idCpu = VMMGetCpuId(pDevIns->Internal.s.pVMRC);
+ LogFlow(("pdmRCDevHlp_GetCurrentCpuId: caller='%p'/%d for CPU %u\n", pDevIns, pDevIns->iInstance, idCpu));
+ return idCpu;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnTMTimeVirtGet} */
+static DECLCALLBACK(uint64_t) pdmRCDevHlp_TMTimeVirtGet(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_TMTimeVirtGet: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+ return TMVirtualGet(pDevIns->Internal.s.pVMRC);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnTMTimeVirtGetFreq} */
+static DECLCALLBACK(uint64_t) pdmRCDevHlp_TMTimeVirtGetFreq(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_TMTimeVirtGetFreq: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+ return TMVirtualGetFreq(pDevIns->Internal.s.pVMRC);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnTMTimeVirtGetNano} */
+static DECLCALLBACK(uint64_t) pdmRCDevHlp_TMTimeVirtGetNano(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmRCDevHlp_TMTimeVirtGetNano: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+ return TMVirtualToNano(pDevIns->Internal.s.pVMRC, TMVirtualGet(pDevIns->Internal.s.pVMRC));
+}
+
+
+/** @interface_method_impl{PDMDEVHLPRC,pfnDBGFTraceBuf} */
+static DECLCALLBACK(RTTRACEBUF) pdmRCDevHlp_DBGFTraceBuf(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ RTTRACEBUF hTraceBuf = pDevIns->Internal.s.pVMRC->hTraceBufRC;
+ LogFlow(("pdmRCDevHlp_DBGFTraceBuf: caller='%p'/%d: returns %p\n", pDevIns, pDevIns->iInstance, hTraceBuf));
+ return hTraceBuf;
+}
+
+
+/**
+ * The Raw-Mode Context Device Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMDEVHLPRC) g_pdmRCDevHlp =
+{
+ PDM_DEVHLPRC_VERSION,
+ pdmRCDevHlp_PCIPhysRead,
+ pdmRCDevHlp_PCIPhysWrite,
+ pdmRCDevHlp_PCISetIrq,
+ pdmRCDevHlp_ISASetIrq,
+ pdmRCDevHlp_IoApicSendMsi,
+ pdmRCDevHlp_PhysRead,
+ pdmRCDevHlp_PhysWrite,
+ pdmRCDevHlp_A20IsEnabled,
+ pdmRCDevHlp_VMState,
+ pdmRCDevHlp_VMSetError,
+ pdmRCDevHlp_VMSetErrorV,
+ pdmRCDevHlp_VMSetRuntimeError,
+ pdmRCDevHlp_VMSetRuntimeErrorV,
+ pdmRCDevHlp_PATMSetMMIOPatchInfo,
+ pdmRCDevHlp_GetVM,
+ pdmRCDevHlp_GetVMCPU,
+ pdmRCDevHlp_GetCurrentCpuId,
+ pdmRCDevHlp_TMTimeVirtGet,
+ pdmRCDevHlp_TMTimeVirtGetFreq,
+ pdmRCDevHlp_TMTimeVirtGetNano,
+ pdmRCDevHlp_DBGFTraceBuf,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ PDM_DEVHLPRC_VERSION
+};
+
+/** @} */
+
+
+
+
+/** @name PIC RC Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMPICHLPRC,pfnSetInterruptFF} */
+static DECLCALLBACK(void) pdmRCPicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */
+    /** @todo r=ramshankar: Propagate rcRZ and make all callers handle it? */
+ APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+}
+
+
+/** @interface_method_impl{PDMPICHLPRC,pfnClearInterruptFF} */
+static DECLCALLBACK(void) pdmRCPicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.CTX_SUFF(pVM);
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */
+    /** @todo r=ramshankar: Propagate rcRZ and make all callers handle it? */
+ APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+}
+
+
+/** @interface_method_impl{PDMPICHLPRC,pfnLock} */
+static DECLCALLBACK(int) pdmRCPicHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return pdmLockEx(pDevIns->Internal.s.pVMRC, rc);
+}
+
+
+/** @interface_method_impl{PDMPICHLPRC,pfnUnlock} */
+static DECLCALLBACK(void) pdmRCPicHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ pdmUnlock(pDevIns->Internal.s.pVMRC);
+}
+
+
+/**
+ * The Raw-Mode Context PIC Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMPICHLPRC) g_pdmRCPicHlp =
+{
+ PDM_PICHLPRC_VERSION,
+ pdmRCPicHlp_SetInterruptFF,
+ pdmRCPicHlp_ClearInterruptFF,
+ pdmRCPicHlp_Lock,
+ pdmRCPicHlp_Unlock,
+ PDM_PICHLPRC_VERSION
+};
+
+/** @} */
+
+
+/** @name I/O APIC RC Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMIOAPICHLPRC,pfnApicBusDeliver} */
+static DECLCALLBACK(int) pdmRCIoApicHlp_ApicBusDeliver(PPDMDEVINS pDevIns, uint8_t u8Dest, uint8_t u8DestMode,
+ uint8_t u8DeliveryMode, uint8_t uVector, uint8_t u8Polarity,
+ uint8_t u8TriggerMode, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+ LogFlow(("pdmRCIoApicHlp_ApicBusDeliver: caller=%p/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n",
+ pDevIns, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc));
+ return APICBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLPRC,pfnLock} */
+static DECLCALLBACK(int) pdmRCIoApicHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return pdmLockEx(pDevIns->Internal.s.pVMRC, rc);
+}
+
+
+/** @interface_method_impl{PDMIOAPICHLPRC,pfnUnlock} */
+static DECLCALLBACK(void) pdmRCIoApicHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ pdmUnlock(pDevIns->Internal.s.pVMRC);
+}
+
+
+/**
+ * The Raw-Mode Context I/O APIC Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMIOAPICHLPRC) g_pdmRCIoApicHlp =
+{
+ PDM_IOAPICHLPRC_VERSION,
+ pdmRCIoApicHlp_ApicBusDeliver,
+ pdmRCIoApicHlp_Lock,
+ pdmRCIoApicHlp_Unlock,
+ PDM_IOAPICHLPRC_VERSION
+};
+
+/** @} */
+
+
+
+
+/** @name PCI Bus RC Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMPCIHLPRC,pfnIsaSetIrq} */
+static DECLCALLBACK(void) pdmRCPciHlp_IsaSetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Log4(("pdmRCPciHlp_IsaSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+
+ pdmLock(pVM);
+ pdmRCIsaSetIrq(pDevIns->Internal.s.pVMRC, iIrq, iLevel, uTagSrc);
+ pdmUnlock(pVM);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPRC,pfnIoApicSetIrq} */
+static DECLCALLBACK(void) pdmRCPciHlp_IoApicSetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Log4(("pdmRCPciHlp_IoApicSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+
+ if (pVM->pdm.s.IoApic.pDevInsRC)
+ pVM->pdm.s.IoApic.pfnSetIrqRC(pVM->pdm.s.IoApic.pDevInsRC, iIrq, iLevel, uTagSrc);
+ else if (pVM->pdm.s.IoApic.pDevInsR3)
+ {
+ /* queue for ring-3 execution. */
+ PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueRC);
+ if (pTask)
+ {
+ pTask->enmOp = PDMDEVHLPTASKOP_IOAPIC_SET_IRQ;
+ pTask->pDevInsR3 = NIL_RTR3PTR; /* not required */
+ pTask->u.IoApicSetIRQ.iIrq = iIrq;
+ pTask->u.IoApicSetIRQ.iLevel = iLevel;
+ pTask->u.IoApicSetIRQ.uTagSrc = uTagSrc;
+
+ PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueRC, &pTask->Core, 0);
+ }
+ else
+ AssertMsgFailed(("We're out of devhlp queue items!!!\n"));
+ }
+}
+
+
+/** @interface_method_impl{PDMPCIHLPRC,pfnIoApicSendMsi} */
+static DECLCALLBACK(void) pdmRCPciHlp_IoApicSendMsi(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint32_t uValue, uint32_t uTagSrc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ Log4(("pdmRCPciHlp_IoApicSendMsi: GCPhys=%p uValue=%d uTagSrc=%#x\n", GCPhys, uValue, uTagSrc));
+ PVM pVM = pDevIns->Internal.s.pVMRC;
+
+ if (pVM->pdm.s.IoApic.pDevInsRC)
+ pVM->pdm.s.IoApic.pfnSendMsiRC(pVM->pdm.s.IoApic.pDevInsRC, GCPhys, uValue, uTagSrc);
+ else
+ AssertFatalMsgFailed(("Lazy bastards!"));
+}
+
+
+/** @interface_method_impl{PDMPCIHLPRC,pfnLock} */
+static DECLCALLBACK(int) pdmRCPciHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ return pdmLockEx(pDevIns->Internal.s.pVMRC, rc);
+}
+
+
+/** @interface_method_impl{PDMPCIHLPRC,pfnUnlock} */
+static DECLCALLBACK(void) pdmRCPciHlp_Unlock(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ pdmUnlock(pDevIns->Internal.s.pVMRC);
+}
+
+
+/**
+ * The Raw-Mode Context PCI Bus Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMPCIHLPRC) g_pdmRCPciHlp =
+{
+ PDM_PCIHLPRC_VERSION,
+ pdmRCPciHlp_IsaSetIrq,
+ pdmRCPciHlp_IoApicSetIrq,
+ pdmRCPciHlp_IoApicSendMsi,
+ pdmRCPciHlp_Lock,
+ pdmRCPciHlp_Unlock,
+ PDM_PCIHLPRC_VERSION, /* the end */
+};
+
+/** @} */
+
+
+
+
+/** @name HPET RC Helpers
+ * @{
+ */
+
+
+/**
+ * The Raw-Mode Context HPET Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMHPETHLPRC) g_pdmRCHpetHlp =
+{
+ PDM_HPETHLPRC_VERSION,
+ PDM_HPETHLPRC_VERSION, /* the end */
+};
+
+/** @} */
+
+
+
+
+/** @name Raw-Mode Context Driver Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnVMSetError} */
+static DECLCALLBACK(int) pdmRCDrvHlp_VMSetError(PPDMDRVINS pDrvIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ va_list args;
+ va_start(args, pszFormat);
+ int rc2 = VMSetErrorV(pDrvIns->Internal.s.pVMRC, rc, RT_SRC_POS_ARGS, pszFormat, args); Assert(rc2 == rc); NOREF(rc2);
+ va_end(args);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnVMSetErrorV} */
+static DECLCALLBACK(int) pdmRCDrvHlp_VMSetErrorV(PPDMDRVINS pDrvIns, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ int rc2 = VMSetErrorV(pDrvIns->Internal.s.pVMRC, rc, RT_SRC_POS_ARGS, pszFormat, va); Assert(rc2 == rc); NOREF(rc2);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnVMSetRuntimeError} */
+static DECLCALLBACK(int) pdmRCDrvHlp_VMSetRuntimeError(PPDMDRVINS pDrvIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ va_list va;
+ va_start(va, pszFormat);
+ int rc = VMSetRuntimeErrorV(pDrvIns->Internal.s.pVMRC, fFlags, pszErrorId, pszFormat, va);
+ va_end(va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnVMSetRuntimeErrorV} */
+static DECLCALLBACK(int) pdmRCDrvHlp_VMSetRuntimeErrorV(PPDMDRVINS pDrvIns, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ int rc = VMSetRuntimeErrorV(pDrvIns->Internal.s.pVMRC, fFlags, pszErrorId, pszFormat, va);
+ return rc;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnAssertEMT} */
+static DECLCALLBACK(bool) pdmRCDrvHlp_AssertEMT(PPDMDRVINS pDrvIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns); RT_NOREF_PV(pDrvIns);
+ if (VM_IS_EMT(pDrvIns->Internal.s.pVMRC))
+ return true;
+
+ RTAssertMsg1Weak("AssertEMT", iLine, pszFile, pszFunction);
+ RTAssertPanic();
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnAssertOther} */
+static DECLCALLBACK(bool) pdmRCDrvHlp_AssertOther(PPDMDRVINS pDrvIns, const char *pszFile, unsigned iLine, const char *pszFunction)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns); RT_NOREF_PV(pDrvIns);
+ if (!VM_IS_EMT(pDrvIns->Internal.s.pVMRC))
+ return true;
+
+    /* Note: While EMT(0) is the only thread we have in RC, driver code
+       that shouldn't execute on the EMT may still be compiled in. */
+ RTAssertMsg1Weak("AssertOther", iLine, pszFile, pszFunction);
+ RTAssertPanic();
+ RT_NOREF_PV(pszFile); RT_NOREF_PV(iLine); RT_NOREF_PV(pszFunction);
+ return false;
+}
+
+
+/** @interface_method_impl{PDMDRVHLPRC,pfnFTSetCheckpoint} */
+static DECLCALLBACK(int) pdmRCDrvHlp_FTSetCheckpoint(PPDMDRVINS pDrvIns, FTMCHECKPOINTTYPE enmType)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ return FTMSetCheckpoint(pDrvIns->Internal.s.pVMRC, enmType);
+}
+
+
+/**
+ * The Raw-Mode Context Driver Helper Callbacks.
+ */
+extern DECLEXPORT(const PDMDRVHLPRC) g_pdmRCDrvHlp =
+{
+ PDM_DRVHLPRC_VERSION,
+ pdmRCDrvHlp_VMSetError,
+ pdmRCDrvHlp_VMSetErrorV,
+ pdmRCDrvHlp_VMSetRuntimeError,
+ pdmRCDrvHlp_VMSetRuntimeErrorV,
+ pdmRCDrvHlp_AssertEMT,
+ pdmRCDrvHlp_AssertOther,
+ pdmRCDrvHlp_FTSetCheckpoint,
+ PDM_DRVHLPRC_VERSION
+};
+
+/** @} */
+
+
+
+
+/**
+ * Sets an irq on the PIC and I/O APIC.
+ *
+ * @returns true if delivered, false if postponed.
+ * @param pVM The cross context VM structure.
+ * @param iIrq The irq.
+ * @param iLevel The new level.
+ * @param uTagSrc The IRQ tag and source.
+ *
+ * @remarks The caller holds the PDM lock.
+ */
+static bool pdmRCIsaSetIrq(PVM pVM, int iIrq, int iLevel, uint32_t uTagSrc)
+{
+ if (RT_LIKELY( ( pVM->pdm.s.IoApic.pDevInsRC
+ || !pVM->pdm.s.IoApic.pDevInsR3)
+ && ( pVM->pdm.s.Pic.pDevInsRC
+ || !pVM->pdm.s.Pic.pDevInsR3)))
+ {
+ if (pVM->pdm.s.Pic.pDevInsRC)
+ pVM->pdm.s.Pic.pfnSetIrqRC(pVM->pdm.s.Pic.pDevInsRC, iIrq, iLevel, uTagSrc);
+ if (pVM->pdm.s.IoApic.pDevInsRC)
+ pVM->pdm.s.IoApic.pfnSetIrqRC(pVM->pdm.s.IoApic.pDevInsRC, iIrq, iLevel, uTagSrc);
+ return true;
+ }
+
+ /* queue for ring-3 execution. */
+ PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueRC);
+ AssertReturn(pTask, false);
+
+ pTask->enmOp = PDMDEVHLPTASKOP_ISA_SET_IRQ;
+ pTask->pDevInsR3 = NIL_RTR3PTR; /* not required */
+ pTask->u.IsaSetIRQ.iIrq = iIrq;
+ pTask->u.IsaSetIRQ.iLevel = iLevel;
+ pTask->u.IsaSetIRQ.uTagSrc = uTagSrc;
+
+ PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueRC, &pTask->Core, 0);
+ return false;
+}
+
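+
+/*
+ * The availability test from pdmRCIsaSetIrq above, factored out as a
+ * sketch (hypothetical helper): the IRQ can be delivered right here in
+ * RC only if every interrupt controller instantiated in ring-3 also has
+ * a raw-mode mapping.
+ */
+DECLINLINE(bool) pdmRCSketchCanDeliverHere(PVM pVM)
+{
+    return (pVM->pdm.s.IoApic.pDevInsRC || !pVM->pdm.s.IoApic.pDevInsR3)
+        && (pVM->pdm.s.Pic.pDevInsRC    || !pVM->pdm.s.Pic.pDevInsR3);
+}
+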
diff --git a/src/VBox/VMM/VMMRC/PGMRC.cpp b/src/VBox/VMM/VMMRC/PGMRC.cpp
new file mode 100644
index 00000000..8c2f3102
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PGMRC.cpp
@@ -0,0 +1,166 @@
+/* $Id: PGMRC.cpp $ */
+/** @file
+ * PGM - Page Monitor, Guest Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/trpm.h>
+#ifdef VBOX_WITH_REM
+# include <VBox/vmm/rem.h>
+#endif
+#include "PGMInternal.h"
+#include <VBox/vmm/vm.h>
+#include "PGMInline.h"
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <iprt/errcore.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+
+
+#ifndef RT_ARCH_AMD64
+/*
+ * Shadow - 32-bit mode
+ */
+#define PGM_SHW_TYPE PGM_TYPE_32BIT
+#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
+#include "PGMRCShw.h"
+
+/* Guest - real mode */
+#define PGM_GST_TYPE PGM_TYPE_REAL
+#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
+#include "PGMRCGst.h"
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+/* Guest - protected mode */
+#define PGM_GST_TYPE PGM_TYPE_PROT
+#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
+#include "PGMRCGst.h"
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+/* Guest - 32-bit mode */
+#define PGM_GST_TYPE PGM_TYPE_32BIT
+#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
+#include "PGMRCGst.h"
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+#undef PGM_SHW_TYPE
+#undef PGM_SHW_NAME
+#endif /* !RT_ARCH_AMD64 */
+
+
+/*
+ * Shadow - PAE mode
+ */
+#define PGM_SHW_TYPE PGM_TYPE_PAE
+#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
+#include "PGMRCShw.h"
+
+/* Guest - real mode */
+#define PGM_GST_TYPE PGM_TYPE_REAL
+#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+/* Guest - protected mode */
+#define PGM_GST_TYPE PGM_TYPE_PROT
+#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+/* Guest - 32-bit mode */
+#define PGM_GST_TYPE PGM_TYPE_32BIT
+#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+/* Guest - PAE mode */
+#define PGM_GST_TYPE PGM_TYPE_PAE
+#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
+#include "PGMRCGst.h"
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+
+#undef PGM_SHW_TYPE
+#undef PGM_SHW_NAME
+
+
+/*
+ * Shadow - AMD64 mode
+ */
+#define PGM_SHW_TYPE PGM_TYPE_AMD64
+#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
+#include "PGMRCShw.h"
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+/* Guest - AMD64 mode */
+#define PGM_GST_TYPE PGM_TYPE_AMD64
+#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
+#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
+#include "PGMRCGst.h"
+#include "PGMRCBth.h"
+#undef PGM_BTH_NAME
+#undef PGM_GST_TYPE
+#undef PGM_GST_NAME
+#endif
+
+#undef PGM_SHW_TYPE
+#undef PGM_SHW_NAME
+
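+
+/*
+ * Toy illustration of the include-as-template pattern above; the macro
+ * body here is hypothetical and merely stands in for the real
+ * PGM_GST_NAME_32BIT mangler: each mode gets its own copy of the
+ * template code under a unique symbol name.
+ */
+#define PGM_GST_NAME(name) pgmRCGstSketch32Bit##name
+DECLINLINE(int) PGM_GST_NAME(GetPage)(void) { return VINF_SUCCESS; } /* => pgmRCGstSketch32BitGetPage() */
+#undef PGM_GST_NAME
+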
diff --git a/src/VBox/VMM/VMMRC/PGMRCBth.h b/src/VBox/VMM/VMMRC/PGMRCBth.h
new file mode 100644
index 00000000..c24a96c7
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PGMRCBth.h
@@ -0,0 +1,24 @@
+/* $Id: PGMRCBth.h $ */
+/** @file
+ * VBox - Page Manager, Shadow+Guest Paging Template - Guest Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*******************************************************************************
+* Internal Functions *
+*******************************************************************************/
+RT_C_DECLS_BEGIN
+RT_C_DECLS_END
+
diff --git a/src/VBox/VMM/VMMRC/PGMRCGst.h b/src/VBox/VMM/VMMRC/PGMRCGst.h
new file mode 100644
index 00000000..44bb95fa
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PGMRCGst.h
@@ -0,0 +1,76 @@
+/* $Id: PGMRCGst.h $ */
+/** @file
+ * VBox - Page Manager, Guest Paging Template - Guest Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+#undef GSTPT
+#undef PGSTPT
+#undef GSTPTE
+#undef PGSTPTE
+#undef GSTPD
+#undef PGSTPD
+#undef GSTPDE
+#undef PGSTPDE
+#undef GST_BIG_PAGE_SIZE
+#undef GST_BIG_PAGE_OFFSET_MASK
+#undef GST_PDE_PG_MASK
+#undef GST_PDE4M_PG_MASK
+#undef GST_PD_SHIFT
+#undef GST_PD_MASK
+#undef GST_PTE_PG_MASK
+#undef GST_PT_SHIFT
+#undef GST_PT_MASK
+
+#if PGM_GST_TYPE == PGM_TYPE_32BIT
+# define GSTPT X86PT
+# define PGSTPT PX86PT
+# define GSTPTE X86PTE
+# define PGSTPTE PX86PTE
+# define GSTPD X86PD
+# define PGSTPD PX86PD
+# define GSTPDE X86PDE
+# define PGSTPDE PX86PDE
+# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
+# define GST_PDE_PG_MASK X86_PDE_PG_MASK
+# define GST_PDE4M_PG_MASK X86_PDE4M_PG_MASK
+# define GST_PD_SHIFT X86_PD_SHIFT
+# define GST_PD_MASK X86_PD_MASK
+# define GST_PTE_PG_MASK X86_PTE_PG_MASK
+# define GST_PT_SHIFT X86_PT_SHIFT
+# define GST_PT_MASK X86_PT_MASK
+#else
+# define GSTPT X86PTPAE
+# define PGSTPT PX86PTPAE
+# define GSTPTE X86PTEPAE
+# define PGSTPTE PX86PTEPAE
+# define GSTPD X86PDPAE
+# define PGSTPD PX86PDPAE
+# define GSTPDE X86PDEPAE
+# define PGSTPDE PX86PDEPAE
+# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
+# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK
+# define GST_PDE4M_PG_MASK X86_PDE4M_PAE_PG_MASK
+# define GST_PD_SHIFT X86_PD_PAE_SHIFT
+# define GST_PD_MASK X86_PD_PAE_MASK
+# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# define GST_PT_SHIFT X86_PT_PAE_SHIFT
+# define GST_PT_MASK X86_PT_PAE_MASK
+#endif
+
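+
+/*
+ * Worked example (illustration only): with the 32-bit constants above,
+ * GST_PD_SHIFT = 22 and GST_PD_MASK = 0x3ff, so a linear address splits as
+ *     iPD = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK;   - bits 31:22
+ *     iPT = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;   - bits 21:12
+ * The PAE variant narrows the directory index to 9 bits (shift 21, mask
+ * 0x1ff) because PAE entries are 8 bytes instead of 4.
+ */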
diff --git a/src/VBox/VMM/VMMRC/PGMRCShw.h b/src/VBox/VMM/VMMRC/PGMRCShw.h
new file mode 100644
index 00000000..eac695e7
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/PGMRCShw.h
@@ -0,0 +1,74 @@
+/* $Id: PGMRCShw.h $ */
+/** @file
+ * VBox - Page Manager, Shadow Paging Template - Guest Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+#undef SHWPT
+#undef PSHWPT
+#undef SHWPTE
+#undef PSHWPTE
+#undef SHWPD
+#undef PSHWPD
+#undef SHWPDE
+#undef PSHWPDE
+#undef SHW_PDE_PG_MASK
+#undef SHW_PD_SHIFT
+#undef SHW_PD_MASK
+#undef SHW_PTE_PG_MASK
+#undef SHW_PT_SHIFT
+#undef SHW_PT_MASK
+
+#if PGM_SHW_TYPE == PGM_TYPE_32BIT
+# define SHWPT X86PT
+# define PSHWPT PX86PT
+# define SHWPTE X86PTE
+# define PSHWPTE PX86PTE
+# define SHWPD X86PD
+# define PSHWPD PX86PD
+# define SHWPDE X86PDE
+# define PSHWPDE PX86PDE
+# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
+# define SHW_PD_SHIFT X86_PD_SHIFT
+# define SHW_PD_MASK X86_PD_MASK
+# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
+# define SHW_PT_SHIFT X86_PT_SHIFT
+# define SHW_PT_MASK X86_PT_MASK
+#else
+# define SHWPT PGMSHWPTPAE
+# define PSHWPT PPGMSHWPTPAE
+# define SHWPTE PGMSHWPTEPAE
+# define PSHWPTE PPGMSHWPTEPAE
+# define SHWPD X86PDPAE
+# define PSHWPD PX86PDPAE
+# define SHWPDE X86PDEPAE
+# define PSHWPDE PX86PDEPAE
+# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK
+# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
+# define SHW_PD_MASK X86_PD_PAE_MASK
+# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
+# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
+# define SHW_PT_MASK X86_PT_PAE_MASK
+#endif
+
+
+/*******************************************************************************
+* Internal Functions *
+*******************************************************************************/
+RT_C_DECLS_BEGIN
+RT_C_DECLS_END
+
diff --git a/src/VBox/VMM/VMMRC/SELMRC.cpp b/src/VBox/VMM/VMMRC/SELMRC.cpp
new file mode 100644
index 00000000..1daf6bdc
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/SELMRC.cpp
@@ -0,0 +1,587 @@
+/* $Id: SELMRC.cpp $ */
+/** @file
+ * SELM - The Selector Manager, Guest Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_SELM
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/trpm.h>
+#include "SELMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/pgm.h>
+
+#include <VBox/param.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+
+#include "SELMInline.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef LOG_ENABLED
+/** Segment register names. */
+static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+#endif
+
+
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
+
+/**
+ * Synchronizes one GDT entry (guest -> shadow).
+ *
+ * @returns VBox strict status code (appropriate for trap handling and GC
+ * return).
+ * @retval VINF_SUCCESS
+ * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
+ * @retval VINF_SELM_SYNC_GDT
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pCtx CPU context for the current CPU.
+ * @param iGDTEntry The GDT entry to sync.
+ *
+ * @remarks Caller checks that this isn't the LDT entry!
+ */
+static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
+{
+ Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
+
+ /*
+ * Validate the offset.
+ */
+ VBOXGDTR GdtrGuest;
+ CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
+ unsigned offEntry = iGDTEntry * sizeof(X86DESC);
+ if ( iGDTEntry >= SELM_GDT_ELEMENTS
+ || offEntry > GdtrGuest.cbGdt)
+ return VINF_SUCCESS; /* ignore */
+
+ /*
+ * Read the guest descriptor.
+ */
+ X86DESC Desc;
+ int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
+ if (RT_FAILURE(rc))
+ {
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
+ if (RT_FAILURE(rc))
+ {
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
+ /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
+ return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
+ }
+ }
+
+ /*
+ * Check for conflicts.
+ */
+ RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
+ Assert( !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK_OFF_RPL)
+ && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK_OFF_RPL)
+ && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK_OFF_RPL)
+ && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK_OFF_RPL)
+ && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
+ if ( pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == Sel
+ || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == Sel
+ || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == Sel
+ || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == Sel
+ || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
+ {
+ if (Desc.Gen.u1Present)
+ {
+ Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+ return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
+ }
+ Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));
+
+ /* Note: we can't continue below or else we'll change the shadow descriptor!! */
+ /* When the guest makes the selector present, then we'll do a GDT sync. */
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Convert the guest selector to a shadow selector and update the shadow GDT.
+ */
+ selmGuestToShadowDesc(pVM, &Desc);
+ PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
+ //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
+ //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
+ *pShwDescr = Desc;
+
+ /*
+ * Detect and mark stale registers.
+ */
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+ PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
+ for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
+ {
+ if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
+ {
+ if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
+ {
+ if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
+ {
+ Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+ paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
+ /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
+ rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
+ }
+ else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
+ {
+ Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+ paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
+ }
+ else
+ Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+ }
+ else
+ Log(("GDT write to selector in %s register %04X (out of sync)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+ }
+ }
+
+ /** @todo Detect stale LDTR as well? */
+
+ return rcStrict;
+}
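+
+/*
+ * Worked example (illustration only): X86_SEL_SHIFT is 3, as the low three
+ * selector bits hold TI and RPL. Entry index and byte offset thus relate as
+ *     iGDTEntry = Sel >> X86_SEL_SHIFT;          e.g. 0x28 >> 3 = 5
+ *     offEntry  = iGDTEntry * sizeof(X86DESC);   5 * 8 = 0x28
+ * which is why (iGDTEntry << X86_SEL_SHIFT) above recovers the RPL-0,
+ * table-0 selector for the conflict checks.
+ */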
+
+
+/**
+ * Synchronizes any segment registers referring to the given GDT entry.
+ *
+ * This is called before any changes are performed and shadowed, so it is
+ * possible to look in both the shadow and guest descriptor table entries
+ * for hidden register content.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @param pCtx The CPU context.
+ * @param iGDTEntry The GDT entry to sync.
+ */
+void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
+{
+ /*
+ * Validate the offset.
+ */
+ VBOXGDTR GdtrGuest;
+ CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
+ unsigned offEntry = iGDTEntry * sizeof(X86DESC);
+ if ( iGDTEntry >= SELM_GDT_ELEMENTS
+ || offEntry > GdtrGuest.cbGdt)
+ return;
+
+ /*
+ * Sync outdated segment registers using this entry.
+ */
+ PCX86DESC pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
+ uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
+ PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
+ for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
+ {
+ if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
+ {
+ if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
+ {
+ if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
+ {
+ selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
+ Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
+ }
+ else
+ Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
+ iGDTEntry, g_aszSRegNms[iSReg], pDesc));
+ }
+ }
+ }
+}
+
+
+/**
+ * Syncs hidden selector register parts before emulating a GDT change.
+ *
+ * This is shared between the selmRCGuestGDTWritePfHandler and
+ * selmGuestGDTWriteHandler.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param offGuestGdt The offset into the GDT of the write that was made.
+ * @param cbWrite The number of bytes written.
+ * @param pCtx The current CPU context.
+ */
+void selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
+{
+ uint32_t iGdt = offGuestGdt >> X86_SEL_SHIFT;
+ uint32_t const iGdtLast = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
+ do
+ {
+ selmRCSyncGdtSegRegs(pVM, pVCpu, pCtx, iGdt);
+ iGdt++;
+ } while (iGdt <= iGdtLast);
+}
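+
+/*
+ * Example (illustration only): a 6-byte write at GDT offset 0x2e spans
+ * descriptors 5 and 6, since 0x2e >> 3 = 5 and (0x2e + 6 - 1) >> 3 = 6,
+ * so the loop above syncs the segment registers for both entries.
+ */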
+
+
+/**
+ * Checks the guest GDT for changes after a write has been emulated.
+ *
+ * This is shared between the selmRCGuestGDTWritePfHandler and
+ * selmGuestGDTWriteHandler.
+ *
+ * @retval VINF_SUCCESS
+ * @retval VINF_SELM_SYNC_GDT
+ * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param offGuestGdt The offset into the GDT of the write that was made.
+ * @param cbWrite The number of bytes written.
+ * @param pCtx The current CPU context.
+ */
+VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
+{
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+ /* Check if the LDT was in any way affected. Do not sync the
+ shadow GDT if that's the case or we might have trouble in
+ the world switcher (or so they say). */
+ uint32_t const iGdtFirst = offGuestGdt >> X86_SEL_SHIFT;
+ uint32_t const iGdtLast = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
+ uint32_t const iLdt = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
+ if (iGdtFirst <= iLdt && iGdtLast >= iLdt)
+ {
+ Log(("LDTR selector change -> fall back to HC!!\n"));
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ rcStrict = VINF_SELM_SYNC_GDT;
+ /** @todo Implement correct stale LDT handling. */
+ }
+ else
+ {
+ /* Sync the shadow GDT and continue provided the update didn't
+ cause any segment registers to go stale in any way. */
+ uint32_t iGdt = iGdtFirst;
+ do
+ {
+ VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pCtx, iGdt);
+ Assert(rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT || rcStrict2 == VINF_SELM_SYNC_GDT);
+ if (rcStrict == VINF_SUCCESS)
+ rcStrict = rcStrict2;
+ iGdt++;
+ } while ( iGdt <= iGdtLast
+ && (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT));
+ if (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT)
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
+ }
+ return rcStrict;
+}
+
+
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest GDT write access \#PF handler }
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
+ NOREF(pvRange); NOREF(pvUser); RT_NOREF_PV(uErrorCode);
+
+ /*
+ * Check if any selectors might be affected.
+ */
+ selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offRange, 8 /*cbWrite*/, CPUMCTX_FROM_CORE(pRegFrame));
+
+ /*
+ * Attempt to emulate the instruction and sync the affected entries.
+ */
+ uint32_t cb;
+ VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
+ if (RT_SUCCESS(rcStrict) && cb)
+ rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offRange, cb, CPUMCTX_FROM_CORE(pRegFrame));
+ else
+ {
+ Assert(RT_FAILURE(rcStrict));
+ if (rcStrict == VERR_EM_INTERPRETER)
+ rcStrict = VINF_EM_RAW_EMULATE_INSTR; /* No, not VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT, see PGM_PHYS_RW_IS_SUCCESS. */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ }
+
+ if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
+ else
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
+ return rcStrict;
+}
+
+#endif /* SELM_TRACK_GUEST_GDT_CHANGES */
+
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest LDT write access \#PF handler }
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ /** @todo To be implemented... or not. */
+ ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
+ NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
+
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT); RT_NOREF_PV(pVM);
+ return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
+}
+#endif
+
+
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
+
+/**
+ * Read wrapper used by selmRCGuestTssPostWriteCheck.
+ * @returns VBox status code (appropriate for trap handling and GC return).
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pvDst Where to put the bits we read.
+ * @param pvSrc Guest address to read from.
+ * @param cb The number of bytes to read.
+ */
+DECLINLINE(int) selmRCReadTssBits(PVM pVM, PVMCPU pVCpu, void *pvDst, void const *pvSrc, size_t cb)
+{
+ int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+
+ /** @todo use different fallback? */
+ rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
+ AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
+ if (rc == VINF_SUCCESS)
+ {
+ rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
+ AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
+ }
+ return rc;
+}
+
+
+/**
+ * Checks the guest TSS for changes after a write has been emulated.
+ *
+ * This is shared between the selmRCGuestTSSWritePfHandler and
+ * selmGuestTSSWriteHandler.
+ *
+ * @returns Strict VBox status code appropriate for raw-mode returns.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param offGuestTss The offset into the TSS of the write that was made.
+ * @param cbWrite The number of bytes written.
+ */
+VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite)
+{
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+ /*
+ * Check if the ring-0 and/or ring-1 stacks have been changed, and
+ * synchronize our ring-compressed copies of the stacks.
+ */
+ struct
+ {
+ uint32_t esp;
+ uint16_t ss;
+ uint16_t padding_ss;
+ } s;
+ AssertCompileSize(s, 8);
+ PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
+ if ( offGuestTss < RT_UOFFSET_AFTER(VBOXTSS, ss0)
+ && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, esp0))
+ {
+ rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
+ if ( rcStrict == VINF_SUCCESS
+ && ( s.esp != pVM->selm.s.Tss.esp1
+ || s.ss != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */)
+ {
+ Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+ (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss, (RTGCPTR)s.esp));
+ pVM->selm.s.Tss.esp1 = s.esp;
+ pVM->selm.s.Tss.ss1 = s.ss | 1;
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+ }
+ }
+# ifdef VBOX_WITH_RAW_RING1
+ if ( EMIsRawRing1Enabled(pVM)
+ && offGuestTss < RT_UOFFSET_AFTER(VBOXTSS, ss1)
+ && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, esp1)
+ && rcStrict == VINF_SUCCESS)
+ {
+ rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp1, sizeof(s));
+ if ( rcStrict == VINF_SUCCESS
+ && ( s.esp != pVM->selm.s.Tss.esp2
+ || s.ss != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */)
+ {
+ Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+ (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)s.ss, (RTGCPTR)s.esp));
+ pVM->selm.s.Tss.esp2 = s.esp;
+ pVM->selm.s.Tss.ss2 = (s.ss & ~1) | 2;
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+ }
+ }
+# endif
+
+ /*
+ * If VME is enabled we need to check if the interrupt redirection bitmap
+ * needs updating.
+ */
+ if ( offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
+ && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME)
+ && rcStrict == VINF_SUCCESS)
+ {
+ if ( offGuestTss < RT_UOFFSET_AFTER(VBOXTSS, offIoBitmap)
+ && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, offIoBitmap))
+ {
+ uint16_t offIoBitmap = 0;
+ rcStrict = selmRCReadTssBits(pVM, pVCpu, &offIoBitmap, &pGuestTss->offIoBitmap, sizeof(offIoBitmap));
+ if ( rcStrict != VINF_SUCCESS
+ || offIoBitmap != pVM->selm.s.offGuestIoBitmap)
+ {
+ Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+ }
+ else
+ Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+ }
+
+ if ( rcStrict == VINF_SUCCESS
+ && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS)
+ && pVM->selm.s.offGuestIoBitmap != 0)
+ {
+ /** @todo not sure how the partial case is handled; probably not allowed */
+ uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
+ if ( offGuestTss < offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap)
+ && offGuestTss + cbWrite > offIntRedirBitmap
+ && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
+ {
+ Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offGuestTss=%x cbWrite=%x\n",
+ pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offGuestTss, cbWrite));
+
+ /** @todo only update the changed part. */
+ for (uint32_t i = 0; rcStrict == VINF_SUCCESS && i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
+ rcStrict = selmRCReadTssBits(pVM, pVCpu, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
+ (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
+ }
+ }
+ }
+
+ /*
+ * Return to ring-3 for a full resync if any of the above fails... (?)
+ */
+ if (rcStrict != VINF_SUCCESS)
+ {
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+ if (RT_SUCCESS(rcStrict) || rcStrict == VERR_ACCESS_DENIED)
+ rcStrict = VINF_SUCCESS;
+ }
+
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
+ return rcStrict;
+}
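+
+/*
+ * Layout reminder for the offset checks above (standard 32-bit TSS):
+ * esp0 is at offset 0x04, ss0 at 0x08, and the offIoBitmap field at 0x66.
+ * The VME interrupt redirection bitmap occupies the 32 bytes (256 bits,
+ * one per software interrupt) immediately below the I/O bitmap, hence
+ * the offIoBitmap - sizeof(IntRedirBitmap) computation.
+ */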
+
+
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest TSS write access \#PF handler}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
+ NOREF(pvRange); NOREF(pvUser); RT_NOREF_PV(uErrorCode);
+
+ /*
+ * Try emulate the access.
+ */
+ uint32_t cb;
+ VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
+ if ( RT_SUCCESS(rcStrict)
+ && cb)
+ rcStrict = selmRCGuestTssPostWriteCheck(pVM, pVCpu, offRange, cb);
+ else
+ {
+ AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
+ if (rcStrict == VERR_EM_INTERPRETER)
+ rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
+ }
+ return rcStrict;
+}
+
+#endif /* SELM_TRACK_GUEST_TSS_CHANGES */
+
+#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
+ NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
+ return VERR_SELM_SHADOW_GDT_WRITE;
+}
+#endif
+
+
+#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
+ Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
+ NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
+ return VERR_SELM_SHADOW_LDT_WRITE;
+}
+#endif
+
+
+#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
+ NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
+ return VERR_SELM_SHADOW_TSS_WRITE;
+}
+#endif
+
diff --git a/src/VBox/VMM/VMMRC/TRPMRC.cpp b/src/VBox/VMM/VMMRC/TRPMRC.cpp
new file mode 100644
index 00000000..85d632e0
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/TRPMRC.cpp
@@ -0,0 +1,180 @@
+/* $Id: TRPMRC.cpp $ */
+/** @file
+ * TRPM - The Trap Monitor, Guest Context
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_TRPM
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/vmm.h>
+#include "TRPMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/x86.h>
+#include <VBox/log.h>
+#include <VBox/vmm/selm.h>
+
+
+
+/**
+ * Arms a temporary trap handler for traps in Hypervisor code.
+ *
+ * The operation is similar to a System V signal handler: when the handler
+ * is called, it is first reset to the default action. So, if you need to
+ * handle more than one trap, you must reinstall the handler.
+ *
+ * To uninstall the temporary handler, call this function with pfnHandler set to NULL.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param iTrap Trap number to install handler [0..255].
+ * @param pfnHandler Pointer to the handler. Use NULL for uninstalling the handler.
+ */
+VMMRCDECL(int) TRPMGCSetTempHandler(PVM pVM, unsigned iTrap, PFNTRPMGCTRAPHANDLER pfnHandler)
+{
+ /*
+ * Validate input.
+ */
+ if (iTrap >= RT_ELEMENTS(pVM->trpm.s.aTmpTrapHandlers))
+ {
+ AssertMsgFailed(("Trap handler iTrap=%u is out of range!\n", iTrap));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Install handler.
+ */
+ pVM->trpm.s.aTmpTrapHandlers[iTrap] = (RTRCPTR)(uintptr_t)pfnHandler;
+ return VINF_SUCCESS;
+}
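+
+/*
+ * Usage sketch (illustration only; the handler name is hypothetical and
+ * the callback signature is per PFNTRPMGCTRAPHANDLER). Since a temporary
+ * handler resets to the default action once invoked, it must re-arm
+ * itself to catch more than one trap:
+ */
+#if 0
+static DECLCALLBACK(int) tstTmpTrap0eHandler(PVM pVM, PCPUMCTXCORE pRegFrame)
+{
+    TRPMGCSetTempHandler(pVM, 0xe, tstTmpTrap0eHandler); /* re-arm */
+    return VINF_SUCCESS;
+}
+/* ... */
+TRPMGCSetTempHandler(pVM, 0xe, tstTmpTrap0eHandler); /* arm */
+/* provoke the trap... */
+TRPMGCSetTempHandler(pVM, 0xe, NULL);                /* uninstall */
+#endif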
+
+
+/**
+ * Return to host context from a hypervisor trap handler.
+ *
+ * This function will *never* return.
+ * It will also reset any traps that are pending.
+ *
+ * @param pVM The cross context VM structure.
+ * @param rc The return code for host context.
+ */
+VMMRCDECL(void) TRPMGCHyperReturnToHost(PVM pVM, int rc)
+{
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+
+ LogFlow(("TRPMGCHyperReturnToHost: rc=%Rrc\n", rc));
+ TRPMResetTrap(pVCpu);
+ VMMRCGuestToHost(pVM, rc);
+ AssertReleaseFailed();
+}
+
+
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Virtual Handler callback for Guest write access to the Guest's own current IDT.}
+ */
+DECLEXPORT(VBOXSTRICTRC) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ uint16_t cbIDT;
+ RTGCPTR GCPtrIDT = (RTGCPTR)CPUMGetGuestIDTR(pVCpu, &cbIDT);
+#ifdef VBOX_STRICT
+ RTGCPTR GCPtrIDTEnd = (RTGCPTR)((RTGCUINTPTR)GCPtrIDT + cbIDT + 1);
+#endif
+ uint32_t iGate = ((RTGCUINTPTR)pvFault - (RTGCUINTPTR)GCPtrIDT)/sizeof(VBOXIDTE);
+ RT_NOREF_PV(offRange); RT_NOREF_PV(pvRange); RT_NOREF_PV(pRegFrame); RT_NOREF_PV(pVM);
+
+ AssertMsg(offRange < (uint32_t)cbIDT+1, ("pvFault=%RGv GCPtrIDT=%RGv-%RGv pvRange=%RGv\n", pvFault, GCPtrIDT, GCPtrIDTEnd, pvRange));
+ Assert((RTGCPTR)(RTRCUINTPTR)pvRange == GCPtrIDT);
+ NOREF(uErrorCode); NOREF(pvUser);
+
+#if 0
+ /* Note! this causes problems in Windows XP as instructions following the update can be dangerous (str eax has been seen) */
+ /* Note! not going back to ring 3 could make the code scanner miss them. */
+ /* Check if we can handle the write here. */
+ if ( iGate != 3 /* Gate 3 is handled differently; could do it here as well, but let ring 3 handle this case for now. */
+ && !ASMBitTest(&pVM->trpm.s.au32IdtPatched[0], iGate)) /* Passthru gates need special attention too. */
+ {
+ uint32_t cb;
+ int rc = EMInterpretInstructionEx(pVM, pVCpu, pRegFrame, pvFault, &cb);
+ if (RT_SUCCESS(rc) && cb)
+ {
+ uint32_t iGate1 = (offRange + cb - 1)/sizeof(VBOXIDTE);
+
+ Log(("trpmRCGuestIDTWriteHandler: write to gate %x (%x) offset %x cb=%d\n", iGate, iGate1, offRange, cb));
+
+ trpmClearGuestTrapHandler(pVM, iGate);
+ if (iGate != iGate1)
+ trpmClearGuestTrapHandler(pVM, iGate1);
+
+ STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTHandled);
+ return VINF_SUCCESS;
+ }
+ }
+#else
+ NOREF(iGate);
+#endif
+
+ Log(("trpmRCGuestIDTWritePfHandler: eip=%RGv write to gate %x offset %x\n", pRegFrame->eip, iGate, offRange));
+
+ /** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+
+ STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
+ return VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT;
+}
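+
+/*
+ * Example (illustration only): IDT gates are 8 bytes (sizeof(VBOXIDTE)),
+ * so a write faulting at IDT base + 0x40 yields iGate = 0x40 / 8 = 8,
+ * i.e. the gate normally hosting the double fault (#DF) vector.
+ */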
+
+
+/**
+ * @callback_method_impl{FNPGMRCVIRTPFHANDLER,
+ * \#PF Virtual Handler callback for Guest write access to the VBox shadow IDT.}
+ */
+DECLEXPORT(VBOXSTRICTRC) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+ RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+ LogRel(("FATAL ERROR: trpmRCShadowIDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%08RGv\r\n", pRegFrame->eip, pvFault, pvRange));
+ NOREF(uErrorCode); NOREF(offRange); NOREF(pvUser);
+
+ /*
+ * If we ever get here, then the guest has *probably* executed an SIDT
+ * instruction that we failed to patch. In theory this could be very bad,
+ * but there are nasty applications out there that install device drivers
+ * that mess with the guest's IDT. In those cases, it's quite ok to simply
+ * ignore the writes and pretend success.
+ *
+ * Another possibility is that the guest is touching some page of memory
+ * that has nothing to do with our IDT or anything like that, just a
+ * potential conflict that we didn't discover in time.
+ */
+ DISSTATE Dis;
+ int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, NULL);
+ if (rc == VINF_SUCCESS)
+ {
+ /* Just ignore the write. */
+ pRegFrame->eip += Dis.cbInstr;
+ return VINF_SUCCESS;
+ }
+
+ return VERR_TRPM_SHADOW_IDT_WRITE;
+}
+
diff --git a/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp b/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
new file mode 100644
index 00000000..03f91509
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
@@ -0,0 +1,1559 @@
+/* $Id: TRPMRCHandlers.cpp $ */
+/** @file
+ * TRPM - Raw-mode Context Trap Handlers, CPP part
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_TRPM
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/csam.h>
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/cpum.h>
+#include "TRPMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/param.h>
+
+#include <VBox/err.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+#include <VBox/log.h>
+#include <VBox/vmm/tm.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/x86.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/* ModR/M byte parsing masks (still used here). */
+#define X86_OPCODE_MODRM_MOD_MASK 0xc0
+#define X86_OPCODE_MODRM_REG_MASK 0x38
+#define X86_OPCODE_MODRM_RM_MASK 0x07
+
+/** @todo fix/remove/permanent-enable this when DIS/PATM handles invalid lock sequences. */
+#define DTRACE_EXPERIMENT
+
+#if 1
+# define TRPM_ENTER_DBG_HOOK(a_iVector) do {} while (0)
+# define TRPM_EXIT_DBG_HOOK(a_iVector) do {} while (0)
+# define TRPM_ENTER_DBG_HOOK_HYPER(a_iVector) do {} while (0)
+# define TRPM_EXIT_DBG_HOOK_HYPER(a_iVector) do {} while (0)
+#else
+# define TRPM_ENTER_DBG_HOOK(a_iVector) \
+ uint32_t const fDbgEFlags1 = CPUMRawGetEFlags(pVCpu); \
+ if (!(fDbgEFlags1 & X86_EFL_IF)) Log(("%s: IF=0 ##\n", __FUNCTION__)); \
+ else do {} while (0)
+# define TRPM_EXIT_DBG_HOOK(a_iVector) \
+ do { \
+ uint32_t const fDbgEFlags2 = CPUMRawGetEFlags(pVCpu); \
+ if ((fDbgEFlags1 ^ fDbgEFlags2) & (X86_EFL_IF | X86_EFL_IOPL)) \
+ Log(("%s: IF=%d->%d IOPL=%d->%d !#\n", __FUNCTION__, \
+ !!(fDbgEFlags1 & X86_EFL_IF), !!(fDbgEFlags2 & X86_EFL_IF), \
+ X86_EFL_GET_IOPL(fDbgEFlags1), X86_EFL_GET_IOPL(fDbgEFlags2) )); \
+ else if (!(fDbgEFlags2 & X86_EFL_IF)) Log(("%s: IF=0 [ret] ##\n", __FUNCTION__)); \
+ } while (0)
+# define TRPM_ENTER_DBG_HOOK_HYPER(a_iVector) do {} while (0)
+# define TRPM_EXIT_DBG_HOOK_HYPER(a_iVector) do {} while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Pointer to a read-only hypervisor trap record. */
+typedef const struct TRPMGCHYPER *PCTRPMGCHYPER;
+
+/**
+ * A hypervisor trap record.
+ * This contains information about a handler for an instruction range.
+ *
+ * @remark This must match what TRPM_HANDLER outputs.
+ */
+typedef struct TRPMGCHYPER
+{
+ /** The start address. */
+ uintptr_t uStartEIP;
+ /** The end address (exclusive).
+ * If NULL, the record only covers the instruction at uStartEIP. */
+ uintptr_t uEndEIP;
+ /**
+ * The handler.
+ *
+ * @returns VBox status code
+ * VINF_SUCCESS means we've handled the trap.
+ * Any other error code means returning to the host context.
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame The register frame.
+ * @param uUser The user argument.
+ */
+ DECLRCCALLBACKMEMBER(int, pfnHandler, (PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser));
+ /** Whatever the handler desires to put here. */
+ uintptr_t uUser;
+} TRPMGCHYPER;
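+
+/*
+ * Illustration only (all symbols hypothetical): the assembly tables below
+ * are expected to hold records of this shape,
+ *     { (uintptr_t)&SomeHyperInsn, (uintptr_t)&SomeHyperInsnEnd,
+ *       trpmRCTrapInGeneric, 0 },
+ * so a faulting hypervisor EIP can be matched against [uStartEIP, uEndEIP)
+ * and dispatched to the recovery handler with uUser as argument.
+ */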
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+/** Defined in VMMRC0.asm or VMMRC99.asm.
+ * @{ */
+extern const TRPMGCHYPER g_aTrap0bHandlers[1];
+extern const TRPMGCHYPER g_aTrap0bHandlersEnd[1];
+extern const TRPMGCHYPER g_aTrap0dHandlers[1];
+extern const TRPMGCHYPER g_aTrap0dHandlersEnd[1];
+extern const TRPMGCHYPER g_aTrap0eHandlers[1];
+extern const TRPMGCHYPER g_aTrap0eHandlersEnd[1];
+/** @} */
+RT_C_DECLS_END
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN /* addressed from asm (not called so no DECLASM). */
+DECLCALLBACK(int) trpmRCTrapInGeneric(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser);
+RT_C_DECLS_END
+
+
+
+/**
+ * Exits the trap, called when exiting a trap handler.
+ *
+ * Will reset the trap if it's not a guest trap or the trap
+ * is already handled. Will process resume guest FFs.
+ *
+ * @returns rc, possibly adjusted if it's VINF_SUCCESS or something really
+ * bad happened.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param rc The VBox status code to return.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ *
+ * @remarks This must not be used for hypervisor traps, only guest traps.
+ */
+static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
+{
+ uint32_t uOldActiveVector = pVCpu->trpm.s.uActiveVector;
+ NOREF(uOldActiveVector);
+
+ /* Reset trap? */
+ if ( rc != VINF_EM_RAW_GUEST_TRAP
+ && rc != VINF_EM_RAW_RING_SWITCH_INT)
+ pVCpu->trpm.s.uActiveVector = UINT32_MAX;
+
+#ifdef VBOX_HIGH_RES_TIMERS_HACK
+ /*
+ * We should poll the timers occasionally.
+ * We must *NOT* do this too frequently as it adds a significant overhead
+ * and it'll kill us if the trap load is high. (See @bugref{1354}.)
+ * (The heuristic is not very intelligent, we should really check trap
+ * frequency etc. here, but alas, we lack any such information atm.)
+ */
+ static unsigned s_iTimerPoll = 0;
+ if (rc == VINF_SUCCESS)
+ {
+ if (!(++s_iTimerPoll & 0xf))
+ {
+ TMTimerPollVoid(pVM, pVCpu);
+ Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VM_FF_TM_VIRTUAL_SYNC=%d\n", pRegFrame->eip,
+ VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)));
+ }
+ }
+ else
+ s_iTimerPoll = 0;
+#endif
+
+ /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+ {
+ Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
+ if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu))
+ {
+ /** @note We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
+ * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
+ * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
+ * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
+ */
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+ }
+ }
+
+ /*
+ * Pending resume-guest-FF?
+ * Or pending (A)PIC interrupt? Windows XP will crash if we delay APIC interrupts.
+ */
+ if ( rc == VINF_SUCCESS
+ && ( VM_FF_IS_ANY_SET(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
+ || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3
+ | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+ | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+ | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM | VMCPU_FF_SELM_SYNC_GDT
+ | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
+ | VMCPU_FF_IOM | VMCPU_FF_CPUM
+ )
+ )
+ )
+ {
+ /* The out of memory condition naturally outranks the others. */
+ if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)))
+ rc = VINF_EM_NO_MEMORY;
+ else
+ {
+ /* APIC needs updating. */
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+ APICUpdatePendingInterrupts(pVCpu);
+
+ if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_CPUM))
+ CPUMRCProcessForceFlag(pVCpu);
+
+ /* Pending Ring-3 action. */
+ if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM | VMCPU_FF_IOM))
+ {
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
+ rc = VINF_EM_RAW_TO_R3;
+ }
+ /* Pending timer action. */
+ else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
+ rc = VINF_EM_RAW_TIMER_PENDING;
+ /* The Virtual Sync clock has stopped. */
+ else if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
+ rc = VINF_EM_RAW_TO_R3;
+ /* DMA work pending? */
+ else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
+ rc = VINF_EM_RAW_TO_R3;
+ /* Pending request packets might contain actions that need immediate
+ attention, such as pending hardware interrupts. */
+ else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
+ || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
+ rc = VINF_EM_PENDING_REQUEST;
+ /* Pending GDT/LDT/TSS sync. */
+ else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS))
+ rc = VINF_SELM_SYNC_GDT;
+ else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
+ rc = VINF_EM_RAW_TO_R3;
+ /* Possibly pending interrupt: dispatch it. */
+ else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+ && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+ && PATMAreInterruptsEnabledByCtx(pVM, CPUMCTX_FROM_CORE(pRegFrame))
+ )
+ {
+ uint8_t u8Interrupt;
+ rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+ Log(("trpmGCExitTrap: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
+ if (RT_SUCCESS(rc))
+ {
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)u8Interrupt, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_HARDWARE_INT, uOldActiveVector);
+ /* can't return if successful */
+ Assert(rc != VINF_SUCCESS);
+
+ /* Stop the profile counter that was started in TRPMRCHandlersA.asm */
+ Assert(uOldActiveVector <= 16);
+ STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
+
+ /* Assert the trap and go to the recompiler to dispatch it. */
+ TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
+
+ STAM_PROFILE_ADV_START(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
+ rc = VINF_EM_RAW_INTERRUPT_PENDING;
+ }
+ else if ( rc == VERR_APIC_INTR_MASKED_BY_TPR /* Can happen if TPR is too high for the newly arrived interrupt. */
+ || rc == VERR_NO_DATA) /* Can happen if the APIC is disabled. */
+ {
+ STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
+ rc = VINF_SUCCESS;
+ }
+ else
+ AssertFatalMsgRC(rc, ("PDMGetInterrupt failed. rc=%Rrc\n", rc));
+ }
+ /*
+ * Try sync CR3?
+ */
+ else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+ {
+#if 1
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ PGMRZDynMapStartAutoSet(pVCpu);
+ rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+#else
+ rc = VINF_PGM_SYNC_CR3;
+#endif
+ }
+ }
+ }
+
+ /* Note! TRPMRCHandlersA.asm performs sanity checks in debug builds.*/
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ return rc;
+}
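+
+/*
+ * Note on the polling heuristic above: !(++s_iTimerPoll & 0xf) is true on
+ * every 16th successful exit (counter values 16, 32, 48, ...), which keeps
+ * TMTimerPollVoid off the hot path while still bounding timer latency.
+ */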
+
+
+/**
+ * \#DB (Debug event) handler.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ RTGCUINTREG uDr6 = ASMGetAndClearDR6();
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGC01: cs:eip=%04x:%08x uDr6=%RTreg EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, uDr6, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(1);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_DB),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+
+ /*
+ * We currently don't make use of the X86_DR7_GD bit, but
+ * there might come a time when we do.
+ */
+ AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
+ ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
+ ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
+ VERR_NOT_IMPLEMENTED);
+ AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
+
+ /*
+ * Now leave the rest to the DBGF.
+ */
+ PGMRZDynMapStartAutoSet(pVCpu);
+ int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6, false /*fAltStepping*/);
+ if (rc == VINF_EM_RAW_GUEST_TRAP)
+ {
+ CPUMSetGuestDR6(pVCpu, (CPUMGetGuestDR6(pVCpu) & ~X86_DR6_B_MASK) | uDr6);
+ if (CPUMGetGuestDR7(pVCpu) & X86_DR7_GD)
+ CPUMSetGuestDR7(pVCpu, CPUMGetGuestDR7(pVCpu) & ~X86_DR7_GD);
+ }
+ else if (rc == VINF_EM_DBG_STEPPED)
+ pRegFrame->eflags.Bits.u1TF = 0;
+
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC01: %Rrc (%04x:%08x %RTreg EFlag=%#x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, uDr6, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(1);
+ return rc;
+}
+
+
+/**
+ * \#DB (Debug event) handler for the hypervisor code.
+ *
+ * This is mostly the same as TRPMGCTrap01Handler, but we skip the PGM auto
+ * mapping set as well as the default trap exit path since they are both really
+ * bad ideas in this context.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ RTGCUINTREG uDr6 = ASMGetAndClearDR6();
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ TRPM_ENTER_DBG_HOOK_HYPER(1);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_DB),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ LogFlow(("TRPMGCHyper01: cs:eip=%04x:%08x uDr6=%RTreg\n", pRegFrame->cs.Sel, pRegFrame->eip, uDr6));
+
+ /*
+ * We currently don't make use of the X86_DR7_GD bit, but
+ * there might come a time when we do.
+ */
+ AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
+ ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
+ ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
+ VERR_NOT_IMPLEMENTED);
+ AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
+
+ /*
+ * Now leave the rest to the DBGF.
+ */
+ int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6, false /*fAltStepping*/);
+ if (rc == VINF_EM_DBG_STEPPED)
+ pRegFrame->eflags.Bits.u1TF = 0;
+
+ Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, uDr6));
+ TRPM_EXIT_DBG_HOOK_HYPER(1);
+ return rc;
+}
+
+
+/**
+ * NMI handler, for when we are using NMIs to debug things.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ * @remark This is not hooked up unless you're building with VBOX_WITH_NMI defined.
+ */
+DECLASM(int) TRPMGCTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ LogFlow(("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip));
+ EMRCHistoryAddExitCsEip(TRPMCPU_2_VMCPU(pTrpmCpu), EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_NMI),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+#if 0 /* Enable this iff you have a COM port and really want this debug info. */
+ RTLogComPrintf("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip);
+#endif
+ NOREF(pTrpmCpu); RT_NOREF_PV(pRegFrame);
+ return VERR_TRPM_DONT_PANIC;
+}
+
+
+/**
+ * NMI handler, for when we are using NMIs to debug things.
+ *
+ * This is the handler we're most likely to hit when the NMI fires (it is
+ * unlikely that we'll be stuck in guest code).
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ * @remark This is not hooked up unless you're building with VBOX_WITH_NMI defined.
+ */
+DECLASM(int) TRPMGCHyperTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ LogFlow(("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip));
+ EMRCHistoryAddExitCsEip(TRPMCPU_2_VMCPU(pTrpmCpu), EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_NMI),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+#if 0 /* Enable this iff you have a COM port and really want this debug info. */
+ RTLogComPrintf("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip);
+#endif
+ NOREF(pTrpmCpu); RT_NOREF_PV(pRegFrame);
+ return VERR_TRPM_DONT_PANIC;
+}
+
+
+/**
+ * \#BP (Breakpoint) handler.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ int rc;
+ LogFlow(("TRPMGC03: %04x:%08x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(3);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_BP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ PGMRZDynMapStartAutoSet(pVCpu);
+
+ /*
+ * PATM is using INT3s, let them have a go first.
+ */
+ if ( ( (pRegFrame->ss.Sel & X86_SEL_RPL) == 1
+ || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2) )
+ && !pRegFrame->eflags.Bits.u1VM)
+ {
+ rc = PATMRCHandleInt3PatchTrap(pVM, pRegFrame);
+ if ( rc == VINF_SUCCESS
+ || rc == VINF_EM_RESCHEDULE
+ || rc == VINF_EM_RAW_EMULATE_INSTR
+ || rc == VINF_PATM_PATCH_INT3
+ || rc == VINF_PATM_DUPLICATE_FUNCTION )
+ {
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC03: %Rrc (%04x:%08x EFL=%x) (PATM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(3);
+ return rc;
+ }
+ }
+ rc = DBGFRZTrap03Handler(pVM, pVCpu, pRegFrame);
+
+ /* anything we should do with this? Schedule it in GC? */
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC03: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(3);
+ return rc;
+}
+
+
+/**
+ * \#BP (Breakpoint) handler.
+ *
+ * This is similar to TRPMGCTrap03Handler, but we skip the bits which are
+ * potentially harmful to us (the common trap exit and the auto mapping set).
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGCHyper03: %04x:%08x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK_HYPER(3);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_BP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+
+ /*
+ * Hand it over to DBGF.
+ */
+ int rc = DBGFRZTrap03Handler(pVM, pVCpu, pRegFrame);
+ AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_TRPM_IPE_2);
+
+ Log6(("TRPMGCHyper03: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK_HYPER(3);
+ return rc;
+}
+
+
+/**
+ * Trap handler for illegal opcode fault (\#UD).
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed execution to host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap06Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ int rc;
+ LogFlow(("TRPMGC06: %04x:%08x EFL=%#x/%#x\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->eflags.u32, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(6);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_UD),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ PGMRZDynMapStartAutoSet(pVCpu);
+
+ if (CPUMGetGuestCPL(pVCpu) <= (EMIsRawRing1Enabled(pVM) ? 1U : 0U))
+ {
+ /*
+ * Decode the instruction.
+ */
+ RTGCPTR PC;
+ rc = SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
+ pRegFrame->rip, &PC);
+ if (RT_FAILURE(rc))
+ {
+ Log(("TRPMGCTrap06Handler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
+ rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_GUEST_TRAP, pRegFrame);
+ Log6(("TRPMGC06: %Rrc (%04x:%08x EFL=%x) (SELM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(6);
+ return rc;
+ }
+
+ DISCPUSTATE Cpu;
+ uint32_t cbOp;
+ rc = EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)PC, pRegFrame, &Cpu, &cbOp);
+ if (RT_FAILURE(rc))
+ {
+ rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
+ Log6(("TRPMGC06: %Rrc (%04x:%08x EFL=%x) (EM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(6);
+ return rc;
+ }
+
+ /*
+ * UD2 in a patch?
+ * Note! PATMRCHandleIllegalInstrTrap doesn't always return.
+ */
+ if ( Cpu.pCurInstr->uOpcode == OP_ILLUD2
+ && PATMIsPatchGCAddr(pVM, pRegFrame->eip))
+ {
+ LogFlow(("TRPMGCTrap06Handler: -> PATMRCHandleIllegalInstrTrap\n"));
+ rc = PATMRCHandleIllegalInstrTrap(pVM, pRegFrame);
+ /** @todo These tests are completely unnecessary, should just follow the
+ * flow and return at the end of the function. */
+ if ( rc == VINF_SUCCESS
+ || rc == VINF_EM_RAW_EMULATE_INSTR
+ || rc == VINF_PATM_DUPLICATE_FUNCTION
+ || rc == VINF_PATM_PENDING_IRQ_AFTER_IRET
+ || rc == VINF_EM_RESCHEDULE)
+ {
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC06: %Rrc (%04x:%08x EFL=%x) (PATM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(6);
+ return rc;
+ }
+ }
+ /*
+ * Speed up dtrace and don't entrust invalid lock sequences to the recompiler.
+ */
+ else if (Cpu.fPrefix & DISPREFIX_LOCK)
+ {
+ Log(("TRPMGCTrap06Handler: pc=%08x op=%d\n", pRegFrame->eip, Cpu.pCurInstr->uOpcode));
+#ifdef DTRACE_EXPERIMENT /** @todo fix/remove/permanent-enable this when DIS/PATM handles invalid lock sequences. */
+ Assert(!PATMIsPatchGCAddr(pVM, pRegFrame->eip));
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, X86_XCPT_UD, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, X86_XCPT_UD);
+ Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+#else
+ rc = VINF_EM_RAW_EMULATE_INSTR;
+#endif
+ }
+ /*
+ * Handle MONITOR - it causes an #UD exception instead of #GP when not executed in ring 0.
+ */
+ else if (Cpu.pCurInstr->uOpcode == OP_MONITOR)
+ {
+ LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n"));
+ rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR));
+ }
+ else if (GIMShouldTrapXcptUD(pVCpu))
+ {
+ LogFlow(("TRPMGCTrap06Handler: -> GIMXcptUD\n"));
+ VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, CPUMCTX_FROM_CORE(pRegFrame), &Cpu, NULL /* pcbInstr */);
+ if (rcStrict == VINF_SUCCESS)
+ {
+            /* The interrupt inhibition wrt. EIP will be handled by trpmGCExitTrap() below. */
+ pRegFrame->eip += Cpu.cbInstr;
+ Assert(Cpu.cbInstr);
+ }
+ else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
+ rc = VINF_SUCCESS;
+ else if (rcStrict == VINF_GIM_R3_HYPERCALL)
+ rc = VINF_GIM_R3_HYPERCALL;
+ else
+ {
+ Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
+ LogFlow(("TRPMGCTrap06Handler: GIMXcptUD returns %Rrc -> VINF_EM_RAW_EMULATE_INSTR\n", rc));
+ rc = VINF_EM_RAW_EMULATE_INSTR;
+ }
+ }
+        /* Never generate a raw trap here; it might be an instruction that requires emulation. */
+ else
+ {
+ LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n"));
+ rc = VINF_EM_RAW_EMULATE_INSTR;
+ }
+ }
+ else
+ {
+ LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n"));
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, X86_XCPT_UD, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, X86_XCPT_UD);
+ Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+ }
+
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC06: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(6);
+ return rc;
+}
+
+
+/**
+ * Trap handler for device not present fault (\#NM).
+ *
+ * Device not available, FP or (F)WAIT instruction.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap07Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGC07: %04x:%08x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(7);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_NM),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ PGMRZDynMapStartAutoSet(pVCpu);
+
+ int rc = CPUMHandleLazyFPU(pVCpu);
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC07: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(7);
+ return rc;
+}
+
+
+/**
+ * \#NP ((segment) Not Present) handler.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap0bHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGC0b: %04x:%08x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(0xb);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_NP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ PGMRZDynMapStartAutoSet(pVCpu);
+
+ /*
+     * Try to detect the instruction (by opcode) which caused the trap.
+     * XXX note: this code may cause \#PF (trap 0e) or \#GP (trap 0d) while
+     * accessing user code; we need to handle that somehow in the future!
+ */
+ RTGCPTR GCPtr;
+ if ( SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
+ (RTGCPTR)pRegFrame->eip, &GCPtr)
+ == VINF_SUCCESS)
+ {
+ uint8_t *pu8Code = (uint8_t *)(uintptr_t)GCPtr;
+
+ /*
+ * First skip possible instruction prefixes, such as:
+ * OS, AS
+ * CS:, DS:, ES:, SS:, FS:, GS:
+ * REPE, REPNE
+ *
+         * note: Currently we support only up to 4 prefixes per opcode; more
+         *       prefixes (normally not used anyway) will cause trap 0d in the guest.
+         * note: The instruction length on IA-32 may be up to 15 bytes; we don't
+         *       check for that, it's too hard.
+ */
+ for (unsigned i = 0; i < 4; i++)
+ {
+ if ( pu8Code[0] != 0xf2 /* REPNE/REPNZ */
+ && pu8Code[0] != 0xf3 /* REP/REPE/REPZ */
+ && pu8Code[0] != 0x2e /* CS: */
+ && pu8Code[0] != 0x36 /* SS: */
+ && pu8Code[0] != 0x3e /* DS: */
+ && pu8Code[0] != 0x26 /* ES: */
+ && pu8Code[0] != 0x64 /* FS: */
+ && pu8Code[0] != 0x65 /* GS: */
+ && pu8Code[0] != 0x66 /* OS */
+ && pu8Code[0] != 0x67 /* AS */
+ )
+ break;
+ pu8Code++;
+ }
+
+ /*
+         * Detect a ring switch using a call gate.
+ *
+         * We recognize the following causes for trap 0b:
+ * CALL FAR, CALL FAR []
+ * JMP FAR, JMP FAR []
+ * IRET (may cause a task switch)
+ *
+         * Note: we can't detect whether the trap was caused by a call through a
+         * call gate descriptor or whether it is a real trap 0b due to a bad selector.
+         * In both situations we'll pass execution to our recompiler, so we don't
+         * have to worry.
+         * If we wanted to do better detection, we would have to set GDT entries to
+         * call gate descriptors pointing to our own handlers.
+ */
+ /** @todo not sure about IRET, may generate Trap 0d (\#GP), NEED TO CHECK! */
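+        /* Note: for the 0xff opcodes below, a ModR/M reg field of 0x18 is /3
+         *       (the indirect far CALL form) and 0x28 is /5 (the indirect far
+         *       JMP form). */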
+ if ( pu8Code[0] == 0x9a /* CALL FAR */
+ || ( pu8Code[0] == 0xff /* CALL FAR [] */
+ && (pu8Code[1] & X86_OPCODE_MODRM_REG_MASK) == 0x18)
+ || pu8Code[0] == 0xea /* JMP FAR */
+ || ( pu8Code[0] == 0xff /* JMP FAR [] */
+ && (pu8Code[1] & X86_OPCODE_MODRM_REG_MASK) == 0x28)
+ || pu8Code[0] == 0xcf /* IRET */
+ )
+ {
+ /*
+             * Got a potential call through a call gate.
+ * We simply return execution to the recompiler to do emulation
+ * starting from the instruction which caused the trap.
+ */
+ pTrpmCpu->uActiveVector = UINT32_MAX;
+ Log6(("TRPMGC0b: %Rrc (%04x:%08x EFL=%x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(0xb);
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ return VINF_EM_RAW_RING_SWITCH;
+ }
+ }
+
+ /*
+ * Pass trap 0b as is to the recompiler in all other cases.
+ */
+ Log6(("TRPMGC0b: %Rrc (%04x:%08x EFL=%x)\n", VINF_EM_RAW_GUEST_TRAP, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ TRPM_EXIT_DBG_HOOK(0xb);
+ return VINF_EM_RAW_GUEST_TRAP;
+}
+
+
+/**
+ * \#GP (General Protection Fault) handler for Ring-0 privileged instructions.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @param pCpu The opcode info.
+ * @param PC The program counter corresponding to cs:eip in pRegFrame.
+ */
+static int trpmGCTrap0dHandlerRing0(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR PC)
+{
+ int rc;
+ TRPM_ENTER_DBG_HOOK(0xd);
+
+ /*
+ * Try handle it here, if not return to HC and emulate/interpret it there.
+ */
+ uint16_t const uOpcode = pCpu->pCurInstr->uOpcode;
+ switch (uOpcode)
+ {
+ case OP_INT3:
+ /*
+ * Little hack to make the code below not fail
+ */
+ pCpu->Param1.fUse = DISUSE_IMMEDIATE8;
+ pCpu->Param1.uValue = 3;
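+            /* (I.e. pretend INT3 was decoded as 'INT 3' so the OP_INT code
+             *  below can treat both encodings uniformly.) */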
+ RT_FALL_THRU();
+ case OP_INT:
+ {
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_INT));
+ Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
+ Assert(!(PATMIsPatchGCAddr(pVM, PC)));
+ if (pCpu->Param1.uValue == 3)
+ {
+ /* Int 3 replacement patch? */
+ if (PATMRCHandleInt3PatchTrap(pVM, pRegFrame) == VINF_SUCCESS)
+ {
+ AssertFailed();
+ return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
+ }
+ }
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)pCpu->Param1.uValue, pCpu->cbInstr, TRPM_TRAP_NO_ERRORCODE, TRPM_SOFTWARE_INT, 0xd);
+ if (RT_SUCCESS(rc) && rc != VINF_EM_RAW_GUEST_TRAP)
+ {
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
+ }
+
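+            /* The #GP error code of a software INT that fails the gate DPL check
+             * carries the interrupt vector in its selector index field (with the
+             * IDT bit set), so recover the vector for the ring switch below. */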
+ pVCpu->trpm.s.uActiveVector = (pVCpu->trpm.s.uActiveErrorCode & X86_TRAP_ERR_SEL_MASK) >> X86_TRAP_ERR_SEL_SHIFT;
+ pVCpu->trpm.s.enmActiveType = TRPM_SOFTWARE_INT;
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH_INT, pRegFrame);
+ }
+
+#ifdef PATM_EMULATE_SYSENTER
+ case OP_SYSEXIT:
+ case OP_SYSRET:
+ rc = PATMSysCall(pVM, CPUMCTX_FROM_CORE(pRegFrame), pCpu);
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+#endif
+
+ case OP_HLT:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_HLT));
+
+ /* If it's in patch code, defer to ring-3. */
+ if (PATMIsPatchGCAddr(pVM, PC))
+ break;
+
+ pRegFrame->eip += pCpu->cbInstr;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_HALT, pRegFrame);
+
+
+ /*
+ * These instructions are used by PATM and CASM for finding
+ * dangerous non-trapping instructions. Thus, since all
+ * scanning and patching is done in ring-3 we'll have to
+ * return to ring-3 on the first encounter of these instructions.
+ */
+ case OP_MOV_CR:
+ case OP_MOV_DR:
+ /* We can safely emulate control/debug register move instructions in patched code. */
+ if ( !PATMIsPatchGCAddr(pVM, PC)
+ && !CSAMIsKnownDangerousInstr(pVM, PC))
+ {
+ if (uOpcode == OP_MOV_CR)
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MOV_CRX));
+ else
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MOV_DRX));
+ break;
+ }
+ RT_FALL_THRU();
+ case OP_INVLPG:
+ case OP_LLDT:
+ case OP_STI:
+ case OP_RDTSC: /* just in case */
+ case OP_RDPMC:
+ case OP_CLTS:
+ case OP_WBINVD: /* nop */
+ case OP_RDMSR:
+ case OP_WRMSR:
+ {
+ /* Update history. */
+ switch (uOpcode)
+ {
+ case OP_MOV_CR:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MOV_CRX));
+ break;
+ case OP_MOV_DR:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MOV_DRX));
+ break;
+ case OP_INVLPG:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_INVLPG));
+ break;
+ case OP_LLDT:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_LLDT));
+ break;
+ case OP_STI:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_STI));
+ break;
+ case OP_RDPMC:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_RDPMC));
+ break;
+ case OP_CLTS:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CLTS));
+ break;
+ case OP_WBINVD:
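+                    /** @todo WBINVD reuses EMEXITTYPE_CLTS below; this looks like a
+                     *        copy & paste leftover worth double-checking. */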
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CLTS));
+ break;
+ case OP_RDMSR:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ));
+ break;
+ case OP_WRMSR:
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE));
+ break;
+ }
+
+ rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, pCpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR));
+ if (rc == VERR_EM_INTERPRETER)
+ rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ }
+ }
+
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EXCEPTION_PRIVILEGED, pRegFrame);
+}
+
+
+/**
+ * \#GP (General Protection Fault) handler for Ring-3.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @param pCpu The opcode info.
+ * @param PC The program counter corresponding to cs:eip in pRegFrame.
+ */
+static int trpmGCTrap0dHandlerRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR PC)
+{
+ int rc;
+ Assert(!pRegFrame->eflags.Bits.u1VM);
+ TRPM_ENTER_DBG_HOOK(0xd);
+
+ uint16_t const uOpcode = pCpu->pCurInstr->uOpcode;
+ switch (uOpcode)
+ {
+ /*
+ * INT3 and INT xx are ring-switching.
+ * (The shadow IDT will have set the entries to DPL=0, that's why we're here.)
+ */
+ case OP_INT3:
+ /*
+ * Little hack to make the code below not fail
+ */
+ pCpu->Param1.fUse = DISUSE_IMMEDIATE8;
+ pCpu->Param1.uValue = 3;
+ RT_FALL_THRU();
+ case OP_INT:
+ {
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_INT));
+ Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)pCpu->Param1.uValue, pCpu->cbInstr, TRPM_TRAP_NO_ERRORCODE, TRPM_SOFTWARE_INT, 0xd);
+ if (RT_SUCCESS(rc) && rc != VINF_EM_RAW_GUEST_TRAP)
+ {
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
+ }
+
+ pVCpu->trpm.s.uActiveVector = (pVCpu->trpm.s.uActiveErrorCode & X86_TRAP_ERR_SEL_MASK) >> X86_TRAP_ERR_SEL_SHIFT;
+ pVCpu->trpm.s.enmActiveType = TRPM_SOFTWARE_INT;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH_INT, pRegFrame);
+ }
+
+ /*
+ * SYSCALL, SYSENTER, INTO and BOUND are also ring-switchers.
+ */
+ case OP_SYSCALL:
+ case OP_SYSENTER:
+ if (uOpcode == OP_SYSCALL)
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_SYSCALL));
+ else
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_SYSENTER));
+#ifdef PATM_EMULATE_SYSENTER
+ rc = PATMSysCall(pVM, CPUMCTX_FROM_CORE(pRegFrame), pCpu);
+ if (rc == VINF_SUCCESS)
+ {
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
+ }
+ /* else no break; */
+#endif
+ RT_FALL_THRU();
+ case OP_BOUND:
+ case OP_INTO:
+ pVCpu->trpm.s.uActiveVector = UINT32_MAX;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH, pRegFrame);
+
+ /*
+ * Handle virtualized TSC & PMC reads, just in case.
+ */
+ case OP_RDTSC:
+ case OP_RDPMC:
+ {
+ rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, pCpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR));
+ if (rc == VERR_EM_INTERPRETER)
+ rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ }
+
+ /*
+         * STI and CLI are I/O privileged, i.e. they only work when IOPL >= CPL.
+ */
+ case OP_STI:
+ case OP_CLI:
+ {
+ uint32_t efl = CPUMRawGetEFlags(pVCpu);
+ uint32_t cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
+ if (X86_EFL_GET_IOPL(efl) >= cpl)
+ {
+ LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> REM\n"));
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RESCHEDULE_REM, pRegFrame);
+ }
+ LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0) iopl=%x, cpl=%x\n", X86_EFL_GET_IOPL(efl), cpl));
+ break;
+ }
+ }
+
+ /*
+ * A genuine guest fault.
+ */
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_GUEST_TRAP, pRegFrame);
+}
+
+
+/**
+ * Emulates RDTSC for the \#GP handler.
+ *
+ * @returns VINF_SUCCESS or VINF_EM_RAW_EMULATE_INSTR.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * This will be updated on successful return.
+ */
+DECLINLINE(int) trpmGCTrap0dHandlerRdTsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
+{
+ STAM_COUNTER_INC(&pVM->trpm.s.StatTrap0dRdTsc);
+ TRPM_ENTER_DBG_HOOK(0xd);
+
+ if (CPUMGetGuestCR4(pVCpu) & X86_CR4_TSD)
+ {
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame); /* will trap (optimize later). */
+ }
+
+ uint64_t uTicks = TMCpuTickGet(pVCpu);
+ pRegFrame->eax = RT_LO_U32(uTicks);
+ pRegFrame->edx = RT_HI_U32(uTicks);
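+    /* RDTSC is a fixed two-byte instruction (0F 31), so we can advance EIP
+       without disassembling. */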
+ pRegFrame->eip += 2;
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
+}
+
+
+/**
+ * \#GP (General Protection Fault) handler.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ */
+static int trpmGCTrap0dHandler(PVM pVM, PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("trpmGCTrap0dHandler: cs:eip=%RTsel:%08RX32 uErr=%RGv EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, pTrpmCpu->uActiveErrorCode, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(0xd);
+
+ /*
+ * Convert and validate CS.
+ */
+ STAM_PROFILE_START(&pVM->trpm.s.StatTrap0dDisasm, a);
+ RTGCPTR PC;
+ int rc = SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
+ pRegFrame->rip, &PC);
+ if (RT_FAILURE(rc))
+ {
+ Log(("trpmGCTrap0dHandler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n",
+ pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
+ TRPM_EXIT_DBG_HOOK(0xd);
+ STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
+ }
+
+ /*
+ * Disassemble the instruction.
+ */
+ DISCPUSTATE Cpu;
+ uint32_t cbOp;
+ rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, &Cpu, &cbOp);
+ if (RT_FAILURE(rc))
+ {
+ AssertMsgFailed(("DISCoreOneEx failed to PC=%RGv rc=%Rrc\n", PC, rc));
+ TRPM_EXIT_DBG_HOOK(0xd);
+ STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
+ }
+ STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
+
+ /*
+ * Optimize RDTSC traps.
+ * Some guests (like Solaris) are using RDTSC all over the place and
+ * will end up trapping a *lot* because of that.
+ *
+ * Note: it's no longer safe to access the instruction opcode directly due to possible stale code TLB entries
+ */
+ if (Cpu.pCurInstr->uOpcode == OP_RDTSC)
+ {
+ EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_RDTSC));
+ return trpmGCTrap0dHandlerRdTsc(pVM, pVCpu, pRegFrame);
+ }
+
+ /*
+ * Deal with I/O port access.
+ */
+ if ( pVCpu->trpm.s.uActiveErrorCode == 0
+ && (Cpu.pCurInstr->fOpType & DISOPTYPE_PORTIO))
+ {
+ /* IOMRCIOPortHandler updates exit history. */
+ VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pVCpu, pRegFrame, &Cpu);
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pRegFrame);
+ }
+
+ /*
+ * Deal with Ring-0 (privileged instructions)
+ */
+ if ( (pRegFrame->ss.Sel & X86_SEL_RPL) <= 1
+ && !pRegFrame->eflags.Bits.u1VM)
+ return trpmGCTrap0dHandlerRing0(pVM, pVCpu, pRegFrame, &Cpu, PC);
+
+ /*
+ * Deal with Ring-3 GPs.
+ */
+ if (!pRegFrame->eflags.Bits.u1VM)
+ return trpmGCTrap0dHandlerRing3(pVM, pVCpu, pRegFrame, &Cpu, PC);
+
+ /*
+ * Deal with v86 code.
+ *
+ * We always set IOPL to zero which makes e.g. pushf fault in V86
+ * mode. The guest might use IOPL=3 and therefore not expect a #GP.
+ * Simply fall back to the recompiler to emulate this instruction if
+     * that's the case. To get the correct IOPL value we must use CPUMRawGetEFlags.
+ */
+ X86EFLAGS eflags;
+ eflags.u32 = CPUMRawGetEFlags(pVCpu); /* Get the correct value. */
+ Log3(("TRPM #GP V86: cs:eip=%04x:%08x IOPL=%d efl=%08x\n", pRegFrame->cs.Sel, pRegFrame->eip, eflags.Bits.u2IOPL, eflags.u));
+ if (eflags.Bits.u2IOPL != 3)
+ {
+ Assert(EMIsRawRing1Enabled(pVM) || eflags.Bits.u2IOPL == 0);
+
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xD, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xd);
+ Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ }
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
+}
+
+
+/**
+ * \#GP (General Protection Fault) handler.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap0dHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGC0d: %04x:%08x err=%x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(0xd);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_GP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+
+ PGMRZDynMapStartAutoSet(pVCpu);
+ int rc = trpmGCTrap0dHandler(pVM, pTrpmCpu, pRegFrame);
+ switch (rc)
+ {
+ case VINF_EM_RAW_GUEST_TRAP:
+ case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
+ if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
+ rc = VINF_PATM_PATCH_TRAP_GP;
+ break;
+
+ case VINF_EM_RAW_INTERRUPT_PENDING:
+ Assert(TRPMHasTrap(pVCpu));
+ /* no break; */
+ case VINF_PGM_SYNC_CR3:
+ case VINF_EM_RAW_EMULATE_INSTR:
+ case VINF_IOM_R3_IOPORT_READ:
+ case VINF_IOM_R3_IOPORT_WRITE:
+ case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
+ case VINF_IOM_R3_MMIO_WRITE:
+ case VINF_IOM_R3_MMIO_COMMIT_WRITE:
+ case VINF_IOM_R3_MMIO_READ:
+ case VINF_IOM_R3_MMIO_READ_WRITE:
+ case VINF_CPUM_R3_MSR_READ:
+ case VINF_CPUM_R3_MSR_WRITE:
+ case VINF_PATM_PATCH_INT3:
+ case VINF_EM_NO_MEMORY:
+ case VINF_EM_RAW_TO_R3:
+ case VINF_EM_RAW_TIMER_PENDING:
+ case VINF_EM_PENDING_REQUEST:
+ case VINF_EM_HALT:
+ case VINF_SELM_SYNC_GDT:
+ case VINF_SUCCESS:
+ break;
+
+ default:
+ AssertMsg(PATMIsPatchGCAddr(pVM, pRegFrame->eip) == false, ("return code %d\n", rc));
+ break;
+ }
+ Log6(("TRPMGC0d: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(0xd);
+ return rc;
+}
+
+
+/**
+ * \#PF (Page Fault) handler.
+ *
+ * Calls PGM which does the actual handling.
+ *
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes pass execution on to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCTrap0eHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+ PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+ LogFlow(("TRPMGC0e: %04x:%08x err=%x cr2=%08x EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode, (uint32_t)pVCpu->trpm.s.uActiveCR2, CPUMRawGetEFlags(pVCpu)));
+ TRPM_ENTER_DBG_HOOK(0xe);
+ EMRCHistoryAddExitCsEip(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_PF),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+
+ /*
+ * This is all PGM stuff.
+ */
+ PGMRZDynMapStartAutoSet(pVCpu);
+ int rc = PGMTrap0eHandler(pVCpu, pVCpu->trpm.s.uActiveErrorCode, pRegFrame, (RTGCPTR)pVCpu->trpm.s.uActiveCR2);
+ switch (rc)
+ {
+ case VINF_EM_RAW_EMULATE_INSTR:
+ case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
+ case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
+ case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
+ case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
+ if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
+ rc = VINF_PATCH_EMULATE_INSTR;
+ break;
+
+ case VINF_EM_RAW_GUEST_TRAP:
+ if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
+ {
+ PGMRZDynMapReleaseAutoSet(pVCpu);
+ TRPM_EXIT_DBG_HOOK(0xe);
+ return VINF_PATM_PATCH_TRAP_PF;
+ }
+
+ rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xE, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xe);
+ Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+ break;
+
+ case VINF_EM_RAW_INTERRUPT_PENDING:
+ Assert(TRPMHasTrap(pVCpu));
+ /* no break; */
+ case VINF_IOM_R3_MMIO_READ:
+ case VINF_IOM_R3_MMIO_WRITE:
+ case VINF_IOM_R3_MMIO_COMMIT_WRITE:
+ case VINF_IOM_R3_MMIO_READ_WRITE:
+ case VINF_PATM_HC_MMIO_PATCH_READ:
+ case VINF_PATM_HC_MMIO_PATCH_WRITE:
+ case VINF_SUCCESS:
+ case VINF_EM_RAW_TO_R3:
+ case VINF_EM_PENDING_REQUEST:
+ case VINF_EM_RAW_TIMER_PENDING:
+ case VINF_EM_NO_MEMORY:
+ case VINF_CSAM_PENDING_ACTION:
+ case VINF_PGM_SYNC_CR3: /** @todo Check this with Sander. */
+ break;
+
+ default:
+ AssertMsg(PATMIsPatchGCAddr(pVM, pRegFrame->eip) == false, ("Patch address for return code %d. eip=%08x\n", rc, pRegFrame->eip));
+ break;
+ }
+ rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
+ Log6(("TRPMGC0e: %Rrc (%04x:%08x EFL=%x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, CPUMRawGetEFlags(pVCpu)));
+ TRPM_EXIT_DBG_HOOK(0xe);
+ return rc;
+}
+
+
+/**
+ * Scans for the EIP in the specified array of trap handlers.
+ *
+ * If we don't find the EIP, we return VERR_TRPM_DONT_PANIC and let the host sort it out.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @param paHandlers The array of trap handler records.
+ * @param pEndRecord The end record (exclusive).
+ */
+static int trpmGCHyperGeneric(PVM pVM, PCPUMCTXCORE pRegFrame, PCTRPMGCHYPER paHandlers, PCTRPMGCHYPER pEndRecord)
+{
+ uintptr_t uEip = (uintptr_t)pRegFrame->eip;
+ Assert(paHandlers <= pEndRecord);
+
+ Log(("trpmGCHyperGeneric: uEip=%x %p-%p\n", uEip, paHandlers, pEndRecord));
+
+#if 0 /// @todo later
+ /*
+ * Start by doing a kind of binary search.
+ */
+ unsigned iStart = 0;
+ unsigned iEnd = pEndRecord - paHandlers;
+ unsigned i = iEnd / 2;
+#endif
+
+ /*
+ * Do a linear search now (in case the array wasn't properly sorted).
+ */
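+    /* A record with uEndEIP == 0 matches a single EIP exactly; otherwise it
+       covers the half-open range [uStartEIP, uEndEIP). */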
+ for (PCTRPMGCHYPER pCur = paHandlers; pCur < pEndRecord; pCur++)
+ {
+ if ( pCur->uStartEIP <= uEip
+ && (pCur->uEndEIP ? pCur->uEndEIP > uEip : pCur->uStartEIP == uEip))
+ return pCur->pfnHandler(pVM, pRegFrame, pCur->uUser);
+ }
+
+ return VERR_TRPM_DONT_PANIC;
+}
+
+
+/**
+ * Hypervisor \#NP ((segment) Not Present) handler.
+ *
+ * Scans for the EIP in the registered trap handlers.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed back to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap0bHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ EMRCHistoryAddExitCsEip(TRPMCPU_2_VMCPU(pTrpmCpu), EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_NP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ return trpmGCHyperGeneric(TRPMCPU_2_VM(pTrpmCpu), pRegFrame, g_aTrap0bHandlers, g_aTrap0bHandlersEnd);
+}
+
+
+/**
+ * Hypervisor \#GP (General Protection Fault) handler.
+ *
+ * Scans for the EIP in the registered trap handlers.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed back to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap0dHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ EMRCHistoryAddExitCsEip(TRPMCPU_2_VMCPU(pTrpmCpu), EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_GP),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+ return trpmGCHyperGeneric(TRPMCPU_2_VM(pTrpmCpu), pRegFrame, g_aTrap0dHandlers, g_aTrap0dHandlersEnd);
+}
+
+
+/**
+ * Hypervisor \#PF (Page Fault) handler.
+ *
+ * Scans for the EIP in the registered trap handlers.
+ *
+ * @returns VBox status code.
+ * VINF_SUCCESS means we completely handled this trap,
+ * other codes are passed back to the host context.
+ *
+ * @param pTrpmCpu Pointer to TRPMCPU data (within VM).
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap0eHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+ EMRCHistoryAddExitCsEip(TRPMCPU_2_VMCPU(pTrpmCpu), EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, X86_XCPT_PF),
+ pRegFrame->cs.Sel, pRegFrame->eip, ASMReadTSC());
+    return trpmGCHyperGeneric(TRPMCPU_2_VM(pTrpmCpu), pRegFrame, g_aTrap0eHandlers, g_aTrap0eHandlersEnd);
+}
+
+
+/**
+ * Deal with hypervisor traps occurring when resuming execution on a trap.
+ *
+ * There is a little problem with recursive RC (hypervisor) traps. We deal with
+ * this by not allowing recursion without it being the subject of a guru
+ * meditation. (We used to try to handle this, but there isn't any good
+ * reason for it.)
+ *
+ * So, do NOT use this for handling RC traps!
+ *
+ * @returns VBox status code. (Anything but VINF_SUCCESS will cause guru.)
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame Register frame.
+ * @param uUser User arg.
+ */
+DECLCALLBACK(int) trpmRCTrapInGeneric(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser)
+{
+ RT_NOREF_PV(pRegFrame);
+ Log(("********************************************************\n"));
+ Log(("trpmRCTrapInGeneric: eip=%RX32 uUser=%#x\n", pRegFrame->eip, uUser));
+ Log(("********************************************************\n"));
+
+ /*
+ * This used to be kind of complicated, but since we stopped storing
+ * the register frame on the stack and instead storing it directly
+ * in the CPUMCPU::Guest structure, we just have to figure out which
+ * status to hand on to the host and let the recompiler/IEM do its
+ * job.
+ */
+ switch (uUser)
+ {
+ case TRPM_TRAP_IN_MOV_GS:
+ case TRPM_TRAP_IN_MOV_FS:
+ case TRPM_TRAP_IN_MOV_ES:
+ case TRPM_TRAP_IN_MOV_DS:
+ TRPMGCHyperReturnToHost(pVM, VINF_EM_RAW_STALE_SELECTOR);
+ break;
+
+ case TRPM_TRAP_IN_IRET:
+ case TRPM_TRAP_IN_IRET | TRPM_TRAP_IN_V86:
+ TRPMGCHyperReturnToHost(pVM, VINF_EM_RAW_IRET_TRAP);
+ break;
+
+ default:
+ AssertMsgFailed(("Invalid uUser=%#x\n", uUser));
+ return VERR_TRPM_BAD_TRAP_IN_OP;
+ }
+
+ AssertMsgFailed(("Impossible!\n"));
+ return VERR_TRPM_IPE_3;
+}
+
+
+/**
+ * Generic hyper trap handler that sets the EIP to @a uUser.
+ *
+ * @returns VBox status code. (Anything but VINF_SUCCESS will cause guru.)
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame Pointer to the register frame (within VM)
+ * @param uUser The user arg, which should be the new EIP address.
+ */
+extern "C" DECLCALLBACK(int) TRPMRCTrapHyperHandlerSetEIP(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser)
+{
+ AssertReturn(MMHyperIsInsideArea(pVM, uUser), VERR_TRPM_IPE_3);
+ pRegFrame->eip = uUser;
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm b/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm
new file mode 100644
index 00000000..c4875cd9
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm
@@ -0,0 +1,1483 @@
+; $Id: TRPMRCHandlersA.asm $
+;; @file
+; TRPM - Raw-mode Context Trap Handlers
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VMMRC.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/vmm/stam.mac"
+%include "VBox/vmm/vm.mac"
+%include "TRPMInternal.mac"
+%include "VBox/err.mac"
+%include "VBox/vmm/trpm.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern IMPNAME(g_TRPM) ; These IMPNAME(g_*) symbols resolve to the import table
+extern IMPNAME(g_TRPMCPU) ; where there is a pointer to the real symbol. PE imports
+extern IMPNAME(g_VM) ; are a bit confusing at first... :-)
+extern IMPNAME(g_trpmGuestCtxCore)
+extern IMPNAME(g_trpmHyperCtxCore)
+extern NAME(trpmRCTrapInGeneric)
+extern NAME(TRPMGCTrap01Handler)
+extern NAME(TRPMGCHyperTrap01Handler)
+%ifdef VBOX_WITH_NMI
+extern NAME(TRPMGCTrap02Handler)
+extern NAME(TRPMGCHyperTrap02Handler)
+%endif
+extern NAME(TRPMGCTrap03Handler)
+extern NAME(TRPMGCHyperTrap03Handler)
+extern NAME(TRPMGCTrap06Handler)
+extern NAME(TRPMGCTrap07Handler)
+extern NAME(TRPMGCTrap0bHandler)
+extern NAME(TRPMGCHyperTrap0bHandler)
+extern NAME(TRPMGCTrap0dHandler)
+extern NAME(TRPMGCHyperTrap0dHandler)
+extern NAME(TRPMGCTrap0eHandler)
+extern NAME(TRPMGCHyperTrap0eHandler)
+extern NAME(CPUMRCAssertPreExecutionSanity)
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+;; Some conditional COM port debugging.
+;%define DEBUG_STUFF 1
+;%define DEBUG_STUFF_TRPG 1
+;%define DEBUG_STUFF_INT 1
+
+
+BEGINCODE
+
+;;
+; Jump table for trap handlers for hypervisor traps.
+;
+g_apfnStaticTrapHandlersHyper:
+ ; No. - Mnemonic - Type - Code - Descript.
+ ; =============================================================
+ dd 0 ; 0 - #DE - F - N - Divide error
+ dd NAME(TRPMGCHyperTrap01Handler) ; 1 - #DB - F/T - N - Single step, INT 1 instruction
+%ifdef VBOX_WITH_NMI
+ dd NAME(TRPMGCHyperTrap02Handler) ; 2 - - I - N - Non-Maskable Interrupt (NMI)
+%else
+ dd 0 ; 2 - - I - N - Non-Maskable Interrupt (NMI)
+%endif
+ dd NAME(TRPMGCHyperTrap03Handler) ; 3 - #BP - T - N - Breakpoint, INT 3 instruction.
+ dd 0 ; 4 - #OF - T - N - Overflow, INTO instruction.
+ dd 0 ; 5 - #BR - F - N - BOUND Range Exceeded, BOUND instruction.
+ dd 0 ; 6 - #UD - F - N - Undefined(/Invalid) Opcode.
+ dd 0 ; 7 - #NM - F - N - Device not available, FP or (F)WAIT instruction.
+ dd 0 ; 8 - #DF - A - 0 - Double fault.
+ dd 0 ; 9 - - F - N - Coprocessor Segment Overrun (obsolete).
+ dd 0 ; a - #TS - F - Y - Invalid TSS, Taskswitch or TSS access.
+ dd NAME(TRPMGCHyperTrap0bHandler) ; b - #NP - F - Y - Segment not present.
+ dd 0 ; c - #SS - F - Y - Stack-Segment fault.
+ dd NAME(TRPMGCHyperTrap0dHandler) ; d - #GP - F - Y - General protection fault.
+ dd NAME(TRPMGCHyperTrap0eHandler) ; e - #PF - F - Y - Page fault.
+ dd 0 ; f - - - - Intel Reserved. Do not use.
+ dd 0 ; 10 - #MF - F - N - x86 FPU Floating-Point Error (Math fault), FP or (F)WAIT instruction.
+ dd 0 ; 11 - #AC - F - 0 - Alignment Check.
+ dd 0 ; 12 - #MC - A - N - Machine Check.
+ dd 0 ; 13 - #XF - F - N - SIMD Floating-Point Exception.
+ dd 0 ; 14 - - - - Intel Reserved. Do not use.
+ dd 0 ; 15 - - - - Intel Reserved. Do not use.
+ dd 0 ; 16 - - - - Intel Reserved. Do not use.
+ dd 0 ; 17 - - - - Intel Reserved. Do not use.
+ dd 0 ; 18 - - - - Intel Reserved. Do not use.
+
+
+;;
+; Jump table for trap handlers for guest traps
+;
+g_apfnStaticTrapHandlersGuest:
+ ; No. - Mnemonic - Type - Code - Descript.
+ ; =============================================================
+ dd 0 ; 0 - #DE - F - N - Divide error
+ dd NAME(TRPMGCTrap01Handler) ; 1 - #DB - F/T - N - Single step, INT 1 instruction
+%ifdef VBOX_WITH_NMI
+ dd NAME(TRPMGCTrap02Handler) ; 2 - - I - N - Non-Maskable Interrupt (NMI)
+%else
+ dd 0 ; 2 - - I - N - Non-Maskable Interrupt (NMI)
+%endif
+ dd NAME(TRPMGCTrap03Handler) ; 3 - #BP - T - N - Breakpoint, INT 3 instruction.
+ dd 0 ; 4 - #OF - T - N - Overflow, INTO instruction.
+ dd 0 ; 5 - #BR - F - N - BOUND Range Exceeded, BOUND instruction.
+ dd NAME(TRPMGCTrap06Handler) ; 6 - #UD - F - N - Undefined(/Invalid) Opcode.
+ dd NAME(TRPMGCTrap07Handler) ; 7 - #NM - F - N - Device not available, FP or (F)WAIT instruction.
+ dd 0 ; 8 - #DF - A - 0 - Double fault.
+ dd 0 ; 9 - - F - N - Coprocessor Segment Overrun (obsolete).
+ dd 0 ; a - #TS - F - Y - Invalid TSS, Taskswitch or TSS access.
+ dd NAME(TRPMGCTrap0bHandler) ; b - #NP - F - Y - Segment not present.
+ dd 0 ; c - #SS - F - Y - Stack-Segment fault.
+ dd NAME(TRPMGCTrap0dHandler) ; d - #GP - F - Y - General protection fault.
+ dd NAME(TRPMGCTrap0eHandler) ; e - #PF - F - Y - Page fault.
+ dd 0 ; f - - - - Intel Reserved. Do not use.
+ dd 0 ; 10 - #MF - F - N - x86 FPU Floating-Point Error (Math fault), FP or (F)WAIT instruction.
+ dd 0 ; 11 - #AC - F - 0 - Alignment Check.
+ dd 0 ; 12 - #MC - A - N - Machine Check.
+ dd 0 ; 13 - #XF - F - N - SIMD Floating-Point Exception.
+ dd 0 ; 14 - - - - Intel Reserved. Do not use.
+ dd 0 ; 15 - - - - Intel Reserved. Do not use.
+ dd 0 ; 16 - - - - Intel Reserved. Do not use.
+ dd 0 ; 17 - - - - Intel Reserved. Do not use.
+ dd 0 ; 18 - - - - Intel Reserved. Do not use.
+
+
+
+;;
+; We start with 24 push <vector no.> + jmp <generic entry point> stubs.
+;
+ALIGNCODE(16)
+BEGINPROC_EXPORTED TRPMGCHandlerGeneric
+%macro TRPMGenericEntry 2
+EXPORTEDNAME_EX RT_CONCAT(TRPMRCHandlerAsmTrap,%2), function
+ db 06ah, i ; push imm8 - note that this is a sign-extended value.
+ jmp %1
+ ALIGNCODE(8)
+%assign i i+1
+%endmacro
+
+%assign i 0 ; start counter.
+ TRPMGenericEntry GenericTrap , 00 ; 0
+ TRPMGenericEntry GenericTrap , 01 ; 1
+ TRPMGenericEntry GenericTrap , 02 ; 2
+ TRPMGenericEntry GenericTrap , 03 ; 3
+ TRPMGenericEntry GenericTrap , 04 ; 4
+ TRPMGenericEntry GenericTrap , 05 ; 5
+ TRPMGenericEntry GenericTrap , 06 ; 6
+ TRPMGenericEntry GenericTrap , 07 ; 7
+ TRPMGenericEntry GenericTrapErrCode, 08 ; 8
+ TRPMGenericEntry GenericTrap , 09 ; 9
+ TRPMGenericEntry GenericTrapErrCode, 0a ; a
+ TRPMGenericEntry GenericTrapErrCode, 0b ; b
+ TRPMGenericEntry GenericTrapErrCode, 0c ; c
+ TRPMGenericEntry GenericTrapErrCode, 0d ; d
+ TRPMGenericEntry GenericTrapErrCode, 0e ; e
+ TRPMGenericEntry GenericTrap , 0f ; f (reserved)
+ TRPMGenericEntry GenericTrap , 10 ; 10
+ TRPMGenericEntry GenericTrapErrCode, 11 ; 11
+ TRPMGenericEntry GenericTrap , 12 ; 12
+ TRPMGenericEntry GenericTrap , 13 ; 13
+ TRPMGenericEntry GenericTrap , 14 ; 14 (reserved)
+ TRPMGenericEntry GenericTrap , 15 ; 15 (reserved)
+ TRPMGenericEntry GenericTrap , 16 ; 16 (reserved)
+ TRPMGenericEntry GenericTrap , 17 ; 17 (reserved)
+%undef i
+%undef TRPMGenericEntry
+
+;;
+; Main exception handler for the guest context
+;
+; Stack:
+; 14 SS
+; 10 ESP
+; c EFLAGS
+; 8 CS
+; 4 EIP
+; 0 vector number
+;
+; @uses none
+;
+ALIGNCODE(8)
+GenericTrap:
+ ;
+ ; for the present we fake an error code ~0
+ ;
+ push eax
+ mov eax, 0ffffffffh
+ xchg [esp + 4], eax ; get vector number, set error code
+ xchg [esp], eax ; get saved eax, set vector number
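+                      ; The stack now holds the vector number at [esp] and the
+                      ; fake error code at [esp + 4], matching the
+                      ; GenericTrapErrCode frame, and eax is back to its saved value.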
+ jmp short GenericTrapErrCode
+
+
+;;
+; Main exception handler for the guest context with error code
+;
+; Stack:
+; 28 GS (V86 only)
+; 24 FS (V86 only)
+; 20 DS (V86 only)
+; 1C ES (V86 only)
+; 18 SS (only if ring transition.)
+; 14 ESP (only if ring transition.)
+; 10 EFLAGS
+; c CS
+; 8 EIP
+; 4 Error code. (~0 for vectors which don't take an error code.)
+; 0 vector number
+;
+; Error code format:
+;
+;    31            16 15               3    2     1     0
+;   [    reserved    | selector index  | TI | IDT | EXT ]
+;
+;    TI  - GDT (0) / LDT (1); only valid when IDT = 0.
+;    IDT - set if the selector index refers to the IDT.
+;    EXT - the event was caused by an external interrupt.
+;
+; NOTE: Page faults (trap 14) have a different error code
+;
+; @uses none
+;
+ALIGNCODE(8)
+GenericTrapErrCode:
+ cld
+
+ ;
+ ; Save ds, es, fs, gs, eax and ebx so we have a context pointer (ebx) and
+ ; scratch (eax) register to work with. A side effect of using ebx is that
+ ; it's preserved across cdecl calls.
+ ;
+ ; In order to safely access data, we need to load our flat DS & ES selector,
+ ; clear FS and GS (stale guest selector prevention), and make sure
+ ; that CR0.WP is cleared.
+ ;
+ push ds ; +14h
+ push es ; +10h
+ push fs ; +0ch
+ push gs ; +08h
+ push eax ; +04h
+ push ebx ; +00h
+%push StackFrame
+%define %$STK_SAVED_EBX esp
+%define %$STK_SAVED_EAX esp + 04h
+%define %$STK_SAVED_GS esp + 08h
+%define %$STK_SAVED_FS esp + 0ch
+%define %$STK_SAVED_ES esp + 10h
+%define %$STK_SAVED_DS esp + 14h
+%define %$ESPOFF 18h
+%define %$STK_VECTOR esp + 00h + %$ESPOFF
+%define %$STK_ERRCD esp + 04h + %$ESPOFF
+%define %$STK_EIP esp + 08h + %$ESPOFF
+%define %$STK_CS esp + 0ch + %$ESPOFF
+%define %$STK_EFLAGS esp + 10h + %$ESPOFF
+%define %$STK_ESP esp + 14h + %$ESPOFF
+%define %$STK_SS esp + 18h + %$ESPOFF
+%define %$STK_V86_ES esp + 1ch + %$ESPOFF
+%define %$STK_V86_DS esp + 20h + %$ESPOFF
+%define %$STK_V86_FS esp + 24h + %$ESPOFF
+%define %$STK_V86_GS esp + 28h + %$ESPOFF
+
+ mov bx, ss ; load
+ mov ds, bx
+ mov es, bx
+
+ xor bx, bx ; load 0 into gs and fs.
+ mov gs, bx
+ mov fs, bx
+
+ mov eax, cr0 ;; @todo eliminate this read?
+ and eax, ~X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+
+ mov ebx, IMP(g_trpmGuestCtxCore) ; Assume GC as the most common.
+ test byte [%$STK_CS], 3h ; check RPL of the cs selector
+ jnz .save_guest_state
+ test dword [%$STK_EFLAGS], X86_EFL_VM; If in V86, then guest.
+ jnz .save_guest_state
+ mov ebx, IMP(g_trpmHyperCtxCore) ; It's raw-mode context, actually.
+
+ ;
+ ; Save the state.
+ ;
+.save_hyper_state:
+ mov [ebx + CPUMCTXCORE.ecx], ecx
+ lea eax, [%$STK_ESP]
+ mov [ebx + CPUMCTXCORE.esp], eax
+ mov cx, ss
+ mov [ebx + CPUMCTXCORE.ss.Sel], cx
+ jmp .save_state_common
+
+.save_guest_state:
+ mov [ebx + CPUMCTXCORE.ecx], ecx
+ mov eax, [%$STK_ESP]
+ mov [ebx + CPUMCTXCORE.esp], eax
+ mov cx, [%$STK_SS]
+ mov [ebx + CPUMCTXCORE.ss.Sel], cx
+
+.save_state_common:
+ mov eax, [%$STK_SAVED_EAX]
+ mov [ebx + CPUMCTXCORE.eax], eax
+ mov [ebx + CPUMCTXCORE.edx], edx
+ mov eax, [%$STK_SAVED_EBX]
+ mov [ebx + CPUMCTXCORE.ebx], eax
+ mov [ebx + CPUMCTXCORE.esi], esi
+ mov [ebx + CPUMCTXCORE.edi], edi
+ mov [ebx + CPUMCTXCORE.ebp], ebp
+
+ mov cx, [%$STK_CS]
+ mov [ebx + CPUMCTXCORE.cs.Sel], cx
+ mov eax, [%$STK_EIP]
+ mov [ebx + CPUMCTXCORE.eip], eax
+ mov eax, [%$STK_EFLAGS]
+ mov [ebx + CPUMCTXCORE.eflags], eax
+
+%if GC_ARCH_BITS == 64 ; zero out the high dwords - probably not necessary any more.
+ mov dword [ebx + CPUMCTXCORE.eax + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ecx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.edx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ebx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.esi + 4], 0
+ mov dword [ebx + CPUMCTXCORE.edi + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ebp + 4], 0
+ mov dword [ebx + CPUMCTXCORE.esp + 4], 0
+ mov dword [ebx + CPUMCTXCORE.eip + 4], 0
+%endif
+
+ test dword [%$STK_EFLAGS], X86_EFL_VM
+ jnz .save_V86_segregs
+
+ mov cx, [%$STK_SAVED_ES]
+ mov [ebx + CPUMCTXCORE.es.Sel], cx
+ mov cx, [%$STK_SAVED_DS]
+ mov [ebx + CPUMCTXCORE.ds.Sel], cx
+ mov cx, [%$STK_SAVED_FS]
+ mov [ebx + CPUMCTXCORE.fs.Sel], cx
+ mov cx, [%$STK_SAVED_GS]
+ mov [ebx + CPUMCTXCORE.gs.Sel], cx
+ jmp .done_saving
+
+ ;
+ ; The DS, ES, FS and GS registers are zeroed in V86 mode and their real
+ ; values are on the stack.
+ ;
+.save_V86_segregs:
+ mov cx, [%$STK_V86_ES]
+ mov [ebx + CPUMCTXCORE.es.Sel], cx
+ mov cx, [%$STK_V86_DS]
+ mov [ebx + CPUMCTXCORE.ds.Sel], cx
+ mov cx, [%$STK_V86_FS]
+ mov [ebx + CPUMCTXCORE.fs.Sel], cx
+ mov cx, [%$STK_V86_GS]
+ mov [ebx + CPUMCTXCORE.gs.Sel], cx
+
+.done_saving:
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Start profiling.
+ ;
+ mov edx, [%$STK_VECTOR]
+ imul edx, edx, byte STAMPROFILEADV_size ; assumes < 128.
+ add edx, TRPM.aStatGCTraps
+ add edx, IMP(g_TRPM)
+ STAM_PROFILE_ADV_START edx
+%endif
+
+ ;
+ ; Store the information about the active trap/interrupt.
+ ;
+ mov esi, IMP(g_TRPMCPU) ; esi = TRPMCPU until resume!
+ movzx edx, byte [%$STK_VECTOR]
+ mov [esi + TRPMCPU.uActiveVector], edx
+ mov edx, [%$STK_ERRCD]
+ mov [esi + TRPMCPU.uActiveErrorCode], edx
+ mov dword [esi + TRPMCPU.enmActiveType], TRPM_TRAP
+ mov edx, cr2 ;; @todo Check how expensive cr2 reads are!
+ mov dword [esi + TRPMCPU.uActiveCR2], edx
+%if GC_ARCH_BITS == 64 ; zero out the high dwords.
+ mov dword [esi + TRPMCPU.uActiveErrorCode + 4], 0
+ mov dword [esi + TRPMCPU.uActiveCR2 + 4], 0
+%endif
+
+ ;
+ ; Check if we're in the raw-mode context (RC / hypervisor) when this happened.
+ ;
+ test dword [%$STK_EFLAGS], X86_EFL_VM
+ jnz short .gc_not_raw_mode_context
+
+ test byte [%$STK_CS], 3h ; check RPL of the cs selector
+ jz .rc_in_raw_mode_context
+
+ ;
+ ; Trap in guest code.
+ ;
+.gc_not_raw_mode_context:
+%ifdef DEBUG_STUFF_TRPG
+ mov eax, [%$STK_ERRCD]
+ mov ecx, 'trpG' ; indicate trap.
+ mov edx, [%$STK_VECTOR]
+ call trpmDbgDumpRegisterFrame
+%endif
+
+ ;
+ ; Do we have a GC handler for these traps?
+ ;
+ mov edx, [%$STK_VECTOR]
+ mov eax, [g_apfnStaticTrapHandlersGuest + edx * 4]
+ or eax, eax
+ jnz short .gc_have_static_handler
+ mov eax, VINF_EM_RAW_GUEST_TRAP
+ jmp short .gc_guest_trap
+
+ ;
+ ; Call static handler.
+ ;
+.gc_have_static_handler:
+ push ebx ; Param 2 - CPUMCTXCORE pointer.
+ push esi ; Param 1 - Pointer to TRPMCPU.
+ call eax
+ add esp, byte 8 ; cleanup stack (cdecl)
+ or eax, eax
+ je near .gc_continue_guest
+
+ ;
+ ; Switch back to the host and process it there.
+ ;
+.gc_guest_trap:
+%ifdef VBOX_WITH_STATISTICS
+ mov edx, [%$STK_VECTOR]
+ imul edx, edx, byte STAMPROFILEADV_size ; assume < 128
+ add edx, IMP(g_TRPM)
+ add edx, TRPM.aStatGCTraps
+ STAM_PROFILE_ADV_STOP edx
+%endif
+ mov edx, IMP(g_VM)
+ call [edx + VM.pfnVMMRCToHostAsm]
+
+ ; We shouldn't ever return this way. So, raise a special IPE if we do.
+.gc_panic_again:
+ mov eax, VERR_TRPM_IPE_3
+ mov edx, IMP(g_VM)
+ call [edx + VM.pfnVMMRCToHostAsm]
+ jmp .gc_panic_again
+
+ ;
+ ; Continue(/Resume/Restart/Whatever) guest execution.
+ ;
+ALIGNCODE(16)
+.gc_continue_guest:
+%ifdef VBOX_WITH_STATISTICS
+ mov edx, [%$STK_VECTOR]
+ imul edx, edx, byte STAMPROFILEADV_size ; assumes < 128
+ add edx, TRPM.aStatGCTraps
+ add edx, IMP(g_TRPM)
+ STAM_PROFILE_ADV_STOP edx
+%endif
+
+%ifdef VBOX_STRICT
+ ; Call CPUM to check sanity.
+ mov edx, IMP(g_VM)
+ push edx
+ call NAME(CPUMRCAssertPreExecutionSanity)
+ add esp, 4
+%endif
+
+ ; For v8086 mode we must branch off before we enable write protection.
+ test dword [ebx + CPUMCTXCORE.eflags], X86_EFL_VM
+ jnz .gc_V86_return
+
+ ; enable WP
+ mov eax, cr0 ;; @todo try to eliminate this read.
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+
+ ; restore guest state and start executing again.
+ mov eax, [ebx + CPUMCTXCORE.eax]
+ mov [%$STK_SAVED_EAX], eax
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ mov edx, [ebx + CPUMCTXCORE.edx]
+ mov eax, [ebx + CPUMCTXCORE.ebx]
+ mov [%$STK_SAVED_EBX], eax
+ mov ebp, [ebx + CPUMCTXCORE.ebp]
+ mov esi, [ebx + CPUMCTXCORE.esi]
+ mov edi, [ebx + CPUMCTXCORE.edi]
+
+ mov eax, [ebx + CPUMCTXCORE.esp]
+ mov [%$STK_ESP], eax
+ mov eax, dword [ebx + CPUMCTXCORE.ss.Sel]
+ mov [%$STK_SS], eax
+ mov eax, [ebx + CPUMCTXCORE.eflags]
+ mov [%$STK_EFLAGS], eax
+ mov eax, dword [ebx + CPUMCTXCORE.cs.Sel]
+ mov [%$STK_CS], eax
+ mov eax, [ebx + CPUMCTXCORE.eip]
+ mov [%$STK_EIP], eax
+
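+ ; Each TRPM_NP_GP_HANDLER below registers a fixup record for the instruction
+ ; that follows it, so a #NP/#GP raised while loading a stale guest selector
+ ; (or by the iret) is routed to trpmRCTrapInGeneric.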
+ mov ax, [ebx + CPUMCTXCORE.gs.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_GS
+ mov gs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.fs.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_FS
+ mov fs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.es.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_ES
+ mov es, ax
+
+ mov ax, [ebx + CPUMCTXCORE.ds.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_DS
+ mov ds, ax
+
+ ; finally restore our scratch register eax and ebx.
+ pop ebx
+ pop eax
+ add esp, 16 + 8 ; skip segregs, error code, and vector number.
+
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_IRET
+ iret
+
+ALIGNCODE(16)
+.gc_V86_return:
+ ;
+ ; We may be returning to V8086 while having entered from protected mode!
+ ; So, we have to push the whole stack frame. There's code in CPUMRC that
+ ; does exactly that, so call it instead of duplicating it.
+ ;
+ push ebx
+ extern NAME(CPUMGCCallV86Code)
+ call NAME(CPUMGCCallV86Code)
+ int3 ; doesn't return...
+
+
+ ;
+ ; Trap in Hypervisor, try to handle it.
+ ;
+ ; (eax = pTRPMCPU)
+ ;
+ALIGNCODE(16)
+.rc_in_raw_mode_context:
+ ; fix ss:esp.
+ lea ecx, [%$STK_ESP] ; calc esp at trap
+ mov [ebx + CPUMCTXCORE.esp], ecx; update esp in register frame
+ mov [ebx + CPUMCTXCORE.ss.Sel], ss ; update ss in register frame
+
+ ; check for temporary handler.
+ movzx edx, byte [esi + TRPMCPU.uActiveVector]
+ mov edi, IMP(g_TRPM)
+ xor ecx, ecx
+ xchg ecx, [edi + TRPM.aTmpTrapHandlers + edx * 4] ; ecx = Temp handler pointer or 0
+ or ecx, ecx
+ jnz short .rc_have_temporary_handler
+
+ ; check for static trap handler.
+ mov ecx, [g_apfnStaticTrapHandlersHyper + edx * 4] ; ecx = Static handler pointer or 0
+ or ecx, ecx
+ jnz short .rc_have_static_handler
+ jmp .rc_abandon_ship
+
+
+ ;
+ ; Temporary trap handler present, call it (CDECL).
+ ;
+.rc_have_temporary_handler:
+ push ebx ; Param 2 - Pointer to CPUMCTXCORE.
+ push IMP(g_VM) ; Param 1 - Pointer to VM.
+ call ecx
+ add esp, byte 8 ; cleanup stack (cdecl)
+
+ cmp eax, byte VINF_SUCCESS ; If completely handled Then resume execution.
+ je near .rc_continue
+ jmp .rc_abandon_ship
+
+
+ ;
+ ; Static trap handler present, call it (CDECL).
+ ;
+.rc_have_static_handler:
+ push ebx ; Param 2 - Pointer to CPUMCTXCORE.
+ push esi ; Param 1 - Pointer to TRPMCPU
+ call ecx
+ add esp, byte 8 ; cleanup stack (cdecl)
+
+ cmp eax, byte VINF_SUCCESS ; If completely handled Then resume execution.
+ je short .rc_continue
+ cmp eax, VINF_EM_DBG_HYPER_STEPPED
+ je short .rc_to_host
+ cmp eax, VINF_EM_DBG_HYPER_BREAKPOINT
+ je short .rc_to_host
+ cmp eax, VINF_EM_DBG_HYPER_ASSERTION
+ je short .rc_to_host
+ cmp eax, VINF_EM_RAW_GUEST_TRAP ; Special #DB case, see bugref:9171.
+ je short .rc_to_host
+ jmp .rc_abandon_ship
+
+ ;
+ ; Pop back to the host to service the error.
+ ;
+.rc_to_host:
+ mov edx, IMP(g_VM)
+ call [edx + VM.pfnVMMRCToHostAsmNoReturn]
+ mov eax, VERR_TRPM_DONT_PANIC
+ jmp .rc_to_host
+
+ ;
+ ; Continue(/Resume/Restart/Whatever) hypervisor execution.
+ ; Don't reset the TRPM state. Caller takes care of that.
+ ;
+ALIGNCODE(16)
+.rc_continue:
+%ifdef DEBUG_STUFF
+ mov eax, [%$STK_ERRCD]
+ mov ecx, 'resH' ; indicate trap.
+ mov edx, [%$STK_VECTOR]
+ call trpmDbgDumpRegisterFrame
+%endif
+
+%ifdef VBOX_WITH_STATISTICS
+ mov edx, [%$STK_VECTOR]
+ imul edx, edx, byte STAMPROFILEADV_size ; assumes < 128
+ add edx, TRPM.aStatGCTraps
+ add edx, IMP(g_TRPM)
+ STAM_PROFILE_ADV_STOP edx
+%endif
+
+ ; restore
+ mov eax, [ebx + CPUMCTXCORE.eax]
+ mov [%$STK_SAVED_EAX], eax
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ mov edx, [ebx + CPUMCTXCORE.edx]
+ mov eax, [ebx + CPUMCTXCORE.ebx]
+ mov [%$STK_SAVED_EBX], eax
+ mov ebp, [ebx + CPUMCTXCORE.ebp]
+ mov esi, [ebx + CPUMCTXCORE.esi]
+ mov edi, [ebx + CPUMCTXCORE.edi]
+
+ ; skipping esp & ss.
+
+ mov eax, [ebx + CPUMCTXCORE.eflags]
+ mov [%$STK_EFLAGS], eax
+ mov eax, dword [ebx + CPUMCTXCORE.cs.Sel]
+ mov [%$STK_CS], eax
+ mov eax, [ebx + CPUMCTXCORE.eip]
+ mov [%$STK_EIP], eax
+
+ mov ax, [ebx + CPUMCTXCORE.gs.Sel]
+ mov gs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.fs.Sel]
+ mov fs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.es.Sel]
+ mov es, ax
+
+ mov ax, [ebx + CPUMCTXCORE.ds.Sel]
+ mov ds, ax
+
+ ; finally restore our scratch register eax and ebx.
+ pop ebx
+ pop eax
+ add esp, 16 + 8 ; skip segregs, error code, and vector number.
+
+ iret
+
+
+ ;
+ ; Internal processing error - don't panic, start meditating!
+ ;
+.rc_abandon_ship:
+%ifdef DEBUG_STUFF
+ mov eax, [%$STK_ERRCD]
+ mov ecx, 'trpH' ; indicate trap.
+ mov edx, [%$STK_VECTOR]
+ call trpmDbgDumpRegisterFrame
+%endif
+
+.rc_do_not_panic:
+ mov edx, IMP(g_VM)
+ mov eax, VERR_TRPM_DONT_PANIC
+ call [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%ifdef DEBUG_STUFF
+ COM_S_PRINT 'bad!!!'
+%endif
+ jmp .rc_do_not_panic ; this shall never ever happen!
+%pop
+ENDPROC TRPMGCHandlerGeneric
+
+
+
+
+
+;;
+; We start with 256 push <vector no.> + jmp <interrupt worker> stubs.
+;
+ALIGNCODE(16)
+BEGINPROC_EXPORTED TRPMGCHandlerInterupt
+ ; NASM has some nice features; here's an example of a loop.
+%assign i 0
+%rep 256
+ db 06ah, i ; push imm8 - note that this is a sign-extended value.
+ jmp ti_GenericInterrupt
+ ALIGNCODE(8)
+%assign i i+1
+%endrep
+
+;;
+; Main interrupt handler for the guest context
+;
+; Stack:
+; 24 GS (V86 only)
+; 20 FS (V86 only)
+; 1C DS (V86 only)
+; 18 ES (V86 only)
+; 14 SS
+; 10 ESP
+; c EFLAGS
+; 8 CS
+; 4 EIP
+; ESP -> 0 Vector number (only use low byte!).
+;
+; @uses none
+ti_GenericInterrupt:
+ cld
+
+ ;
+ ; Save ds, es, fs, gs, eax and ebx so we have a context pointer (ebx) and
+ ; scratch (eax) register to work with. A side effect of using ebx is that
+ ; it's preserved across cdecl calls.
+ ;
+ ; In order to safely access data, we need to load our flat DS & ES selector,
+ ; clear FS and GS (stale guest selector prevention), and make sure
+ ; that CR0.WP is cleared.
+ ;
+ push ds ; +14h
+ push es ; +10h
+ push fs ; +0ch
+ push gs ; +08h
+ push eax ; +04h
+ push ebx ; +00h
+%push StackFrame
+%define %$STK_SAVED_EBX esp
+%define %$STK_SAVED_EAX esp + 04h
+%define %$STK_SAVED_GS esp + 08h
+%define %$STK_SAVED_FS esp + 0ch
+%define %$STK_SAVED_ES esp + 10h
+%define %$STK_SAVED_DS esp + 14h
+%define %$ESPOFF 18h
+%define %$STK_VECTOR esp + 00h + %$ESPOFF
+%define %$STK_EIP esp + 04h + %$ESPOFF
+%define %$STK_CS esp + 08h + %$ESPOFF
+%define %$STK_EFLAGS esp + 0ch + %$ESPOFF
+%define %$STK_ESP esp + 10h + %$ESPOFF
+%define %$STK_SS esp + 14h + %$ESPOFF
+%define %$STK_V86_ES esp + 18h + %$ESPOFF
+%define %$STK_V86_DS esp + 1ch + %$ESPOFF
+%define %$STK_V86_FS esp + 20h + %$ESPOFF
+%define %$STK_V86_GS esp + 24h + %$ESPOFF
+
+ mov bx, ss ; load
+ mov ds, bx
+ mov es, bx
+
+ xor bx, bx ; load 0 into gs and fs.
+ mov gs, bx
+ mov fs, bx
+
+ mov eax, cr0 ;; @todo eliminate this read?
+ and eax, ~X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+
+ mov ebx, IMP(g_trpmGuestCtxCore) ; Assume GC as the most common.
+ test byte [%$STK_CS], 3h ; check RPL of the cs selector
+ jnz .save_guest_state
+ test dword [%$STK_EFLAGS], X86_EFL_VM ; If in V86, then guest.
+ jnz .save_guest_state
+ mov ebx, IMP(g_trpmHyperCtxCore) ; It's raw-mode context, actually.
+
+ ;
+ ; Save the state.
+ ;
+.save_hyper_state:
+ mov [ebx + CPUMCTXCORE.ecx], ecx
+ lea eax, [%$STK_ESP]
+ mov [ebx + CPUMCTXCORE.esp], eax
+ mov cx, ss
+ mov [ebx + CPUMCTXCORE.ss.Sel], cx
+ jmp .save_state_common
+
+.save_guest_state:
+ mov [ebx + CPUMCTXCORE.ecx], ecx
+ mov eax, [%$STK_ESP]
+ mov [ebx + CPUMCTXCORE.esp], eax
+ mov cx, [%$STK_SS]
+ mov [ebx + CPUMCTXCORE.ss.Sel], cx
+
+.save_state_common:
+ mov eax, [%$STK_SAVED_EAX]
+ mov [ebx + CPUMCTXCORE.eax], eax
+ mov [ebx + CPUMCTXCORE.edx], edx
+ mov eax, [%$STK_SAVED_EBX]
+ mov [ebx + CPUMCTXCORE.ebx], eax
+ mov [ebx + CPUMCTXCORE.esi], esi
+ mov [ebx + CPUMCTXCORE.edi], edi
+ mov [ebx + CPUMCTXCORE.ebp], ebp
+
+ mov cx, [%$STK_CS]
+ mov [ebx + CPUMCTXCORE.cs.Sel], cx
+ mov eax, [%$STK_EIP]
+ mov [ebx + CPUMCTXCORE.eip], eax
+ mov eax, [%$STK_EFLAGS]
+ mov [ebx + CPUMCTXCORE.eflags], eax
+
+%if GC_ARCH_BITS == 64 ; zero out the high dwords - probably not necessary any more.
+ mov dword [ebx + CPUMCTXCORE.eax + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ecx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.edx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ebx + 4], 0
+ mov dword [ebx + CPUMCTXCORE.esi + 4], 0
+ mov dword [ebx + CPUMCTXCORE.edi + 4], 0
+ mov dword [ebx + CPUMCTXCORE.ebp + 4], 0
+ mov dword [ebx + CPUMCTXCORE.esp + 4], 0
+ mov dword [ebx + CPUMCTXCORE.eip + 4], 0
+%endif
+
+ test dword [%$STK_EFLAGS], X86_EFL_VM
+ jnz .save_V86_segregs
+
+ mov cx, [%$STK_SAVED_ES]
+ mov [ebx + CPUMCTXCORE.es.Sel], cx
+ mov cx, [%$STK_SAVED_DS]
+ mov [ebx + CPUMCTXCORE.ds.Sel], cx
+ mov cx, [%$STK_SAVED_FS]
+ mov [ebx + CPUMCTXCORE.fs.Sel], cx
+ mov cx, [%$STK_SAVED_GS]
+ mov [ebx + CPUMCTXCORE.gs.Sel], cx
+ jmp .done_saving
+
+ ;
+ ; The DS, ES, FS and GS registers are zeroed in V86 mode and their real
+ ; values are on the stack.
+ ;
+.save_V86_segregs:
+ mov cx, [%$STK_V86_ES]
+ mov [ebx + CPUMCTXCORE.es.Sel], cx
+ mov cx, [%$STK_V86_DS]
+ mov [ebx + CPUMCTXCORE.ds.Sel], cx
+ mov cx, [%$STK_V86_FS]
+ mov [ebx + CPUMCTXCORE.fs.Sel], cx
+ mov cx, [%$STK_V86_GS]
+ mov [ebx + CPUMCTXCORE.gs.Sel], cx
+
+.done_saving:
+
+ ;
+ ; Store the information about the active trap/interrupt.
+ ;
+ mov esi, IMP(g_TRPMCPU) ; esi = TRPMCPU until resume!
+ movzx edx, byte [%$STK_VECTOR]
+ mov [esi + TRPMCPU.uActiveVector], edx
+ mov dword [esi + TRPMCPU.uActiveErrorCode], 0
+ mov dword [esi + TRPMCPU.enmActiveType], TRPM_TRAP
+ mov dword [esi + TRPMCPU.uActiveCR2], edx
+%if GC_ARCH_BITS == 64 ; zero out the high dwords.
+ mov dword [esi + TRPMCPU.uActiveErrorCode + 4], 0
+ mov dword [esi + TRPMCPU.uActiveCR2 + 4], 0
+%endif
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Update statistics.
+ ;
+ mov edi, IMP(g_TRPM)
+ movzx edx, byte [%$STK_VECTOR] ; vector number
+ imul edx, edx, byte STAMCOUNTER_size
+ add edx, [edi + TRPM.paStatHostIrqRC]
+ STAM_COUNTER_INC edx
+%endif
+
+ ;
+ ; Check if we're in the raw-mode context (RC / hypervisor) when this happened.
+ ;
+ test dword [%$STK_EFLAGS], X86_EFL_VM
+ jnz short .gc_not_raw_mode_context
+
+ test byte [%$STK_CS], 3h ; check RPL of the cs selector
+ jz .rc_in_raw_mode_context
+
+ ;
+ ; Trap in guest code.
+ ;
+.gc_not_raw_mode_context:
+ and dword [ebx + CPUMCTXCORE.eflags], ~X86_EFL_RF ; Clear RF.
+ ; The guest shall not see this in its state.
+%ifdef DEBUG_STUFF_INT
+ xor eax, eax
+ mov ecx, 'intG' ; indicate trap in GC.
+ movzx edx, byte [%$STK_VECTOR]
+ call trpmDbgDumpRegisterFrame
+%endif
+
+ ;
+ ; Switch back to the host and process it there.
+ ;
+ mov edx, IMP(g_VM)
+ mov eax, VINF_EM_RAW_INTERRUPT
+ call [edx + VM.pfnVMMRCToHostAsm]
+
+ ;
+ ; We've returned!
+ ;
+
+ ; Reset TRPM state
+ xor edx, edx
+ dec edx ; edx = 0ffffffffh
+ xchg [esi + TRPMCPU.uActiveVector], edx
+ mov [esi + TRPMCPU.uPrevVector], edx
+
+%ifdef VBOX_STRICT
+ ; Call CPUM to check sanity.
+ mov edx, IMP(g_VM)
+ push edx
+ call NAME(CPUMRCAssertPreExecutionSanity)
+ add esp, 4
+%endif
+
+ ; For v8086 mode we must branch off before we enable write protection.
+ test dword [ebx + CPUMCTXCORE.eflags], X86_EFL_VM
+ jnz .gc_V86_return
+
+ ; enable WP
+ mov eax, cr0 ;; @todo try to eliminate this read.
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+
+ ; restore guest state and start executing again.
+ mov eax, [ebx + CPUMCTXCORE.eax]
+ mov [%$STK_SAVED_EAX], eax
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ mov edx, [ebx + CPUMCTXCORE.edx]
+ mov eax, [ebx + CPUMCTXCORE.ebx]
+ mov [%$STK_SAVED_EBX], eax
+ mov ebp, [ebx + CPUMCTXCORE.ebp]
+ mov esi, [ebx + CPUMCTXCORE.esi]
+ mov edi, [ebx + CPUMCTXCORE.edi]
+
+ mov eax, [ebx + CPUMCTXCORE.esp]
+ mov [%$STK_ESP], eax
+ mov eax, dword [ebx + CPUMCTXCORE.ss.Sel]
+ mov [%$STK_SS], eax
+ mov eax, [ebx + CPUMCTXCORE.eflags]
+ mov [%$STK_EFLAGS], eax
+ mov eax, dword [ebx + CPUMCTXCORE.cs.Sel]
+ mov [%$STK_CS], eax
+ mov eax, [ebx + CPUMCTXCORE.eip]
+ mov [%$STK_EIP], eax
+
+ mov ax, [ebx + CPUMCTXCORE.gs.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_GS
+ mov gs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.fs.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_FS
+ mov fs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.es.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_ES
+ mov es, ax
+
+ mov ax, [ebx + CPUMCTXCORE.ds.Sel]
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_MOV_DS
+ mov ds, ax
+
+ ; finally restore our scratch registers eax and ebx.
+ pop ebx
+ pop eax
+ add esp, 16 + 4 ; skip segregs, and vector number.
+
+ TRPM_NP_GP_HANDLER NAME(trpmRCTrapInGeneric), TRPM_TRAP_IN_IRET
+ iret
+
+ALIGNCODE(16)
+.gc_V86_return:
+ ;
+ ; We may be returning to V8086 while having entered from protected mode!
+ ; So, we have to push the whole stack frame. There's code in CPUMRC that
+ ; does exactly that, so call it instead of duplicating it.
+ ;
+ push ebx
+ extern NAME(CPUMGCCallV86Code)
+ call NAME(CPUMGCCallV86Code)
+ int3 ; doesn't return...
+
+
+ ; -+- Entry point -+-
+ ;
+ ; We're in hypervisor mode, which means there is no guest context
+ ; and special care must be taken to restore the hypervisor
+ ; context correctly.
+ ;
+ ; ATM the only place this can happen is when entering a trap handler.
+ ; We make ASSUMPTIONS about this with respect to the CR0.WP bit.
+ ;
+ALIGNCODE(16)
+.rc_in_raw_mode_context:
+ ; fix ss:esp.
+ lea ecx, [%$STK_ESP] ; calc esp at trap
+ mov [ebx + CPUMCTXCORE.esp], ecx ; update esp in register frame
+ mov [ebx + CPUMCTXCORE.ss.Sel], ss ; update ss in register frame
+
+%ifdef DEBUG_STUFF_INT
+ xor eax, eax
+ mov ecx, 'intH' ; indicate trap in RC.
+ movzx edx, byte [%$STK_VECTOR]
+ call trpmDbgDumpRegisterFrame
+%endif
+
+ mov edx, IMP(g_VM)
+ mov eax, VINF_EM_RAW_INTERRUPT_HYPER
+ call [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%ifdef DEBUG_STUFF_INT
+ COM_S_CHAR '!'
+%endif
+
+ ;
+ ; We've returned!
+ ; Continue(/Resume/Restart/Whatever) hypervisor execution.
+ ;
+
+ ; Reset TRPM state - don't record this.
+ ;mov esi, IMP(g_TRPMCPU)
+ mov dword [esi + TRPMCPU.uActiveVector], 0ffffffffh
+
+ ;
+ ; Restore the hypervisor context and return.
+ ;
+ mov eax, [ebx + CPUMCTXCORE.eax]
+ mov [%$STK_SAVED_EAX], eax
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ mov edx, [ebx + CPUMCTXCORE.edx]
+ mov eax, [ebx + CPUMCTXCORE.ebx]
+ mov [%$STK_SAVED_EBX], eax
+ mov ebp, [ebx + CPUMCTXCORE.ebp]
+ mov esi, [ebx + CPUMCTXCORE.esi]
+ mov edi, [ebx + CPUMCTXCORE.edi]
+
+ ; skipping esp & ss.
+
+ mov eax, [ebx + CPUMCTXCORE.eflags]
+ mov [%$STK_EFLAGS], eax
+ mov eax, dword [ebx + CPUMCTXCORE.cs.Sel]
+ mov [%$STK_CS], eax
+ mov eax, [ebx + CPUMCTXCORE.eip]
+ mov [%$STK_EIP], eax
+
+ mov ax, [ebx + CPUMCTXCORE.gs.Sel]
+ mov gs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.fs.Sel]
+ mov fs, ax
+
+ mov ax, [ebx + CPUMCTXCORE.es.Sel]
+ mov es, ax
+
+ mov ax, [ebx + CPUMCTXCORE.ds.Sel]
+ mov ds, ax
+
+ ; finally restore our scratch registers eax and ebx.
+ pop ebx
+ pop eax
+ add esp, 16 + 4 ; skip segregs, and vector number.
+
+ iret
+%pop
+ENDPROC TRPMGCHandlerInterupt
+
+
+
+;;
+; Trap handler for #MC
+;
+; This handler will forward the #MC to the host OS. Since this
+; is generalized in the generic interrupt handler, we just disable
+; interrupts, push the vector number, and jump to the generic code.
+;
+; Stack:
+; 10 SS (only if ring transition.)
+; 0C ESP (only if ring transition.)
+; 8 EFLAGS
+; 4 CS
+; 0 EIP
+;
+; @uses none
+;
+ALIGNCODE(16)
+BEGINPROC_EXPORTED TRPMGCHandlerTrap12
+ push byte 12h
+ jmp ti_GenericInterrupt
+ENDPROC TRPMGCHandlerTrap12
+
+
+
+
+;;
+; Trap handler for double fault (#DF).
+;
+; This is a special trap handler that executes in a separate task with its own
+; TSS, using one of the intermediate memory contexts instead of the shadow
+; context. The handler will unconditionally print a report to the COM port
+; configured for the COM_S_* macros before attempting to return to the host.
+; If it ends up double faulting more than 10 times, it will simply cause a
+; triple fault to get us out of the mess.
+;
+; @param esp Half way down the hypervisor stack + the trap frame.
+; @param ebp Half way down the hypervisor stack.
+; @param eflags Interrupts disabled, nested flag is probably set (we don't care).
+; @param ecx The address of the hypervisor TSS.
+; @param edi Same as ecx.
+; @param eax Same as ecx.
+; @param edx Address of the VM structure.
+; @param esi Same as edx.
+; @param ebx Same as edx.
+; @param ss Hypervisor DS.
+; @param ds Hypervisor DS.
+; @param es Hypervisor DS.
+; @param fs 0
+; @param gs 0
+;
+;
+; @remark To be able to catch errors with WP turned off, it is required that the
+; TSS GDT descriptor and the TSSes are writable (X86_PTE_RW). See SELM.cpp
+; for how to enable this.
+;
+; @remark It is *not* safe to resume the VMM after a double fault. (At least not
+; without clearing the busy flag of the TssTrap8 and fixing whatever caused it.)
+;
+ALIGNCODE(16)
+BEGINPROC_EXPORTED TRPMGCHandlerTrap08
+ ; be careful.
+ cli
+ cld
+
+ ;
+ ; Disable write protection.
+ ;
+ mov eax, cr0
+ and eax, ~X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+
+ ;
+ ; Load Hypervisor DS and ES (get it from the SS) - paranoia, but the TSS could be overwritten.. :)
+ ;
+ mov eax, ss
+ mov ds, eax
+ mov es, eax
+
+ COM_S_PRINT 10,13,'*** Guru Meditation 00000008 - Double Fault! ***',10,13
+
+ COM_S_PRINT 'VM='
+ COM_S_DWORD_REG edx
+ COM_S_PRINT ' prevTSS='
+ COM_S_DWORD_REG ecx
+ COM_S_PRINT ' prevCR3='
+ mov eax, [ecx + VBOXTSS.cr3]
+ COM_S_DWORD_REG eax
+ COM_S_PRINT ' prevLdtr='
+ movzx eax, word [ecx + VBOXTSS.selLdt]
+ COM_S_DWORD_REG eax
+ COM_S_NEWLINE
+
+ ;
+ ; Create CPUMCTXCORE structure.
+ ;
+ mov ebx, IMP(g_trpmHyperCtxCore) ; It's raw-mode context, actually.
+
+ mov eax, [ecx + VBOXTSS.eip]
+ mov [ebx + CPUMCTXCORE.eip], eax
+%if GC_ARCH_BITS == 64
+ ; zero out the high dword
+ mov dword [ebx + CPUMCTXCORE.eip + 4], 0
+%endif
+ mov eax, [ecx + VBOXTSS.eflags]
+ mov [ebx + CPUMCTXCORE.eflags], eax
+
+ movzx eax, word [ecx + VBOXTSS.cs]
+ mov dword [ebx + CPUMCTXCORE.cs.Sel], eax
+ movzx eax, word [ecx + VBOXTSS.ds]
+ mov dword [ebx + CPUMCTXCORE.ds.Sel], eax
+ movzx eax, word [ecx + VBOXTSS.es]
+ mov dword [ebx + CPUMCTXCORE.es.Sel], eax
+ movzx eax, word [ecx + VBOXTSS.fs]
+ mov dword [ebx + CPUMCTXCORE.fs.Sel], eax
+ movzx eax, word [ecx + VBOXTSS.gs]
+ mov dword [ebx + CPUMCTXCORE.gs.Sel], eax
+ movzx eax, word [ecx + VBOXTSS.ss]
+ mov [ebx + CPUMCTXCORE.ss.Sel], eax
+ mov eax, [ecx + VBOXTSS.esp]
+ mov [ebx + CPUMCTXCORE.esp], eax
+%if GC_ARCH_BITS == 64
+ ; zero out the high dword
+ mov dword [ebx + CPUMCTXCORE.esp + 4], 0
+%endif
+ mov eax, [ecx + VBOXTSS.ecx]
+ mov [ebx + CPUMCTXCORE.ecx], eax
+ mov eax, [ecx + VBOXTSS.edx]
+ mov [ebx + CPUMCTXCORE.edx], eax
+ mov eax, [ecx + VBOXTSS.ebx]
+ mov [ebx + CPUMCTXCORE.ebx], eax
+ mov eax, [ecx + VBOXTSS.eax]
+ mov [ebx + CPUMCTXCORE.eax], eax
+ mov eax, [ecx + VBOXTSS.ebp]
+ mov [ebx + CPUMCTXCORE.ebp], eax
+ mov eax, [ecx + VBOXTSS.esi]
+ mov [ebx + CPUMCTXCORE.esi], eax
+ mov eax, [ecx + VBOXTSS.edi]
+ mov [ebx + CPUMCTXCORE.edi], eax
+
+ ;
+ ; Show regs
+ ;
+ mov eax, 0ffffffffh
+ mov ecx, 'trpH' ; indicate trap.
+ mov edx, 08h ; vector number
+ call trpmDbgDumpRegisterFrame
+
+ ;
+ ; Should we try go back?
+ ;
+ inc dword [df_Count]
+ cmp dword [df_Count], byte 10
+ jb df_to_host
+ jmp df_tripple_fault
+df_Count: dd 0
+
+ ;
+ ; Try return to the host.
+ ;
+df_to_host:
+ COM_S_PRINT 'Trying to return to host...',10,13
+ mov edx, IMP(g_VM)
+ mov eax, VERR_TRPM_PANIC
+ call [edx + VM.pfnVMMRCToHostAsmNoReturn]
+ jmp short df_to_host
+
+ ;
+ ; Perform a triple fault. Zeroing the IDT limit below makes the provoked
+ ; #PF undeliverable; the resulting #DF cannot be delivered either, so the
+ ; CPU shuts down (i.e. the machine triple faults and resets).
+ ;
+df_tripple_fault:
+ COM_S_PRINT 'Giving up - triple faulting the machine...',10,13
+ push byte 0
+ push byte 0
+ sidt [esp]
+ mov word [esp], 0
+ lidt [esp]
+ xor eax, eax
+ mov dword [eax], 0
+ jmp df_tripple_fault
+
+ENDPROC TRPMGCHandlerTrap08
+
+
+
+
+;;
+; Internal procedure used to dump registers.
+;
+; @param ebx Pointer to CPUMCTXCORE.
+; @param edx Vector number
+; @param ecx 'trap' if trap, 'int' if interrupt.
+; @param eax Error code if trap.
+;
+trpmDbgDumpRegisterFrame:
+ sub esp, byte 8 ; working space for sidt/sgdt/etc
+
+; Init _must_ be done on host before crashing!
+; push edx
+; push eax
+; COM_INIT
+; pop eax
+; pop edx
+
+ cmp ecx, 'trpH'
+ je near tddrf_trpH
+ cmp ecx, 'trpG'
+ je near tddrf_trpG
+ cmp ecx, 'intH'
+ je near tddrf_intH
+ cmp ecx, 'intG'
+ je near tddrf_intG
+ cmp ecx, 'resH'
+ je near tddrf_resH
+ COM_S_PRINT 10,13,'*** Bogus Dump Code '
+ jmp tddrf_regs
+
+%if 1 ; the verbose version
+
+tddrf_intG:
+ COM_S_PRINT 10,13,'*** Interrupt (Guest) '
+ COM_S_DWORD_REG edx
+ jmp tddrf_regs
+
+tddrf_intH:
+ COM_S_PRINT 10,13,'*** Interrupt (Hypervisor) '
+ COM_S_DWORD_REG edx
+ jmp tddrf_regs
+
+tddrf_trpG:
+ COM_S_PRINT 10,13,'*** Trap '
+ jmp tddrf_trap_rest
+
+%else ; the short version
+
+tddrf_intG:
+ COM_S_CHAR 'I'
+ jmp tddrf_ret
+
+tddrf_intH:
+ COM_S_CHAR 'i'
+ jmp tddrf_ret
+
+tddrf_trpG:
+ COM_S_CHAR 'T'
+ jmp tddrf_ret
+
+%endif ; the short version
+
+tddrf_trpH:
+ COM_S_PRINT 10,13,'*** Guru Meditation '
+ jmp tddrf_trap_rest
+
+tddrf_resH:
+ COM_S_PRINT 10,13,'*** Resuming Hypervisor Trap '
+ jmp tddrf_trap_rest
+
+tddrf_trap_rest:
+ COM_S_DWORD_REG edx
+ COM_S_PRINT ' ErrorCode='
+ COM_S_DWORD_REG eax
+ COM_S_PRINT ' cr2='
+ mov ecx, cr2
+ COM_S_DWORD_REG ecx
+
+tddrf_regs:
+ COM_S_PRINT ' ***',10,13,'cs:eip='
+ movzx ecx, word [ebx + CPUMCTXCORE.cs.Sel]
+ COM_S_DWORD_REG ecx
+ COM_S_CHAR ':'
+ mov ecx, [ebx + CPUMCTXCORE.eip]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' ss:esp='
+ movzx ecx, word [ebx + CPUMCTXCORE.ss.Sel]
+ COM_S_DWORD_REG ecx
+ COM_S_CHAR ':'
+ mov ecx, [ebx + CPUMCTXCORE.esp]
+ COM_S_DWORD_REG ecx
+
+
+ sgdt [esp]
+ COM_S_PRINT 10,13,' gdtr='
+ movzx ecx, word [esp]
+ COM_S_DWORD_REG ecx
+ COM_S_CHAR ':'
+ mov ecx, [esp + 2]
+ COM_S_DWORD_REG ecx
+
+ sidt [esp]
+ COM_S_PRINT ' idtr='
+ movzx ecx, word [esp]
+ COM_S_DWORD_REG ecx
+ COM_S_CHAR ':'
+ mov ecx, [esp + 2]
+ COM_S_DWORD_REG ecx
+
+
+ str [esp] ; yasm BUG! it generates sldt [esp] here! YASMCHECK!
+ COM_S_PRINT 10,13,' tr='
+ movzx ecx, word [esp]
+ COM_S_DWORD_REG ecx
+
+ sldt [esp]
+ COM_S_PRINT ' ldtr='
+ movzx ecx, word [esp]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' eflags='
+ mov ecx, [ebx + CPUMCTXCORE.eflags]
+ COM_S_DWORD_REG ecx
+
+
+ COM_S_PRINT 10,13,'cr0='
+ mov ecx, cr0
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' cr2='
+ mov ecx, cr2
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' cr3='
+ mov ecx, cr3
+ COM_S_DWORD_REG ecx
+ COM_S_PRINT ' cr4='
+ mov ecx, cr4
+ COM_S_DWORD_REG ecx
+
+
+ COM_S_PRINT 10,13,' ds='
+ movzx ecx, word [ebx + CPUMCTXCORE.ds.Sel]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' es='
+ movzx ecx, word [ebx + CPUMCTXCORE.es.Sel]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' fs='
+ movzx ecx, word [ebx + CPUMCTXCORE.fs.Sel]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' gs='
+ movzx ecx, word [ebx + CPUMCTXCORE.gs.Sel]
+ COM_S_DWORD_REG ecx
+
+
+ COM_S_PRINT 10,13,'eax='
+ mov ecx, [ebx + CPUMCTXCORE.eax]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' ebx='
+ mov ecx, [ebx + CPUMCTXCORE.ebx]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' ecx='
+ mov ecx, [ebx + CPUMCTXCORE.ecx]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' edx='
+ mov ecx, [ebx + CPUMCTXCORE.edx]
+ COM_S_DWORD_REG ecx
+
+
+ COM_S_PRINT 10,13,'esi='
+ mov ecx, [ebx + CPUMCTXCORE.esi]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' edi='
+ mov ecx, [ebx + CPUMCTXCORE.edi]
+ COM_S_DWORD_REG ecx
+
+ COM_S_PRINT ' ebp='
+ mov ecx, [ebx + CPUMCTXCORE.ebp]
+ COM_S_DWORD_REG ecx
+
+
+ COM_S_NEWLINE
+
+tddrf_ret:
+ add esp, byte 8
+ ret
+
diff --git a/src/VBox/VMM/VMMRC/VMMRC.cpp b/src/VBox/VMM/VMMRC/VMMRC.cpp
new file mode 100644
index 00000000..1599bf28
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRC.cpp
@@ -0,0 +1,464 @@
+/* $Id: VMMRC.cpp $ */
+/** @file
+ * VMM - Raw-mode Context.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VMM
+#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGip */
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/pgm.h>
+#include "VMMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/sup.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/initterm.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Default logger instance. */
+extern "C" DECLIMPORT(RTLOGGERRC) g_Logger;
+/** Default release logger instance. */
+extern "C" DECLIMPORT(RTLOGGERRC) g_RelLogger;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static int vmmGCTest(PVM pVM, unsigned uOperation, unsigned uArg);
+static DECLCALLBACK(int) vmmGCTestTmpPFHandler(PVM pVM, PCPUMCTXCORE pRegFrame);
+static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame);
+DECLASM(bool) vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value);
+DECLASM(bool) vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value);
+
+
+
+/**
+ * The GC entry point.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param uOperation Which operation to execute (VMMRCOPERATION).
+ * @param uArg Argument to that operation.
+ */
+VMMRCDECL(int) VMMRCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...)
+{
+ /** @todo */
+ switch (uOperation)
+ {
+ /*
+ * Init RC modules.
+ */
+ case VMMRC_DO_VMMRC_INIT:
+ {
+ /*
+ * Validate the svn revision (uArg) and build type (ellipsis).
+ */
+ if (uArg != VMMGetSvnRev())
+ return VERR_VMM_RC_VERSION_MISMATCH;
+
+ va_list va;
+ va_start(va, uArg);
+
+ uint32_t uBuildType = va_arg(va, uint32_t);
+ if (uBuildType != vmmGetBuildType())
+ {
+ va_end(va);
+ return VERR_VMM_RC_VERSION_MISMATCH;
+ }
+
+ /*
+ * Initialize the runtime.
+ */
+ uint64_t u64TS = va_arg(va, uint64_t);
+
+ va_end(va);
+
+ int rc = RTRCInit(u64TS);
+ Log(("VMMRCEntry: VMMRC_DO_VMMRC_INIT - uArg=%u (svn revision) u64TS=%RX64; rc=%Rrc\n", uArg, u64TS, rc));
+ AssertRCReturn(rc, rc);
+
+ rc = PGMRegisterStringFormatTypes();
+ AssertRCReturn(rc, rc);
+
+ rc = PGMRCDynMapInit(pVM);
+ AssertRCReturn(rc, rc);
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Testcase which is used to test interrupt forwarding.
+ * It spins for a while with interrupts enabled.
+ */
+ case VMMRC_DO_TESTCASE_HYPER_INTERRUPT:
+ {
+ uint32_t volatile i = 0;
+ ASMIntEnable();
+ while (i < _2G32)
+ i++;
+ ASMIntDisable();
+ return 0;
+ }
+
+ /*
+ * Testcase which simply returns; this is used for
+ * profiling of the switcher.
+ */
+ case VMMRC_DO_TESTCASE_NOP:
+ return 0;
+
+ /*
+ * Testcase that executes a privileged instruction to force a world switch (in both SVM & VMX).
+ */
+ case VMMRC_DO_TESTCASE_HM_NOP:
+ ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
+ return 0;
+
+ /*
+ * Delay for ~100us.
+ */
+ case VMMRC_DO_TESTCASE_INTERRUPT_MASKING:
+ {
+ uint64_t u64MaxTicks = (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) != ~(uint64_t)0
+ ? SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage)
+ : _2G)
+ / 10000;
+ uint64_t u64StartTSC = ASMReadTSC();
+ uint64_t u64TicksNow;
+ uint32_t volatile i = 0;
+
+ do
+ {
+ /* waste some time and protect against getting stuck. */
+ for (uint32_t volatile j = 0; j < 1000; j++, i++)
+ if (i > _2G32)
+ return VERR_GENERAL_FAILURE;
+
+ /* check if we're done. */
+ u64TicksNow = ASMReadTSC() - u64StartTSC;
+ } while (u64TicksNow < u64MaxTicks);
+
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Trap testcases and unknown operations.
+ */
+ default:
+ if ( uOperation >= VMMRC_DO_TESTCASE_TRAP_FIRST
+ && uOperation < VMMRC_DO_TESTCASE_TRAP_LAST)
+ return vmmGCTest(pVM, uOperation, uArg);
+ return VERR_INVALID_PARAMETER;
+ }
+}
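+
+/* Note (descriptive): for VMMRC_DO_VMMRC_INIT the ellipsis carries, in this
+   order, a uint32_t build type (checked against vmmGetBuildType()) and a
+   uint64_t timestamp that is handed to RTRCInit(); uArg itself carries the
+   SVN revision checked against VMMGetSvnRev(). */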
+
+
+/**
+ * Internal RC logger worker: Flush logger.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pLogger The logger instance to flush.
+ * @remark This function must be exported!
+ */
+VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger)
+{
+ PVM pVM = &g_VM;
+ NOREF(pLogger);
+ if (pVM->vmm.s.fRCLoggerFlushingDisabled)
+ return VINF_SUCCESS; /* fail quietly. */
+ return VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
+}
+
+
+/**
+ * Flush logger if almost full.
+ *
+ * @param pVM The cross context VM structure.
+ */
+VMMRCDECL(void) VMMRCLogFlushIfFull(PVM pVM)
+{
+ if ( pVM->vmm.s.pRCLoggerRC
+ && pVM->vmm.s.pRCLoggerRC->offScratch >= (sizeof(pVM->vmm.s.pRCLoggerRC->achScratch)*3/4))
+ {
+ if (pVM->vmm.s.fRCLoggerFlushingDisabled)
+ return; /* fail quietly. */
+ VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
+ }
+}
+
+
+/**
+ * Switches from guest context to host context.
+ *
+ * @param pVM The cross context VM structure.
+ * @param rc The status code.
+ */
+VMMRCDECL(void) VMMRCGuestToHost(PVM pVM, int rc)
+{
+ pVM->vmm.s.pfnRCToHost(rc);
+}
+
+
+/**
+ * Calls the ring-0 host code.
+ *
+ * @param pVM The cross context VM structure.
+ */
+DECLASM(void) vmmRCProbeFireHelper(PVM pVM)
+{
+ pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_TRACER);
+}
+
+
+
+/**
+ * Execute the trap testcase.
+ *
+ * There is some common code here; that's why we're collecting these
+ * testcases like this. Odd numbered variations (uArg) are executed with
+ * write protection (WP) enabled.
+ *
+ * @returns VINF_SUCCESS if it was a testcase setup up to continue and did so successfully.
+ * @returns VERR_NOT_IMPLEMENTED if the testcase wasn't implemented.
+ * @returns VERR_GENERAL_FAILURE if the testcase continued when it shouldn't.
+ *
+ * @param pVM The cross context VM structure.
+ * @param uOperation The testcase.
+ * @param uArg The variation. See function description for odd / even details.
+ *
+ * @remark Careful with the trap 08 testcase and WP, it will triple
+ * fault the box if the TSS, the Trap8 TSS and the fault TSS
+ * GDTE are in pages which are read-only.
+ * See bottom of SELMR3Init().
+ */
+static int vmmGCTest(PVM pVM, unsigned uOperation, unsigned uArg)
+{
+ /*
+ * Set up the testcase.
+ */
+#if 0
+ switch (uOperation)
+ {
+ default:
+ break;
+ }
+#endif
+
+ /*
+ * Enable WP if odd variation.
+ */
+ if (uArg & 1)
+ vmmGCEnableWP();
+
+ /*
+ * Execute the testcase.
+ */
+ int rc = VERR_NOT_IMPLEMENTED;
+ switch (uOperation)
+ {
+ //case VMMRC_DO_TESTCASE_TRAP_0:
+ //case VMMRC_DO_TESTCASE_TRAP_1:
+ //case VMMRC_DO_TESTCASE_TRAP_2:
+
+ case VMMRC_DO_TESTCASE_TRAP_3:
+ {
+ if (uArg <= 1)
+ rc = vmmGCTestTrap3();
+ break;
+ }
+
+ //case VMMRC_DO_TESTCASE_TRAP_4:
+ //case VMMRC_DO_TESTCASE_TRAP_5:
+ //case VMMRC_DO_TESTCASE_TRAP_6:
+ //case VMMRC_DO_TESTCASE_TRAP_7:
+
+ case VMMRC_DO_TESTCASE_TRAP_8:
+ {
+#ifndef DEBUG_bird /** @todo dynamic check that this won't triple fault... */
+ if (uArg & 1)
+ break;
+#endif
+ if (uArg <= 1)
+ rc = vmmGCTestTrap8();
+ break;
+ }
+
+ //VMMRC_DO_TESTCASE_TRAP_9,
+ //VMMRC_DO_TESTCASE_TRAP_0A,
+ //VMMRC_DO_TESTCASE_TRAP_0B,
+ //VMMRC_DO_TESTCASE_TRAP_0C,
+
+ case VMMRC_DO_TESTCASE_TRAP_0D:
+ {
+ if (uArg <= 1)
+ rc = vmmGCTestTrap0d();
+ break;
+ }
+
+ case VMMRC_DO_TESTCASE_TRAP_0E:
+ {
+ if (uArg <= 1)
+ rc = vmmGCTestTrap0e();
+ else if (uArg == 2 || uArg == 4)
+ {
+ /*
+ * Test the use of a temporary #PF handler.
+ */
+ rc = TRPMGCSetTempHandler(pVM, X86_XCPT_PF, uArg != 4 ? vmmGCTestTmpPFHandler : vmmGCTestTmpPFHandlerCorruptFS);
+ if (RT_SUCCESS(rc))
+ {
+ rc = vmmGCTestTrap0e();
+
+ /* in case it didn't fire. */
+ int rc2 = TRPMGCSetTempHandler(pVM, X86_XCPT_PF, NULL);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+ }
+ break;
+ }
+ }
+
+ /*
+ * Re-enable WP.
+ */
+ if (uArg & 1)
+ vmmGCDisableWP();
+
+ return rc;
+}
+
+
+
+/**
+ * Reads a range of MSRs.
+ *
+ * This is called directly via VMMR3CallRC.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param uMsr The MSR to start at.
+ * @param cMsrs The number of MSRs to read.
+ * @param paResults Where to store the results. This must be large
+ * enough to hold at least @a cMsrs result values.
+ */
+extern "C" VMMRCDECL(int)
+VMMRCTestReadMsrs(PVM pVM, uint32_t uMsr, uint32_t cMsrs, PVMMTESTMSRENTRY paResults)
+{
+ AssertReturn(cMsrs <= 16384, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(paResults, VERR_INVALID_POINTER);
+ ASMIntEnable(); /* Run with interrupts enabled, so we can query more MSRs in one block. */
+ RT_NOREF_PV(pVM);
+
+ for (uint32_t i = 0; i < cMsrs; i++, uMsr++)
+ {
+ if (vmmRCSafeMsrRead(uMsr, &paResults[i].uValue))
+ paResults[i].uMsr = uMsr;
+ else
+ paResults[i].uMsr = UINT64_MAX;
+ }
+
+ ASMIntDisable();
+ return VINF_SUCCESS;
+}
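+
+/* A minimal, illustrative sketch (hypothetical helper, not part of this file)
+   of how a caller might consume the results filled in above; entries whose
+   uMsr is UINT64_MAX mark MSRs that faulted on read. */
+#if 0
+static void vmmExampleDumpMsrs(PVMMTESTMSRENTRY paResults, uint32_t cMsrs)
+{
+    for (uint32_t i = 0; i < cMsrs; i++)
+        if (paResults[i].uMsr != UINT64_MAX)
+            Log(("MSR %#RX64 -> %#RX64\n", paResults[i].uMsr, paResults[i].uValue));
+}
+#endif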
+
+
+/**
+ * Tries to write the given value to an MSR, returns the effect and restores
+ * the original value.
+ *
+ * This is called directly via VMMR3CallRC.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param uMsr The MSR to write to.
+ * @param u32ValueLow The low part of the value to write.
+ * @param u32ValueHi The high part of the value to write.
+ * @param puValueBefore Where to return the value before writing.
+ * @param puValueAfter Where to return the value read back after writing.
+ */
+extern "C" VMMRCDECL(int)
+VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi,
+ uint64_t *puValueBefore, uint64_t *puValueAfter)
+{
+ AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER);
+ AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER);
+ ASMIntDisable();
+ RT_NOREF_PV(pVM);
+
+ int rc = VINF_SUCCESS;
+ uint64_t uValueBefore = UINT64_MAX;
+ uint64_t uValueAfter = UINT64_MAX;
+ if (vmmRCSafeMsrRead(uMsr, &uValueBefore))
+ {
+ if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi)))
+ rc = VERR_WRITE_PROTECT;
+ if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc))
+ rc = VERR_READ_ERROR;
+ vmmRCSafeMsrWrite(uMsr, uValueBefore);
+ }
+ else
+ rc = VERR_ACCESS_DENIED;
+
+ *puValueBefore = uValueBefore;
+ *puValueAfter = uValueAfter;
+ return rc;
+}
+
+
+
+/**
+ * Temporary \#PF trap handler for the \#PF test case.
+ *
+ * @returns VBox status code (appropriate for GC return).
+ * In this context RT_SUCCESS means to restart the instruction.
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame Trap register frame.
+ */
+static DECLCALLBACK(int) vmmGCTestTmpPFHandler(PVM pVM, PCPUMCTXCORE pRegFrame)
+{
+ if (pRegFrame->eip == (uintptr_t)vmmGCTestTrap0e_FaultEIP)
+ {
+ pRegFrame->eip = (uintptr_t)vmmGCTestTrap0e_ResumeEIP;
+ return VINF_SUCCESS;
+ }
+ NOREF(pVM);
+ return VERR_INTERNAL_ERROR;
+}
+
+
+/**
+ * Temporary \#PF trap handler for the \#PF test case, this one messes up the fs
+ * selector.
+ *
+ * @returns VBox status code (appropriate for GC return).
+ * In this context RT_SUCCESS means to restart the instruction.
+ * @param pVM The cross context VM structure.
+ * @param pRegFrame Trap register frame.
+ */
+static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame)
+{
+ int rc = vmmGCTestTmpPFHandler(pVM, pRegFrame);
+ pRegFrame->fs.Sel = 0x30;
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMRC/VMMRC.def b/src/VBox/VMM/VMMRC/VMMRC.def
new file mode 100644
index 00000000..8586ca88
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRC.def
@@ -0,0 +1,106 @@
+; $Id: VMMRC.def $
+;; @file
+; VMM Raw-mode Context DLL - Definition file.
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+LIBRARY VMMRC.rc
+EXPORTS
+ ; data
+
+ ; code
+ CPUMGCResumeGuest
+ CPUMGCResumeGuestV86
+ PDMCritSectEnter
+ PDMCritSectEnterDebug
+ PDMCritSectLeave
+ PDMCritSectIsOwner
+ PDMQueueAlloc
+ PDMQueueInsert
+ PGMHandlerPhysicalPageTempOff
+ PGMShwMakePageWritable
+ PGMPhysSimpleWriteGCPhys
+ PGMPhysSimpleReadGCPtr
+ PGMPhysSimpleWriteGCPtr
+ PGMPhysReadGCPtr
+ PGMPhysWriteGCPtr
+ PGMPhysSimpleDirtyWriteGCPtr
+ RTLogDefaultInstance
+ RTLogDefaultInstanceEx
+ RTLogRelGetDefaultInstance
+ RTLogRelGetDefaultInstanceEx
+ RTTimeMilliTS
+ RTTraceBufAddMsgF
+ RTTraceBufAddPos
+ RTTraceBufAddPosMsgF
+ SELMGetHyperCS
+ TMTimerFromMilli
+ TMTimerFromMicro
+ TMTimerFromNano
+ TMTimerGet
+ TMTimerGetFreq
+ TMTimerIsActive
+ TMTimerIsLockOwner
+ TMTimerLock
+ TMTimerSet
+ TMTimerSetRelative
+ TMTimerSetMillies
+ TMTimerSetMicro
+ TMTimerSetNano
+ TMTimerSetFrequencyHint
+ TMTimerStop
+ TMTimerUnlock
+ TRPMGCHandlerGeneric
+ TRPMGCHandlerInterupt
+ TRPMGCHandlerTrap08
+ TRPMGCHandlerTrap12
+ MMGCRamWriteNoTrapHandler
+ MMGCRamReadNoTrapHandler
+ VMMGetCpu
+ VMMGetSvnRev
+ VMMRCProbeFire
+ vmmGCLoggerFlush
+ vmmGCLoggerWrapper
+ vmmGCRelLoggerWrapper
+ vmmGCTestTrap0d_FaultEIP
+ vmmGCTestTrap0e_FaultEIP
+ vmmGCTestTrap3_FaultEIP
+ vmmGCTestTrap8_FaultEIP
+ VMSetError
+ VMSetErrorV
+
+ ; runtime
+ nocrt_memchr
+ nocrt_memcmp
+ nocrt_memcpy
+ memcpy=nocrt_memcpy ; not-os2
+ nocrt_memmove
+ nocrt_memset
+ memset=nocrt_memset ; not-os2
+ nocrt_strcpy
+ nocrt_strcmp
+ nocrt_strchr
+ RTAssertMsg1Weak
+ RTAssertMsg2Weak
+ RTAssertShouldPanic
+ RTLogDefaultInstance
+ RTTimeNanoTSLegacySyncInvarNoDelta
+ RTTimeNanoTSLegacySyncInvarWithDelta
+ RTTimeNanoTSLegacyAsync
+ RTTimeNanoTSLFenceSyncInvarNoDelta
+ RTTimeNanoTSLFenceSyncInvarWithDelta
+ RTTimeNanoTSLFenceAsync
+ RTTimeNanoTS
+ RTCrc32
+ ASMMultU64ByU32DivByU32 ; not-os2
+
diff --git a/src/VBox/VMM/VMMRC/VMMRC.mac b/src/VBox/VMM/VMMRC/VMMRC.mac
new file mode 100644
index 00000000..6f204121
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRC.mac
@@ -0,0 +1,194 @@
+; $Id: VMMRC.mac $
+;; @file
+; VMMRC - Raw-mode Context Assembly Macros.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef __VMMRC_mac__
+%define __VMMRC_mac__
+
+%include "VBox/asmdefs.mac"
+
+
+;; @def VMMR0_SEG
+; Set the output segment to one of the special VMMR0 segments.
+; @param %1 The segment name.
+; @remark Use BEGINCODE to switch back to the code segment.
+
+;; @def VMMR0_CODE_SEG
+; Set the output segment to one of the special VMMR0 code segments.
+; @param %1 The segment name.
+%ifdef ASM_FORMAT_OMF
+ %macro VMMR0_SEG 1
+ segment VMMR0.%1 public CLASS=CONST align=1 use32 flat
+ %endmacro
+
+ %macro VMMR0_CODE_SEG 1
+ segment VMMR0.%1 public CLASS=CODE align=16 use32 flat
+ %endmacro
+%endif
+
+%ifdef ASM_FORMAT_ELF
+ %macro VMMR0_SEG 1
+ %ifndef DEFINED_VMMR0_SEG.%1
+ %define DEFINED_VMMR0_SEG.%1 1
+ [section .VMMR0.%1 progbits alloc noexec nowrite align=1 ]
+ %else
+ [section .VMMR0.%1 ]
+ %endif
+ %endmacro
+
+ %macro VMMR0_CODE_SEG 1
+ %ifndef DEFINED_VMMR0_CODE_SEG.%1
+ %define DEFINED_VMMR0_CODE_SEG.%1 1
+ [section .VMMR0.%1 progbits alloc exec nowrite align=16 ]
+ %else
+ [section .VMMR0.%1 ]
+ %endif
+ %endmacro
+%endif
+
+%ifdef ASM_FORMAT_MACHO
+ %ifdef __YASM__
+ %macro VMMR0_SEG 1
+ %ifndef DEFINED_VMMR0_SEG.%1
+ %define DEFINED_VMMR0_SEG.%1 1
+ [section VMMR0 %1 align=1 ]
+ %else
+ [section VMMR0 %1 ]
+ %endif
+ %endmacro
+ %else
+ %macro VMMR0_SEG 1
+ [section VMMR0.%1 rdata align=1 ]
+ %endmacro
+ %endif
+
+ %ifdef __YASM__
+ %macro VMMR0_CODE_SEG 1
+ %ifndef DEFINED_VMMR0_CODE_SEG.%1
+ %define DEFINED_VMMR0_CODE_SEG.%1 1
+ [section VMMR0 %1 exec align=16 ]
+ %else
+ [section VMMR0 %1 ]
+ %endif
+ %endmacro
+ %else
+ %macro VMMR0_CODE_SEG 1
+ [section VMMR0.%1 exec align=16 ]
+ %endmacro
+ %endif
+%endif
+
+%ifdef ASM_FORMAT_PE
+ %macro VMMR0_SEG 1
+ %ifndef DEFINED_VMMR0_SEG.%1
+ %define DEFINED_VMMR0_SEG.%1 1
+ [section .rdata$VMMR0.%1 align=1 ]
+ %else
+ [section .rdata$VMMR0.%1]
+ %endif
+ %endmacro
+
+ %macro VMMR0_CODE_SEG 1
+ %ifndef DEFINED_VMMR0_CODE_SEG.%1
+ %define DEFINED_VMMR0_CODE_SEG.%1 1
+ [section .text$VMMR0.%1 align=16 ]
+ %else
+ [section .text$VMMR0.%1]
+ %endif
+ %endmacro
+%endif
+
+%ifnmacro VMMR0_SEG
+ %error "VMMR0_SEG / ASM_FORMAT_xxx"
+%endif
+%ifnmacro VMMR0_CODE_SEG
+ %error "VMMR0_CODE_SEG / ASM_FORMAT_xxx"
+%endif
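+
+; Illustrative usage (hedged; this mirrors VMMRC0.asm / VMMRC99.asm elsewhere
+; in this tree):
+;       VMMR0_SEG Trap0b        ; emit records into the Trap0b data segment
+;       BEGINCODE               ; switch back to the code segment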
+
+
+;; @def TRPM_HANDLER
+; Sets up a trap handler.
+;
+; @param %1 The segment name.
+; @param %2 The end address. Use 0 to just handle one instruction.
+; @param %3 Address of the handler function.
+; @param %4 The user data member.
+%macro TRPM_HANDLER 4
+
+VMMR0_SEG %1 ; switch to the record segment.
+
+ dd %%current_instr ; uStartEip
+ dd %2 ; uEndEip
+ dd %3 ; pfnHandler
+ dd %4 ; pvUser
+
+BEGINCODE ; back to the code segment.
+%%current_instr:
+
+%endmacro
+
+;; @def TRPM_NP_HANDLER
+; Sets up a segment not present fault handler for the current (=next) instruction.
+;
+; @param %1 Address of the handler function.
+; @param %2 The user data member.
+%macro TRPM_NP_HANDLER 2
+TRPM_HANDLER Trap0b, 0, %1, %2
+%endmacro
+
+
+;; @def TRPM_GP_HANDLER
+; Sets up a general protection fault handler for the current (=next) instruction.
+;
+; @param %1 Address of the handler function.
+; @param %2 The user data member.
+%macro TRPM_GP_HANDLER 2
+TRPM_HANDLER Trap0d, 0, %1, %2
+%endmacro
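+
+; Illustrative usage (hedged; mirrors the rdmsr guard in VMMRCA.asm):
+;       TRPM_GP_HANDLER NAME(TRPMRCTrapHyperHandlerSetEIP), .trapped
+;       rdmsr                   ; the guarded (current = next) instruction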
+
+
+;; @def TRPM_PF_HANDLER
+; Sets up a page fault handler for the current (=next) instruction.
+;
+; @param %1 Address of the handler function.
+; @param %2 The user data member.
+%macro TRPM_PF_HANDLER 2
+TRPM_HANDLER Trap0e, 0, %1, %2
+%endmacro
+
+
+;; @def TRPM_NP_GP_HANDLER
+; Sets up a segment not present fault and general protection fault handler
+; for the current (=next) instruction.
+;
+; @param %1 Address of the handler function.
+; @param %2 The user data member.
+%macro TRPM_NP_GP_HANDLER 2
+TRPM_HANDLER Trap0b, 0, %1, %2
+TRPM_HANDLER Trap0d, 0, %1, %2
+%endmacro
+
+
+
+;; @def BEGIN_PATCH_HLP_SEG
+; Sets the output segment to the special code segment for patch helpers (runs in ring-1 or ring-2).
+; @remark Use BEGINCODE to switch back to the code segment.
+%macro BEGIN_PATCH_HLP_SEG 0
+VMMR0_CODE_SEG PatchHlp
+%endmacro
+
+%endif
diff --git a/src/VBox/VMM/VMMRC/VMMRC0.asm b/src/VBox/VMM/VMMRC/VMMRC0.asm
new file mode 100644
index 00000000..c8b5e897
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRC0.asm
@@ -0,0 +1,40 @@
+; $Id: VMMRC0.asm $
+;; @file
+; VMMRC0 - The first object module in the link.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%include "VMMRC.mac"
+
+
+;;
+; Start the Trap0b segment.
+VMMR0_SEG Trap0b
+GLOBALNAME g_aTrap0bHandlers
+
+;;
+; Start the Trap0d segment.
+VMMR0_SEG Trap0d
+GLOBALNAME g_aTrap0dHandlers
+
+;;
+; Start the Trap0e segment.
+VMMR0_SEG Trap0e
+GLOBALNAME g_aTrap0eHandlers
+
+;;
+; Start the patch helper segment
+BEGIN_PATCH_HLP_SEG
+EXPORTEDNAME g_PatchHlpBegin
+
diff --git a/src/VBox/VMM/VMMRC/VMMRC99.asm b/src/VBox/VMM/VMMRC/VMMRC99.asm
new file mode 100644
index 00000000..b3dbf266
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRC99.asm
@@ -0,0 +1,47 @@
+; $Id: VMMRC99.asm $
+;; @file
+; VMMRC99 - The last object module in the link.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%include "VMMRC.mac"
+
+
+;;
+; End the Trap0b segment.
+VMMR0_SEG Trap0b
+GLOBALNAME g_aTrap0bHandlersEnd
+ dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+
+;;
+; End the Trap0d segment.
+VMMR0_SEG Trap0d
+GLOBALNAME g_aTrap0dHandlersEnd
+ dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+
+;;
+; End the Trap0e segment.
+VMMR0_SEG Trap0e
+GLOBALNAME g_aTrap0eHandlersEnd
+ dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+
+;;
+; End the patch helper segment
+BEGIN_PATCH_HLP_SEG
+EXPORTEDNAME g_PatchHlpEnd
+ dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
diff --git a/src/VBox/VMM/VMMRC/VMMRCA.asm b/src/VBox/VMM/VMMRC/VMMRCA.asm
new file mode 100644
index 00000000..ca482f10
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRCA.asm
@@ -0,0 +1,397 @@
+; $Id: VMMRCA.asm $
+;; @file
+; VMMRC - Raw-mode Context Virtual Machine Monitor assembly routines.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "iprt/x86.mac"
+%include "VBox/sup.mac"
+%include "VBox/vmm/vm.mac"
+%include "VMMInternal.mac"
+%include "VMMRC.mac"
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+;; Save all registers before loading special register values for the faulting testcase.
+%macro SaveAndLoadAll 0
+ pushad
+ push ds
+ push es
+ push fs
+ push gs
+ call NAME(vmmGCTestLoadRegs)
+%endmacro
+
+;; restore all registers after faulting.
+%macro RestoreAll 0
+ pop gs
+ pop fs
+ pop es
+ pop ds
+ popad
+%endmacro
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern IMPNAME(g_VM)
+extern IMPNAME(g_Logger)
+extern IMPNAME(g_RelLogger)
+extern NAME(RTLogLogger)
+extern NAME(vmmRCProbeFireHelper)
+extern NAME(TRPMRCTrapHyperHandlerSetEIP)
+
+
+BEGINCODE
+
+;/**
+; * Internal GC logger worker: Logger wrapper.
+; */
+;VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);
+EXPORTEDNAME vmmGCLoggerWrapper
+%ifdef __YASM__
+%ifdef ASM_FORMAT_ELF
+ push dword IMP(g_Logger) ; YASM BUG #67! YASMCHECK!
+%else
+ push IMP(g_Logger)
+%endif
+%else
+ push IMP(g_Logger)
+%endif
+ call NAME(RTLogLogger)
+ add esp, byte 4
+ ret
+ENDPROC vmmGCLoggerWrapper
+
+
+;/**
+; * Internal GC logger worker: Logger (release) wrapper.
+; */
+;VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);
+EXPORTEDNAME vmmGCRelLoggerWrapper
+%ifdef __YASM__
+%ifdef ASM_FORMAT_ELF
+ push dword IMP(g_RelLogger) ; YASM BUG #67! YASMCHECK!
+%else
+ push IMP(g_RelLogger)
+%endif
+%else
+ push IMP(g_RelLogger)
+%endif
+ call NAME(RTLogLogger)
+ add esp, byte 4
+ ret
+ENDPROC vmmGCRelLoggerWrapper
+
+
+;;
+; Enables write protection.
+BEGINPROC vmmGCEnableWP
+ push eax
+ mov eax, cr0
+ or eax, X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+ pop eax
+ ret
+ENDPROC vmmGCEnableWP
+
+
+;;
+; Disables write protection.
+BEGINPROC vmmGCDisableWP
+ push eax
+ mov eax, cr0
+ and eax, ~X86_CR0_WRITE_PROTECT
+ mov cr0, eax
+ pop eax
+ ret
+ENDPROC vmmGCDisableWP
+
+
+;;
+; Load special register set expected upon faults.
+; All registers are changed.
+BEGINPROC vmmGCTestLoadRegs
+ mov eax, ss
+ mov ds, eax
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov edi, 001234567h
+ mov esi, 042000042h
+ mov ebp, 0ffeeddcch
+ mov ebx, 089abcdefh
+ mov ecx, 0ffffaaaah
+ mov edx, 077778888h
+ mov eax, 0f0f0f0f0h
+ ret
+ENDPROC vmmGCTestLoadRegs
+
+
+;;
+; A Trap 3 testcase.
+GLOBALNAME vmmGCTestTrap3
+ SaveAndLoadAll
+
+ int 3
+EXPORTEDNAME vmmGCTestTrap3_FaultEIP
+
+ RestoreAll
+ mov eax, 0ffffffffh
+ ret
+ENDPROC vmmGCTestTrap3
+
+
+;;
+; A Trap 8 testcase.
+GLOBALNAME vmmGCTestTrap8
+ SaveAndLoadAll
+
+ sub esp, byte 8
+ sidt [esp]
+ mov word [esp], 111 ; make any #PF double fault.
+ lidt [esp]
+ add esp, byte 8
+
+ COM_S_CHAR '!'
+
+ xor eax, eax
+EXPORTEDNAME vmmGCTestTrap8_FaultEIP
+ mov eax, [eax]
+
+
+ COM_S_CHAR '2'
+
+ RestoreAll
+ mov eax, 0ffffffffh
+ ret
+ENDPROC vmmGCTestTrap8
+
+
+;;
+; A simple Trap 0d testcase.
+GLOBALNAME vmmGCTestTrap0d
+ SaveAndLoadAll
+
+ push ds
+EXPORTEDNAME vmmGCTestTrap0d_FaultEIP
+ ltr [esp]
+ pop eax
+
+ RestoreAll
+ mov eax, 0ffffffffh
+ ret
+ENDPROC vmmGCTestTrap0d
+
+
+;;
+; A simple Trap 0e testcase.
+GLOBALNAME vmmGCTestTrap0e
+ SaveAndLoadAll
+
+ xor eax, eax
+EXPORTEDNAME vmmGCTestTrap0e_FaultEIP
+ mov eax, [eax]
+
+ RestoreAll
+ mov eax, 0ffffffffh
+ ret
+
+EXPORTEDNAME vmmGCTestTrap0e_ResumeEIP
+ RestoreAll
+ xor eax, eax
+ ret
+ENDPROC vmmGCTestTrap0e
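+
+; Note (descriptive): the exported vmmGCTestTrap0e_FaultEIP / _ResumeEIP labels
+; above are what the temporary #PF handler in VMMRC.cpp compares the faulting
+; eip against, and patches it to, so the testcase resumes after the deliberate
+; fault.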
+
+
+
+;;
+; Safely reads an MSR.
+; @returns boolean
+; @param uMsr The MSR to read.
+; @param pu64Value Where to return the value on success.
+;
+GLOBALNAME vmmRCSafeMsrRead
+ push ebp
+ mov ebp, esp
+ pushf
+ cli
+ push esi
+ push edi
+ push ebx
+ push ebp
+
+ mov ecx, [ebp + 8] ; The MSR to read.
+ mov eax, 0deadbeefh
+ mov edx, 0deadbeefh
+
+TRPM_GP_HANDLER NAME(TRPMRCTrapHyperHandlerSetEIP), .trapped
+ rdmsr
+
+ mov ecx, [ebp + 0ch] ; Where to store the result.
+ mov [ecx], eax
+ mov [ecx + 4], edx
+
+ mov eax, 1
+.return:
+ pop ebp
+ pop ebx
+ pop edi
+ pop esi
+ popf
+ leave
+ ret
+
+.trapped:
+ mov eax, 0
+ jmp .return
+ENDPROC vmmRCSafeMsrRead
+
+
+;;
+; Safely writes an MSR.
+; @returns boolean
+; @param uMsr The MSR to write to.
+; @param u64Value The value to write.
+;
+GLOBALNAME vmmRCSafeMsrWrite
+ push ebp
+ mov ebp, esp
+ pushf
+ cli
+ push esi
+ push edi
+ push ebx
+ push ebp
+
+ mov ecx, [ebp + 8] ; The MSR to write to.
+ mov eax, [ebp + 12] ; The value to write.
+ mov edx, [ebp + 16]
+
+TRPM_GP_HANDLER NAME(TRPMRCTrapHyperHandlerSetEIP), .trapped
+ wrmsr
+
+ mov eax, 1
+.return:
+ pop ebp
+ pop ebx
+ pop edi
+ pop esi
+ popf
+ leave
+ ret
+
+.trapped:
+ mov eax, 0
+ jmp .return
+ENDPROC vmmRCSafeMsrWrite
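+
+; Illustrative C-side view (hedged; matches the DECLASM prototypes and use in
+; VMMRC.cpp):
+;       if (!vmmRCSafeMsrWrite(uMsr, u64Value))
+;           rc = VERR_WRITE_PROTECT; /* the wrmsr trapped with #GP */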
+
+
+
+;;
+; The raw-mode context equivalent of SUPTracerFireProbe.
+;
+; See also SUPLibTracerA.asm.
+;
+EXPORTEDNAME VMMRCProbeFire
+ push ebp
+ mov ebp, esp
+
+ ;
+ ; Save edx and eflags so we can use them.
+ ;
+ pushf
+ push edx
+
+ ;
+ ; Get the address of the tracer context record after first checking
+ ; that host calls haven't been disabled.
+ ;
+ mov edx, IMP(g_VM)
+ add edx, [edx + VM.offVMCPU]
+ cmp dword [edx + VMCPU.vmm + VMMCPU.cCallRing3Disabled], 0
+ jnz .return
+ add edx, VMCPU.vmm + VMMCPU.TracerCtx
+
+ ;
+ ; Save the X86 context.
+ ;
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.eax], eax
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.ecx], ecx
+ pop eax
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.edx], eax
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.ebx], ebx
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.esi], esi
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.edi], edi
+ pop eax
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.eflags], eax
+ mov eax, [ebp + 4]
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.eip], eax
+ mov eax, [ebp]
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.ebp], eax
+ lea eax, [ebp + 4*2]
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.esp], eax
+
+ mov ecx, [ebp + 4*2]
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.uVtgProbeLoc], ecx
+
+ mov eax, [ecx + 4] ; VTGPROBELOC::idProbe.
+ mov [edx + SUPDRVTRACERUSRCTX32.idProbe], eax
+ mov dword [edx + SUPDRVTRACERUSRCTX32.cBits], 32
+
+ ; Copy the arguments off the stack.
+%macro COPY_ONE_ARG 1
+ mov eax, [ebp + 12 + %1 * 4]
+ mov [edx + SUPDRVTRACERUSRCTX32.u.X86.aArgs + %1*4], eax
+%endmacro
+ COPY_ONE_ARG 0
+ COPY_ONE_ARG 1
+ COPY_ONE_ARG 2
+ COPY_ONE_ARG 3
+ COPY_ONE_ARG 4
+ COPY_ONE_ARG 5
+ COPY_ONE_ARG 6
+ COPY_ONE_ARG 7
+ COPY_ONE_ARG 8
+ COPY_ONE_ARG 9
+ COPY_ONE_ARG 10
+ COPY_ONE_ARG 11
+ COPY_ONE_ARG 12
+ COPY_ONE_ARG 13
+ COPY_ONE_ARG 14
+ COPY_ONE_ARG 15
+ COPY_ONE_ARG 16
+ COPY_ONE_ARG 17
+ COPY_ONE_ARG 18
+ COPY_ONE_ARG 19
+
+ ;
+ ; Call the helper (too lazy to do the VMM structure stuff).
+ ;
+ mov ecx, IMP(g_VM)
+ push ecx
+ call NAME(vmmRCProbeFireHelper)
+
+.return:
+ leave
+ ret
+ENDPROC VMMRCProbeFire
+
diff --git a/src/VBox/VMM/VMMRC/VMMRCBuiltin.def b/src/VBox/VMM/VMMRC/VMMRCBuiltin.def
new file mode 100644
index 00000000..a90cb691
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRCBuiltin.def
@@ -0,0 +1,32 @@
+; $Id: VMMRCBuiltin.def $
+;; @file
+; VMM Raw-mode Context Builtin DLL - Definition file for generating import library.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+LIBRARY VMMRCBuiltin.rc
+EXPORTS
+ ; data
+ g_VM DATA
+ g_CPUM DATA
+ g_TRPM DATA
+ g_TRPMCPU DATA
+ g_Logger DATA
+ g_RelLogger DATA
+ g_pSUPGlobalInfoPage DATA
+ g_trpmGuestCtxCore DATA ; for TRPMRCHandlersA.asm only
+ g_trpmHyperCtxCore DATA ; for TRPMRCHandlersA.asm only
+
+ ; code
+
diff --git a/src/VBox/VMM/VMMRC/VMMRCDeps.cpp b/src/VBox/VMM/VMMRC/VMMRCDeps.cpp
new file mode 100644
index 00000000..a2ca1481
--- /dev/null
+++ b/src/VBox/VMM/VMMRC/VMMRCDeps.cpp
@@ -0,0 +1,41 @@
+/* $Id: VMMRCDeps.cpp $ */
+/** @file
+ * VMMRC Runtime Dependencies.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include <iprt/crc.h>
+#include <iprt/string.h>
+
+#if defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
+RT_C_DECLS_BEGIN
+extern uint64_t __udivdi3(uint64_t, uint64_t);
+extern uint64_t __umoddi3(uint64_t, uint64_t);
+RT_C_DECLS_END
+#endif // RT_OS_SOLARIS || RT_OS_FREEBSD
+
+PFNRT g_VMMRCDeps[] =
+{
+ (PFNRT)memset,
+ (PFNRT)memcpy,
+ (PFNRT)memchr,
+ (PFNRT)memcmp,
+ (PFNRT)RTCrc32,
+#if defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
+ (PFNRT)__udivdi3,
+ (PFNRT)__umoddi3,
+#endif // RT_OS_SOLARIS || RT_OS_FREEBSD
+ NULL
+};
+