author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
commit     f215e02bf85f68d3a6106c2a1f4f7f063f819064 (patch)
tree       6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/VMM/VMMRZ
parent     Initial commit. (diff)
Adding upstream version 7.0.14-dfsg. (tag: upstream/7.0.14-dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/VMM/VMMRZ')
-rw-r--r--  src/VBox/VMM/VMMRZ/CPUMRZ.cpp       158
-rw-r--r--  src/VBox/VMM/VMMRZ/CPUMRZA.asm      383
-rw-r--r--  src/VBox/VMM/VMMRZ/Makefile.kup       0
-rw-r--r--  src/VBox/VMM/VMMRZ/VMMRZ.cpp        113
4 files changed, 654 insertions, 0 deletions
diff --git a/src/VBox/VMM/VMMRZ/CPUMRZ.cpp b/src/VBox/VMM/VMMRZ/CPUMRZ.cpp
new file mode 100644
index 00000000..3dc62e90
--- /dev/null
+++ b/src/VBox/VMM/VMMRZ/CPUMRZ.cpp
@@ -0,0 +1,158 @@
+/* $Id: CPUMRZ.cpp $ */
+/** @file
+ * CPUM - Raw-mode and ring-0 context.
+ */
+
+/*
+ * Copyright (C) 2016-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/vmm/hm.h>
+#include <iprt/assert.h>
+#include <iprt/x86.h>
+
+
+
+
+/**
+ * Prepares the host FPU/SSE/AVX state for IEM action.
+ *
+ * This will make sure the FPU/SSE/AVX guest state is _not_ loaded in the CPU.
+ * This will make sure the FPU/SSE/AVX host state is saved.
+ * Finally, it will make sure the FPU/SSE/AVX host features can be safely
+ * accessed.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu)
+{
+ pVCpu->cpum.s.fChanged |= CPUM_CHANGED_FPU_REM;
+ switch (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
+ {
+ case 0:
+ if (cpumRZSaveHostFPUState(&pVCpu->cpum.s) == VINF_CPUM_HOST_CR0_MODIFIED)
+ HMR0NotifyCpumModifiedHostCr0(pVCpu);
+ Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #0 - %#x\n", ASMGetCR0()));
+ break;
+
+ case CPUM_USED_FPU_HOST:
+ Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #1 - %#x\n", ASMGetCR0()));
+ break;
+
+ case CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST:
+ cpumRZSaveGuestFpuState(&pVCpu->cpum.s, true /*fLeaveFpuAccessible*/);
+#ifdef IN_RING0
+ HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
+#endif
+ Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #2 - %#x\n", ASMGetCR0()));
+ break;
+
+ default:
+ AssertFailed();
+ }
+
+}
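
A minimal usage sketch of this entry point (the wrapper iemSketchExecFpuInsn and its body are illustrative assumptions; only CPUMRZFpuStatePrepareHostCpuForUse() and VINF_SUCCESS come from the sources in this diff):

    /* Hedged sketch: a hypothetical ring-0 emulation path that wants to use
       host FPU/SSE/AVX instructions itself. */
    static int iemSketchExecFpuInsn(PVMCPUCC pVCpu)
    {
        /* Unload the guest FPU state and save the host state, so the host
           FPU/SSE/AVX features can be used freely below. */
        CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);

        /* ... emulate the instruction using host SIMD registers ... */
        return VINF_SUCCESS;
    }
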
+
+
+/**
+ * Makes sure the FPU/SSE/AVX guest state is saved in CPUMCPU::Guest and will be
+ * reloaded before direct use.
+ *
+ * No promises are made about the FPU/SSE/AVX host features.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu)
+{
+ CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
+}
+
+
+/**
+ * Makes sure the FPU/SSE/AVX state in CPUMCPU::Guest is up to date.
+ *
+ * This will not cause CPUM_USED_FPU_GUEST to change.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu)
+{
+ if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
+ {
+ Assert(pVCpu->cpum.s.Guest.fUsedFpuGuest);
+ cpumRZSaveGuestFpuState(&pVCpu->cpum.s, false /*fLeaveFpuAccessible*/);
+ pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
+ pVCpu->cpum.s.Guest.fUsedFpuGuest = true;
+ Log7(("CPUMRZFpuStateActualizeForRead\n"));
+ }
+}
+
+
+/**
+ * Makes sure the XMM0..XMM15 and MXCSR state in CPUMCPU::Guest is up to date.
+ *
+ * This will not cause CPUM_USED_FPU_GUEST to change.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu)
+{
+#if defined(VBOX_WITH_KERNEL_USING_XMM) && HC_ARCH_BITS == 64
+ NOREF(pVCpu);
+#else
+ if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
+ {
+ Assert(pVCpu->cpum.s.Guest.fUsedFpuGuest);
+ cpumRZSaveGuestSseRegisters(&pVCpu->cpum.s);
+ Log7(("CPUMRZFpuStateActualizeSseForRead\n"));
+ }
+#endif
+}
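
A sketch of the intended read-only consumption (the accessor name is invented; the Guest.XState.x87.MXCSR field path is an assumption inferred from the X86FXSTATE offsets used by the assembly below):

    /* Hedged sketch: actualize the guest SSE registers in memory, then read
       MXCSR from the guest state area without marking anything dirty. */
    static uint32_t cpumSketchGetGuestMxcsr(PVMCPUCC pVCpu)
    {
        CPUMRZFpuStateActualizeSseForRead(pVCpu);
        return pVCpu->cpum.s.Guest.XState.x87.MXCSR; /* assumed field path */
    }
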
+
+
+/**
+ * Makes sure the YMM0..YMM15 and MXCSR state in CPUMCPU::Guest is up to date.
+ *
+ * This will not cause CPUM_USED_FPU_GUEST to change.
+ *
+ * @param pVCpu The cross context virtual CPU structure.
+ */
+VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu)
+{
+ if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
+ {
+ Assert(pVCpu->cpum.s.Guest.fUsedFpuGuest);
+ cpumRZSaveGuestAvxRegisters(&pVCpu->cpum.s);
+ Log7(("CPUMRZFpuStateActualizeAvxForRead\n"));
+ }
+}
+
diff --git a/src/VBox/VMM/VMMRZ/CPUMRZA.asm b/src/VBox/VMM/VMMRZ/CPUMRZA.asm
new file mode 100644
index 00000000..ae204ee6
--- /dev/null
+++ b/src/VBox/VMM/VMMRZ/CPUMRZA.asm
@@ -0,0 +1,383 @@
+; $Id: CPUMRZA.asm $
+;; @file
+; CPUM - Raw-mode and Ring-0 Context Assembly Routines.
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_ASM_WITH_SEH64
+%include "VBox/asmdefs.mac"
+%include "CPUMInternal.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/err.mac"
+
+
+
+BEGINCODE
+
+
+;;
+; Saves the host FPU/SSE/AVX state.
+;
+; Will return with CR0.EM and CR0.TS cleared! This is the normal state in ring-0.
+;
+; @returns VINF_SUCCESS (0) or VINF_CPUM_HOST_CR0_MODIFIED. (EAX)
+; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumRZSaveHostFPUState
+ push xBP
+ SEH64_PUSH_xBP
+ mov xBP, xSP
+ SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
+ ;
+ ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
+ ;
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_MSC
+ mov r11, rcx
+ %else
+ mov r11, rdi
+ %endif
+ %define pCpumCpu r11
+ %define pXState r10
+%else
+ push ebx
+ push esi
+ mov ebx, dword [ebp + 8]
+ %define pCpumCpu ebx
+ %define pXState esi
+%endif
+
+ pushf ; The darwin kernel can get upset or upset things if an
+ cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
+
+ ;
+ ; We may have to update CR0, indirectly or directly. We must report any
+ ; changes to the VT-x code.
+ ;
+ CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value (xAX scratch)
+
+ ;
+ ; Save the host state (xsave/fxsave will cause the thread's FPU state to
+ ; be loaded on systems where we are allowed to use it in ring-0).
+ ;
+ CPUMR0_SAVE_HOST
+
+ or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM) ; The latter is not necessarily true, but normally it is.
+ popf
+
+ mov eax, ecx ; The return value from above.
+%ifdef RT_ARCH_X86
+ pop esi
+ pop ebx
+%endif
+ leave
+ ret
+%undef pCpumCpu
+%undef pXState
+ENDPROC cpumRZSaveHostFPUState
+
+
+;;
+; Saves the guest FPU/SSE/AVX state.
+;
+; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
+; @param fLeaveFpuAccessible x86:[ebp+c] gcc:sil msc:dl Whether to restore CR0 and XCR0 on
+; the way out. Only really applicable to RC.
+;
+; @remarks 64-bit Windows drivers shouldn't use AVX registers without saving+loading:
+; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
+; However, the compiler docs have a different idea:
+; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
+; We'll go with the former for now.
+;
+align 16
+BEGINPROC cpumRZSaveGuestFpuState
+ push xBP
+ SEH64_PUSH_xBP
+ mov xBP, xSP
+ SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
+ ;
+ ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
+ ;
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_MSC
+ mov r11, rcx
+ %else
+ mov r11, rdi
+ %endif
+ %define pCpumCpu r11
+ %define pXState r10
+%else
+ push ebx
+ push esi
+ mov ebx, dword [ebp + 8]
+ %define pCpumCpu ebx
+ %define pXState esi
+%endif
+ pushf ; The darwin kernel can get upset or upset things if an
+ cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
+
+ %ifdef IN_RC
+ mov ecx, cr0 ; ecx = saved cr0
+ test ecx, X86_CR0_TS | X86_CR0_EM
+ jz .skip_cr0_write
+ mov eax, ecx
+ and eax, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, eax
+.skip_cr0_write:
+ %endif
+
+ %ifndef VBOX_WITH_KERNEL_USING_XMM
+ CPUMR0_SAVE_GUEST
+ %else
+ ;
+ ; The XMM0..XMM15 registers have been saved already. We use the host
+ ; state area here to temporarily save the non-volatile XMM registers, so
+ ; we can load the guest ones while saving. This is safe.
+ ;
+
+ ; Save caller's XMM registers.
+ lea pXState, [pCpumCpu + CPUMCPU.Host.XState]
+ movdqa [pXState + X86FXSTATE.xmm6 ], xmm6
+ movdqa [pXState + X86FXSTATE.xmm7 ], xmm7
+ movdqa [pXState + X86FXSTATE.xmm8 ], xmm8
+ movdqa [pXState + X86FXSTATE.xmm9 ], xmm9
+ movdqa [pXState + X86FXSTATE.xmm10], xmm10
+ movdqa [pXState + X86FXSTATE.xmm11], xmm11
+ movdqa [pXState + X86FXSTATE.xmm12], xmm12
+ movdqa [pXState + X86FXSTATE.xmm13], xmm13
+ movdqa [pXState + X86FXSTATE.xmm14], xmm14
+ movdqa [pXState + X86FXSTATE.xmm15], xmm15
+ stmxcsr [pXState + X86FXSTATE.MXCSR]
+
+ ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
+ lea pXState, [pCpumCpu + CPUMCPU.Guest.XState]
+ movdqa xmm0, [pXState + X86FXSTATE.xmm0]
+ movdqa xmm1, [pXState + X86FXSTATE.xmm1]
+ movdqa xmm2, [pXState + X86FXSTATE.xmm2]
+ movdqa xmm3, [pXState + X86FXSTATE.xmm3]
+ movdqa xmm4, [pXState + X86FXSTATE.xmm4]
+ movdqa xmm5, [pXState + X86FXSTATE.xmm5]
+ movdqa xmm6, [pXState + X86FXSTATE.xmm6]
+ movdqa xmm7, [pXState + X86FXSTATE.xmm7]
+ movdqa xmm8, [pXState + X86FXSTATE.xmm8]
+ movdqa xmm9, [pXState + X86FXSTATE.xmm9]
+ movdqa xmm10, [pXState + X86FXSTATE.xmm10]
+ movdqa xmm11, [pXState + X86FXSTATE.xmm11]
+ movdqa xmm12, [pXState + X86FXSTATE.xmm12]
+ movdqa xmm13, [pXState + X86FXSTATE.xmm13]
+ movdqa xmm14, [pXState + X86FXSTATE.xmm14]
+ movdqa xmm15, [pXState + X86FXSTATE.xmm15]
+ ldmxcsr [pXState + X86FXSTATE.MXCSR]
+
+ CPUMR0_SAVE_GUEST
+
+ ; Restore caller's XMM registers.
+ lea pXState, [pCpumCpu + CPUMCPU.Host.XState]
+ movdqa xmm6, [pXState + X86FXSTATE.xmm6 ]
+ movdqa xmm7, [pXState + X86FXSTATE.xmm7 ]
+ movdqa xmm8, [pXState + X86FXSTATE.xmm8 ]
+ movdqa xmm9, [pXState + X86FXSTATE.xmm9 ]
+ movdqa xmm10, [pXState + X86FXSTATE.xmm10]
+ movdqa xmm11, [pXState + X86FXSTATE.xmm11]
+ movdqa xmm12, [pXState + X86FXSTATE.xmm12]
+ movdqa xmm13, [pXState + X86FXSTATE.xmm13]
+ movdqa xmm14, [pXState + X86FXSTATE.xmm14]
+ movdqa xmm15, [pXState + X86FXSTATE.xmm15]
+ ldmxcsr [pXState + X86FXSTATE.MXCSR]
+
+ %endif
+
+ and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
+ mov byte [pCpumCpu + CPUMCPU.Guest.fUsedFpuGuest], 0
+ %ifdef IN_RC
+ test byte [ebp + 0ch], 1 ; fLeaveFpuAccessible
+ jz .no_cr0_restore
+ CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET ecx
+.no_cr0_restore:
+ %endif
+ popf
+%ifdef RT_ARCH_X86
+ pop esi
+ pop ebx
+%endif
+ leave
+ ret
+%undef pCpumCpu
+%undef pXState
+ENDPROC cpumRZSaveGuestFpuState
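
The VBOX_WITH_KERNEL_USING_XMM branch above performs a four-phase register dance; a hedged C rendering (all four sketch* helper names are invented labels for the phases, not real functions):

    /* Phase labels for the assembly above, assuming the kernel-XMM calling
       convention where guest XMM0..XMM15 were already captured at VM-exit. */
    static void cpumSketchSaveGuestFpuWithKernelXmm(PCPUMCPU pCpumCpu)
    {
        sketchParkXmm(&pCpumCpu->Host.XState);       /* 1. save caller's xmm6..15 + MXCSR    */
        sketchLoadGuestXmm(&pCpumCpu->Guest.XState); /* 2. reload guest xmm0..15 + MXCSR     */
        sketchSaveGuestState(pCpumCpu);              /* 3. CPUMR0_SAVE_GUEST stores it all   */
        sketchUnparkXmm(&pCpumCpu->Host.XState);     /* 4. restore caller's xmm6..15 + MXCSR */
    }
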
+
+
+;;
+; Saves the guest XMM0..15 registers and MXCSR.
+;
+; The purpose is to actualize the register state for read-only use, so CR0 is
+; restored in raw-mode context (meaning the FPU/SSE/AVX CPU features may be
+; inaccessible upon return).
+;
+; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumRZSaveGuestSseRegisters
+ push xBP
+ SEH64_PUSH_xBP
+ mov xBP, xSP
+ SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
+%ifndef VBOX_WITH_KERNEL_USING_XMM
+ ;
+ ; Load xCX with the guest pXState.
+ ;
+ %ifdef ASM_CALL64_GCC
+ mov xCX, rdi
+ %elifdef RT_ARCH_X86
+ mov xCX, dword [ebp + 8]
+ %endif
+ lea xCX, [xCX + CPUMCPU.Guest.XState]
+
+ %ifdef IN_RC
+ ; Temporarily grant access to the SSE state. xDX must be preserved until CR0 is restored!
+ mov edx, cr0
+ test edx, X86_CR0_TS | X86_CR0_EM
+ jz .skip_cr0_write
+ mov eax, edx
+ and eax, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, eax
+.skip_cr0_write:
+ %endif
+
+ ;
+ ; Do the job.
+ ;
+ stmxcsr [xCX + X86FXSTATE.MXCSR]
+ movdqa [xCX + X86FXSTATE.xmm0 ], xmm0
+ movdqa [xCX + X86FXSTATE.xmm1 ], xmm1
+ movdqa [xCX + X86FXSTATE.xmm2 ], xmm2
+ movdqa [xCX + X86FXSTATE.xmm3 ], xmm3
+ movdqa [xCX + X86FXSTATE.xmm4 ], xmm4
+ movdqa [xCX + X86FXSTATE.xmm5 ], xmm5
+ movdqa [xCX + X86FXSTATE.xmm6 ], xmm6
+ movdqa [xCX + X86FXSTATE.xmm7 ], xmm7
+ %if ARCH_BITS == 64
+ movdqa [xCX + X86FXSTATE.xmm8 ], xmm8
+ movdqa [xCX + X86FXSTATE.xmm9 ], xmm9
+ movdqa [xCX + X86FXSTATE.xmm10], xmm10
+ movdqa [xCX + X86FXSTATE.xmm11], xmm11
+ movdqa [xCX + X86FXSTATE.xmm12], xmm12
+ movdqa [xCX + X86FXSTATE.xmm13], xmm13
+ movdqa [xCX + X86FXSTATE.xmm14], xmm14
+ movdqa [xCX + X86FXSTATE.xmm15], xmm15
+ %endif
+
+ %ifdef IN_RC
+ CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET edx ; Restore CR0 if we changed it above.
+ %endif
+
+%endif ; !VBOX_WITH_KERNEL_USING_XMM
+
+ leave
+ ret
+ENDPROC cpumRZSaveGuestSseRegisters
+
+;;
+; Saves the guest YMM0..15 registers.
+;
+; The purpose is to actualize the register state for read-only use, so CR0 is
+; restored in raw-mode context (meaning the FPU/SSE/AVX CPU features may be
+; inaccessible upon return).
+;
+; @param pCpumCpu x86:[ebp+8] gcc:rdi msc:rcx CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumRZSaveGuestAvxRegisters
+ push xBP
+ SEH64_PUSH_xBP
+ mov xBP, xSP
+ SEH64_SET_FRAME_xBP 0
+%ifdef IN_RC
+ push xBX
+%endif
+SEH64_END_PROLOGUE
+
+ ;
+ ; Load xCX with the guest pXState.
+ ;
+%ifdef ASM_CALL64_GCC
+ mov xCX, rdi
+%elifdef RT_ARCH_X86
+ mov xCX, dword [ebp + 8]
+%endif
+ lea xCX, [xCX + CPUMCPU.Guest.XState]
+
+%ifdef IN_RC
+ ; Temporarily grant access to the SSE state. xBX must be preserved until CR0 is restored!
+ mov ebx, cr0
+ test ebx, X86_CR0_TS | X86_CR0_EM
+ jz .skip_cr0_write
+ mov eax, ebx
+ and eax, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, eax
+.skip_cr0_write:
+%endif
+
+ ;
+ ; Use XSAVE to do the job.
+ ;
+ ; Drivers shouldn't use AVX registers without saving+loading:
+ ; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
+; However, the compiler docs have a different idea:
+ ; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
+ ; We'll go with the former for now.
+ ;
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+ mov eax, XSAVE_C_YMM
+%else
+ mov eax, XSAVE_C_YMM | XSAVE_C_SSE ; The SSE component includes MXCSR.
+%endif
+ xor edx, edx
+%if ARCH_BITS == 64
+ o64 xsave [xCX]
+%else
+ xsave [xCX]
+%endif
+
+%ifdef IN_RC
+ CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET ebx ; Restore CR0 if we changed it above.
+ pop xBX
+%endif
+ leave
+ ret
+ENDPROC cpumRZSaveGuestAvxRegisters
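
For comparison, the same XSAVE invocation expressed with compiler intrinsics (a hedged, self-contained sketch; the bit positions follow the x86 XSAVE component numbering that XSAVE_C_SSE/XSAVE_C_YMM encode, SSE = bit 1, AVX high halves = bit 2):

    #include <stdint.h>
    #include <immintrin.h> /* _xsave/_xsave64; build with -mxsave on GCC/Clang */

    /* Save the SSE (XMM + MXCSR) and AVX (YMM high halves) components into a
       64-byte aligned XSAVE area; EDX:EAX carry the mask, as in the asm above. */
    static void sketchXsaveSseAvx(void *pvXState)
    {
        uint64_t const fComponents = (UINT64_C(1) << 1)  /* SSE     */
                                   | (UINT64_C(1) << 2); /* AVX/YMM */
    #if defined(__x86_64__) || defined(_M_X64)
        _xsave64(pvXState, fComponents);
    #else
        _xsave(pvXState, fComponents);
    #endif
    }
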
+
diff --git a/src/VBox/VMM/VMMRZ/Makefile.kup b/src/VBox/VMM/VMMRZ/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMRZ/Makefile.kup
diff --git a/src/VBox/VMM/VMMRZ/VMMRZ.cpp b/src/VBox/VMM/VMMRZ/VMMRZ.cpp
new file mode 100644
index 00000000..ce089f4a
--- /dev/null
+++ b/src/VBox/VMM/VMMRZ/VMMRZ.cpp
@@ -0,0 +1,113 @@
+/* $Id: VMMRZ.cpp $ */
+/** @file
+ * VMM - Virtual Machine Monitor, Raw-mode and ring-0 context code.
+ */
+
+/*
+ * Copyright (C) 2009-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_VMM
+#include <VBox/vmm/vmm.h>
+#include "VMMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/errcore.h>
+#include <iprt/string.h>
+
+
+/**
+ * Disables all host calls, except certain fatal ones.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @thread EMT.
+ */
+VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPUCC pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
+#endif
+
+ Assert(pVCpu->vmmr0.s.cCallRing3Disabled < 16);
+ if (ASMAtomicUoIncU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 1)
+ {
+#ifdef IN_RC
+ pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = true;
+#else
+ pVCpu->vmmr0.s.fLogFlushingDisabled = true;
+#endif
+ }
+
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ ASMSetFlags(fFlags);
+#endif
+}
+
+
+/**
+ * Counterpart to VMMRZCallRing3Disable(); re-enables host calls.
+ *
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ * @thread EMT.
+ */
+VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPUCC pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
+#endif
+
+ Assert(pVCpu->vmmr0.s.cCallRing3Disabled > 0);
+ if (ASMAtomicUoDecU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 0)
+ {
+#ifdef IN_RC
+ pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
+#else
+ pVCpu->vmmr0.s.fLogFlushingDisabled = false;
+#endif
+ }
+
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ ASMSetFlags(fFlags);
+#endif
+}
+
+
+/**
+ * Checks whether it's possible to call the host context or not.
+ *
+ * @returns true if it's safe, false if it isn't.
+ * @param pVCpu The cross context virtual CPU structure of the calling EMT.
+ */
+VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPUCC pVCpu)
+{
+ VMCPU_ASSERT_EMT(pVCpu);
+ Assert(pVCpu->vmmr0.s.cCallRing3Disabled <= 16);
+ return pVCpu->vmmr0.s.cCallRing3Disabled == 0;
+}
+
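
A minimal usage sketch of the disable/enable bracket (doSomethingUninterruptible() is a placeholder; the three VMMRZ calls are from VMMRZ.cpp above):

    /* Hedged sketch: keep ring-3 calls and log flushing disabled across a
       critical section. The counter permits nesting up to the asserted depth. */
    static void sketchNoRing3Bracket(PVMCPUCC pVCpu)
    {
        VMMRZCallRing3Disable(pVCpu);
        Assert(!VMMRZCallRing3IsEnabled(pVCpu));

        doSomethingUninterruptible(pVCpu); /* placeholder */

        VMMRZCallRing3Enable(pVCpu);
    }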