Diffstat (limited to 'src/VBox/VMM/VMMSwitcher')
-rw-r--r--  src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm      32
-rw-r--r--  src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm      31
-rw-r--r--  src/VBox/VMM/VMMSwitcher/32BitToPAE.asm        33
-rw-r--r--  src/VBox/VMM/VMMSwitcher/AMD64Stub.asm        111
-rw-r--r--  src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm      35
-rw-r--r--  src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm        35
-rw-r--r--  src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac  1259
-rw-r--r--  src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac  2015
-rw-r--r--  src/VBox/VMM/VMMSwitcher/Makefile.kup           0
-rw-r--r--  src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm        33
-rw-r--r--  src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm        32
-rw-r--r--  src/VBox/VMM/VMMSwitcher/PAEToPAE.asm          32
-rw-r--r--  src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac     1148
-rw-r--r--  src/VBox/VMM/VMMSwitcher/X86Stub.asm          110
14 files changed, 4906 insertions, 0 deletions
diff --git a/src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm b/src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm
new file mode 100644
index 00000000..72e1c4d2
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm
@@ -0,0 +1,32 @@
+; $Id: 32BitTo32Bit.asm $
+;; @file
+; VMM - World Switchers, 32-Bit to 32-Bit.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_32_TO_32
+%define SWITCHER_DESCRIPTION "32-bit to/from 32-bit"
+%define NAME_OVERLOAD(name) vmmR3Switcher32BitTo32Bit_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_32BIT_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_32BIT_CR3
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/PAEand32Bit.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm b/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm
new file mode 100644
index 00000000..f16b8f03
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm
@@ -0,0 +1,31 @@
+; $Id: 32BitToAMD64.asm $
+;; @file
+; VMM - World Switchers, 32-Bit to AMD64 intermediate context.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_32_TO_AMD64
+%define SWITCHER_DESCRIPTION "32-bit to/from AMD64 intermediate context"
+%define NAME_OVERLOAD(name) vmmR3Switcher32BitToAMD64_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_32BIT_CR3
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/LegacyandAMD64.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/32BitToPAE.asm b/src/VBox/VMM/VMMSwitcher/32BitToPAE.asm
new file mode 100644
index 00000000..7e45d0fd
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/32BitToPAE.asm
@@ -0,0 +1,33 @@
+; $Id: 32BitToPAE.asm $
+;; @file
+; VMM - World Switchers, 32-Bit to PAE.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_32_TO_PAE
+%define SWITCHER_DESCRIPTION "32-bit to/from PAE"
+%define NAME_OVERLOAD(name) vmmR3Switcher32BitToPAE_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_32BIT_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_PAE_CR3
+%define NEED_PAE_ON_32BIT_HOST 1
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/PAEand32Bit.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/AMD64Stub.asm b/src/VBox/VMM/VMMSwitcher/AMD64Stub.asm
new file mode 100644
index 00000000..4a9f614e
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/AMD64Stub.asm
@@ -0,0 +1,111 @@
+; $Id: AMD64Stub.asm $
+;; @file
+; VMM - World Switchers, AMD64 Stub.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64Stub_ %+ name
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/err.mac"
+%include "VMMSwitcher.mac"
+
+
+BEGINCODE
+GLOBALNAME Start
+
+BITS 32
+
+BEGINPROC vmmR0ToRawMode
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmR0ToRawMode
+
+BITS 32
+BEGINPROC vmmRCCallTrampoline
+.tight_loop:
+ int3
+ jmp .tight_loop
+ENDPROC vmmRCCallTrampoline
+
+BEGINPROC vmmRCToHost
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHost
+
+BEGINPROC vmmRCToHostAsmNoReturn
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHostAsmNoReturn
+
+BEGINPROC vmmRCToHostAsm
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHostAsm
+
+GLOBALNAME End
+
+;
+; The description string (in the text section).
+;
+NAME(Description):
+ db "AMD64 Stub."
+ db 0
+
+
+;
+; Dummy fixups.
+;
+BEGINDATA
+GLOBALNAME Fixups
+ db FIX_THE_END ; final entry.
+GLOBALNAME FixupsEnd
+
+
+;;
+; The switcher definition structure.
+ALIGNDATA(16)
+GLOBALNAME Def
+ istruc VMMSWITCHERDEF
+ at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
+ at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
+ at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
+ at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF 0
+ at VMMSWITCHERDEF.enmType, dd VMMSWITCHER_AMD64_STUB
+ at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
+ at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
+ at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
+ ; disasm help
+ at VMMSWITCHERDEF.offHCCode0, dd 0
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offHCCode1, dd 0
+ at VMMSWITCHERDEF.cbHCCode1, dd 0
+ at VMMSWITCHERDEF.offIDCode0, dd 0
+ at VMMSWITCHERDEF.cbIDCode0, dd 0
+ at VMMSWITCHERDEF.offIDCode1, dd 0
+ at VMMSWITCHERDEF.cbIDCode1, dd 0
+ at VMMSWITCHERDEF.offGCCode, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.cbGCCode, dd NAME(End) - NAME(vmmRCCallTrampoline)
+
+ iend
+
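The stub above simply fails every entry point with VERR_VMM_SWITCHER_STUB while still exporting a complete VMMSWITCHERDEF. As a minimal, hypothetical sketch (not part of the patch), this is roughly how a consumer of that structure could turn one of its relative offsets into a callable address once the cbCode bytes have been copied somewhere; the register assignments below are invented for illustration only:

    ; Hypothetical: resolve the R0-to-raw-mode entry of a relocated switcher image.
    ; Assumes rbx = base the switcher code was copied to, rsi = its VMMSWITCHERDEF.
    mov     eax, [rsi + VMMSWITCHERDEF.offR0ToRawMode]  ; offset relative to pvCode/Start
    add     rax, rbx                                    ; absolute entry address
    call    rax                                         ; the stub returns VERR_VMM_SWITCHER_STUB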
diff --git a/src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm b/src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm
new file mode 100644
index 00000000..d66e5a02
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm
@@ -0,0 +1,35 @@
+; $Id: AMD64To32Bit.asm $
+;; @file
+; VMM - World Switchers, AMD64 to 32-bit
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%undef SWITCHER_TO_PAE
+%define SWITCHER_TO_32BIT 1
+%define SWITCHER_TYPE VMMSWITCHER_AMD64_TO_32
+%define SWITCHER_DESCRIPTION "AMD64 to/from 32-bit"
+%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64To32Bit_ %+ name
+;%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_AMD64_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_32BIT_CR3
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/AMD64andLegacy.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm b/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm
new file mode 100644
index 00000000..f5fc3962
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm
@@ -0,0 +1,35 @@
+; $Id: AMD64ToPAE.asm $
+;; @file
+; VMM - World Switchers, AMD64 to PAE
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TO_PAE 1
+%undef SWITCHER_TO_32BIT
+%define SWITCHER_TYPE VMMSWITCHER_AMD64_TO_PAE
+%define SWITCHER_DESCRIPTION "AMD64 to/from PAE"
+%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name
+;%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_AMD64_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_PAE_CR3
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/AMD64andLegacy.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac b/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
new file mode 100644
index 00000000..c7f26a02
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
@@ -0,0 +1,1259 @@
+; $Id: AMD64andLegacy.mac $
+;; @file
+; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;%define DEBUG_STUFF 1
+;%define STRICT_IF 1
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/apic.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/vmm/stam.mac"
+%include "VBox/vmm/vm.mac"
+%include "VBox/err.mac"
+%include "CPUMInternal.mac"
+%include "VMMSwitcher.mac"
+
+
+;
+; Start the fixup records
+; We collect the fixups in the .data section as we go along
+; It is therefore VITAL that no-one is using the .data section
+; for anything else between 'Start' and 'End'.
+;
+BEGINDATA
+GLOBALNAME Fixups
+
+
+
+BEGINCODE
+GLOBALNAME Start
+
+BITS 64
+
+;;
+; The C interface.
+;
+; @param pVM gcc: rdi msc:rcx The cross context VM structure.
+;
+BEGINPROC vmmR0ToRawMode
+%ifdef DEBUG_STUFF
+ COM64_S_NEWLINE
+ COM64_S_CHAR '^'
+%endif
+ ;
+ ; The ordinary version of the code.
+ ;
+
+ %ifdef STRICT_IF
+ pushf
+ pop rax
+ test eax, X86_EFL_IF
+ jz .if_clear_in
+ mov eax, 0c0ffee00h
+ ret
+.if_clear_in:
+ %endif
+
+ ;
+ ; make r9 = pVM and rdx = pCpum.
+ ; rax, rcx and r8 are scratch hereafter.
+ %ifdef RT_OS_WINDOWS
+ mov r9, rcx
+ %else
+ mov r9, rdi
+ %endif
+ lea rdx, [r9 + VM.cpum]
+
+ %ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ lea r8, [r9 + VM.StatSwitcherToGC]
+ STAM64_PROFILE_ADV_START r8
+ %endif
+
+ ;
+ ; Call worker (far return).
+ ;
+ mov eax, cs
+ push rax
+ call NAME(vmmR0ToRawModeAsm)
+
+ %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Unblock Local APIC NMI vectors
+ ; Do this here to ensure the host CS is already restored
+ mov r8d, [rdx + CPUM.offCPUMCPU0]
+ mov ecx, [rdx + r8 + CPUMCPU.fApicDisVectors]
+ test ecx, ecx
+ jz gth64_apic_done
+ cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
+ je gth64_x2apic
+
+ ; Legacy xAPIC mode:
+ mov r8, [rdx + r8 + CPUMCPU.pvApicBase]
+ shr ecx, 1
+ jnc gth64_nolint0
+ and dword [r8 + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
+gth64_nolint0:
+ shr ecx, 1
+ jnc gth64_nolint1
+ and dword [r8 + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
+gth64_nolint1:
+ shr ecx, 1
+ jnc gth64_nopc
+ and dword [r8 + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
+gth64_nopc:
+ shr ecx, 1
+ jnc gth64_notherm
+ and dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
+gth64_notherm:
+ shr ecx, 1
+ jnc gth64_nocmci
+ and dword [r8 + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
+gth64_nocmci:
+ shr ecx, 1
+ jnc gth64_noeilvt0
+ and dword [r8 + APIC_REG_EILVT0], ~APIC_REG_LVT_MASKED
+gth64_noeilvt0:
+ shr ecx, 1
+ jnc gth64_noeilvt1
+ and dword [r8 + APIC_REG_EILVT1], ~APIC_REG_LVT_MASKED
+gth64_noeilvt1:
+ shr ecx, 1
+ jnc gth64_noeilvt2
+ and dword [r8 + APIC_REG_EILVT2], ~APIC_REG_LVT_MASKED
+gth64_noeilvt2:
+ shr ecx, 1
+ jnc gth64_noeilvt3
+ and dword [r8 + APIC_REG_EILVT3], ~APIC_REG_LVT_MASKED
+gth64_noeilvt3:
+
+ jmp gth64_apic_done
+
+ ; x2APIC mode:
+gth64_x2apic:
+ mov r8, rax ; save rax
+ mov r10, rcx
+ shr r10d, 1
+ jnc gth64_x2_nolint0
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth64_x2_nolint0:
+ shr r10d, 1
+ jnc gth64_x2_nolint1
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth64_x2_nolint1:
+ shr r10d, 1
+ jnc gth64_x2_nopc
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth64_x2_nopc:
+ shr r10d, 1
+ jnc gth64_x2_notherm
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth64_x2_notherm:
+ shr r10d, 1
+ jnc gth64_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth64_x2_nocmci:
+ mov rax, r8 ; restore rax
+
+gth64_apic_done:
+ %endif
+
+ %ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ lea r8, [r9 + VM.StatSwitcherToGC]
+ STAM64_PROFILE_ADV_STOP r8
+ %endif
+
+ ret
+ENDPROC vmmR0ToRawMode
+
+
+
+; *****************************************************************************
+; vmmR0ToRawModeAsm
+;
+; Phase one of the switch from host to guest context (host MMU context)
+;
+; INPUT:
+; - edx virtual address of CPUM structure (valid in host context)
+;
+; USES/DESTROYS:
+; - eax, ecx, edx, r8
+;
+; ASSUMPTION:
+; - current CS and DS selectors are wide open
+;
+; *****************************************************************************
+ALIGNCODE(16)
+BEGINPROC vmmR0ToRawModeAsm
+ ;; Store the offset from CPUM to CPUMCPU in r8
+ mov r8d, [rdx + CPUM.offCPUMCPU0]
+
+ ;;
+ ;; Save CPU host context
+ ;; Skip eax, edx and ecx as these are not preserved over calls.
+ ;;
+ ; general registers.
+ ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
+ mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
+ ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
+ ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
+ mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
+ mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
+ mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
+ mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
+ ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
+ ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
+ mov [rdx + r8 + CPUMCPU.Host.r10], r10
+ mov [rdx + r8 + CPUMCPU.Host.r11], r11
+ mov [rdx + r8 + CPUMCPU.Host.r12], r12
+ mov [rdx + r8 + CPUMCPU.Host.r13], r13
+ mov [rdx + r8 + CPUMCPU.Host.r14], r14
+ mov [rdx + r8 + CPUMCPU.Host.r15], r15
+ ; selectors.
+ mov [rdx + r8 + CPUMCPU.Host.ds], ds
+ mov [rdx + r8 + CPUMCPU.Host.es], es
+ mov [rdx + r8 + CPUMCPU.Host.fs], fs
+ mov [rdx + r8 + CPUMCPU.Host.gs], gs
+ mov [rdx + r8 + CPUMCPU.Host.ss], ss
+ ; MSRs
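+ ; (rdmsr returns the value in edx:eax, so the CPUMCPU pointer in rdx is parked in rbx across the reads.)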
+ mov rbx, rdx
+ mov ecx, MSR_K8_FS_BASE
+ rdmsr
+ mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
+ mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
+ mov ecx, MSR_K8_GS_BASE
+ rdmsr
+ mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
+ mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ mov [rbx + r8 + CPUMCPU.Host.efer], eax
+ mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
+ mov rdx, rbx
+ ; special registers.
+ sldt [rdx + r8 + CPUMCPU.Host.ldtr]
+ sidt [rdx + r8 + CPUMCPU.Host.idtr]
+ sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
+ str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
+ ; flags
+ pushf
+ pop qword [rdx + r8 + CPUMCPU.Host.rflags]
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Block Local APIC NMI vectors
+ cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
+ je htg_x2apic
+
+ ; Legacy xAPIC mode. No write completion required when writing to the
+ ; LVT registers as we have mapped the APIC page non-cacheable and the
+ ; MMIO is CPU-local.
+ mov rbx, [rdx + r8 + CPUMCPU.pvApicBase]
+ or rbx, rbx
+ jz htg_apic_done
+ xor edi, edi ; fApicDisVectors
+ mov eax, [rbx + APIC_REG_LVT_LINT0]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_LVT_LINT0], eax
+htg_nolint0:
+ mov eax, [rbx + APIC_REG_LVT_LINT1]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_LVT_LINT1], eax
+htg_nolint1:
+ mov eax, [rbx + APIC_REG_LVT_PC]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_LVT_PC], eax
+htg_nopc:
+ mov eax, [rbx + APIC_REG_VERSION]
+ shr eax, 16
+ push rax
+ cmp al, 5
+ jb htg_notherm
+ je htg_nocmci
+ mov eax, [rbx + APIC_REG_LVT_CMCI]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_LVT_CMCI], eax
+htg_nocmci:
+ mov eax, [rbx + APIC_REG_LVT_THMR]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_LVT_THMR], eax
+htg_notherm:
+ pop rax
+ test ah, ah
+ jns htg_noeilvt
+
+ ; AMD Extended LVT registers
+ mov esi, [rbx + 0x400]
+ shr esi, 16
+ and esi, 0xff
+ jz htg_noeilvt
+ mov ebp, 0x20
+htg_tsteilvtx:
+ mov eax, [rbx + APIC_REG_EILVT0]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_noeilvtx
+ or edi, ebp
+ or eax, APIC_REG_LVT_MASKED
+ mov [rbx + APIC_REG_EILVT0], eax
+htg_noeilvtx:
+ add rbx, 0x10 ; clobbers rbx!
+ shl ebp, 1
+ dec esi
+ jnz htg_tsteilvtx
+
+htg_noeilvt:
+ mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
+ jmp htg_apic_done
+
+ ; x2APIC mode:
+htg_x2apic:
+ mov r15, rdx ; save rdx
+ xor edi, edi ; fApicDisVectors
+
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint0:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint1:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nopc:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+ rdmsr
+ shr eax, 16
+ cmp al, 5
+ jb htg_x2_notherm
+ je htg_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nocmci:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_notherm:
+ mov rdx, r15
+ mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
+htg_apic_done:
+
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
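+; Note on the fApicDisVectors bits accumulated in edi above (and consumed again by
+; the unmasking code in vmmR0ToRawMode): bit 0 = LINT0, bit 1 = LINT1, bit 2 = perf
+; counter LVT, bit 3 = thermal LVT, bit 4 = CMCI, bits 5 and up = AMD extended
+; LVT 0..n, one bit per register.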
+
+ FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
+ ; save MSR_IA32_SYSENTER_CS register.
+ mov rbx, rdx ; save edx
+ mov ecx, MSR_IA32_SYSENTER_CS
+ rdmsr ; edx:eax <- MSR[ecx]
+ mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
+ mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
+ xor eax, eax ; load 0:0 to cause #GP upon sysenter
+ xor edx, edx
+ wrmsr
+ mov rdx, rbx ; restore edx
+ jmp short htg_no_sysenter
+
+ALIGNCODE(16)
+htg_no_sysenter:
+
+ ;; handle use flags.
+ mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
+ and esi, ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST) ; Clear CPUM_USED_* flags.
+ mov [rdx + r8 + CPUMCPU.fUseFlags], esi
+
+ ; debug registers.
+ test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
+ jnz htg_debug_regs_save
+htg_debug_regs_no:
+ DEBUG_CHAR('a') ; trashes esi
+
+ ; control registers.
+ mov rax, cr0
+ mov [rdx + r8 + CPUMCPU.Host.cr0], rax
+ ;mov rax, cr2 ; assume the host OS doesn't stuff things in cr2. (safe)
+ ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
+ mov rax, cr3
+ mov [rdx + r8 + CPUMCPU.Host.cr3], rax
+ mov rax, cr4
+ mov [rdx + r8 + CPUMCPU.Host.cr4], rax
+
+ ;;
+ ;; Start switching to VMM context.
+ ;;
+
+ ;
+ ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
+ ; Also disable WP. (eax==cr4 now)
+ ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
+ ;
+ and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
+ mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
+ DEBUG_CHAR('b') ; trashes esi
+ ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
+ ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
+ ; simplify this operation a bit (and improve locality of the data).
+
+ ;
+ ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
+ ; FXSAVE and XSAVE support on the host CPU
+ ;
+ and ecx, [rdx + CPUM.CR4.AndMask]
+ or eax, ecx
+ or eax, [rdx + CPUM.CR4.OrMask]
+ mov cr4, rax
+ DEBUG_CHAR('c') ; trashes esi
+
+ mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
+ and eax, X86_CR0_EM
+ or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
+ mov cr0, rax
+ DEBUG_CHAR('0') ; trashes esi
+
+
+ ; Load new gdt so we can do far jump to guest code after cr3 reload.
+ lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
+ DEBUG_CHAR('1') ; trashes esi
+
+ ; Store the hypervisor cr3 for later loading
+ mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
+
+ ;;
+ ;; Load Intermediate memory context.
+ ;;
+ FIXUP FIX_INTER_AMD64_CR3, 1
+ mov eax, 0ffffffffh
+ mov cr3, rax
+ DEBUG_CHAR('2') ; trashes esi
+
+ ;;
+ ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
+ ;;
+ jmp far [NAME(fpIDEnterTarget) wrt rip]
+
+; 16:32 Pointer to IDEnterTarget.
+NAME(fpIDEnterTarget):
+ FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
+dd 0
+ FIXUP FIX_HYPER_CS, 0
+dd 0
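+; (The first dword above is patched to the identity-mapped address of IDEnterTarget,
+; the second to the hypervisor CS selector; the far jmp reads it as a 16:32 pointer.)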
+
+
+;;
+; Detour for saving the host DR7 and DR6.
+; esi and rdx must be preserved.
+htg_debug_regs_save:
+DEBUG_S_CHAR('s');
+ mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
+ mov [rdx + r8 + CPUMCPU.Host.dr7], rax
+ mov ecx, X86_DR7_INIT_VAL
+ cmp eax, ecx
+ je .htg_debug_regs_dr7_disabled
+ mov dr7, rcx
+.htg_debug_regs_dr7_disabled:
+ mov rax, dr6 ; just in case we save the state register too.
+ mov [rdx + r8 + CPUMCPU.Host.dr6], rax
+ ; save host DR0-3?
+ test esi, CPUM_USE_DEBUG_REGS_HYPER
+ jz htg_debug_regs_no
+DEBUG_S_CHAR('S');
+ mov rax, dr0
+ mov [rdx + r8 + CPUMCPU.Host.dr0], rax
+ mov rbx, dr1
+ mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
+ mov rcx, dr2
+ mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
+ mov rax, dr3
+ mov [rdx + r8 + CPUMCPU.Host.dr3], rax
+ or dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
+ jmp htg_debug_regs_no
+
+
+ ; We're now on identity mapped pages in 32-bit compatibility mode.
+BITS 32
+ALIGNCODE(16)
+GLOBALNAME IDEnterTarget
+ DEBUG_CHAR('3')
+
+ ; 2. Deactivate long mode by turning off paging.
+ mov ebx, cr0
+ and ebx, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
+ mov cr0, ebx
+ DEBUG_CHAR('4')
+
+ ; 3. Load intermediate page table.
+ FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
+ mov edx, 0ffffffffh
+ mov cr3, edx
+
+ ; 4. Disable long mode.
+ ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ DEBUG_CHAR('5')
+ and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
+ wrmsr
+ DEBUG_CHAR('6')
+
+%ifndef SWITCHER_TO_PAE
+ ; 4b. Disable PAE.
+ mov eax, cr4
+ and eax, ~X86_CR4_PAE
+ mov cr4, eax
+%else
+%endif
+
+ ; 5. Enable paging.
+ or ebx, X86_CR0_PG
+ mov cr0, ebx
+ jmp short just_a_jump
+just_a_jump:
+ DEBUG_CHAR('7')
+
+ ;;
+ ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
+ ;;
+ FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
+ jmp near NAME(JmpGCTarget)
+
+
+ ;;
+ ;; When we arrive at this label we're at the
+ ;; guest code mapping of the switching code.
+ ;;
+ALIGNCODE(16)
+GLOBALNAME JmpGCTarget
+ DEBUG_CHAR('-')
+ ; load final cr3 and do far jump to load cs.
+ mov cr3, ebp ; ebp set above
+ DEBUG_CHAR('0')
+
+ ;;
+ ;; We're in VMM MMU context and VMM CS is loaded.
+ ;; Setup the rest of the VMM state.
+ ;;
+ ; Load selectors
+ DEBUG_CHAR('1')
+ FIXUP FIX_HYPER_DS, 1
+ mov eax, 0ffffh
+ mov ds, eax
+ mov es, eax
+ xor eax, eax
+ mov gs, eax
+ mov fs, eax
+ ; Load pCpum into EDX
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+ ; Activate guest IDT
+ DEBUG_CHAR('2')
+ lidt [edx + CPUMCPU.Hyper.idtr]
+
+ ; Setup the stack.
+ DEBUG_CHAR('3')
+ mov ax, [edx + CPUMCPU.Hyper.ss.Sel]
+ mov ss, ax
+ mov esp, [edx + CPUMCPU.Hyper.esp]
+
+ ; Restore TSS selector; must mark it as not busy before using ltr (!)
+ DEBUG_S_CHAR('4')
+ FIXUP FIX_GC_TSS_GDTE_DW2, 2
+ and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
+ DEBUG_S_CHAR('5')
+ ltr word [edx + CPUMCPU.Hyper.tr.Sel]
+ DEBUG_S_CHAR('6')
+
+ ; Activate the ldt (now we can safely crash).
+ lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
+ DEBUG_S_CHAR('7')
+
+ ;; Use flags.
+ mov esi, [edx + CPUMCPU.fUseFlags]
+
+ ; debug registers
+ test esi, CPUM_USE_DEBUG_REGS_HYPER
+ jnz htg_debug_regs_guest
+htg_debug_regs_guest_done:
+ DEBUG_S_CHAR('9')
+
+ ; General registers (sans edx).
+ mov eax, [edx + CPUMCPU.Hyper.eax]
+ mov ebx, [edx + CPUMCPU.Hyper.ebx]
+ mov ecx, [edx + CPUMCPU.Hyper.ecx]
+ mov ebp, [edx + CPUMCPU.Hyper.ebp]
+ mov esi, [edx + CPUMCPU.Hyper.esi]
+ mov edi, [edx + CPUMCPU.Hyper.edi]
+ DEBUG_S_CHAR('!')
+
+ ;;
+ ;; Return to the VMM code which either called the switcher or
+ ;; the code set up to run by HC.
+ ;;
+ push dword [edx + CPUMCPU.Hyper.eflags]
+ push cs
+ push dword [edx + CPUMCPU.Hyper.eip]
+ mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
+
+%ifdef DEBUG_STUFF
+ COM32_S_PRINT ';eip='
+ push eax
+ mov eax, [esp + 8]
+ COM32_S_DWORD_REG eax
+ pop eax
+ COM32_S_CHAR ';'
+%endif
+%ifdef VBOX_WITH_STATISTICS
+ push eax
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
+ mov eax, 0ffffffffh
+ STAM32_PROFILE_ADV_STOP eax
+ pop eax
+%endif
+
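+ ; The eflags/cs/eip pushed above form exactly the frame this iret pops, so
+ ; execution resumes at Hyper.eip with Hyper.eflags in effect.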
+ iret ; Use iret to make debugging and TF/RF work.
+
+;;
+; Detour for saving host DR0-3 and loading hypervisor debug registers.
+; esi and edx must be preserved.
+htg_debug_regs_guest:
+ DEBUG_S_CHAR('D')
+ DEBUG_S_CHAR('R')
+ DEBUG_S_CHAR('x')
+ ; load hyper DR0-7
+ mov ebx, [edx + CPUMCPU.Hyper.dr]
+ mov dr0, ebx
+ mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
+ mov dr1, ecx
+ mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
+ mov dr2, eax
+ mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
+ mov dr3, ebx
+ mov ecx, X86_DR6_INIT_VAL
+ mov dr6, ecx
+ mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
+ mov dr7, eax
+ or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+ jmp htg_debug_regs_guest_done
+
+ENDPROC vmmR0ToRawModeAsm
+
+
+;;
+; Trampoline for doing a call when starting the hypervisor execution.
+;
+; Push any arguments to the routine.
+; Push the argument frame size (cArg * 4).
+; Push the call target (_cdecl convention).
+; Push the address of this routine.
+;
+;
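+; Expected guest-context stack on entry (inferred from the pops below):
+; [esp + 0] call target address - popped into eax and called
+; [esp + 4] argument frame size (cArg * 4) - popped into edi, used for cleanup
+; [esp + 8] first _cdecl argument, and so on.
+;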
+ALIGNCODE(16)
+BEGINPROC vmmRCCallTrampoline
+%ifdef DEBUG_STUFF
+ COM32_S_CHAR 'c'
+ COM32_S_CHAR 't'
+ COM32_S_CHAR '!'
+%endif
+
+ ; call routine
+ pop eax ; call address
+ pop edi ; argument count.
+%ifdef DEBUG_STUFF
+ COM32_S_PRINT ';eax='
+ COM32_S_DWORD_REG eax
+ COM32_S_CHAR ';'
+%endif
+ call eax ; do call
+ add esp, edi ; cleanup stack
+
+ ; return to the host context (eax = C returncode).
+%ifdef DEBUG_STUFF
+ COM32_S_CHAR '`'
+%endif
+.to_host_again:
+ call NAME(vmmRCToHostAsm)
+ mov eax, VERR_VMM_SWITCHER_IPE_1
+ jmp .to_host_again
+ENDPROC vmmRCCallTrampoline
+
+
+
+;;
+; The C interface.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHost
+%ifdef DEBUG_STUFF
+ push esi
+ COM_NEWLINE
+ DEBUG_CHAR('b')
+ DEBUG_CHAR('a')
+ DEBUG_CHAR('c')
+ DEBUG_CHAR('k')
+ DEBUG_CHAR('!')
+ COM_NEWLINE
+ pop esi
+%endif
+ mov eax, [esp + 4]
+ jmp NAME(vmmRCToHostAsm)
+ENDPROC vmmRCToHost
+
+
+;;
+; vmmRCToHostAsmNoReturn
+;
+; This is an entry point used by TRPM when dealing with raw-mode traps,
+; i.e. traps in the hypervisor code. This will not return and saves no
+; state, because the caller has already saved the state.
+;
+; @param eax Return code.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsmNoReturn
+ DEBUG_S_CHAR('%')
+
+%ifdef VBOX_WITH_STATISTICS
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_STOP edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+%endif
+
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+
+ jmp vmmRCToHostAsm_SaveNoGeneralRegs
+ENDPROC vmmRCToHostAsmNoReturn
+
+
+;;
+; vmmRCToHostAsm
+;
+; This is an entry point used by TRPM to return to host context when an
+; interrupt occurred or a guest trap needs handling in host context. It
+; is also used by the C interface above.
+;
+; The hypervisor context is saved and it will return to the caller if
+; host context so desires.
+;
+; @param eax Return code.
+; @uses eax, edx, ecx (or it may use them in the future)
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsm
+ DEBUG_S_CHAR('%')
+ push edx
+
+%ifdef VBOX_WITH_STATISTICS
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_STOP edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+%endif
+
+ ;
+ ; Load the CPUM pointer.
+ ;
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+
+ ; Save register context.
+ pop dword [edx + CPUMCPU.Hyper.edx]
+ pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
+ mov dword [edx + CPUMCPU.Hyper.esp], esp
+ mov dword [edx + CPUMCPU.Hyper.eax], eax
+ mov dword [edx + CPUMCPU.Hyper.ebx], ebx
+ mov dword [edx + CPUMCPU.Hyper.ecx], ecx
+ mov dword [edx + CPUMCPU.Hyper.esi], esi
+ mov dword [edx + CPUMCPU.Hyper.edi], edi
+ mov dword [edx + CPUMCPU.Hyper.ebp], ebp
+
+ ; special registers which may change.
+vmmRCToHostAsm_SaveNoGeneralRegs:
+%ifdef STRICT_IF
+ pushf
+ pop ecx
+ test ecx, X86_EFL_IF
+ jz .if_clear_out
+ mov eax, 0c0ffee01h
+ cli
+.if_clear_out:
+%endif
+ mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
+
+ ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
+ sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
+
+ ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
+ ; The FPU context is saved further down, just before the host FPU state is restored (separate branch).
+
+ ; Disable debug registers if active so they cannot trigger while switching.
+ test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+ jz .gth_disabled_dr7
+ mov eax, X86_DR7_INIT_VAL
+ mov dr7, eax
+.gth_disabled_dr7:
+
+ ;;
+ ;; Load Intermediate memory context.
+ ;;
+ FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
+ mov eax, 0ffffffffh
+ mov cr3, eax
+ DEBUG_CHAR('?')
+
+ ;; We're now in intermediate memory context!
+
+ ;;
+ ;; 0. Jump to identity mapped location
+ ;;
+ FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
+ jmp near NAME(IDExitTarget)
+
+ ; We're now on identity mapped pages!
+ALIGNCODE(16)
+GLOBALNAME IDExitTarget
+ DEBUG_CHAR('1')
+
+ ; 1. Disable paging.
+ mov ebx, cr0
+ and ebx, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
+ mov cr0, ebx
+ DEBUG_CHAR('2')
+
+ ; 2. Enable PAE.
+%ifdef SWITCHER_TO_PAE
+ ; - already enabled
+%else
+ mov ecx, cr4
+ or ecx, X86_CR4_PAE
+ mov cr4, ecx
+%endif
+
+ ; 3. Load long mode intermediate CR3.
+ FIXUP FIX_INTER_AMD64_CR3, 1
+ mov ecx, 0ffffffffh
+ mov cr3, ecx
+ DEBUG_CHAR('3')
+
+ ; 4. Enable long mode.
+ mov ebp, edx
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ or eax, MSR_K6_EFER_LME
+ wrmsr
+ mov edx, ebp
+ DEBUG_CHAR('4')
+
+ ; 5. Enable paging.
+ or ebx, X86_CR0_PG
+ mov cr0, ebx
+ DEBUG_CHAR('5')
+
+ ; Jump from compatibility mode to 64-bit mode.
+ FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
+ jmp 0ffffh:0fffffffeh
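+ ; (Selector and offset above are placeholders; FIX_ID_FAR32_TO_64BIT_MODE patches
+ ; in the 64-bit code selector and the address of IDExit64Mode.)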
+
+ ;
+ ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
+ ; Move on to the HC mapping.
+ ;
+BITS 64
+ALIGNCODE(16)
+NAME(IDExit64Mode):
+ DEBUG_CHAR('6')
+ jmp [NAME(pHCExitTarget) wrt rip]
+
+; 64-bit jump target
+NAME(pHCExitTarget):
+FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
+dq 0ffffffffffffffffh
+
+; 64-bit pCpum address.
+NAME(pCpumHC):
+FIXUP FIX_HC_64BIT_CPUM, 0
+dq 0ffffffffffffffffh
+
+ ;
+ ; When we arrive here we're at the host context
+ ; mapping of the switcher code.
+ ;
+ALIGNCODE(16)
+GLOBALNAME HCExitTarget
+ DEBUG_CHAR('9')
+
+ ; Clear high dword of the CPUMCPU pointer
+ and rdx, 0ffffffffh
+
+ ; load final cr3
+ mov rsi, [rdx + CPUMCPU.Host.cr3]
+ mov cr3, rsi
+ DEBUG_CHAR('@')
+
+ ;;
+ ;; Restore Host context.
+ ;;
+ ; Load CPUM pointer into edx
+ mov rdx, [NAME(pCpumHC) wrt rip]
+ ; Load the CPUMCPU offset.
+ mov r8d, [rdx + CPUM.offCPUMCPU0]
+
+ ; activate host gdt and idt
+ lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
+ DEBUG_CHAR('0')
+ lidt [rdx + r8 + CPUMCPU.Host.idtr]
+ DEBUG_CHAR('1')
+ ; Restore TSS selector; must mark it as not busy before using ltr (!)
+%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
+ movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
+ and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
+ add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
+ and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
+ ltr word [rdx + r8 + CPUMCPU.Host.tr]
+%else
+ movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
+ and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
+ add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
+ mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
+ mov ebx, ecx ; save original value
+ and ecx, ~0200h ; clear busy flag (2nd type2 bit)
+ mov [rax + 4], ecx ; not using xchg here is paranoia..
+ ltr word [rdx + r8 + CPUMCPU.Host.tr]
+ xchg [rax + 4], ebx ; using xchg is paranoia too...
+%endif
+ ; activate ldt
+ DEBUG_CHAR('2')
+ lldt [rdx + r8 + CPUMCPU.Host.ldtr]
+ ; Restore segment registers
+ mov eax, [rdx + r8 + CPUMCPU.Host.ds]
+ mov ds, eax
+ mov eax, [rdx + r8 + CPUMCPU.Host.es]
+ mov es, eax
+ mov eax, [rdx + r8 + CPUMCPU.Host.fs]
+ mov fs, eax
+ mov eax, [rdx + r8 + CPUMCPU.Host.gs]
+ mov gs, eax
+ ; restore stack
+ mov eax, [rdx + r8 + CPUMCPU.Host.ss]
+ mov ss, eax
+ mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
+
+ FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
+ ; restore MSR_IA32_SYSENTER_CS register.
+ mov rbx, rdx ; save edx
+ mov ecx, MSR_IA32_SYSENTER_CS
+ mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
+ mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
+ wrmsr ; MSR[ecx] <- edx:eax
+ mov rdx, rbx ; restore edx
+ jmp short gth_sysenter_no
+
+ALIGNCODE(16)
+gth_sysenter_no:
+
+ ;; @todo AMD syscall
+
+ ; Restore FPU if guest has used it.
+ ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
+ mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
+ test esi, (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
+ jz gth_fpu_no
+ mov rcx, cr0
+ and rcx, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, rcx
+
+ mov r10, rdx ; Save rdx.
+
+ test esi, CPUM_USED_FPU_GUEST
+ jz gth_fpu_host
+
+ mov eax, [r10 + r8 + CPUMCPU.Guest.fXStateMask]
+ mov r9, [r10 + r8 + CPUMCPU.Guest.pXStateR0]
+ or eax, eax
+ jz gth_fpu_guest_fxsave
+ mov edx, [r10 + r8 + CPUMCPU.Guest.fXStateMask + 4]
+ xsave [r9]
+ jmp gth_fpu_host
+gth_fpu_guest_fxsave:
+ fxsave [r9]
+
+gth_fpu_host:
+ mov eax, [r10 + r8 + CPUMCPU.Host.fXStateMask]
+ mov r9, [r10 + r8 + CPUMCPU.Host.pXStateR0]
+ or eax, eax
+ jz gth_fpu_host_fxrstor
+ mov edx, [r10 + r8 + CPUMCPU.Host.fXStateMask + 4]
+ xrstor [r9] ; We saved 32-bit state, so only restore 32-bit.
+ jmp gth_fpu_done
+gth_fpu_host_fxrstor:
+ fxrstor [r9] ; We saved 32-bit state, so only restore 32-bit.
+
+gth_fpu_done:
+ mov rdx, r10 ; Restore rdx.
+ jmp gth_fpu_no
+
+ALIGNCODE(16)
+gth_fpu_no:
+
+ ; Control registers.
+ ; Would've liked to have these higher up in case of crashes, but
+ ; the fpu stuff must be done before we restore cr0.
+ mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
+ test rcx, X86_CR4_PCIDE
+ jz gth_no_pcide
+ mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
+ and rax, ~0xfff ; clear the PCID in cr3
+ mov cr3, rax
+ mov cr4, rcx
+ mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
+ mov cr3, rax ; reload it with the right PCID.
+ jmp gth_restored_cr4
+gth_no_pcide:
+ mov cr4, rcx
+gth_restored_cr4:
+ mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
+ mov cr0, rcx
+ ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
+ ;mov cr2, rcx
+
+ ; Restore MSRs
+ mov rbx, rdx
+ mov ecx, MSR_K8_FS_BASE
+ mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
+ mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
+ wrmsr
+ mov ecx, MSR_K8_GS_BASE
+ mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
+ mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
+ wrmsr
+ mov ecx, MSR_K6_EFER
+ mov eax, [rbx + r8 + CPUMCPU.Host.efer]
+ mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
+ wrmsr
+ mov rdx, rbx
+
+ ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
+ test esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
+ jnz gth_debug_regs_restore
+gth_debug_regs_done:
+ and dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
+
+ ; Restore general registers.
+ mov eax, edi ; restore return code. eax = return code !!
+ ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
+ mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
+ ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
+ ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
+ mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
+ mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
+ mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
+ mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
+ ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
+ ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
+ mov r10, [rdx + r8 + CPUMCPU.Host.r10]
+ mov r11, [rdx + r8 + CPUMCPU.Host.r11]
+ mov r12, [rdx + r8 + CPUMCPU.Host.r12]
+ mov r13, [rdx + r8 + CPUMCPU.Host.r13]
+ mov r14, [rdx + r8 + CPUMCPU.Host.r14]
+ mov r15, [rdx + r8 + CPUMCPU.Host.r15]
+
+ ; finally restore flags. (probably not required)
+ push qword [rdx + r8 + CPUMCPU.Host.rflags]
+ popf
+
+
+%ifdef DEBUG_STUFF
+ COM64_S_CHAR '4'
+%endif
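+ ; 48h is a REX.W prefix: the retf below pops the 64-bit rip and cs that
+ ; vmmR0ToRawMode pushed (push of cs followed by the near call) before entering here.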
+ db 048h
+ retf
+
+;;
+; Detour for restoring the host debug registers.
+; edx and edi must be preserved.
+gth_debug_regs_restore:
+ DEBUG_S_CHAR('d')
+ mov rax, dr7 ; Some DR7 paranoia first...
+ mov ecx, X86_DR7_INIT_VAL
+ cmp rax, rcx
+ je .gth_debug_skip_dr7_disabling
+ mov dr7, rcx
+.gth_debug_skip_dr7_disabling:
+ test esi, CPUM_USED_DEBUG_REGS_HOST
+ jz .gth_debug_regs_dr7
+
+ DEBUG_S_CHAR('r')
+ mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
+ mov dr0, rax
+ mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
+ mov dr1, rbx
+ mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
+ mov dr2, rcx
+ mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
+ mov dr3, rax
+.gth_debug_regs_dr7:
+ mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
+ mov dr6, rbx
+ mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
+ mov dr7, rcx
+
+ ; We clear the USED flags in the main code path.
+ jmp gth_debug_regs_done
+
+ENDPROC vmmRCToHostAsm
+
+
+GLOBALNAME End
+;
+; The description string (in the text section).
+;
+NAME(Description):
+ db SWITCHER_DESCRIPTION
+ db 0
+
+extern NAME(Relocate)
+
+;
+; End the fixup records.
+;
+BEGINDATA
+ db FIX_THE_END ; final entry.
+GLOBALNAME FixupsEnd
+
+;;
+; The switcher definition structure.
+ALIGNDATA(16)
+GLOBALNAME Def
+ istruc VMMSWITCHERDEF
+ at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
+ at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
+ at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
+ at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
+ at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
+ at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
+ at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
+ at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
+ ; disasm help
+ at VMMSWITCHERDEF.offHCCode0, dd 0
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
+ at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
+ at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
+ at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
+ at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
+
+ iend
+
diff --git a/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac b/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
new file mode 100644
index 00000000..80aa5eb4
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
@@ -0,0 +1,2015 @@
+; $Id: LegacyandAMD64.mac $
+;; @file
+; VMM - World Switchers, 32-bit to AMD64 intermediate context.
+;
+; This is used for running 64-bit guests on 32-bit hosts, not
+; normal raw-mode. All the code involved is contained in this
+; file.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+;; @note These values are from the HM64ON32OP enum in hm.h.
+%define HM64ON32OP_VMXRCStartVM64 1
+%define HM64ON32OP_SVMRCVMRun64 2
+%define HM64ON32OP_HMRCSaveGuestFPU64 3
+%define HM64ON32OP_HMRCSaveGuestDebug64 4
+%define HM64ON32OP_HMRCTestSwitcher64 5
+
+;;
+; This macro is used for storing a debug code in a CMOS location.
+;
+; If we triple fault or something, the debug code can be retrieved and we
+; might have a clue as to where the problem occurred. The code is currently
+; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
+; Extreme4 X79 asrock mainboard.
+;
+; @param %1 The debug code (byte)
+; @note Trashes AL.
+;
+%macro DEBUG_CMOS_TRASH_AL 1
+%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
+ mov al, 3
+ out 72h, al
+ mov al, %1
+ out 73h, al
+ in al, 73h
+%endif
+%endmacro
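+; (Ports 72h/73h are the index/data pair of the upper CMOS/RTC bank on typical
+; chipsets; the closing 'in al, 73h' just reads the byte back.)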
+
+;;
+; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
+; doesn't trash any registers.
+;
+%macro DEBUG_CMOS_STACK64 1
+%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
+ push rax
+ DEBUG_CMOS_TRASH_AL %1
+ pop rax
+%endif
+%endmacro
+
+;;
+; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
+; doesn't trash any registers.
+;
+%macro DEBUG_CMOS_STACK32 1
+%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
+ push eax
+ DEBUG_CMOS_TRASH_AL %1
+ pop eax
+%endif
+%endmacro
+
+
+;; Stubs for making OS/2 compile (though, not work).
+%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
+ %macro vmwrite 2,
+ int3
+ %endmacro
+ %define vmlaunch int3
+ %define vmresume int3
+ %define vmsave int3
+ %define vmload int3
+ %define vmrun int3
+ %define clgi int3
+ %define stgi int3
+ %macro invlpga 2,
+ int3
+ %endmacro
+%endif
+
+;; Debug options
+;%define DEBUG_STUFF 1
+;%define STRICT_IF 1
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "iprt/x86.mac"
+%include "VBox/err.mac"
+%include "VBox/apic.mac"
+
+%include "VBox/vmm/cpum.mac"
+%include "VBox/vmm/stam.mac"
+%include "VBox/vmm/vm.mac"
+%include "VBox/vmm/hm_vmx.mac"
+%include "CPUMInternal.mac"
+%include "HMInternal.mac"
+%include "VMMSwitcher.mac"
+
+
+;
+; Start the fixup records
+; We collect the fixups in the .data section as we go along
+; It is therefore VITAL that no-one is using the .data section
+; for anything else between 'Start' and 'End'.
+;
+BEGINDATA
+GLOBALNAME Fixups
+
+
+
+BEGINCODE
+GLOBALNAME Start
+
+BITS 32
+
+;;
+; The C interface.
+; @param [esp + 04h] Param 1 - VM handle
+; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
+; structure for the calling EMT.
+;
+BEGINPROC vmmR0ToRawMode
+%ifdef DEBUG_STUFF
+ COM32_S_NEWLINE
+ COM32_S_CHAR '^'
+%endif
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_START edx
+%endif
+
+ push ebp
+ mov ebp, [esp + 12] ; CPUMCPU offset
+
+ ; turn off interrupts
+ pushf
+ cli
+ ;DEBUG_CMOS_STACK32 10h
+
+ ;
+ ; Call worker.
+ ;
+ FIXUP FIX_HC_CPUM_OFF, 1, 0
+ mov edx, 0ffffffffh
+ push cs ; allow for far return and restore cs correctly.
+ call NAME(vmmR0ToRawModeAsm)
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Restore blocked Local APIC NMI vectors
+ ; Do this here to ensure the host CS is already restored
+ mov ecx, [edx + CPUMCPU.fApicDisVectors]
+ test ecx, ecx
+ jz gth_apic_done
+ cmp byte [edx + CPUMCPU.fX2Apic], 1
+ je gth_x2apic
+
+ ; Legacy xAPIC mode:
+ mov edx, [edx + CPUMCPU.pvApicBase]
+ shr ecx, 1
+ jnc gth_nolint0
+ and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
+gth_nolint0:
+ shr ecx, 1
+ jnc gth_nolint1
+ and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
+gth_nolint1:
+ shr ecx, 1
+ jnc gth_nopc
+ and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
+gth_nopc:
+ shr ecx, 1
+ jnc gth_notherm
+ and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
+gth_notherm:
+ shr ecx, 1
+ jnc gth_nocmci
+ and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
+gth_nocmci:
+ jmp gth_apic_done
+
+ ; x2APIC mode:
+gth_x2apic:
+ ;DEBUG_CMOS_STACK32 7ch
+ push eax ; save eax
+ push ebx ; save it for fApicDisVectors
+ push edx ; save edx just in case.
+ mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
+ shr ebx, 1
+ jnc gth_x2_nolint0
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nolint0:
+ shr ebx, 1
+ jnc gth_x2_nolint1
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nolint1:
+ shr ebx, 1
+ jnc gth_x2_nopc
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nopc:
+ shr ebx, 1
+ jnc gth_x2_notherm
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_notherm:
+ shr ebx, 1
+ jnc gth_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nocmci:
+ pop edx
+ pop ebx
+ pop eax
+
+gth_apic_done:
+%endif
+
+ ; restore original flags
+ ;DEBUG_CMOS_STACK32 7eh
+ popf
+ pop ebp
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_STOP edx
+%endif
+
+ ;DEBUG_CMOS_STACK32 7fh
+ ret
+
+ENDPROC vmmR0ToRawMode
+
+; *****************************************************************************
+; vmmR0ToRawModeAsm
+;
+; Phase one of the switch from host to guest context (host MMU context)
+;
+; INPUT:
+; - edx virtual address of CPUM structure (valid in host context)
+; - ebp offset of the CPUMCPU structure relative to CPUM.
+;
+; USES/DESTROYS:
+; - eax, ecx, edx, esi
+;
+; ASSUMPTION:
+; - current CS and DS selectors are wide open
+;
+; *****************************************************************************
+ALIGNCODE(16)
+BEGINPROC vmmR0ToRawModeAsm
+ ;;
+ ;; Save CPU host context
+ ;; Skip eax, edx and ecx as these are not preserved over calls.
+ ;;
+ CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ ; phys address of scratch page
+ mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
+ mov cr2, eax
+
+ mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
+%endif
+
+ ; general registers.
+ mov [edx + CPUMCPU.Host.ebx], ebx
+ mov [edx + CPUMCPU.Host.edi], edi
+ mov [edx + CPUMCPU.Host.esi], esi
+ mov [edx + CPUMCPU.Host.esp], esp
+ mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
+ ; selectors.
+ mov [edx + CPUMCPU.Host.ds], ds
+ mov [edx + CPUMCPU.Host.es], es
+ mov [edx + CPUMCPU.Host.fs], fs
+ mov [edx + CPUMCPU.Host.gs], gs
+ mov [edx + CPUMCPU.Host.ss], ss
+ ; special registers.
+ DEBUG32_S_CHAR('s')
+ DEBUG32_S_CHAR(';')
+ sldt [edx + CPUMCPU.Host.ldtr]
+ sidt [edx + CPUMCPU.Host.idtr]
+ sgdt [edx + CPUMCPU.Host.gdtr]
+ str [edx + CPUMCPU.Host.tr]
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
+%endif
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Block Local APIC NMI vectors
+ DEBUG32_S_CHAR('f')
+ DEBUG32_S_CHAR(';')
+ cmp byte [edx + CPUMCPU.fX2Apic], 1
+ je htg_x2apic
+
+ ; Legacy xAPIC mode. No write completion required when writing to the
+ ; LVT registers as we have mapped the APIC pages as non-cacheable and
+ ; the MMIO is CPU-local.
+ mov ebx, [edx + CPUMCPU.pvApicBase]
+ or ebx, ebx
+ jz htg_apic_done
+ mov eax, [ebx + APIC_REG_LVT_LINT0]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_LINT0], eax
+htg_nolint0:
+ mov eax, [ebx + APIC_REG_LVT_LINT1]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_LINT1], eax
+htg_nolint1:
+ mov eax, [ebx + APIC_REG_LVT_PC]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_PC], eax
+htg_nopc:
+ mov eax, [ebx + APIC_REG_VERSION]
+ shr eax, 16
+ cmp al, 5
+ jb htg_notherm
+ je htg_nocmci
+ mov eax, [ebx + APIC_REG_LVT_CMCI]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_CMCI], eax
+htg_nocmci:
+ mov eax, [ebx + APIC_REG_LVT_THMR]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_THMR], eax
+htg_notherm:
+ mov [edx + CPUMCPU.fApicDisVectors], edi
+ jmp htg_apic_done
+
+ ; x2APIC mode:
+htg_x2apic:
+ mov esi, edx ; Save edx.
+ xor edi, edi ; fApicDisVectors
+
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint0:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint1:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nopc:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+ rdmsr
+ shr eax, 16
+ cmp al, 5
+ jb htg_x2_notherm
+ je htg_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nocmci:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_notherm:
+ mov edx, esi ; Restore edx.
+ mov [edx + CPUMCPU.fApicDisVectors], edi
+
+htg_apic_done:
+%endif
+
+ ; control registers.
+ mov eax, cr0
+ mov [edx + CPUMCPU.Host.cr0], eax
+    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
+ mov eax, cr3
+ mov [edx + CPUMCPU.Host.cr3], eax
+ mov esi, cr4 ; esi = cr4, we'll modify it further down.
+ mov [edx + CPUMCPU.Host.cr4], esi
+
+ DEBUG32_S_CHAR('c')
+ DEBUG32_S_CHAR(';')
+
+ ; save the host EFER msr
+ mov ebx, edx
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ mov [ebx + CPUMCPU.Host.efer], eax
+ mov [ebx + CPUMCPU.Host.efer + 4], edx
+ mov edx, ebx
+ DEBUG32_S_CHAR('e')
+ DEBUG32_S_CHAR(';')
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
+%endif
+
+    ; Load new gdt so we can do a far jump after going into 64-bit mode.
+ ;DEBUG_CMOS_STACK32 16h
+ lgdt [edx + CPUMCPU.Hyper.gdtr]
+
+ DEBUG32_S_CHAR('g')
+ DEBUG32_S_CHAR('!')
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
+%endif
+
+ ;;
+ ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
+ ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
+ ;;
+ and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
+ | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
+ mov cr4, esi
+
+ ;;
+ ;; Load Intermediate memory context.
+ ;;
+ FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
+ mov eax, 0ffffffffh
+ mov cr3, eax
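+    ; Note: the 0ffffffffh immediate loaded above is only a placeholder. Each FIXUP
+    ; invocation records the operand's location in the fixup stream collected in the
+    ; .data section, and the relocation code presumably patches in the real
+    ; intermediate CR3 value when the switcher is installed.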
+ DEBUG32_CHAR('?')
+%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
+ DEBUG_CMOS_TRASH_AL 17h
+%endif
+
+ ;;
+ ;; Jump to identity mapped location
+ ;;
+ FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
+ jmp near NAME(IDEnterTarget)
+
+
+ ; We're now on identity mapped pages!
+ALIGNCODE(16)
+GLOBALNAME IDEnterTarget
+ DEBUG32_CHAR('1')
+ DEBUG_CMOS_TRASH_AL 19h
+
+ ; 1. Disable paging.
+ mov ebx, cr0
+ and ebx, ~X86_CR0_PG
+ mov cr0, ebx
+ DEBUG32_CHAR('2')
+ DEBUG_CMOS_TRASH_AL 1ah
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov eax, cr2
+ mov dword [eax], 3
+%endif
+
+ ; 2. Enable PAE.
+ mov ecx, cr4
+ or ecx, X86_CR4_PAE
+ mov cr4, ecx
+ DEBUG_CMOS_TRASH_AL 1bh
+
+ ; 3. Load long mode intermediate CR3.
+ FIXUP FIX_INTER_AMD64_CR3, 1
+ mov ecx, 0ffffffffh
+ mov cr3, ecx
+ DEBUG32_CHAR('3')
+ DEBUG_CMOS_TRASH_AL 1ch
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov eax, cr2
+ mov dword [eax], 4
+%endif
+
+ ; 4. Enable long mode.
+ mov esi, edx
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ FIXUP FIX_EFER_OR_MASK, 1
+ or eax, 0ffffffffh
+ and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
+ wrmsr
+ mov edx, esi
+ DEBUG32_CHAR('4')
+ DEBUG_CMOS_TRASH_AL 1dh
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov eax, cr2
+ mov dword [eax], 5
+%endif
+
+ ; 5. Enable paging.
+ or ebx, X86_CR0_PG
+ ; Disable ring 0 write protection too
+ and ebx, ~X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+ DEBUG32_CHAR('5')
+
+ ; Jump from compatibility mode to 64-bit mode.
+ FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
+ jmp 0ffffh:0fffffffeh
+
+ ;
+ ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
+BITS 64
+ALIGNCODE(16)
+NAME(IDEnter64Mode):
+ DEBUG64_CHAR('6')
+ DEBUG_CMOS_TRASH_AL 1eh
+ jmp [NAME(pICEnterTarget) wrt rip]
+
+; 64-bit jump target
+NAME(pICEnterTarget):
+FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
+dq 0ffffffffffffffffh
+
+; 64-bit pCpum address.
+NAME(pCpumIC):
+FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
+dq 0ffffffffffffffffh
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+NAME(pMarker):
+db 'Switch_marker'
+%endif
+
+ ;
+    ; When we arrive here, we're in 64-bit mode in the intermediate context.
+ ;
+ALIGNCODE(16)
+GLOBALNAME ICEnterTarget
+ ;DEBUG_CMOS_TRASH_AL 1fh
+ ; Load CPUM pointer into rdx
+ mov rdx, [NAME(pCpumIC) wrt rip]
+ CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+
+ mov rax, cs
+ mov ds, rax
+ mov es, rax
+
+ ; Invalidate fs & gs
+ mov rax, 0
+ mov fs, rax
+ mov gs, rax
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
+%endif
+
+ ; Setup stack.
+ DEBUG64_CHAR('7')
+ mov rsp, 0
+ mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
+ mov ss, ax
+ mov esp, [rdx + CPUMCPU.Hyper.esp]
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
+%endif
+
+%ifdef VBOX_WITH_64ON32_IDT
+ ; Set up emergency trap handlers.
+ lidt [rdx + CPUMCPU.Hyper.idtr]
+%endif
+
+ DEBUG64_S_CHAR('8')
+
+ ; Check if we need to restore the guest FPU state
+ mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
+ test esi, CPUM_SYNC_FPU_STATE
+ jz near htg_fpu_no
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
+%endif
+
+ mov rax, cr0
+ mov rcx, rax ; save old CR0
+ and rax, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, rax
+
+ mov eax, [rdx + CPUMCPU.Guest.fXStateMask]
+ mov ebx, [rdx + CPUMCPU.Guest.pXStateRC]
+ or eax, eax
+ jz htg_fpu_fxrstor
+ mov r9, rdx
+ mov edx, [rdx + CPUMCPU.Guest.fXStateMask + 4]
+ o64 xsave [rbx]
+ mov rdx, r9
+ jmp htg_fpu_done
+htg_fpu_fxrstor:
+ o64 fxrstor [rbx] ; (use explicit REX prefix, see @bugref{6398})
+htg_fpu_done:
+ mov cr0, rcx ; and restore old CR0 again
+
+ and esi, ~CPUM_SYNC_FPU_STATE
+ or esi, CPUM_USED_FPU_GUEST
+ mov [rdx + CPUMCPU.fUseFlags], esi
+
+htg_fpu_no:
+ ; Check if we need to restore the guest debug state
+ test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
+ jz htg_debug_done
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
+%endif
+ test esi, CPUM_SYNC_DEBUG_REGS_HYPER
+ jnz htg_debug_hyper
+
+ ; Guest values in DRx, letting the guest access them directly.
+ mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
+ mov dr0, rax
+ mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
+ mov dr1, rax
+ mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
+ mov dr2, rax
+ mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
+ mov dr3, rax
+ mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
+ mov dr6, rax ; not required for AMD-V
+
+ and esi, ~CPUM_SYNC_DEBUG_REGS_GUEST
+ or esi, CPUM_USED_DEBUG_REGS_GUEST
+ mov [rdx + CPUMCPU.fUseFlags], esi
+ jmp htg_debug_done
+
+htg_debug_hyper:
+ ; Combined values in DRx, intercepting all accesses.
+ mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
+ mov dr0, rax
+ mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
+ mov dr1, rax
+ mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
+ mov dr2, rax
+ mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
+ mov dr3, rax
+ mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
+ mov dr6, rax ; not required for AMD-V
+
+ and esi, ~CPUM_SYNC_DEBUG_REGS_HYPER
+ or esi, CPUM_USED_DEBUG_REGS_HYPER
+ mov [rdx + CPUMCPU.fUseFlags], esi
+
+htg_debug_done:
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
+%endif
+
+ ;
+ ; "Call" the specified helper function.
+ ;
+
+    ; Parameter for all helper functions (pCtx), in addition to rdx = pCPUM of course.
+ DEBUG64_CHAR('9')
+ lea rsi, [rdx + CPUMCPU.Guest]
+ lea rax, [htg_return wrt rip]
+ push rax ; return address
+
+ ; load the hypervisor function address
+ mov r9, [rdx + CPUMCPU.Hyper.eip]
+ cmp r9d, HM64ON32OP_VMXRCStartVM64
+ jz NAME(VMXRCStartVM64)
+ cmp r9d, HM64ON32OP_SVMRCVMRun64
+ jz NAME(SVMRCVMRun64)
+ cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
+ jz NAME(HMRCSaveGuestFPU64)
+ cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
+ jz NAME(HMRCSaveGuestDebug64)
+ cmp r9d, HM64ON32OP_HMRCTestSwitcher64
+ jz NAME(HMRCTestSwitcher64)
+ mov eax, VERR_HM_INVALID_HM64ON32OP
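+    ; No match: fall through to htg_return with the error status. The helpers
+    ; dispatched above also come back here, via the return address pushed earlier.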
+htg_return:
+ DEBUG64_CHAR('r')
+
+ ; Load CPUM pointer into rdx
+ mov rdx, [NAME(pCpumIC) wrt rip]
+ CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
+%endif
+
+ ; Save the return code
+ mov dword [rdx + CPUMCPU.u32RetCode], eax
+
+ ; now let's switch back
+ jmp NAME(vmmRCToHostAsm) ; rax = returncode.
+
+ENDPROC vmmR0ToRawModeAsm
+
+
+
+
+;
+;
+; HM code (used to be HMRCA.asm at one point).
+; HM code (used to be HMRCA.asm at one point).
+; HM code (used to be HMRCA.asm at one point).
+;
+;
+
+;; @def MYPUSHSEGS
+; Macro saving all segment registers on the stack.
+; @param 1 full width register name
+%macro MYPUSHSEGS 1
+ mov %1, es
+ push %1
+ mov %1, ds
+ push %1
+%endmacro
+
+;; @def MYPOPSEGS
+; Macro restoring all segment registers on the stack
+; @param 1 full width register name
+%macro MYPOPSEGS 1
+ pop %1
+ mov ds, %1
+ pop %1
+ mov es, %1
+%endmacro
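+
+; For reference, "MYPUSHSEGS rax" expands to:
+;       mov     rax, es
+;       push    rax
+;       mov     rax, ds
+;       push    rax
+; and "MYPOPSEGS rax" undoes it in reverse order (ds first, then es).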
+
+
+;/**
+; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
+; *
+; * @returns VBox status code
+; * @param HCPhysCpuPage VMXON physical address [rsp+8]
+; * @param HCPhysVmcs VMCS physical address [rsp+16]
+; * @param pCache VMCS cache [rsp+24]
+; * @param pVM The cross context VM structure. [rbp+28h]
+; * @param pVCpu The cross context virtual CPU structure. [rbp+30h]
+; * @param pCtx Guest context (rsi)
+; */
+BEGINPROC VMXRCStartVM64
+ push rbp
+ mov rbp, rsp
+ DEBUG_CMOS_STACK64 20h
+
+ ; Make sure VT-x instructions are allowed.
+ mov rax, cr4
+ or rax, X86_CR4_VMXE
+ mov cr4, rax
+
+ ; Enter VMX Root Mode.
+ vmxon [rbp + 8 + 8]
+ jnc .vmxon_success
+ mov rax, VERR_VMX_INVALID_VMXON_PTR
+ jmp .vmstart64_vmxon_failed
+
+.vmxon_success:
+ jnz .vmxon_success2
+ mov rax, VERR_VMX_VMXON_FAILED
+ jmp .vmstart64_vmxon_failed
+
+.vmxon_success2:
+ ; Activate the VMCS pointer
+ vmptrld [rbp + 16 + 8]
+ jnc .vmptrld_success
+ mov rax, VERR_VMX_INVALID_VMCS_PTR
+ jmp .vmstart64_vmxoff_end
+
+.vmptrld_success:
+ jnz .vmptrld_success2
+ mov rax, VERR_VMX_VMPTRLD_FAILED
+ jmp .vmstart64_vmxoff_end
+
+.vmptrld_success2:
+
+ ; Save the VMCS pointer on the stack
+ push qword [rbp + 16 + 8];
+
+ ; Save segment registers.
+ MYPUSHSEGS rax
+
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
+ mov rbx, [rbp + 24 + 8] ; pCache
+
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov qword [rbx + VMCSCACHE.uPos], 2
+ %endif
+
+ %ifdef DEBUG
+ mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
+ mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
+ mov rax, [rbp + 16 + 8] ; HCPhysVmcs
+ mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
+ mov [rbx + VMCSCACHE.TestIn.pCache], rbx
+ mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
+ %endif
+
+ mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
+ cmp ecx, 0
+ je .no_cached_writes
+ mov rdx, rcx
+ mov rcx, 0
+ jmp .cached_write
+
+ALIGN(16)
+.cached_write:
+ mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
+ vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
+ inc rcx
+ cmp rcx, rdx
+ jl .cached_write
+
+ mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
+.no_cached_writes:
+
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov qword [rbx + VMCSCACHE.uPos], 3
+ %endif
+ ; Save the pCache pointer.
+ push rbx
+%endif
+
+ ; Save the host state that's relevant in the temporary 64-bit mode.
+ mov rdx, cr0
+ mov eax, VMX_VMCS_HOST_CR0
+ vmwrite rax, rdx
+
+ mov rdx, cr3
+ mov eax, VMX_VMCS_HOST_CR3
+ vmwrite rax, rdx
+
+ mov rdx, cr4
+ mov eax, VMX_VMCS_HOST_CR4
+ vmwrite rax, rdx
+
+ mov rdx, cs
+ mov eax, VMX_VMCS_HOST_FIELD_CS
+ vmwrite rax, rdx
+
+ mov rdx, ss
+ mov eax, VMX_VMCS_HOST_FIELD_SS
+ vmwrite rax, rdx
+
+%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
+ sub rsp, 16
+ str [rsp]
+ movsx rdx, word [rsp]
+ mov eax, VMX_VMCS_HOST_FIELD_TR
+ vmwrite rax, rdx
+ add rsp, 16
+%endif
+
+ sub rsp, 16
+ sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
+ mov eax, VMX_VMCS_HOST_GDTR_BASE
+ vmwrite rax, [rsp + 6 + 2]
+ add rsp, 16
+
+%ifdef VBOX_WITH_64ON32_IDT
+ sub rsp, 16
+ sidt [rsp + 6]
+ mov eax, VMX_VMCS_HOST_IDTR_BASE
+ vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
+ add rsp, 16
+ ;call NAME(vmm64On32PrintIdtr)
+%endif
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov qword [rbx + VMCSCACHE.uPos], 4
+%endif
+
+ ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
+
+ ; First we have to save some final CPU context registers.
+ lea rdx, [.vmlaunch64_done wrt rip]
+ mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
+ vmwrite rax, rdx
+ ; Note: assumes success!
+
+ ; Manual save and restore:
+ ; - General purpose registers except RIP, RSP
+ ; - XCR0
+ ;
+ ; Trashed:
+ ; - CR2 (we don't care)
+ ; - LDTR (reset to 0)
+ ; - DRx (presumably not changed at all)
+ ; - DR7 (reset to 0x400)
+ ; - EFLAGS (reset to RT_BIT(1); not relevant)
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov qword [rbx + VMCSCACHE.uPos], 5
+%endif
+
+ ;
+ ; Save the host XCR0 and load the guest one if necessary.
+ ; Note! Trashes rdx and rcx.
+ ;
+ mov rax, [rbp + 30h] ; pVCpu
+ test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+ jz .xcr0_before_skip
+
+ xor ecx, ecx
+ xgetbv ; Save the host one on the stack.
+ push rdx
+ push rax
+
+ mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
+ mov edx, [xSI + CPUMCTX.aXcr + 4]
+ xor ecx, ecx ; paranoia
+ xsetbv
+
+ push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+ jmp .xcr0_before_done
+
+.xcr0_before_skip:
+ push 3fh ; indicate that we need not.
+.xcr0_before_done:
+
+ ; Save the pCtx pointer
+ push rsi
+
+ ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
+ mov rbx, qword [rsi + CPUMCTX.cr2]
+ mov rdx, cr2
+ cmp rdx, rbx
+ je .skipcr2write64
+ mov cr2, rbx
+
+.skipcr2write64:
+ mov eax, VMX_VMCS_HOST_RSP
+ vmwrite rax, rsp
+ ; Note: assumes success!
+ ; Don't mess with ESP anymore!!!
+
+ ; Save Guest's general purpose registers.
+ mov rax, qword [rsi + CPUMCTX.eax]
+ mov rbx, qword [rsi + CPUMCTX.ebx]
+ mov rcx, qword [rsi + CPUMCTX.ecx]
+ mov rdx, qword [rsi + CPUMCTX.edx]
+ mov rbp, qword [rsi + CPUMCTX.ebp]
+ mov r8, qword [rsi + CPUMCTX.r8]
+ mov r9, qword [rsi + CPUMCTX.r9]
+ mov r10, qword [rsi + CPUMCTX.r10]
+ mov r11, qword [rsi + CPUMCTX.r11]
+ mov r12, qword [rsi + CPUMCTX.r12]
+ mov r13, qword [rsi + CPUMCTX.r13]
+ mov r14, qword [rsi + CPUMCTX.r14]
+ mov r15, qword [rsi + CPUMCTX.r15]
+
+ ; Save rdi & rsi.
+ mov rdi, qword [rsi + CPUMCTX.edi]
+ mov rsi, qword [rsi + CPUMCTX.esi]
+
+ vmlaunch
+    jmp .vmlaunch64_done        ; Here if vmlaunch detected a failure.
+
+ALIGNCODE(16)
+.vmlaunch64_done:
+%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
+ push rdx
+ mov rdx, [rsp + 8] ; pCtx
+ lidt [rdx + CPUMCPU.Hyper.idtr]
+ pop rdx
+%endif
+ jc near .vmstart64_invalid_vmcs_ptr
+ jz near .vmstart64_start_failed
+
+ push rdi
+ mov rdi, [rsp + 8] ; pCtx
+
+ mov qword [rdi + CPUMCTX.eax], rax
+ mov qword [rdi + CPUMCTX.ebx], rbx
+ mov qword [rdi + CPUMCTX.ecx], rcx
+ mov qword [rdi + CPUMCTX.edx], rdx
+ mov qword [rdi + CPUMCTX.esi], rsi
+ mov qword [rdi + CPUMCTX.ebp], rbp
+ mov qword [rdi + CPUMCTX.r8], r8
+ mov qword [rdi + CPUMCTX.r9], r9
+ mov qword [rdi + CPUMCTX.r10], r10
+ mov qword [rdi + CPUMCTX.r11], r11
+ mov qword [rdi + CPUMCTX.r12], r12
+ mov qword [rdi + CPUMCTX.r13], r13
+ mov qword [rdi + CPUMCTX.r14], r14
+ mov qword [rdi + CPUMCTX.r15], r15
+ mov rax, cr2
+ mov qword [rdi + CPUMCTX.cr2], rax
+
+ pop rax ; The guest edi we pushed above
+ mov qword [rdi + CPUMCTX.edi], rax
+
+ pop rsi ; pCtx (needed in rsi by the macros below)
+
+ ; Restore the host xcr0 if necessary.
+ pop rcx
+ test ecx, ecx
+ jnz .xcr0_after_skip
+ pop rax
+ pop rdx
+ xsetbv ; ecx is already zero.
+.xcr0_after_skip:
+
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ pop rdi ; Saved pCache
+
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 7
+ %endif
+ %ifdef DEBUG
+ mov [rdi + VMCSCACHE.TestOut.pCache], rdi
+ mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
+ mov rax, cr8
+ mov [rdi + VMCSCACHE.TestOut.cr8], rax
+ %endif
+
+ mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
+ cmp ecx, 0 ; Can't happen
+ je .no_cached_reads
+ jmp .cached_read
+
+ALIGN(16)
+.cached_read:
+ dec rcx
+ mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
+ vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
+ cmp rcx, 0
+ jnz .cached_read
+.no_cached_reads:
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 8
+ %endif
+%endif
+
+ ; Restore segment registers.
+ MYPOPSEGS rax
+
+ mov eax, VINF_SUCCESS
+
+%ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 9
+%endif
+.vmstart64_end:
+
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ %ifdef DEBUG
+ mov rdx, [rsp] ; HCPhysVmcs
+ mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
+ %endif
+%endif
+
+ ; Write back the data and disable the VMCS.
+ vmclear qword [rsp] ; Pushed pVMCS
+ add rsp, 8
+
+.vmstart64_vmxoff_end:
+ ; Disable VMX root mode.
+ vmxoff
+.vmstart64_vmxon_failed:
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ %ifdef DEBUG
+ cmp eax, VINF_SUCCESS
+ jne .skip_flags_save
+
+ pushf
+ pop rdx
+ mov [rdi + VMCSCACHE.TestOut.eflags], rdx
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 12
+ %endif
+.skip_flags_save:
+ %endif
+%endif
+ pop rbp
+ ret
+
+
+.vmstart64_invalid_vmcs_ptr:
+ pop rsi ; pCtx (needed in rsi by the macros below)
+
+ ; Restore the host xcr0 if necessary.
+ pop rcx
+ test ecx, ecx
+ jnz .xcr0_after_skip2
+ pop rax
+ pop rdx
+ xsetbv ; ecx is already zero.
+.xcr0_after_skip2:
+
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ pop rdi ; pCache
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 10
+ %endif
+
+ %ifdef DEBUG
+ mov [rdi + VMCSCACHE.TestOut.pCache], rdi
+ mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
+ %endif
+%endif
+
+ ; Restore segment registers.
+ MYPOPSEGS rax
+
+ ; Restore all general purpose host registers.
+ mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
+ jmp .vmstart64_end
+
+.vmstart64_start_failed:
+ pop rsi ; pCtx (needed in rsi by the macros below)
+
+ ; Restore the host xcr0 if necessary.
+ pop rcx
+ test ecx, ecx
+ jnz .xcr0_after_skip3
+ pop rax
+ pop rdx
+ xsetbv ; ecx is already zero.
+.xcr0_after_skip3:
+
+%ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ pop rdi ; pCache
+
+ %ifdef DEBUG
+ mov [rdi + VMCSCACHE.TestOut.pCache], rdi
+ mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
+ %endif
+ %ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ mov dword [rdi + VMCSCACHE.uPos], 11
+ %endif
+%endif
+
+ ; Restore segment registers.
+ MYPOPSEGS rax
+
+ ; Restore all general purpose host registers.
+ mov eax, VERR_VMX_UNABLE_TO_START_VM
+ jmp .vmstart64_end
+ENDPROC VMXRCStartVM64
+
+
+;;
+; Prepares for and executes VMRUN (64-bit guests)
+;
+; @returns VBox status code
+; @param    pVMCBHostPhys  Physical address of host VMCB      [rbp+10h]
+; @param    pVMCBPhys      Physical address of guest VMCB     [rbp+18h]
+; @param pVM The cross context VM structure. [rbp+20h]
+; @param pVCpu The cross context virtual CPU structure. [rbp+28h]
+; @param pCtx Guest context [rsi]
+;
+BEGINPROC SVMRCVMRun64
+ push rbp
+ mov rbp, rsp
+ pushf
+ DEBUG_CMOS_STACK64 30h
+
+ ; Manual save and restore:
+ ; - General purpose registers except RIP, RSP, RAX
+ ;
+ ; Trashed:
+ ; - CR2 (we don't care)
+ ; - LDTR (reset to 0)
+ ; - DRx (presumably not changed at all)
+ ; - DR7 (reset to 0x400)
+
+ ;
+ ; Save the host XCR0 and load the guest one if necessary.
+ ;
+ mov rax, [rbp + 28h] ; pVCpu
+ test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+ jz .xcr0_before_skip
+
+ xor ecx, ecx
+ xgetbv ; Save the host one on the stack.
+ push rdx
+ push rax
+
+ mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
+ mov edx, [xSI + CPUMCTX.aXcr + 4]
+ xor ecx, ecx ; paranoia
+ xsetbv
+
+ push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+ jmp .xcr0_before_done
+
+.xcr0_before_skip:
+ push 3fh ; indicate that we need not.
+.xcr0_before_done:
+
+ ; Save the Guest CPU context pointer.
+ push rsi ; Push for saving the state at the end
+
+ ; Save host fs, gs, sysenter msr etc
+    mov rax, [rbp + 8 + 8]             ; pVMCBHostPhys (64-bit physical address)
+ push rax ; Save for the vmload after vmrun
+ vmsave
+
+ ; Setup eax for VMLOAD
+    mov rax, [rbp + 8 + 8 + RTHCPHYS_CB]   ; pVMCBPhys (64-bit physical address)
+
+ ; Restore Guest's general purpose registers.
+ ; rax is loaded from the VMCB by VMRUN.
+ mov rbx, qword [rsi + CPUMCTX.ebx]
+ mov rcx, qword [rsi + CPUMCTX.ecx]
+ mov rdx, qword [rsi + CPUMCTX.edx]
+ mov rdi, qword [rsi + CPUMCTX.edi]
+ mov rbp, qword [rsi + CPUMCTX.ebp]
+ mov r8, qword [rsi + CPUMCTX.r8]
+ mov r9, qword [rsi + CPUMCTX.r9]
+ mov r10, qword [rsi + CPUMCTX.r10]
+ mov r11, qword [rsi + CPUMCTX.r11]
+ mov r12, qword [rsi + CPUMCTX.r12]
+ mov r13, qword [rsi + CPUMCTX.r13]
+ mov r14, qword [rsi + CPUMCTX.r14]
+ mov r15, qword [rsi + CPUMCTX.r15]
+ mov rsi, qword [rsi + CPUMCTX.esi]
+
+ ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
+ clgi
+ sti
+
+ ; Load guest fs, gs, sysenter msr etc
+ vmload
+ ; Run the VM
+ vmrun
+
+ ; rax is in the VMCB already; we can use it here.
+
+ ; Save guest fs, gs, sysenter msr etc.
+ vmsave
+
+ ; Load host fs, gs, sysenter msr etc.
+ pop rax ; Pushed above
+ vmload
+
+ ; Set the global interrupt flag again, but execute cli to make sure IF=0.
+ cli
+ stgi
+
+ pop rax ; pCtx
+
+ mov qword [rax + CPUMCTX.ebx], rbx
+ mov qword [rax + CPUMCTX.ecx], rcx
+ mov qword [rax + CPUMCTX.edx], rdx
+ mov qword [rax + CPUMCTX.esi], rsi
+ mov qword [rax + CPUMCTX.edi], rdi
+ mov qword [rax + CPUMCTX.ebp], rbp
+ mov qword [rax + CPUMCTX.r8], r8
+ mov qword [rax + CPUMCTX.r9], r9
+ mov qword [rax + CPUMCTX.r10], r10
+ mov qword [rax + CPUMCTX.r11], r11
+ mov qword [rax + CPUMCTX.r12], r12
+ mov qword [rax + CPUMCTX.r13], r13
+ mov qword [rax + CPUMCTX.r14], r14
+ mov qword [rax + CPUMCTX.r15], r15
+
+ ;
+ ; Restore the host xcr0 if necessary.
+ ;
+ pop rcx
+ test ecx, ecx
+ jnz .xcr0_after_skip
+ pop rax
+ pop rdx
+ xsetbv ; ecx is already zero.
+.xcr0_after_skip:
+
+ mov eax, VINF_SUCCESS
+
+ popf
+ pop rbp
+ ret
+ENDPROC SVMRCVMRun64
+
+;/**
+; * Saves the guest FPU context
+; *
+; * @returns VBox status code
+; * @param pCtx Guest context [rsi]
+; * @param pCPUM Pointer to CPUMCPU [rdx]
+; */
+BEGINPROC HMRCSaveGuestFPU64
+ DEBUG_CMOS_STACK64 40h
+ mov rax, cr0
+ mov rcx, rax ; save old CR0
+ and rax, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, rax
+
+ mov eax, [rsi + CPUMCTX.fXStateMask]
+ mov ebx, [rsi + CPUMCTX.pXStateRC]
+ test eax, eax
+ jz .use_fxsave
+ mov edx, [rsi + CPUMCTX.fXStateMask + 4]
+ o64 xsave [rbx]
+ jmp .done
+
+.use_fxsave:
+ o64 fxsave [rbx] ; (use explicit REX prefix, see @bugref{6398})
+
+.done:
+ mov cr0, rcx ; and restore old CR0 again
+
+ and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
+
+ mov eax, VINF_SUCCESS
+ ret
+ENDPROC HMRCSaveGuestFPU64
+
+;/**
+; * Saves the guest debug context (DR0-3, DR6)
+; *
+; * @returns VBox status code
+; * @param pCtx Guest context [rsi]
+; */
+BEGINPROC HMRCSaveGuestDebug64
+ DEBUG_CMOS_STACK64 41h
+ mov rax, dr0
+ mov qword [rsi + CPUMCTX.dr + 0*8], rax
+ mov rax, dr1
+ mov qword [rsi + CPUMCTX.dr + 1*8], rax
+ mov rax, dr2
+ mov qword [rsi + CPUMCTX.dr + 2*8], rax
+ mov rax, dr3
+ mov qword [rsi + CPUMCTX.dr + 3*8], rax
+ mov rax, dr6
+ mov qword [rsi + CPUMCTX.dr + 6*8], rax
+ mov eax, VINF_SUCCESS
+ ret
+ENDPROC HMRCSaveGuestDebug64
+
+;/**
+; * Dummy callback handler
+; *
+; * @returns VBox status code
+; * @param param1 Parameter 1 [rsp+8]
+; * @param param2 Parameter 2 [rsp+12]
+; * @param param3 Parameter 3 [rsp+16]
+; * @param param4 Parameter 4 [rsp+20]
+; * @param param5 Parameter 5 [rsp+24]
+; * @param pCtx Guest context [rsi]
+; */
+BEGINPROC HMRCTestSwitcher64
+ DEBUG_CMOS_STACK64 42h
+ mov eax, [rsp+8]
+ ret
+ENDPROC HMRCTestSwitcher64
+
+
+%ifdef VBOX_WITH_64ON32_IDT
+;
+; Trap handling.
+;
+
+;; Here follows an array of trap handler entry points, each 8 bytes in size.
+BEGINPROC vmm64On32TrapHandlers
+%macro vmm64On32TrapEntry 1
+GLOBALNAME vmm64On32Trap %+ i
+    db 06ah, i                  ; push imm8 - note that this is a sign-extended value.
+ jmp NAME(%1)
+ ALIGNCODE(8)
+%assign i i+1
+%endmacro
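+; For reference, with i = 3 the macro above emits the equivalent of:
+;   GLOBALNAME vmm64On32Trap3
+;       push    byte 3              ; encoded as db 06ah, 3
+;       jmp     NAME(vmm64On32Trap)
+;       ALIGNCODE(8)
+; i.e. every entry pushes its vector number and tail-jumps to the shared handler,
+; with entries padded to 8 bytes each.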
+%assign i 0 ; start counter.
+ vmm64On32TrapEntry vmm64On32Trap ; 0
+ vmm64On32TrapEntry vmm64On32Trap ; 1
+ vmm64On32TrapEntry vmm64On32Trap ; 2
+ vmm64On32TrapEntry vmm64On32Trap ; 3
+ vmm64On32TrapEntry vmm64On32Trap ; 4
+ vmm64On32TrapEntry vmm64On32Trap ; 5
+ vmm64On32TrapEntry vmm64On32Trap ; 6
+ vmm64On32TrapEntry vmm64On32Trap ; 7
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
+ vmm64On32TrapEntry vmm64On32Trap ; 9
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; a
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; b
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; c
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; d
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; e
+ vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
+ vmm64On32TrapEntry vmm64On32Trap ; 10
+ vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
+ vmm64On32TrapEntry vmm64On32Trap ; 12
+ vmm64On32TrapEntry vmm64On32Trap ; 13
+%rep (0x100 - 0x14)
+ vmm64On32TrapEntry vmm64On32Trap
+%endrep
+ENDPROC vmm64On32TrapHandlers
+
+;; Fake an error code and jump to the real thing.
+BEGINPROC vmm64On32Trap
+ push qword [rsp]
+ jmp NAME(vmm64On32TrapErrCode)
+ENDPROC vmm64On32Trap
+
+
+;;
+; Trap frame:
+; [rbp + 38h] = ss
+; [rbp + 30h] = rsp
+; [rbp + 28h] = eflags
+; [rbp + 20h] = cs
+; [rbp + 18h] = rip
+; [rbp + 10h] = error code (or trap number)
+; [rbp + 08h] = trap number
+; [rbp + 00h] = rbp
+; [rbp - 08h] = rax
+; [rbp - 10h] = rbx
+; [rbp - 18h] = ds
+;
+BEGINPROC vmm64On32TrapErrCode
+ push rbp
+ mov rbp, rsp
+ push rax
+ push rbx
+ mov ax, ds
+ push rax
+ sub rsp, 20h
+
+ mov ax, cs
+ mov ds, ax
+
+%if 1
+ COM64_S_NEWLINE
+ COM64_S_CHAR '!'
+ COM64_S_CHAR 't'
+ COM64_S_CHAR 'r'
+ COM64_S_CHAR 'a'
+ COM64_S_CHAR 'p'
+ movzx eax, byte [rbp + 08h]
+ COM64_S_DWORD_REG eax
+ COM64_S_CHAR '!'
+%endif
+
+%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
+ sidt [rsp]
+ movsx eax, word [rsp]
+ shr eax, 12 ; div by 16 * 256 (0x1000).
+%else
+ ; hardcoded VCPU(0) for now...
+ mov rbx, [NAME(pCpumIC) wrt rip]
+ mov eax, [rbx + CPUM.offCPUMCPU0]
+%endif
+ push rax ; Save the offset for rbp later.
+
+ add rbx, rax ; rbx = CPUMCPU
+
+ ;
+ ; Deal with recursive traps due to vmxoff (lazy bird).
+ ;
+ lea rax, [.vmxoff_trap_location wrt rip]
+ cmp rax, [rbp + 18h]
+ je .not_vmx_root
+
+ ;
+ ; Save the context.
+ ;
+ mov rax, [rbp - 8]
+ mov [rbx + CPUMCPU.Hyper.eax], rax
+ mov [rbx + CPUMCPU.Hyper.ecx], rcx
+ mov [rbx + CPUMCPU.Hyper.edx], rdx
+ mov rax, [rbp - 10h]
+ mov [rbx + CPUMCPU.Hyper.ebx], rax
+ mov rax, [rbp]
+ mov [rbx + CPUMCPU.Hyper.ebp], rax
+ mov rax, [rbp + 30h]
+ mov [rbx + CPUMCPU.Hyper.esp], rax
+ mov [rbx + CPUMCPU.Hyper.edi], rdi
+ mov [rbx + CPUMCPU.Hyper.esi], rsi
+ mov [rbx + CPUMCPU.Hyper.r8], r8
+ mov [rbx + CPUMCPU.Hyper.r9], r9
+ mov [rbx + CPUMCPU.Hyper.r10], r10
+ mov [rbx + CPUMCPU.Hyper.r11], r11
+ mov [rbx + CPUMCPU.Hyper.r12], r12
+ mov [rbx + CPUMCPU.Hyper.r13], r13
+ mov [rbx + CPUMCPU.Hyper.r14], r14
+ mov [rbx + CPUMCPU.Hyper.r15], r15
+
+ mov rax, [rbp + 18h]
+ mov [rbx + CPUMCPU.Hyper.eip], rax
+ movzx ax, [rbp + 20h]
+ mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
+ mov ax, [rbp + 38h]
+ mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
+ mov ax, [rbp - 18h]
+ mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
+
+ mov rax, [rbp + 28h]
+ mov [rbx + CPUMCPU.Hyper.eflags], rax
+
+ mov rax, cr2
+ mov [rbx + CPUMCPU.Hyper.cr2], rax
+
+ mov rax, [rbp + 10h]
+ mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
+ movzx eax, byte [rbp + 08h]
+ mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
+
+ ;
+ ; Finally, leave VMX root operation before trying to return to the host.
+ ;
+ mov rax, cr4
+ test rax, X86_CR4_VMXE
+ jz .not_vmx_root
+.vmxoff_trap_location:
+ vmxoff
+.not_vmx_root:
+
+ ;
+ ; Go back to the host.
+ ;
+ pop rbp
+ mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
+ jmp NAME(vmmRCToHostAsm)
+ENDPROC vmm64On32TrapErrCode
+
+;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
+ALIGNCODE(16)
+GLOBALNAME vmm64On32Idt
+%assign i 0
+%rep 256
+ dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
+ dq 0
+%assign i (i + 1)
+%endrep
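+; Each of the 256 entries above reserves the 16 bytes of a 64-bit IDT gate; only the
+; relative handler offset is stored here, and the remaining gate fields are presumably
+; filled in when the switcher is relocated (the offGCCode hack in VMMSWITCHERDEF below
+; is what lets the relocation code find this table).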
+
+
+ %if 0
+;; For debugging purposes.
+BEGINPROC vmm64On32PrintIdtr
+ push rax
+ push rsi ; paranoia
+ push rdi ; ditto
+ sub rsp, 16
+
+ COM64_S_CHAR ';'
+ COM64_S_CHAR 'i'
+ COM64_S_CHAR 'd'
+ COM64_S_CHAR 't'
+ COM64_S_CHAR 'r'
+ COM64_S_CHAR '='
+ sidt [rsp + 6]
+ mov eax, [rsp + 8 + 4]
+ COM64_S_DWORD_REG eax
+ mov eax, [rsp + 8]
+ COM64_S_DWORD_REG eax
+ COM64_S_CHAR ':'
+ movzx eax, word [rsp + 6]
+ COM64_S_DWORD_REG eax
+ COM64_S_CHAR '!'
+
+ add rsp, 16
+ pop rdi
+ pop rsi
+ pop rax
+ ret
+ENDPROC vmm64On32PrintIdtr
+ %endif
+
+ %if 1
+;; For debugging purposes.
+BEGINPROC vmm64On32DumpCmos
+ push rax
+ push rdx
+ push rcx
+ push rsi ; paranoia
+ push rdi ; ditto
+ sub rsp, 16
+
+%if 0
+ mov al, 3
+ out 72h, al
+ mov al, 68h
+ out 73h, al
+%endif
+
+ COM64_S_NEWLINE
+ COM64_S_CHAR 'c'
+ COM64_S_CHAR 'm'
+ COM64_S_CHAR 'o'
+ COM64_S_CHAR 's'
+ COM64_S_CHAR '0'
+ COM64_S_CHAR ':'
+
+ xor ecx, ecx
+.loop1:
+ mov al, cl
+ out 70h, al
+ in al, 71h
+ COM64_S_BYTE_REG eax
+ COM64_S_CHAR ' '
+ inc ecx
+ cmp ecx, 128
+ jb .loop1
+
+ COM64_S_NEWLINE
+ COM64_S_CHAR 'c'
+ COM64_S_CHAR 'm'
+ COM64_S_CHAR 'o'
+ COM64_S_CHAR 's'
+ COM64_S_CHAR '1'
+ COM64_S_CHAR ':'
+ xor ecx, ecx
+.loop2:
+ mov al, cl
+ out 72h, al
+ in al, 73h
+ COM64_S_BYTE_REG eax
+ COM64_S_CHAR ' '
+ inc ecx
+ cmp ecx, 128
+ jb .loop2
+
+%if 0
+ COM64_S_NEWLINE
+ COM64_S_CHAR 'c'
+ COM64_S_CHAR 'm'
+ COM64_S_CHAR 'o'
+ COM64_S_CHAR 's'
+ COM64_S_CHAR '2'
+ COM64_S_CHAR ':'
+ xor ecx, ecx
+.loop3:
+ mov al, cl
+ out 74h, al
+ in al, 75h
+ COM64_S_BYTE_REG eax
+ COM64_S_CHAR ' '
+ inc ecx
+ cmp ecx, 128
+ jb .loop3
+
+ COM64_S_NEWLINE
+ COM64_S_CHAR 'c'
+ COM64_S_CHAR 'm'
+ COM64_S_CHAR 'o'
+ COM64_S_CHAR 's'
+ COM64_S_CHAR '3'
+ COM64_S_CHAR ':'
+ xor ecx, ecx
+.loop4:
+ mov al, cl
+ out 72h, al
+ in al, 73h
+ COM64_S_BYTE_REG eax
+ COM64_S_CHAR ' '
+ inc ecx
+ cmp ecx, 128
+ jb .loop4
+
+ COM64_S_NEWLINE
+%endif
+
+ add rsp, 16
+ pop rdi
+ pop rsi
+ pop rcx
+ pop rdx
+ pop rax
+ ret
+ENDPROC vmm64On32DumpCmos
+ %endif
+
+%endif ; VBOX_WITH_64ON32_IDT
+
+
+
+;
+;
+; Back to switcher code.
+; Back to switcher code.
+; Back to switcher code.
+;
+;
+
+
+
+;;
+; Trampoline for doing a call when starting hypervisor execution.
+;
+; Push any arguments to the routine.
+; Push the argument frame size (cArg * 4).
+; Push the call target (_cdecl convention).
+; Push the address of this routine.
+;
+;
+BITS 64
+ALIGNCODE(16)
+BEGINPROC vmmRCCallTrampoline
+%ifdef DEBUG_STUFF
+ COM64_S_CHAR 'c'
+ COM64_S_CHAR 't'
+ COM64_S_CHAR '!'
+%endif
+ int3
+ENDPROC vmmRCCallTrampoline
+
+
+;;
+; The C interface.
+;
+BITS 64
+ALIGNCODE(16)
+BEGINPROC vmmRCToHost
+%ifdef DEBUG_STUFF
+ push rsi
+ COM_NEWLINE
+ COM_CHAR 'b'
+ COM_CHAR 'a'
+ COM_CHAR 'c'
+ COM_CHAR 'k'
+ COM_CHAR '!'
+ COM_NEWLINE
+ pop rsi
+%endif
+ int3
+ENDPROC vmmRCToHost
+
+;;
+; vmmRCToHostAsm
+;
+; This is an alternative entry point which we'll be using
+; when we have saved the guest state already or we haven't
+; been messing with the guest at all.
+;
+; @param rbp The virtual cpu number.
+; @param
+;
+BITS 64
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsm
+NAME(vmmRCToHostAsmNoReturn):
+ ;; We're still in the intermediate memory context!
+
+ ;;
+ ;; Switch to compatibility mode, placing ourselves in identity mapped code.
+ ;;
+ jmp far [NAME(fpIDEnterTarget) wrt rip]
+
+; 16:32 Pointer to IDExitTarget.
+NAME(fpIDEnterTarget):
+ FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
+dd 0
+ FIXUP FIX_HYPER_CS, 0
+dd 0
+
+ ; We're now on identity mapped pages!
+ALIGNCODE(16)
+GLOBALNAME IDExitTarget
+BITS 32
+ DEBUG32_CHAR('1')
+
+ ; 1. Deactivate long mode by turning off paging.
+ mov ebx, cr0
+ and ebx, ~X86_CR0_PG
+ mov cr0, ebx
+ DEBUG32_CHAR('2')
+
+ ; 2. Load intermediate page table.
+ FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
+ mov edx, 0ffffffffh
+ mov cr3, edx
+ DEBUG32_CHAR('3')
+
+ ; 3. Disable long mode.
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ DEBUG32_CHAR('5')
+ and eax, ~(MSR_K6_EFER_LME)
+ wrmsr
+ DEBUG32_CHAR('6')
+
+%ifndef NEED_PAE_ON_HOST
+ ; 3b. Disable PAE.
+ mov eax, cr4
+ and eax, ~X86_CR4_PAE
+ mov cr4, eax
+ DEBUG32_CHAR('7')
+%endif
+
+ ; 4. Enable paging.
+ or ebx, X86_CR0_PG
+ mov cr0, ebx
+ jmp short just_a_jump
+just_a_jump:
+ DEBUG32_CHAR('8')
+
+ ;;
+    ;; 5. Jump to the host context mapping of the switcher code.
+ ;;
+ FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
+ jmp near NAME(ICExitTarget)
+
+ ;;
+ ;; When we arrive at this label we're at the host mapping of the
+ ;; switcher code, but with intermediate page tables.
+ ;;
+BITS 32
+ALIGNCODE(16)
+GLOBALNAME ICExitTarget
+ DEBUG32_CHAR('9')
+ ;DEBUG_CMOS_TRASH_AL 70h
+
+ ; load the hypervisor data selector into ds & es
+ FIXUP FIX_HYPER_DS, 1
+ mov eax, 0ffffh
+ mov ds, eax
+ mov es, eax
+ DEBUG32_CHAR('a')
+
+ FIXUP FIX_GC_CPUM_OFF, 1, 0
+ mov edx, 0ffffffffh
+ CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+
+ DEBUG32_CHAR('b')
+ mov esi, [edx + CPUMCPU.Host.cr3]
+ mov cr3, esi
+ DEBUG32_CHAR('c')
+
+ ;; now we're in host memory context, let's restore regs
+ FIXUP FIX_HC_CPUM_OFF, 1, 0
+ mov edx, 0ffffffffh
+ CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+ DEBUG32_CHAR('e')
+
+ ; restore the host EFER
+ mov ebx, edx
+ mov ecx, MSR_K6_EFER
+ mov eax, [ebx + CPUMCPU.Host.efer]
+ mov edx, [ebx + CPUMCPU.Host.efer + 4]
+ DEBUG32_CHAR('f')
+ wrmsr
+ mov edx, ebx
+ DEBUG32_CHAR('g')
+
+ ; activate host gdt and idt
+ lgdt [edx + CPUMCPU.Host.gdtr]
+ DEBUG32_CHAR('0')
+ lidt [edx + CPUMCPU.Host.idtr]
+ DEBUG32_CHAR('1')
+
+ ; Restore TSS selector; must mark it as not busy before using ltr (!)
+ ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
+ movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
+ and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
+ add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
+ and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
+ ltr word [edx + CPUMCPU.Host.tr]
+
+ ; activate ldt
+ DEBUG32_CHAR('2')
+ lldt [edx + CPUMCPU.Host.ldtr]
+
+ ; Restore segment registers
+ mov eax, [edx + CPUMCPU.Host.ds]
+ mov ds, eax
+ mov eax, [edx + CPUMCPU.Host.es]
+ mov es, eax
+ mov eax, [edx + CPUMCPU.Host.fs]
+ mov fs, eax
+ mov eax, [edx + CPUMCPU.Host.gs]
+ mov gs, eax
+ ; restore stack
+ lss esp, [edx + CPUMCPU.Host.esp]
+
+ ; Control registers.
+ mov ecx, [edx + CPUMCPU.Host.cr4]
+ mov cr4, ecx
+ mov ecx, [edx + CPUMCPU.Host.cr0]
+ mov cr0, ecx
+    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
+ ;mov cr2, ecx
+
+ ; restore general registers.
+ mov edi, [edx + CPUMCPU.Host.edi]
+ mov esi, [edx + CPUMCPU.Host.esi]
+ mov ebx, [edx + CPUMCPU.Host.ebx]
+ mov ebp, [edx + CPUMCPU.Host.ebp]
+
+ ; store the return code in eax
+ DEBUG_CMOS_TRASH_AL 79h
+ mov eax, [edx + CPUMCPU.u32RetCode]
+ retf
+ENDPROC vmmRCToHostAsm
+
+
+GLOBALNAME End
+;
+; The description string (in the text section).
+;
+NAME(Description):
+ db SWITCHER_DESCRIPTION
+ db 0
+
+extern NAME(Relocate)
+
+;
+; End the fixup records.
+;
+BEGINDATA
+ db FIX_THE_END ; final entry.
+GLOBALNAME FixupsEnd
+
+;;
+; The switcher definition structure.
+ALIGNDATA(16)
+GLOBALNAME Def
+ istruc VMMSWITCHERDEF
+ at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
+ at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
+ at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
+ at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
+ at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
+ at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
+ at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
+ at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
+ ; disasm help
+ at VMMSWITCHERDEF.offHCCode0, dd 0
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
+ at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
+ at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
+ at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
+%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
+ at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
+%else
+ at VMMSWITCHERDEF.offGCCode, dd 0
+%endif
+ at VMMSWITCHERDEF.cbGCCode, dd 0
+
+ iend
+
diff --git a/src/VBox/VMM/VMMSwitcher/Makefile.kup b/src/VBox/VMM/VMMSwitcher/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/Makefile.kup
diff --git a/src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm b/src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm
new file mode 100644
index 00000000..4b5e68b0
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm
@@ -0,0 +1,33 @@
+; $Id: PAETo32Bit.asm $
+;; @file
+; VMM - World Switchers, PAE to 32-Bit
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_PAE_TO_32BIT
+%define SWITCHER_DESCRIPTION "PAE to/from 32-Bit"
+%define NAME_OVERLOAD(name) vmmR3SwitcherPAETo32Bit_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_PAE_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_32BIT_CR3
+%define NEED_32BIT_ON_PAE_HOST 1
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/PAEand32Bit.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm b/src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm
new file mode 100644
index 00000000..e9c4b715
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm
@@ -0,0 +1,32 @@
+; $Id: PAEToAMD64.asm $
+;; @file
+; VMM - World Switchers, PAE to AMD64
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_PAE_TO_AMD64
+%define SWITCHER_DESCRIPTION "PAE to/from AMD64 intermediate context"
+%define NAME_OVERLOAD(name) vmmR3SwitcherPAEToAMD64_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_PAE_CR3
+%define NEED_PAE_ON_HOST 1
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/LegacyandAMD64.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/PAEToPAE.asm b/src/VBox/VMM/VMMSwitcher/PAEToPAE.asm
new file mode 100644
index 00000000..7a375cb3
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/PAEToPAE.asm
@@ -0,0 +1,32 @@
+; $Id: PAEToPAE.asm $
+;; @file
+; VMM - World Switchers, PAE to PAE
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define SWITCHER_TYPE VMMSWITCHER_PAE_TO_PAE
+%define SWITCHER_DESCRIPTION "PAE to/from PAE"
+%define NAME_OVERLOAD(name) vmmR3SwitcherPAEToPAE_ %+ name
+%define SWITCHER_FIX_INTER_CR3_HC FIX_INTER_PAE_CR3
+%define SWITCHER_FIX_INTER_CR3_GC FIX_INTER_PAE_CR3
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VMMSwitcher/PAEand32Bit.mac"
+
diff --git a/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac b/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
new file mode 100644
index 00000000..de506474
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
@@ -0,0 +1,1148 @@
+; $Id: PAEand32Bit.mac $
+;; @file
+; VMM - World Switchers, template for PAE and 32-Bit.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;%define DEBUG_STUFF 1
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/apic.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/vmm/stam.mac"
+%include "VBox/vmm/vm.mac"
+%include "VBox/err.mac"
+%include "CPUMInternal.mac"
+%include "VMMSwitcher.mac"
+
+%undef NEED_ID
+%ifdef NEED_PAE_ON_32BIT_HOST
+%define NEED_ID
+%endif
+%ifdef NEED_32BIT_ON_PAE_HOST
+%define NEED_ID
+%endif
+
+
+
+;
+; Start the fixup records
+; We collect the fixups in the .data section as we go along
+; It is therefore VITAL that no-one is using the .data section
+; for anything else between 'Start' and 'End'.
+;
+BEGINDATA
+GLOBALNAME Fixups
+
+
+
+BEGINCODE
+GLOBALNAME Start
+
+;;
+; The C interface.
+;
+BEGINPROC vmmR0ToRawMode
+
+%ifdef DEBUG_STUFF
+ COM_S_NEWLINE
+ COM_S_CHAR '^'
+%endif
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_START edx
+%endif
+
+ ;
+ ; Call worker.
+ ;
+ FIXUP FIX_HC_CPUM_OFF, 1, 0
+ mov edx, 0ffffffffh
+ push cs ; allow for far return and restore cs correctly.
+ call NAME(vmmR0ToRawModeAsm)
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Restore blocked Local APIC NMI vectors
+ ; Do this here to ensure the host CS is already restored
+ mov ecx, [edx + CPUMCPU.fApicDisVectors]
+ test ecx, ecx
+ jz gth_apic_done
+ cmp byte [edx + CPUMCPU.fX2Apic], 1
+ je gth_x2apic
+
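+    ; fApicDisVectors bit layout (must match the save path in vmmR0ToRawModeAsm):
+    ;   bit 0 = LINT0, bit 1 = LINT1, bit 2 = PC, bit 3 = THMR, bit 4 = CMCI.
+    ; The shr/jnc chains below simply walk these bits in that order.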
+ ; Legacy xAPIC mode:
+ mov edx, [edx + CPUMCPU.pvApicBase]
+ shr ecx, 1
+ jnc gth_nolint0
+ and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
+gth_nolint0:
+ shr ecx, 1
+ jnc gth_nolint1
+ and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
+gth_nolint1:
+ shr ecx, 1
+ jnc gth_nopc
+ and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
+gth_nopc:
+ shr ecx, 1
+ jnc gth_notherm
+ and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
+gth_notherm:
+ shr ecx, 1
+ jnc gth_nocmci
+ and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
+gth_nocmci:
+ jmp gth_apic_done
+
+ ; x2APIC mode:
+gth_x2apic:
+ push eax ; save eax
+ push ebx ; save it for fApicDisVectors
+ push edx ; save edx just in case.
+ mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
+ shr ebx, 1
+ jnc gth_x2_nolint0
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nolint0:
+ shr ebx, 1
+ jnc gth_x2_nolint1
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nolint1:
+ shr ebx, 1
+ jnc gth_x2_nopc
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nopc:
+ shr ebx, 1
+ jnc gth_x2_notherm
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_notherm:
+ shr ebx, 1
+ jnc gth_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ and eax, ~APIC_REG_LVT_MASKED
+ wrmsr
+gth_x2_nocmci:
+ pop edx
+ pop ebx
+ pop eax
+
+gth_apic_done:
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+
+%ifdef VBOX_WITH_STATISTICS
+ ;
+ ; Switcher stats.
+ ;
+ FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_STOP edx
+%endif
+
+ ret
+ENDPROC vmmR0ToRawMode
+
+
+
+; *****************************************************************************
+; vmmR0ToRawModeAsm
+;
+; Phase one of the switch from host to guest context (host MMU context)
+;
+; INPUT:
+; - edx virtual address of CPUM structure (valid in host context)
+;
+; USES/DESTROYS:
+; - eax, ecx, edx
+;
+; ASSUMPTION:
+; - current CS and DS selectors are wide open
+;
+; *****************************************************************************
+ALIGNCODE(16)
+BEGINPROC vmmR0ToRawModeAsm
+ ;;
+ ;; Save CPU host context
+ ;; Skip eax, edx and ecx as these are not preserved over calls.
+ ;;
+ CPUMCPU_FROM_CPUM(edx)
+ ; general registers.
+ mov [edx + CPUMCPU.Host.ebx], ebx
+ mov [edx + CPUMCPU.Host.edi], edi
+ mov [edx + CPUMCPU.Host.esi], esi
+ mov [edx + CPUMCPU.Host.esp], esp
+ mov [edx + CPUMCPU.Host.ebp], ebp
+ ; selectors.
+ mov [edx + CPUMCPU.Host.ds], ds
+ mov [edx + CPUMCPU.Host.es], es
+ mov [edx + CPUMCPU.Host.fs], fs
+ mov [edx + CPUMCPU.Host.gs], gs
+ mov [edx + CPUMCPU.Host.ss], ss
+ ; special registers.
+ sldt [edx + CPUMCPU.Host.ldtr]
+ sidt [edx + CPUMCPU.Host.idtr]
+ sgdt [edx + CPUMCPU.Host.gdtr]
+ str [edx + CPUMCPU.Host.tr]
+ ; flags
+ pushfd
+ pop dword [edx + CPUMCPU.Host.eflags]
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ ; Block Local APIC NMI vectors
+ cmp byte [edx + CPUMCPU.fX2Apic], 1
+ je htg_x2apic
+
+ ; Legacy xAPIC mode. No write completion required when writing to the
+ ; LVT registers as we have mapped the APIC page non-cacheable and the
+ ; MMIO is CPU-local.
+ mov ebx, [edx + CPUMCPU.pvApicBase]
+ or ebx, ebx
+ jz htg_apic_done
+ xor edi, edi ; fApicDisVectors
+
+ mov eax, [ebx + APIC_REG_LVT_LINT0]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_LINT0], eax
+htg_nolint0:
+ mov eax, [ebx + APIC_REG_LVT_LINT1]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_LINT1], eax
+htg_nolint1:
+ mov eax, [ebx + APIC_REG_LVT_PC]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_PC], eax
+htg_nopc:
+ mov eax, [ebx + APIC_REG_VERSION]
+ shr eax, 16
+ cmp al, 5
+ jb htg_notherm
+ je htg_nocmci
+ mov eax, [ebx + APIC_REG_LVT_CMCI]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_CMCI], eax
+htg_nocmci:
+ mov eax, [ebx + APIC_REG_LVT_THMR]
+ mov ecx, eax
+ and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ecx, APIC_REG_LVT_MODE_NMI
+ jne htg_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ mov [ebx + APIC_REG_LVT_THMR], eax
+htg_notherm:
+ mov [edx + CPUMCPU.fApicDisVectors], edi
+ jmp htg_apic_done
+
+ ; x2APIC mode:
+htg_x2apic:
+ mov esi, edx ; Save edx.
+ xor edi, edi ; fApicDisVectors
+
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint0
+ or edi, 0x01
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint0:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nolint1
+ or edi, 0x02
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nolint1:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nopc
+ or edi, 0x04
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nopc:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+ rdmsr
+ shr eax, 16
+ cmp al, 5
+ jb htg_x2_notherm
+ je htg_x2_nocmci
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_nocmci
+ or edi, 0x10
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_nocmci:
+ mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+ rdmsr
+ mov ebx, eax
+ and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+ cmp ebx, APIC_REG_LVT_MODE_NMI
+ jne htg_x2_notherm
+ or edi, 0x08
+ or eax, APIC_REG_LVT_MASKED
+ wrmsr
+htg_x2_notherm:
+ mov edx, esi ; Restore edx.
+ mov [edx + CPUMCPU.fApicDisVectors], edi
+
+htg_apic_done:
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+
+ FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
+ ; save MSR_IA32_SYSENTER_CS register.
+ mov ecx, MSR_IA32_SYSENTER_CS
+ mov ebx, edx ; save edx
+ rdmsr ; edx:eax <- MSR[ecx]
+ mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
+ mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
+ xor eax, eax ; load 0:0 to cause #GP upon sysenter
+ xor edx, edx
+ wrmsr
+ xchg ebx, edx ; restore edx
+ jmp short htg_no_sysenter
+
+ALIGNCODE(16)
+htg_no_sysenter:
+
+ FIXUP FIX_NO_SYSCALL_JMP, 0, htg_no_syscall - NAME(Start) ; this will insert a jmp htg_no_syscall if host doesn't use syscall.
+ ; clear MSR_K6_EFER_SCE.
+ mov ebx, edx ; save edx
+ mov ecx, MSR_K6_EFER
+ rdmsr ; edx:eax <- MSR[ecx]
+ and eax, ~MSR_K6_EFER_SCE
+ wrmsr
+ mov edx, ebx ; restore edx
+ jmp short htg_no_syscall
+
+ALIGNCODE(16)
+htg_no_syscall:
+
+ ;; handle use flags.
+ mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
+ and esi, ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST) ; Clear CPUM_USED_* flags.
+ mov [edx + CPUMCPU.fUseFlags], esi
+
+ ; debug registers.
+ test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
+ jnz htg_debug_regs_save_dr7and6
+htg_debug_regs_no:
+
+ ; control registers.
+ mov eax, cr0
+ mov [edx + CPUMCPU.Host.cr0], eax
+    ;mov eax, cr2                      ; assume the host OS doesn't stuff things in cr2. (safe)
+ ;mov [edx + CPUMCPU.Host.cr2], eax
+ mov eax, cr3
+ mov [edx + CPUMCPU.Host.cr3], eax
+ mov eax, cr4
+ mov [edx + CPUMCPU.Host.cr4], eax
+
+ ;;
+ ;; Start switching to VMM context.
+ ;;
+
+ ;
+ ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
+ ; Also disable WP. (eax==cr4 now)
+ ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
+ ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
+ ;
+ and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
+ mov ecx, [edx + CPUMCPU.Guest.cr4]
+ ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
+ ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
+ ; simplify this operation a bit (and improve locality of the data).
+
+ ;
+ ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
+ ; FXSAVE and XSAVE support on the host CPU
+ ;
+ CPUM_FROM_CPUMCPU(edx)
+ and ecx, [edx + CPUM.CR4.AndMask]
+ or eax, ecx
+ or eax, [edx + CPUM.CR4.OrMask]
+ mov cr4, eax
+
+ CPUMCPU_FROM_CPUM(edx)
+ mov eax, [edx + CPUMCPU.Guest.cr0]
+ and eax, X86_CR0_EM
+ or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
+ mov cr0, eax
+
+ ; Load new gdt so we can do far jump to guest code after cr3 reload.
+ lgdt [edx + CPUMCPU.Hyper.gdtr]
+ DEBUG_CHAR('1') ; trashes esi
+
+ ; Store the hypervisor cr3 for later loading
+ mov ebp, [edx + CPUMCPU.Hyper.cr3]
+
+ ;;
+ ;; Load Intermediate memory context.
+ ;;
+ FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
+ mov eax, 0ffffffffh
+ mov cr3, eax
+ DEBUG_CHAR('2') ; trashes esi
+
+%ifdef NEED_ID
+ ;;
+ ;; Jump to identity mapped location
+ ;;
+ FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
+ jmp near NAME(IDEnterTarget)
+
+ ; We're now on identity mapped pages!
+ALIGNCODE(16)
+GLOBALNAME IDEnterTarget
+ DEBUG_CHAR('3')
+ mov edx, cr4
+%ifdef NEED_PAE_ON_32BIT_HOST
+ or edx, X86_CR4_PAE
+%else
+ and edx, ~X86_CR4_PAE
+%endif
+ mov eax, cr0
+ and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
+ mov cr0, eax
+ DEBUG_CHAR('4')
+ mov cr4, edx
+ FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
+ mov edx, 0ffffffffh
+ mov cr3, edx
+ or eax, X86_CR0_PG
+ DEBUG_CHAR('5')
+ mov cr0, eax
+ DEBUG_CHAR('6')
+%endif
+
+ ;;
+ ;; Jump to guest code mapping of the code and load the Hypervisor CS.
+ ;;
+ FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
+ jmp 0fff8h:0deadfaceh
+
+
+ ;;
+ ;; When we arrive at this label we're at the
+ ;; guest code mapping of the switching code.
+ ;;
+ALIGNCODE(16)
+GLOBALNAME FarJmpGCTarget
+ DEBUG_CHAR('-')
+ ; load final cr3 and do far jump to load cs.
+ mov cr3, ebp ; ebp set above
+ DEBUG_CHAR('0')
+
+ ;;
+ ;; We're in VMM MMU context and VMM CS is loaded.
+ ;; Setup the rest of the VMM state.
+ ;;
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+ ; Activate guest IDT
+ DEBUG_CHAR('1')
+ lidt [edx + CPUMCPU.Hyper.idtr]
+ ; Load selectors
+ DEBUG_CHAR('2')
+ FIXUP FIX_HYPER_DS, 1
+ mov eax, 0ffffh
+ mov ds, eax
+ mov es, eax
+ xor eax, eax
+ mov gs, eax
+ mov fs, eax
+
+ ; Setup stack.
+ DEBUG_CHAR('3')
+ mov eax, [edx + CPUMCPU.Hyper.ss.Sel]
+ mov ss, ax
+ mov esp, [edx + CPUMCPU.Hyper.esp]
+
+ ; Restore TSS selector; must mark it as not busy before using ltr (!)
+ DEBUG_CHAR('4')
+ FIXUP FIX_GC_TSS_GDTE_DW2, 2
+ and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
+ DEBUG_CHAR('5')
+ ltr word [edx + CPUMCPU.Hyper.tr.Sel]
+ DEBUG_CHAR('6')
+
+ ; Activate the ldt (now we can safely crash).
+ lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
+ DEBUG_CHAR('7')
+
+ ;; use flags.
+ mov esi, [edx + CPUMCPU.fUseFlags]
+
+ ; debug registers
+ test esi, CPUM_USE_DEBUG_REGS_HYPER
+ jnz htg_debug_regs_guest
+htg_debug_regs_guest_done:
+ DEBUG_CHAR('9')
+
+%ifdef VBOX_WITH_NMI
+ ;
+ ; Setup K7 NMI.
+ ;
+ mov esi, edx
+ ; clear all PerfEvtSeln registers
+ xor eax, eax
+ xor edx, edx
+ mov ecx, MSR_K7_PERFCTR0
+ wrmsr
+ mov ecx, MSR_K7_PERFCTR1
+ wrmsr
+ mov ecx, MSR_K7_PERFCTR2
+ wrmsr
+ mov ecx, MSR_K7_PERFCTR3
+ wrmsr
+
+ mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
+ mov ecx, MSR_K7_EVNTSEL0
+ wrmsr
+ mov eax, 02329B000h
+ mov edx, 0fffffffeh ; -1.6GHz * 5
+ mov ecx, MSR_K7_PERFCTR0
+ wrmsr
+
+ FIXUP FIX_GC_APIC_BASE_32BIT, 1
+ mov eax, 0f0f0f0f0h
+ add eax, 0340h ; APIC_LVTPC
+ mov dword [eax], 0400h ; APIC_DM_NMI
+
+ xor edx, edx
+ mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
+ mov ecx, MSR_K7_EVNTSEL0
+ wrmsr
+
+ mov edx, esi
+%endif
+
+ ; General registers (sans edx).
+ mov eax, [edx + CPUMCPU.Hyper.eax]
+ mov ebx, [edx + CPUMCPU.Hyper.ebx]
+ mov ecx, [edx + CPUMCPU.Hyper.ecx]
+ mov ebp, [edx + CPUMCPU.Hyper.ebp]
+ mov esi, [edx + CPUMCPU.Hyper.esi]
+ mov edi, [edx + CPUMCPU.Hyper.edi]
+ DEBUG_S_CHAR('!')
+
+ ;;
+ ;; Return to the VMM code which either called the switcher or
+ ;; the code set up to run by HC.
+ ;;
+ push dword [edx + CPUMCPU.Hyper.eflags]
+ push cs
+ push dword [edx + CPUMCPU.Hyper.eip]
+ mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
+
+%ifdef DEBUG_STUFF
+ COM_S_PRINT ';eip='
+ push eax
+ mov eax, [esp + 8]
+ COM_S_DWORD_REG eax
+ pop eax
+ COM_S_CHAR ';'
+%endif
+%ifdef VBOX_WITH_STATISTICS
+ push edx
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_STOP edx
+ pop edx
+%endif
+
+ iret ; Use iret to make debugging and TF/RF work.
+
+;;
+; Detour for saving the host DR7 and DR6.
+; esi and edx must be preserved.
+htg_debug_regs_save_dr7and6:
+ DEBUG_S_CHAR('s')
+ mov eax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
+ mov [edx + CPUMCPU.Host.dr7], eax
+ xor eax, eax ; clear everything. (bit 12? is read as 1...)
+ mov dr7, eax
+ mov eax, dr6 ; just in case we save the state register too.
+ mov [edx + CPUMCPU.Host.dr6], eax
+ jmp htg_debug_regs_no
+
+;;
+; Detour for saving host DR0-3 and loading hypervisor debug registers.
+; esi and edx must be preserved.
+htg_debug_regs_guest:
+ DEBUG_S_CHAR('D')
+ DEBUG_S_CHAR('R')
+ DEBUG_S_CHAR('x')
+ ; save host DR0-3.
+ mov eax, dr0
+ mov [edx + CPUMCPU.Host.dr0], eax
+ mov ebx, dr1
+ mov [edx + CPUMCPU.Host.dr1], ebx
+ mov ecx, dr2
+ mov [edx + CPUMCPU.Host.dr2], ecx
+ mov eax, dr3
+ mov [edx + CPUMCPU.Host.dr3], eax
+ or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
+
+ ; load hyper DR0-7
+ mov ebx, [edx + CPUMCPU.Hyper.dr]
+ mov dr0, ebx
+ mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
+ mov dr1, ecx
+ mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
+ mov dr2, eax
+ mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
+ mov dr3, ebx
+ mov ecx, X86_DR6_INIT_VAL
+ mov dr6, ecx
+ mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
+ mov dr7, eax
+ or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+ jmp htg_debug_regs_guest_done
+
+ENDPROC vmmR0ToRawModeAsm
+
+
+;;
+; Trampoline for doing a call when starting the hypervisor execution.
+;
+; Push any arguments to the routine.
+; Push the argument frame size (cArg * 4).
+; Push the call target (_cdecl convention).
+; Push the address of this routine.
+;
+;
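+; The pops at the start of the routine below assume the stack looks like
+; this on entry:
+;   [esp]       the call target address
+;   [esp + 4]   the argument frame size in bytes (cArg * 4)
+;   [esp + 8]   the arguments themselves
+;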
+ALIGNCODE(16)
+BEGINPROC vmmRCCallTrampoline
+%ifdef DEBUG_STUFF
+ COM_S_CHAR 'c'
+ COM_S_CHAR 't'
+ COM_S_CHAR '!'
+%endif
+
+ ; call routine
+ pop eax ; call address
+ pop edi ; argument count.
+%ifdef DEBUG_STUFF
+ COM_S_PRINT ';eax='
+ COM_S_DWORD_REG eax
+ COM_S_CHAR ';'
+%endif
+ call eax ; do call
+ add esp, edi ; cleanup stack
+
+ ; return to the host context.
+%ifdef DEBUG_STUFF
+ COM_S_CHAR '`'
+%endif
+.to_host_again:
+ call NAME(vmmRCToHostAsm)
+ mov eax, VERR_VMM_SWITCHER_IPE_1
+ jmp .to_host_again
+ENDPROC vmmRCCallTrampoline
+
+
+
+;;
+; The C interface.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHost
+%ifdef DEBUG_STUFF
+ push esi
+ COM_NEWLINE
+ DEBUG_CHAR('b')
+ DEBUG_CHAR('a')
+ DEBUG_CHAR('c')
+ DEBUG_CHAR('k')
+ DEBUG_CHAR('!')
+ COM_NEWLINE
+ pop esi
+%endif
+ mov eax, [esp + 4]
+ jmp NAME(vmmRCToHostAsm)
+ENDPROC vmmRCToHost
+
+
+;;
+; vmmRCToHostAsmNoReturn
+;
+; This is an entry point used by TRPM when dealing with raw-mode traps,
+; i.e. traps in the hypervisor code. This will not return and saves no
+; state, because the caller has already saved the state.
+;
+; @param eax Return code.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsmNoReturn
+ DEBUG_S_CHAR('%')
+
+%ifdef VBOX_WITH_STATISTICS
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_STOP edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM32_PROFILE_ADV_START edx
+%endif
+
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+
+ jmp vmmRCToHostAsm_SaveNoGeneralRegs
+ENDPROC vmmRCToHostAsmNoReturn
+
+
+;;
+; vmmRCToHostAsm
+;
+; This is an entry point used by TRPM to return to host context when an
+; interrupt occurred or a guest trap needs handling in host context. It
+; is also used by the C interface above.
+;
+; The hypervisor context is saved, and execution will return to the caller
+; in host context if it so desires.
+;
+; @param eax Return code.
+; @uses eax, edx, ecx (or it may use them in the future)
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsm
+ DEBUG_S_CHAR('%')
+ push edx
+
+%ifdef VBOX_WITH_STATISTICS
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_STOP edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_START edx
+
+ FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+ mov edx, 0ffffffffh
+ STAM_PROFILE_ADV_START edx
+%endif
+
+ ;
+ ; Load the CPUMCPU pointer.
+ ;
+ FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+ mov edx, 0ffffffffh
+
+ ; Save register context.
+ pop dword [edx + CPUMCPU.Hyper.edx]
+ pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
+ mov dword [edx + CPUMCPU.Hyper.esp], esp
+ mov dword [edx + CPUMCPU.Hyper.eax], eax
+ mov dword [edx + CPUMCPU.Hyper.ebx], ebx
+ mov dword [edx + CPUMCPU.Hyper.ecx], ecx
+ mov dword [edx + CPUMCPU.Hyper.esi], esi
+ mov dword [edx + CPUMCPU.Hyper.edi], edi
+ mov dword [edx + CPUMCPU.Hyper.ebp], ebp
+
+ ; special registers which may change.
+vmmRCToHostAsm_SaveNoGeneralRegs:
+ mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
+ ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
+ sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
+
+ ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
+ ; The FPU context is saved/restored in the host restore path further down, before CR0 is restored.
+
+ ; Disable debug registers if active so they cannot trigger while switching.
+ test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+ jz .gth_disabled_dr7
+ mov eax, X86_DR7_INIT_VAL
+ mov dr7, eax
+.gth_disabled_dr7:
+
+%ifdef VBOX_WITH_NMI
+ ;
+ ; Disarm K7 NMI.
+ ;
+ mov esi, edx
+
+ xor edx, edx
+ xor eax, eax
+ mov ecx, MSR_K7_EVNTSEL0
+ wrmsr
+
+ mov edx, esi
+%endif
+
+
+ ;;
+ ;; Load Intermediate memory context.
+ ;;
+ mov ecx, [edx + CPUMCPU.Host.cr3]
+ FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
+ mov eax, 0ffffffffh
+ mov cr3, eax
+ DEBUG_CHAR('?')
+
+ ;; We're now in intermediate memory context!
+%ifdef NEED_ID
+ ;;
+ ;; Jump to identity mapped location
+ ;;
+ FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
+ jmp near NAME(IDExitTarget)
+
+ ; We're now on identity mapped pages!
+ALIGNCODE(16)
+GLOBALNAME IDExitTarget
+ DEBUG_CHAR('1')
+ mov edx, cr4
+%ifdef NEED_PAE_ON_32BIT_HOST
+ and edx, ~X86_CR4_PAE
+%else
+ or edx, X86_CR4_PAE
+%endif
+ mov eax, cr0
+ and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
+ mov cr0, eax
+ DEBUG_CHAR('2')
+ mov cr4, edx
+ FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
+ mov edx, 0ffffffffh
+ mov cr3, edx
+ or eax, X86_CR0_PG
+ DEBUG_CHAR('3')
+ mov cr0, eax
+ DEBUG_CHAR('4')
+
+ ;;
+ ;; Jump to HC mapping.
+ ;;
+ FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
+ jmp near NAME(HCExitTarget)
+%else
+ ;;
+ ;; Jump to HC mapping.
+ ;;
+ FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
+ jmp near NAME(HCExitTarget)
+%endif
+
+
+ ;
+ ; When we arrive here we're at the host context
+ ; mapping of the switcher code.
+ ;
+ALIGNCODE(16)
+GLOBALNAME HCExitTarget
+ DEBUG_CHAR('9')
+ ; load final cr3
+ mov cr3, ecx
+ DEBUG_CHAR('@')
+
+
+ ;;
+ ;; Restore Host context.
+ ;;
+ ; Load CPUM pointer into edx
+ FIXUP FIX_HC_CPUM_OFF, 1, 0
+ mov edx, 0ffffffffh
+ CPUMCPU_FROM_CPUM(edx)
+ ; activate host gdt and idt
+ lgdt [edx + CPUMCPU.Host.gdtr]
+ DEBUG_CHAR('0')
+ lidt [edx + CPUMCPU.Host.idtr]
+ DEBUG_CHAR('1')
+ ; Restore TSS selector; must mark it as not busy before using ltr (!)
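+ ; (ltr raises #GP if the referenced TSS descriptor is already marked busy,
+ ; so the busy bit, bit 9 of the descriptor's second dword, is cleared in
+ ; the GDT entry first.)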
+%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
+ movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
+ and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
+ add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
+ and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
+ ltr word [edx + CPUMCPU.Host.tr]
+%else
+ movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
+ and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
+ add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
+ mov ecx, [eax + 4] ; ecx <- 2nd descriptor dword
+ mov ebx, ecx ; save original value
+ and ecx, ~0200h ; clear busy flag (2nd type2 bit)
+ mov [eax + 4], ecx ; not using xchg here is paranoia..
+ ltr word [edx + CPUMCPU.Host.tr]
+ xchg [eax + 4], ebx ; using xchg is paranoia too...
+%endif
+ ; activate ldt
+ DEBUG_CHAR('2')
+ lldt [edx + CPUMCPU.Host.ldtr]
+ ; Restore segment registers
+ mov eax, [edx + CPUMCPU.Host.ds]
+ mov ds, eax
+ mov eax, [edx + CPUMCPU.Host.es]
+ mov es, eax
+ mov eax, [edx + CPUMCPU.Host.fs]
+ mov fs, eax
+ mov eax, [edx + CPUMCPU.Host.gs]
+ mov gs, eax
+ ; restore stack
+ lss esp, [edx + CPUMCPU.Host.esp]
+
+
+ FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
+ ; restore MSR_IA32_SYSENTER_CS register.
+ mov ecx, MSR_IA32_SYSENTER_CS
+ mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
+ mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
+ xchg edx, ebx ; save/load edx
+ wrmsr ; MSR[ecx] <- edx:eax
+ xchg edx, ebx ; restore edx
+ jmp short gth_sysenter_no
+
+ALIGNCODE(16)
+gth_sysenter_no:
+
+ FIXUP FIX_NO_SYSCALL_JMP, 0, gth_syscall_no - NAME(Start) ; this will insert a jmp gth_syscall_no if host doesn't use syscall.
+ ; set MSR_K6_EFER_SCE.
+ mov ebx, edx ; save edx
+ mov ecx, MSR_K6_EFER
+ rdmsr
+ or eax, MSR_K6_EFER_SCE
+ wrmsr
+ mov edx, ebx ; restore edx
+ jmp short gth_syscall_no
+
+ALIGNCODE(16)
+gth_syscall_no:
+
+ ; Restore the FPU state if the guest and/or host state was saved earlier.
+ ; Using fxrstor/xrstor should ensure that we're not causing an unwanted exception on the host.
+ mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
+ test esi, (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
+ jz near gth_fpu_no
+ mov ecx, cr0
+ and ecx, ~(X86_CR0_TS | X86_CR0_EM)
+ mov cr0, ecx
+
+ mov ebx, edx ; save edx
+
+ test esi, CPUM_USED_FPU_GUEST
+ jz gth_fpu_host
+
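+ ; If the guest has an XSAVE state component mask, edx:eax selects the
+ ; components and xsave writes them to the state area at [ecx]; otherwise
+ ; plain fxsave is used.  The host restore below follows the same pattern
+ ; with xrstor/fxrstor.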
+ mov eax, [ebx + CPUMCPU.Guest.fXStateMask]
+ mov ecx, [ebx + CPUMCPU.Guest.pXStateR0]
+ test eax, eax
+ jz gth_fpu_guest_fxsave
+ mov edx, [ebx + CPUMCPU.Guest.fXStateMask + 4]
+ xsave [ecx]
+ jmp gth_fpu_host
+gth_fpu_guest_fxsave:
+ fxsave [ecx]
+
+gth_fpu_host:
+ mov eax, [ebx + CPUMCPU.Host.fXStateMask]
+ mov ecx, [ebx + CPUMCPU.Host.pXStateR0]
+ test eax, eax
+ jz gth_fpu_host_fxrstor
+ mov edx, [ebx + CPUMCPU.Host.fXStateMask + 4]
+ xrstor [ecx]
+ jmp gth_fpu_done
+gth_fpu_host_fxrstor:
+ fxrstor [ecx]
+
+gth_fpu_done:
+ mov edx, ebx ; restore edx
+gth_fpu_no:
+
+ ; Control registers.
+ ; Would've liked to have these higher up in case of crashes, but
+ ; the fpu stuff must be done before we restore cr0.
+ mov ecx, [edx + CPUMCPU.Host.cr4]
+ mov cr4, ecx
+ mov ecx, [edx + CPUMCPU.Host.cr0]
+ mov cr0, ecx
+ ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
+ ;mov cr2, ecx
+
+ ; restore debug registers (if modified) (esi must still be fUseFlags!)
+ ; (must be done after cr4 reload because of the debug extension.)
+ test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
+ jnz gth_debug_regs_restore
+gth_debug_regs_done:
+
+ ; restore general registers.
+ mov eax, edi ; restore return code. eax = return code !!
+ mov edi, [edx + CPUMCPU.Host.edi]
+ mov esi, [edx + CPUMCPU.Host.esi]
+ mov ebx, [edx + CPUMCPU.Host.ebx]
+ mov ebp, [edx + CPUMCPU.Host.ebp]
+ push dword [edx + CPUMCPU.Host.eflags]
+ popfd
+
+%ifdef DEBUG_STUFF
+; COM_S_CHAR '4'
+%endif
+ retf
+
+;;
+; Detour for restoring the host debug registers.
+; edx and edi must be preserved.
+gth_debug_regs_restore:
+ DEBUG_S_CHAR('d')
+ mov eax, dr7 ; Some DR7 paranoia first...
+ mov ecx, X86_DR7_INIT_VAL
+ cmp eax, ecx
+ je .gth_debug_skip_dr7_disabling
+ mov dr7, ecx
+.gth_debug_skip_dr7_disabling:
+ test esi, CPUM_USED_DEBUG_REGS_HOST
+ jz .gth_debug_regs_dr7
+
+ DEBUG_S_CHAR('r')
+ mov eax, [edx + CPUMCPU.Host.dr0]
+ mov dr0, eax
+ mov ebx, [edx + CPUMCPU.Host.dr1]
+ mov dr1, ebx
+ mov ecx, [edx + CPUMCPU.Host.dr2]
+ mov dr2, ecx
+ mov eax, [edx + CPUMCPU.Host.dr3]
+ mov dr3, eax
+.gth_debug_regs_dr7:
+ mov ebx, [edx + CPUMCPU.Host.dr6]
+ mov dr6, ebx
+ mov ecx, [edx + CPUMCPU.Host.dr7]
+ mov dr7, ecx
+
+ and dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
+ jmp gth_debug_regs_done
+
+ENDPROC vmmRCToHostAsm
+
+
+GLOBALNAME End
+;
+; The description string (in the text section).
+;
+NAME(Description):
+ db SWITCHER_DESCRIPTION
+ db 0
+
+extern NAME(Relocate)
+
+;
+; End the fixup records.
+;
+BEGINDATA
+ db FIX_THE_END ; final entry.
+GLOBALNAME FixupsEnd
+
+;;
+; The switcher definition structure.
+ALIGNDATA(16)
+GLOBALNAME Def
+ istruc VMMSWITCHERDEF
+ at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
+ at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
+ at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
+ at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
+ at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
+ at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
+ at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
+ at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
+ ; disasm help
+ at VMMSWITCHERDEF.offHCCode0, dd 0
+%ifdef NEED_ID
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
+%else
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(FarJmpGCTarget) - NAME(Start)
+%endif
+ at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
+%ifdef NEED_ID
+ at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode0, dd NAME(FarJmpGCTarget) - NAME(IDEnterTarget)
+ at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
+ at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
+%else
+ at VMMSWITCHERDEF.offIDCode0, dd 0
+ at VMMSWITCHERDEF.cbIDCode0, dd 0
+ at VMMSWITCHERDEF.offIDCode1, dd 0
+ at VMMSWITCHERDEF.cbIDCode1, dd 0
+%endif
+ at VMMSWITCHERDEF.offGCCode, dd NAME(FarJmpGCTarget) - NAME(Start)
+%ifdef NEED_ID
+ at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(FarJmpGCTarget)
+%else
+ at VMMSWITCHERDEF.cbGCCode, dd NAME(HCExitTarget) - NAME(FarJmpGCTarget)
+%endif
+
+ iend
+
diff --git a/src/VBox/VMM/VMMSwitcher/X86Stub.asm b/src/VBox/VMM/VMMSwitcher/X86Stub.asm
new file mode 100644
index 00000000..d4aa6cd4
--- /dev/null
+++ b/src/VBox/VMM/VMMSwitcher/X86Stub.asm
@@ -0,0 +1,110 @@
+; $Id: X86Stub.asm $
+;; @file
+; VMM - World Switchers, X86 Stub.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define NAME_OVERLOAD(name) vmmR3SwitcherX86Stub_ %+ name
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/err.mac"
+%include "VMMSwitcher.mac"
+
+
+BEGINCODE
+GLOBALNAME Start
+BITS 32
+
+BEGINPROC vmmR0ToRawMode
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmR0ToRawMode
+
+BEGINPROC vmmRCCallTrampoline
+.tight_loop:
+ int3
+ jmp .tight_loop
+ENDPROC vmmRCCallTrampoline
+
+BEGINPROC vmmRCToHost
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHost
+
+BEGINPROC vmmRCToHostAsmNoReturn
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHostAsmNoReturn
+
+BEGINPROC vmmRCToHostAsm
+ mov eax, VERR_VMM_SWITCHER_STUB
+ ret
+ENDPROC vmmRCToHostAsm
+
+GLOBALNAME End
+
+;
+; The description string (in the text section).
+;
+NAME(Description):
+ db "X86 Stub."
+ db 0
+
+
+;
+; Dummy fixups.
+;
+BEGINDATA
+GLOBALNAME Fixups
+ db FIX_THE_END ; final entry.
+GLOBALNAME FixupsEnd
+
+
+;;
+; The switcher definition structure.
+ALIGNDATA(16)
+GLOBALNAME Def
+ istruc VMMSWITCHERDEF
+ at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
+ at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
+ at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
+ at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF 0
+ at VMMSWITCHERDEF.enmType, dd VMMSWITCHER_X86_STUB
+ at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
+ at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
+ at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
+ at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
+ ; disasm help
+ at VMMSWITCHERDEF.offHCCode0, dd 0
+ at VMMSWITCHERDEF.cbHCCode0, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.offHCCode1, dd 0
+ at VMMSWITCHERDEF.cbHCCode1, dd 0
+ at VMMSWITCHERDEF.offIDCode0, dd 0
+ at VMMSWITCHERDEF.cbIDCode0, dd 0
+ at VMMSWITCHERDEF.offIDCode1, dd 0
+ at VMMSWITCHERDEF.cbIDCode1, dd 0
+ at VMMSWITCHERDEF.offGCCode, dd NAME(vmmRCCallTrampoline) - NAME(Start)
+ at VMMSWITCHERDEF.cbGCCode, dd NAME(End) - NAME(vmmRCCallTrampoline)
+
+ iend
+