author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
commit    f215e02bf85f68d3a6106c2a1f4f7f063f819064 (patch)
tree      6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/VMM/VMMR0/HMR0UtilA.asm
parent    Initial commit. (diff)
Adding upstream version 7.0.14-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/VMM/VMMR0/HMR0UtilA.asm')
-rw-r--r--  src/VBox/VMM/VMMR0/HMR0UtilA.asm | 469
1 file changed, 469 insertions(+), 0 deletions(-)
diff --git a/src/VBox/VMM/VMMR0/HMR0UtilA.asm b/src/VBox/VMM/VMMR0/HMR0UtilA.asm
new file mode 100644
index 00000000..6695e21a
--- /dev/null
+++ b/src/VBox/VMM/VMMR0/HMR0UtilA.asm
@@ -0,0 +1,469 @@
+; $Id: HMR0UtilA.asm $
+;; @file
+; HM - Ring-0 VMX & SVM Helpers.
+;
+
+;
+; Copyright (C) 2006-2023 Oracle and/or its affiliates.
+;
+; This file is part of VirtualBox base platform packages, as
+; available from https://www.virtualbox.org.
+;
+; This program is free software; you can redistribute it and/or
+; modify it under the terms of the GNU General Public License
+; as published by the Free Software Foundation, in version 3 of the
+; License.
+;
+; This program is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with this program; if not, see <https://www.gnu.org/licenses>.
+;
+; SPDX-License-Identifier: GPL-3.0-only
+;
+
+;*********************************************************************************************************************************
+;* Header Files *
+;*********************************************************************************************************************************
+%include "VBox/asmdefs.mac"
+%include "VBox/err.mac"
+%include "VBox/vmm/hm_vmx.mac"
+%include "iprt/x86.mac"
+
+
+
+BEGINCODE
+
+;;
+; Executes VMWRITE, 64-bit value.
+;
+; @returns VBox status code.
+; @param idxField x86: [esp + 04h] msc: rcx gcc: rdi VMCS field index.
+; @param u64Data x86: [esp + 08h] msc: rdx gcc: rsi VM field value.
+;
+ALIGNCODE(16)
+BEGINPROC VMXWriteVmcs64
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ xor rax, rax
+ vmwrite rdi, rsi
+ %else
+ and ecx, 0ffffffffh
+ xor rax, rax
+ vmwrite rcx, rdx
+ %endif
+%else ; RT_ARCH_X86
+ mov ecx, [esp + 4] ; idxField
+ lea edx, [esp + 8] ; &u64Data
+ vmwrite ecx, [edx] ; low dword
+ jz .done
+ jc .done
+ inc ecx
+ xor eax, eax
+ vmwrite ecx, [edx + 4] ; high dword
+.done:
+%endif ; RT_ARCH_X86
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_FIELD
+.the_end:
+ ret
+ENDPROC VMXWriteVmcs64
+
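+;
+; A minimal caller-side sketch for the helper above, assuming the
+; VMX_VMCS_GUEST_RIP field constant from VBox/vmm/hm_vmx.h and a VMCS that
+; was made current beforehand (see VMXLoadVmcs below):
+;
+;   int rc = VMXWriteVmcs64(VMX_VMCS_GUEST_RIP, uGuestRip);
+;   if (rc == VERR_VMX_INVALID_VMCS_PTR)    /* CF set: no valid current VMCS */
+;       return rc;
+;   if (rc == VERR_VMX_INVALID_VMCS_FIELD)  /* ZF set: unsupported field     */
+;       return rc;
+;   /* VINF_SUCCESS otherwise: eax/rax was zeroed before the VMWRITE. */
+;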
+
+;;
+; Executes VMREAD, 64-bit value.
+;
+; @returns VBox status code.
+; @param idxField VMCS index.
+; @param pData Where to store VM field value.
+;
+;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
+ALIGNCODE(16)
+BEGINPROC VMXReadVmcs64
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ xor rax, rax
+ vmread [rsi], rdi
+ %else
+ and ecx, 0ffffffffh
+ xor rax, rax
+ vmread [rdx], rcx
+ %endif
+%else ; RT_ARCH_X86
+ mov ecx, [esp + 4] ; idxField
+ mov edx, [esp + 8] ; pData
+ vmread [edx], ecx ; low dword
+ jz .done
+ jc .done
+ inc ecx
+ xor eax, eax
+ vmread [edx + 4], ecx ; high dword
+.done:
+%endif ; RT_ARCH_X86
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_FIELD
+.the_end:
+ ret
+ENDPROC VMXReadVmcs64
+
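+;
+; The matching read-side sketch, under the same assumptions as the write
+; example above:
+;
+;   uint64_t u64Rip;
+;   int rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Rip);
+;   if (RT_FAILURE(rc))   /* VERR_VMX_INVALID_VMCS_PTR or _VMCS_FIELD */
+;       return rc;
+;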
+
+;;
+; Executes VMREAD, 32-bit value.
+;
+; @returns VBox status code.
+; @param idxField VMCS index.
+; @param pu32Data Where to store VM field value.
+;
+;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
+ALIGNCODE(16)
+BEGINPROC VMXReadVmcs32
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ xor rax, rax
+ vmread r10, rdi
+ mov [rsi], r10d
+ %else
+ and ecx, 0ffffffffh
+ xor rax, rax
+ vmread r10, rcx
+ mov [rdx], r10d
+ %endif
+%else ; RT_ARCH_X86
+ mov ecx, [esp + 4] ; idxField
+ mov edx, [esp + 8] ; pu32Data
+ xor eax, eax
+ vmread [edx], ecx
+%endif ; RT_ARCH_X86
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_FIELD
+.the_end:
+ ret
+ENDPROC VMXReadVmcs32
+
+
+;;
+; Executes VMWRITE, 32-bit value.
+;
+; @returns VBox status code.
+; @param idxField VMCS index.
+; @param u32Data VM field value to write.
+;
+;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
+ALIGNCODE(16)
+BEGINPROC VMXWriteVmcs32
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ and esi, 0ffffffffh
+ xor rax, rax
+ vmwrite rdi, rsi
+ %else
+ and ecx, 0ffffffffh
+ and edx, 0ffffffffh
+ xor rax, rax
+ vmwrite rcx, rdx
+ %endif
+%else ; RT_ARCH_X86
+ mov ecx, [esp + 4] ; idxField
+ mov edx, [esp + 8] ; u32Data
+ xor eax, eax
+ vmwrite ecx, edx
+%endif ; RT_ARCH_X86
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_FIELD
+.the_end:
+ ret
+ENDPROC VMXWriteVmcs32
+
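+;
+; The 32-bit variants share the same error contract; a brief read-modify-write
+; sketch, assuming the VMX_VMCS32_CTRL_EXCEPTION_BITMAP field constant from
+; VBox/vmm/hm_vmx.h:
+;
+;   uint32_t fXcptBitmap;
+;   int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &fXcptBitmap);
+;   if (RT_SUCCESS(rc))
+;       rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
+;                           fXcptBitmap | RT_BIT_32(X86_XCPT_DB));
+;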
+
+;;
+; Executes VMXON.
+;
+; @returns VBox status code.
+; @param HCPhysVMXOn Physical address of VMXON structure.
+;
+;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
+BEGINPROC VMXEnable
+%ifdef RT_ARCH_AMD64
+ xor rax, rax
+ %ifdef ASM_CALL64_GCC
+ push rdi
+ %else
+ push rcx
+ %endif
+ vmxon [rsp]
+%else ; RT_ARCH_X86
+ xor eax, eax
+ vmxon [esp + 4]
+%endif ; RT_ARCH_X86
+ jnc .good
+ mov eax, VERR_VMX_INVALID_VMXON_PTR
+ jmp .the_end
+
+.good:
+ jnz .the_end
+ mov eax, VERR_VMX_VMXON_FAILED
+
+.the_end:
+%ifdef RT_ARCH_AMD64
+ add rsp, 8
+%endif
+ ret
+ENDPROC VMXEnable
+
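+;
+; VMXON has prerequisites this helper does not establish; a sketch of the
+; expected call sequence, assuming the caller has allocated a page-aligned,
+; zeroed VMXON region (pvVmxonRegion / HCPhysVmxonRegion):
+;
+;   *(uint32_t *)pvVmxonRegion = (uint32_t)ASMRdMsr(MSR_IA32_VMX_BASIC); /* VMCS revision ID */
+;   ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);       /* CR4.VMXE must be set first */
+;   int rc = VMXEnable(HCPhysVmxonRegion);
+;   /* VERR_VMX_INVALID_VMXON_PTR: CF set, bad physical address. */
+;   /* VERR_VMX_VMXON_FAILED:      ZF set, VMXON itself refused. */
+;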
+
+;;
+; Executes VMXOFF.
+;
+;DECLASM(void) VMXDisable(void);
+BEGINPROC VMXDisable
+ vmxoff
+.the_end:
+ ret
+ENDPROC VMXDisable
+
+
+;;
+; Executes VMCLEAR.
+;
+; @returns VBox status code.
+; @param HCPhysVmcs Physical address of VM control structure.
+;
+;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
+ALIGNCODE(16)
+BEGINPROC VMXClearVmcs
+%ifdef RT_ARCH_AMD64
+ xor rax, rax
+ %ifdef ASM_CALL64_GCC
+ push rdi
+ %else
+ push rcx
+ %endif
+ vmclear [rsp]
+%else ; RT_ARCH_X86
+ xor eax, eax
+ vmclear [esp + 4]
+%endif ; RT_ARCH_X86
+ jnc .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+.the_end:
+%ifdef RT_ARCH_AMD64
+ add rsp, 8
+%endif
+ ret
+ENDPROC VMXClearVmcs
+
+
+;;
+; Executes VMPTRLD.
+;
+; @returns VBox status code.
+; @param HCPhysVmcs Physical address of VMCS structure.
+;
+;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
+ALIGNCODE(16)
+BEGINPROC VMXLoadVmcs
+%ifdef RT_ARCH_AMD64
+ xor rax, rax
+ %ifdef ASM_CALL64_GCC
+ push rdi
+ %else
+ push rcx
+ %endif
+ vmptrld [rsp]
+%else
+ xor eax, eax
+ vmptrld [esp + 4]
+%endif
+ jnc .the_end
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+.the_end:
+%ifdef RT_ARCH_AMD64
+ add rsp, 8
+%endif
+ ret
+ENDPROC VMXLoadVmcs
+
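+;
+; A VMCS is normally initialized and activated with a VMCLEAR/VMPTRLD pair;
+; a sketch, assuming HCPhysVmcs addresses a prepared VMCS region:
+;
+;   int rc = VMXClearVmcs(HCPhysVmcs);    /* flush it and mark it clear */
+;   if (RT_SUCCESS(rc))
+;       rc = VMXLoadVmcs(HCPhysVmcs);     /* make it the current VMCS   */
+;   /* Both return VERR_VMX_INVALID_VMCS_PTR on failure (CF set). */
+;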
+
+;;
+; Executes VMPTRST.
+;
+; @returns VBox status code.
+; @param pVMCS x86: [esp + 04h] msc: rcx gcc: rdi Address that will receive the current VMCS pointer.
+;
+;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
+BEGINPROC VMXGetCurrentVmcs
+%ifdef RT_OS_OS2
+ mov eax, VERR_NOT_SUPPORTED
+ ret
+%else
+ %ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ vmptrst qword [rdi]
+ %else
+ vmptrst qword [rcx]
+ %endif
+ %else
+ vmptrst qword [esp+04h]
+ %endif
+ xor eax, eax
+.the_end:
+ ret
+%endif
+ENDPROC VMXGetCurrentVmcs
+
+
+;;
+; Invalidates EPT translations using INVEPT.
+;
+; @returns VBox status code.
+; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
+; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
+;
+;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
+BEGINPROC VMXR0InvEPT
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ xor rax, rax
+; invept rdi, qword [rsi]
+ DB 0x66, 0x0F, 0x38, 0x80, 0x3E
+ %else
+ and ecx, 0ffffffffh
+ xor rax, rax
+; invept rcx, qword [rdx]
+ DB 0x66, 0x0F, 0x38, 0x80, 0xA
+ %endif
+%else
+ mov ecx, [esp + 4]
+ mov edx, [esp + 8]
+ xor eax, eax
+; invept ecx, qword [edx]
+ DB 0x66, 0x0F, 0x38, 0x80, 0xA
+%endif
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_INVALID_PARAMETER
+.the_end:
+ ret
+ENDPROC VMXR0InvEPT
+
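+;
+; The INVEPT above is emitted as raw DB bytes so the file still assembles
+; with toolchains that lack the mnemonic. The instruction dereferences a
+; two-qword descriptor; a single-context flush sketch, with the layout per
+; the Intel SDM and the flush-type value assumed from VBox/vmm/hm_vmx.h:
+;
+;   struct { uint64_t uEptPtr; uint64_t uReserved; } Desc = { uEptp, 0 };
+;   int rc = VMXR0InvEPT(VMXTLBFLUSHEPT_SINGLE_CONTEXT, (uint64_t *)&Desc);
+;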
+
+;;
+; Invalidates TLB entries tagged with a VPID using INVVPID.
+;
+; @returns VBox status code.
+; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
+; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
+;
+;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
+BEGINPROC VMXR0InvVPID
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ and edi, 0ffffffffh
+ xor rax, rax
+; invvpid rdi, qword [rsi]
+ DB 0x66, 0x0F, 0x38, 0x81, 0x3E
+ %else
+ and ecx, 0ffffffffh
+ xor rax, rax
+; invvpid rcx, qword [rdx]
+ DB 0x66, 0x0F, 0x38, 0x81, 0xA
+ %endif
+%else
+ mov ecx, [esp + 4]
+ mov edx, [esp + 8]
+ xor eax, eax
+; invvpid ecx, qword [edx]
+ DB 0x66, 0x0F, 0x38, 0x81, 0xA
+%endif
+ jnc .valid_vmcs
+ mov eax, VERR_VMX_INVALID_VMCS_PTR
+ ret
+.valid_vmcs:
+ jnz .the_end
+ mov eax, VERR_INVALID_PARAMETER
+.the_end:
+ ret
+ENDPROC VMXR0InvVPID
+
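+;
+; INVVPID uses the same raw-byte emission and a similar two-qword descriptor:
+; the VPID in the low 16 bits of the first qword, the linear address in the
+; second. An individual-address flush sketch (flush-type value assumed from
+; VBox/vmm/hm_vmx.h):
+;
+;   struct { uint64_t uVpid; uint64_t uLinearAddr; } Desc = { uVpid, GCPtrPage };
+;   int rc = VMXR0InvVPID(VMXTLBFLUSHVPID_INDIV_ADDR, (uint64_t *)&Desc);
+;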
+
+%if GC_ARCH_BITS == 64
+;;
+; Executes INVLPGA.
+;
+; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
+; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
+;
+;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
+BEGINPROC SVMR0InvlpgA
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ mov rax, rdi
+ mov rcx, rsi
+ %else
+ mov rax, rcx
+ mov rcx, rdx
+ %endif
+%else
+ mov eax, [esp + 4]
+ mov ecx, [esp + 0Ch]
+%endif
+ invlpga [xAX], ecx
+ ret
+ENDPROC SVMR0InvlpgA
+
+%else ; GC_ARCH_BITS != 64
+;;
+; Executes INVLPGA.
+;
+; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
+; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
+;
+;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
+BEGINPROC SVMR0InvlpgA
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+ movzx rax, edi
+ mov ecx, esi
+ %else
+ ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
+ ; "Perhaps unexpectedly, instructions that move or generate 32-bit register
+ ; values also set the upper 32 bits of the register to zero. Consequently
+ ; there is no need for an instruction movzlq."
+ mov eax, ecx
+ mov ecx, edx
+ %endif
+%else
+ mov eax, [esp + 4]
+ mov ecx, [esp + 8]
+%endif
+ invlpga [xAX], ecx
+ ret
+ENDPROC SVMR0InvlpgA
+
+%endif ; GC_ARCH_BITS != 64
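+
+;
+; Either variant is called identically from C; a sketch, assuming the guest
+; page address and ASID come from the VMCB the caller manages:
+;
+;   SVMR0InvlpgA(GCPtrPage, uAsid);   /* rAX = virtual page, ECX = ASID */
+;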
+